• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

JohnSnowLabs / spark-nlp / 4992350528

pending completion
4992350528

Pull #13797

github

GitHub
Merge 424c7ff18 into ef7906c5e
Pull Request #13797: SPARKNLP-835: ProtectedParam and ProtectedFeature

24 of 24 new or added lines in 6 files covered. (100.0%)

8643 of 13129 relevant lines covered (65.83%)

0.66 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

56.96
/src/main/scala/com/johnsnowlabs/nlp/annotators/ner/dl/NerDLModel.scala
1
/*
2
 * Copyright 2017-2022 John Snow Labs
3
 *
4
 * Licensed under the Apache License, Version 2.0 (the "License");
5
 * you may not use this file except in compliance with the License.
6
 * You may obtain a copy of the License at
7
 *
8
 *    http://www.apache.org/licenses/LICENSE-2.0
9
 *
10
 * Unless required by applicable law or agreed to in writing, software
11
 * distributed under the License is distributed on an "AS IS" BASIS,
12
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
 * See the License for the specific language governing permissions and
14
 * limitations under the License.
15
 */
16

17
package com.johnsnowlabs.nlp.annotators.ner.dl
18

19
import com.johnsnowlabs.ml.tensorflow._
20
import com.johnsnowlabs.nlp.AnnotatorType._
21
import com.johnsnowlabs.nlp._
22
import com.johnsnowlabs.nlp.annotators.common.Annotated.NerTaggedSentence
23
import com.johnsnowlabs.nlp.annotators.common._
24
import com.johnsnowlabs.nlp.annotators.ner.Verbose
25
import com.johnsnowlabs.nlp.pretrained.ResourceDownloader
26
import com.johnsnowlabs.nlp.serialization.StructFeature
27
import com.johnsnowlabs.storage.HasStorageRef
28
import org.apache.spark.broadcast.Broadcast
29
import org.apache.spark.ml.param.{BooleanParam, FloatParam, IntArrayParam, StringArrayParam}
30
import org.apache.spark.ml.util.Identifiable
31
import org.apache.spark.sql.{Dataset, SparkSession}
32

33
/** This Named Entity recognition annotator is a generic NER model based on Neural Networks.
34
  *
35
  * Neural Network architecture is Char CNNs - BiLSTM - CRF that achieves state-of-the-art in most
36
  * datasets.
37
  *
38
  * This is the instantiated model of the [[NerDLApproach]]. For training your own model, please
39
  * see the documentation of that class.
40
  *
41
  * Pretrained models can be loaded with `pretrained` of the companion object:
42
  * {{{
43
  * val nerModel = NerDLModel.pretrained()
44
  *   .setInputCols("sentence", "token", "embeddings")
45
  *   .setOutputCol("ner")
46
  * }}}
47
  * The default model is `"ner_dl"`, if no name is provided.
48
  *
49
  * For available pretrained models please see the
50
  * [[https://sparknlp.org/models?task=Named+Entity+Recognition Models Hub]]. Additionally,
51
  * pretrained pipelines are available for this module, see
52
  * [[https://sparknlp.org/docs/en/pipelines Pipelines]].
53
  *
54
  * Note that some pretrained models require specific types of embeddings, depending on which they
55
  * were trained on. For example, the default model `"ner_dl"` requires the
56
  * [[com.johnsnowlabs.nlp.embeddings.WordEmbeddingsModel WordEmbeddings]] `"glove_100d"`.
57
  *
58
  * For extended examples of usage, see the
59
  * [[https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/english/model-downloader/Create%20custom%20pipeline%20-%20NerDL.ipynb Examples]]
60
  * and the
61
  * [[https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/ner/dl/NerDLSpec.scala NerDLSpec]].
62
  *
63
  * ==Example==
64
  * {{{
65
  * import spark.implicits._
66
  * import com.johnsnowlabs.nlp.base.DocumentAssembler
67
  * import com.johnsnowlabs.nlp.annotators.Tokenizer
68
  * import com.johnsnowlabs.nlp.annotators.sbd.pragmatic.SentenceDetector
69
  * import com.johnsnowlabs.nlp.embeddings.WordEmbeddingsModel
70
  * import com.johnsnowlabs.nlp.annotators.ner.dl.NerDLModel
71
  * import org.apache.spark.ml.Pipeline
72
  *
73
  * // First extract the prerequisites for the NerDLModel
74
  * val documentAssembler = new DocumentAssembler()
75
  *   .setInputCol("text")
76
  *   .setOutputCol("document")
77
  *
78
  * val sentence = new SentenceDetector()
79
  *   .setInputCols("document")
80
  *   .setOutputCol("sentence")
81
  *
82
  * val tokenizer = new Tokenizer()
83
  *   .setInputCols("sentence")
84
  *   .setOutputCol("token")
85
  *
86
  * val embeddings = WordEmbeddingsModel.pretrained()
87
  *   .setInputCols("sentence", "token")
88
  *   .setOutputCol("bert")
89
  *
90
  * // Then NER can be extracted
91
  * val nerTagger = NerDLModel.pretrained()
92
  *   .setInputCols("sentence", "token", "bert")
93
  *   .setOutputCol("ner")
94
  *
95
  * val pipeline = new Pipeline().setStages(Array(
96
  *   documentAssembler,
97
  *   sentence,
98
  *   tokenizer,
99
  *   embeddings,
100
  *   nerTagger
101
  * ))
102
  *
103
  * val data = Seq("U.N. official Ekeus heads for Baghdad.").toDF("text")
104
  * val result = pipeline.fit(data).transform(data)
105
  *
106
  * result.select("ner.result").show(false)
107
  * +------------------------------------+
108
  * |result                              |
109
  * +------------------------------------+
110
  * |[B-ORG, O, O, B-PER, O, O, B-LOC, O]|
111
  * +------------------------------------+
112
  * }}}
113
  *
114
  * @see
115
  *   [[com.johnsnowlabs.nlp.annotators.ner.crf.NerCrfModel NerCrfModel]] for a generic CRF
116
  *   approach
117
  * @see
118
  *   [[com.johnsnowlabs.nlp.annotators.ner.NerConverter NerConverter]] to further process the
119
  *   results
120
  * @param uid
121
  *   required uid for storing annotator to disk
122
  * @groupname anno Annotator types
123
  * @groupdesc anno
124
  *   Required input and expected output annotator types
125
  * @groupname Ungrouped Members
126
  * @groupname param Parameters
127
  * @groupname setParam Parameter setters
128
  * @groupname getParam Parameter getters
129
  * @groupname Ungrouped Members
130
  * @groupprio param  1
131
  * @groupprio anno  2
132
  * @groupprio Ungrouped 3
133
  * @groupprio setParam  4
134
  * @groupprio getParam  5
135
  * @groupdesc param
136
  *   A list of (hyper-)parameter keys this annotator can take. Users can set and get the
137
  *   parameter values through setters and getters, respectively.
138
  */
139
class NerDLModel(override val uid: String)
    extends AnnotatorModel[NerDLModel]
    with HasBatchedAnnotate[NerDLModel]
    with WriteTensorflowModel
    with HasStorageRef
    with ParamsAndFeaturesWritable
    with HasEngine {

  def this() = this(Identifiable.randomUID("NerDLModel"))

  /** Input Annotator Types: DOCUMENT, TOKEN, WORD_EMBEDDINGS
    *
    * @group anno
    */
  override val inputAnnotatorTypes: Array[String] = Array(DOCUMENT, TOKEN, WORD_EMBEDDINGS)

  /** Output Annotator type: NAMED_ENTITY
    *
    * @group anno
    */
  override val outputAnnotatorType: String = NAMED_ENTITY

  /** Minimum probability. Used only if there is no CRF on top of LSTM layer.
    *
    * NOTE(review): the persisted param name `"minProbe"` does not match the field name
    * `minProba` — looks like a historical typo kept for serialization compatibility
    * (Spark ML uses the param name as the persistence key). TODO confirm before renaming.
    *
    * @group param
    */
  val minProba = new FloatParam(
    this,
    "minProbe",
    "Minimum probability. Used only if there is no CRF on top of LSTM layer.")

  /** Encoder parameters (tag set, char/word lookup tables) captured at training time;
    * required to rebuild the [[NerDatasetEncoder]] at inference time.
    *
    * @group param
    */
  val datasetParams = new StructFeature[DatasetEncoderParams](this, "datasetParams")

  /** ConfigProto from tensorflow, serialized into byte array. Get with
    * config_proto.SerializeToString()
    *
    * @group param
    */
  val configProtoBytes = new IntArrayParam(
    this,
    "configProtoBytes",
    "ConfigProto from tensorflow, serialized into byte array. Get with config_proto.SerializeToString()")

  /** Whether to include confidence scores in annotation metadata (Default: `false`)
    *
    * @group param
    */
  val includeConfidence = new BooleanParam(
    this,
    "includeConfidence",
    "Whether to include confidence scores in annotation metadata")

  /** Whether to include all confidence scores in annotation metadata or just the score of the
    * predicted tag
    *
    * @group param
    */
  val includeAllConfidenceScores = new BooleanParam(
    this,
    "includeAllConfidenceScores",
    "whether to include all confidence scores in annotation metadata")

  /** Internal copy of the NER tag set, exposed as a param so the Python side can read it
    * without deserializing the TensorFlow graph (see `getClasses`).
    *
    * @group param
    */
  val classes =
    new StringArrayParam(this, "classes", "keep an internal copy of classes for Python")

  // Broadcast TensorFlow wrapper, lazily initialized once per model via setModelIfNotSet
  // and shared across executors.
  private var _model: Option[Broadcast[TensorflowNer]] = None

  /** Minimum probability. Used only if there is no CRF on top of LSTM layer.
    *
    * @group setParam
    */
  def setMinProbability(minProba: Float): this.type = set(this.minProba, minProba)

  /** Sets the dataset encoder parameters (tag set and lookup tables).
    *
    * @group setParam
    */
  def setDatasetParams(params: DatasetEncoderParams): this.type = set(this.datasetParams, params)

  /** ConfigProto from tensorflow, serialized into byte array. Get with
    * config_proto.SerializeToString()
    *
    * @group setParam
    */
  def setConfigProtoBytes(bytes: Array[Int]): this.type = set(this.configProtoBytes, bytes)

  /** Whether to include confidence scores in annotation metadata
    *
    * @group setParam
    */
  def setIncludeConfidence(value: Boolean): this.type = set(this.includeConfidence, value)

  /** Whether to include confidence scores for all tags rather than just for the predicted one
    *
    * @group setParam
    */
  def setIncludeAllConfidenceScores(value: Boolean): this.type =
    set(this.includeAllConfidenceScores, value)

  /** Wraps the TensorFlow graph in a broadcast [[TensorflowNer]] exactly once; subsequent
    * calls are no-ops. `datasetParams` must already be set, since the encoder is rebuilt here.
    */
  def setModelIfNotSet(spark: SparkSession, tf: TensorflowWrapper): this.type = {
    if (_model.isEmpty) {
      require(datasetParams.isSet, "datasetParams must be set before usage")

      val encoder = new NerDatasetEncoder(datasetParams.get.get)
      _model = Some(spark.sparkContext.broadcast(new TensorflowNer(tf, encoder, Verbose.Silent)))
    }
    this
  }

  /** Minimum probability. Used only if there is no CRF on top of LSTM layer.
    *
    * @group getParam
    */
  def getMinProba: Float = $(this.minProba)

  /** ConfigProto from tensorflow as a byte array, if set (stored as Array[Int], converted
    * here back to bytes).
    *
    * @group getParam
    */
  def getConfigProtoBytes: Option[Array[Byte]] = get(this.configProtoBytes).map(_.map(_.toByte))

  /** The broadcast [[TensorflowNer]] model. Throws if `setModelIfNotSet` was never called.
    *
    * @group getParam
    */
  def getModelIfNotSet: TensorflowNer = _model.get.value

  /** Whether to include confidence scores in annotation metadata
    *
    * @group getParam
    */
  def getIncludeConfidence: Boolean = $(includeConfidence)

  /** Whether to include all confidence scores in annotation metadata or just the score of the
    * predicted tag
    *
    * @group getParam
    */
  def getIncludeAllConfidenceScores: Boolean = $(includeAllConfidenceScores)

  /** Get the tags used to train this NerDLModel. Also caches them in the `classes` param as a
    * side effect, so the Python API can read them without the encoder.
    *
    * @group getParam
    */
  def getClasses: Array[String] = {
    val encoder = new NerDatasetEncoder(datasetParams.get.get)
    set(classes, encoder.tags)
    encoder.tags
  }

  setDefault(includeConfidence -> false, includeAllConfidenceScores -> false, batchSize -> 32)

  // Pairs a sentence with the index of the input row it came from, so predictions made on a
  // flattened batch can be routed back to the correct row afterwards.
  private case class RowIdentifiedSentence(
      rowIndex: Int,
      rowSentence: WordpieceEmbeddingsSentence)

  /** Runs NER prediction over a batch of rows (each row holding its sentences) and returns,
    * per row, the tagged sentences in the same row order as the input.
    */
  def tag(tokenized: Array[Array[WordpieceEmbeddingsSentence]]): Seq[Array[NerTaggedSentence]] = {
    // Flatten all rows into one batch, remembering each sentence's originating row.
    val batch = tokenized.zipWithIndex.flatMap { case (t, i) =>
      t.map(RowIdentifiedSentence(i, _))
    }
    // Predict
    val labels = getModelIfNotSet.predict(
      batch.map(_.rowSentence),
      getConfigProtoBytes,
      includeConfidence = $(includeConfidence),
      includeAllConfidenceScores = $(includeAllConfidenceScores),
      $(batchSize))

    val outputBatches = Array.fill[Array[NerTaggedSentence]](tokenized.length)(Array.empty)

    // Combine labels with sentences tokens
    batch.indices.foreach { i =>
      val sentence = batch(i).rowSentence

      val tokens = sentence.tokens.indices.flatMap { j =>
        val token = sentence.tokens(j)
        val label = labels(i)(j)
        // Only word-start pieces get a tag; continuation word-pieces are dropped.
        if (token.isWordStart) {
          Some(IndexedTaggedWord(token.token, label._1, token.begin, token.end, label._2))
        } else {
          None
        }
      }.toArray

      // Append the tagged sentence to the row it originated from.
      outputBatches(batch(i).rowIndex) =
        outputBatches(batch(i).rowIndex) :+ new TaggedSentence(tokens)
    }
    outputBatches
  }

  // Validates that the embeddings column in the dataset was produced with the same storage
  // reference this model was trained against.
  override protected def beforeAnnotate(dataset: Dataset[_]): Dataset[_] = {
    validateStorageRef(dataset, $(inputCols), AnnotatorType.WORD_EMBEDDINGS)
    dataset
  }

  /** Unpacks sentence/token/embedding annotations, predicts NER tags per batch, and packs
    * the result back into NAMED_ENTITY annotations (one Seq per input row).
    */
  override def batchAnnotate(batchedAnnotations: Seq[Array[Annotation]]): Seq[Seq[Annotation]] = {
    // Parse
    val tokenized = batchedAnnotations
      .map(annotations => WordpieceEmbeddingsSentence.unpack(annotations).toArray)
      .toArray

    // Predict
    val tagged = tag(tokenized)

    // Pack
    tagged.map(innerTagged => NerTagged.pack(innerTagged))
  }

  // Persists the TensorFlow graph next to the regular param/feature metadata.
  override def onWrite(path: String, spark: SparkSession): Unit = {
    super.onWrite(path, spark)
    writeTensorflowModel(
      path,
      spark,
      getModelIfNotSet.tensorflow,
      "_nerdl",
      NerDLModel.tfFile,
      configProtoBytes = getConfigProtoBytes)
  }

}
364

365
trait ReadsNERGraph extends ParamsAndFeaturesReadable[NerDLModel] with ReadTensorflowModel {

  override val tfFile = "tensorflow"

  /** Restores the TensorFlow graph for a deserialized [[NerDLModel]] and caches the tag set
    * in the `classes` param so the Python side can call `getClasses` cheaply.
    */
  def readNerGraph(instance: NerDLModel, path: String, spark: SparkSession): Unit = {
    val wrapper = readTensorflowModel(path, spark, "_nerdl")
    instance.setModelIfNotSet(spark, wrapper)
    // Materialize the trained tag set for the Python getClasses accessor.
    val datasetEncoder = new NerDatasetEncoder(instance.datasetParams.get.get)
    instance.set(instance.classes, datasetEncoder.tags)
  }

  addReader(readNerGraph)
}
379

380
trait ReadablePretrainedNerDL
    extends ParamsAndFeaturesReadable[NerDLModel]
    with HasPretrained[NerDLModel] {

  override val defaultModelName: Some[String] = Some("ner_dl")

  /** Downloads (or loads from cache) a pretrained NerDLModel from the given remote location. */
  override def pretrained(name: String, lang: String, remoteLoc: String): NerDLModel =
    ResourceDownloader.downloadModel(NerDLModel, name, Option(lang), remoteLoc)

  /** Java-friendly overloads, all delegating to the fully-specified variant above. */
  override def pretrained(): NerDLModel =
    pretrained(defaultModelName.get, defaultLang, defaultLoc)

  override def pretrained(name: String): NerDLModel =
    pretrained(name, defaultLang, defaultLoc)

  override def pretrained(name: String, lang: String): NerDLModel =
    pretrained(name, lang, defaultLoc)
}
398

399
/** This is the companion object of [[NerDLModel]]. Please refer to that class for the
  * documentation. It wires in pretrained-model downloading ([[ReadablePretrainedNerDL]]) and
  * TensorFlow graph deserialization ([[ReadsNERGraph]]).
  */
object NerDLModel extends ReadablePretrainedNerDL with ReadsNERGraph
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc