JohnSnowLabs / spark-nlp / build 4951808959 (pending completion)

Pull Request #13792: SPARKNLP-825 Adding multilabel param (GitHub)
Merge efe6b42df into ef7906c5e

7 of 7 new or added lines in 1 file covered (100.0%)
8637 of 13128 relevant lines covered (65.79%)
0.66 hits per line

Source File: /src/main/scala/com/johnsnowlabs/nlp/annotators/keyword/yake/YakeKeywordExtraction.scala
File coverage: 96.79%
/*
 * Copyright 2017-2022 John Snow Labs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.johnsnowlabs.nlp.annotators.keyword.yake

import com.johnsnowlabs.nlp.AnnotatorType.{CHUNK, TOKEN}
import com.johnsnowlabs.nlp.annotators.keyword.yake.util.Token
import com.johnsnowlabs.nlp.annotators.keyword.yake.util.Utilities.{getTag, medianCalculator}
import com.johnsnowlabs.nlp.{
  Annotation,
  AnnotatorModel,
  HasSimpleAnnotate,
  ParamsAndFeaturesReadable
}
import org.apache.spark.ml.feature.StopWordsRemover
import org.apache.spark.ml.util.Identifiable
import org.slf4j.LoggerFactory

import scala.collection.immutable.ListMap
import scala.collection.mutable.ListBuffer
import scala.collection.{immutable, mutable}
import scala.math.sqrt

/** Yake is an unsupervised, corpus-independent, domain- and language-independent,
  * single-document keyword extraction algorithm.
  *
  * Extracting keywords from texts has become a challenge for individuals and organizations as the
  * information grows in complexity and size. The need to automate this task so that text can be
  * processed in a timely and adequate manner has led to the emergence of automatic keyword
  * extraction tools. Yake is a novel feature-based system for multi-lingual keyword extraction,
  * which supports texts of different sizes, domains or languages. Unlike other approaches, Yake
  * does not rely on dictionaries or thesauri, nor is it trained on any corpus. Instead, it
  * follows an unsupervised approach which builds upon features extracted from the text, thus
  * making it applicable to documents written in different languages without the need for further
  * knowledge. This can be beneficial for a large number of tasks and a plethora of situations
  * where access to training corpora is either limited or restricted. The algorithm makes use of
  * the position of a sentence and token. Therefore, to use the annotator, the text should first
  * be sent through a Sentence Boundary Detector and then a tokenizer.
  *
  * See the parameters section for tweakable parameters to get the best result from the annotator.
  *
  * Note that each keyword will be given a keyword score greater than 0 (the lower the score, the
  * better the keyword). Therefore, to filter the keywords, an upper bound for the score can be
  * set with `setThreshold`.
  *
  * For extended examples of usage, see the
  * [[https://github.com/JohnSnowLabs/spark-nlp/blob/master/examples/python/annotation/text/english/keyword-extraction/Keyword_Extraction_YAKE.ipynb Examples]]
  * and the
  * [[https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/keyword/yake/YakeTestSpec.scala YakeTestSpec]].
  *
  * '''Sources''' :
  *
  * [[https://www.sciencedirect.com/science/article/pii/S0020025519308588 Campos, R., Mangaravite, V., Pasquali, A., Jorge, A., Nunes, C. and Jatowt, A. (2020). YAKE! Keyword Extraction from Single Documents using Multiple Local Features. In Information Sciences Journal. Elsevier, Vol 509, pp 257-289]]
  *
  * '''Paper abstract:'''
  *
  * ''As the amount of generated information grows, reading and summarizing texts of large
  * collections turns into a challenging task. Many documents do not come with descriptive terms,
  * thus requiring humans to generate keywords on-the-fly. The need to automate this kind of task
  * demands the development of keyword extraction systems with the ability to automatically
  * identify keywords within the text. One approach is to resort to machine-learning algorithms.
  * These, however, depend on large annotated text corpora, which are not always available. An
  * alternative solution is to consider an unsupervised approach. In this article, we describe
  * YAKE!, a light-weight unsupervised automatic keyword extraction method which rests on
  * statistical text features extracted from single documents to select the most relevant keywords
  * of a text. Our system does not need to be trained on a particular set of documents, nor does
  * it depend on dictionaries, external corpora, text size, language, or domain. To demonstrate
  * the merits and significance of YAKE!, we compare it against ten state-of-the-art unsupervised
  * approaches and one supervised method. Experimental results carried out on top of twenty
  * datasets show that YAKE! significantly outperforms other unsupervised methods on texts of
  * different sizes, languages, and domains.''
  *
  * ==Example==
  * {{{
  * import spark.implicits._
  * import com.johnsnowlabs.nlp.base.DocumentAssembler
  * import com.johnsnowlabs.nlp.annotator.{SentenceDetector, Tokenizer}
  * import com.johnsnowlabs.nlp.annotators.keyword.yake.YakeKeywordExtraction
  * import org.apache.spark.ml.Pipeline
  *
  * val documentAssembler = new DocumentAssembler()
  *   .setInputCol("text")
  *   .setOutputCol("document")
  *
  * val sentenceDetector = new SentenceDetector()
  *   .setInputCols("document")
  *   .setOutputCol("sentence")
  *
  * val token = new Tokenizer()
  *   .setInputCols("sentence")
  *   .setOutputCol("token")
  *   .setContextChars(Array("(", ")", "?", "!", ".", ","))
  *
  * val keywords = new YakeKeywordExtraction()
  *   .setInputCols("token")
  *   .setOutputCol("keywords")
  *   .setThreshold(0.6f)
  *   .setMinNGrams(2)
  *   .setNKeywords(10)
  *
  * val pipeline = new Pipeline().setStages(Array(
  *   documentAssembler,
  *   sentenceDetector,
  *   token,
  *   keywords
  * ))
  *
  * val data = Seq(
  *   "Sources tell us that Google is acquiring Kaggle, a platform that hosts data science and machine learning competitions. Details about the transaction remain somewhat vague, but given that Google is hosting its Cloud Next conference in San Francisco this week, the official announcement could come as early as tomorrow. Reached by phone, Kaggle co-founder CEO Anthony Goldbloom declined to deny that the acquisition is happening. Google itself declined 'to comment on rumors'. Kaggle, which has about half a million data scientists on its platform, was founded by Goldbloom  and Ben Hamner in 2010. The service got an early start and even though it has a few competitors like DrivenData, TopCoder and HackerRank, it has managed to stay well ahead of them by focusing on its specific niche. The service is basically the de facto home for running data science and machine learning competitions. With Kaggle, Google is buying one of the largest and most active communities for data scientists - and with that, it will get increased mindshare in this community, too (though it already has plenty of that thanks to Tensorflow and other projects). Kaggle has a bit of a history with Google, too, but that's pretty recent. Earlier this month, Google and Kaggle teamed up to host a $100,000 machine learning competition around classifying YouTube videos. That competition had some deep integrations with the Google Cloud Platform, too. Our understanding is that Google will keep the service running - likely under its current name. While the acquisition is probably more about Kaggle's community than technology, Kaggle did build some interesting tools for hosting its competition and 'kernels', too. On Kaggle, kernels are basically the source code for analyzing data sets and developers can share this code on the platform (the company previously called them 'scripts'). Like similar competition-centric sites, Kaggle also runs a job board, too. It's unclear what Google will do with that part of the service. According to Crunchbase, Kaggle raised $12.5 million (though PitchBook says it's $12.75) since its   launch in 2010. Investors in Kaggle include Index Ventures, SV Angel, Max Levchin, Naval Ravikant, Google chief economist Hal Varian, Khosla Ventures and Yuri Milner"
  * ).toDF("text")
  * val result = pipeline.fit(data).transform(data)
  *
  * // combine the result and score (contained in keywords.metadata)
  * val scores = result
  *   .selectExpr("explode(arrays_zip(keywords.result, keywords.metadata)) as resultTuples")
  *   .select($"resultTuples.0" as "keyword", $"resultTuples.1.score")
  *
  * // Order ascending, as lower scores mean higher importance
  * scores.orderBy("score").show(5, truncate = false)
  * +---------------------+-------------------+
  * |keyword              |score              |
  * +---------------------+-------------------+
  * |google cloud         |0.32051516486864573|
  * |google cloud platform|0.37786450577630676|
  * |ceo anthony goldbloom|0.39922830978423146|
  * |san francisco        |0.40224744669493756|
  * |anthony goldbloom    |0.41584827825302534|
  * +---------------------+-------------------+
  * }}}
  *
  * @groupname anno Annotator types
  * @groupdesc anno
  *   Required input and expected output annotator types
  * @groupname Ungrouped Members
  * @groupname param Parameters
  * @groupname setParam Parameter setters
  * @groupname getParam Parameter getters
  * @groupprio param  1
  * @groupprio anno  2
  * @groupprio Ungrouped 3
  * @groupprio setParam  4
  * @groupprio getParam  5
  * @groupdesc param
  *   A list of (hyper-)parameter keys this annotator can take. Users can set and get the
  *   parameter values through setters and getters, respectively.
  */
class YakeKeywordExtraction(override val uid: String)
    extends AnnotatorModel[YakeKeywordExtraction]
    with HasSimpleAnnotate[YakeKeywordExtraction]
    with YakeParams {

  /** Annotator reference id. Used to identify elements in metadata or to refer to this annotator
    * type
    */
  def this() = this(Identifiable.randomUID("YAKE"))

  private val logger = LoggerFactory.getLogger("YakeKeywordExtraction")

  /** Output Annotator Types: CHUNK
    *
    * @group anno
    */
  override val outputAnnotatorType: AnnotatorType = CHUNK

  /** Input Annotator Types: TOKEN
    *
    * @group anno
    */
  override val inputAnnotatorTypes: Array[AnnotatorType] = Array(TOKEN)

  setDefault(
    maxNGrams -> 3,
    minNGrams -> 1,
    nKeywords -> 30,
    windowSize -> 3,
    threshold -> -1,
    stopWords -> StopWordsRemover.loadDefaultStopWords("english"))

  /** Collects, for each token, its text and the index of the sentence it belongs to; the basic
    * statistics used by the algorithm are derived from these pairs
    *
    * @param result
    *   Document to annotate as array of tokens with sentence metadata
    * @return
    *   Array of (token, sentence index) pairs
    */
  def getBasicStats(result: Array[Annotation]): Array[(String, Int)] = {
    val resultFlatten = result.map(x => (x.result, x.metadata.head._2))
    val resultFlattenIndexed = resultFlatten.map { row =>
      (row._1, row._2.toInt)
    }
    resultFlattenIndexed
  }

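  /** Assigns each token a sentence index, its position within the sentence and a tag computed by
    * `getTag`
    *
    * @param resultFlattenIndexed
    *   Array of (token, sentence index) pairs
    * @return
    *   Array of (token, sentence index, position, tag) tuples
    */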
  def assignTags(
      resultFlattenIndexed: Array[(String, Int)]): Array[(String, Int, Int, String)] = {
    var sentenceID = 0
    var position = 0
    val tags = resultFlattenIndexed.map { case (t, sID) =>
      if (sID == sentenceID) {
        val tag = getTag(t, position)
        position += 1
        (t, sentenceID, position, tag)
      } else {
        sentenceID += 1
        position = 0
        val tag = getTag(t, position)
        position += 1
        (t, sentenceID, position, tag)
      }
    }
    tags
  }

  /** Calculates co-occurrence counts between tokens inside the generated n-grams
    *
    * @param sentences
    *   Sentences as lists of tokens
    * @param left
    *   Whether to count neighbours of the first (left) or the last (right) word of each n-gram
    * @return
    *   Co-occurrence counts for each token as a Map
    */
  def getCoOccurrence(
      sentences: ListBuffer[ListBuffer[String]],
      left: Boolean): mutable.Map[String, mutable.Map[String, Int]] = {
    val coMap: mutable.Map[String, mutable.Map[String, Int]] =
      mutable.HashMap[String, mutable.Map[String, Int]]()
    val ngrams = sentences.zipWithIndex.flatMap { case (row, _) =>
      (for (i <- $(minNGrams) to $(maxNGrams)) yield row.sliding(i).map(p => p.toList)).flatten
    }
    ngrams.foreach { elem =>
      {
        var head = elem.head.toLowerCase
        if (!left) {
          head = elem.last.toLowerCase
        }
        elem.foreach(x => {
          if (x.toLowerCase != head) {
            coMap.getOrElseUpdate(head, mutable.HashMap[String, Int]())
            coMap(head).getOrElseUpdate(x.toLowerCase, 0)
            coMap(head)(x.toLowerCase) += 1
          }
        })
      }
    }
    coMap
  }
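
  // Example (hypothetical sentence): for the tokens "google", "cloud", "platform" with the
  // default minNGrams = 1 and maxNGrams = 3, the left co-occurrence map contains
  // "google" -> Map("cloud" -> 2, "platform" -> 1) and "cloud" -> Map("platform" -> 1),
  // i.e. for every n-gram it counts how often each other word appears alongside its first word.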

  /** Calculates token scores given statistics
    *
    * Refer to the [[https://doi.org/10.1016/j.ins.2019.09.013 YAKE paper]]:
    *
    *   - T Position = ln(ln(3 + Median(Sentence Index)))
    *   - T Case = max(TF(U(t)), TF(A(t))) / ln(TF(t))
    *   - TF Norm = TF(t) / (MeanTF + 1 * σ)
    *   - T Rel = 1 + (DL + DR) * TF(t) / MaxTF
    *   - T Sentence = SF(t) / # Sentences
    *   - TS = (TPos * TRel) / (TCase + ((TFNorm + TSent) / TRel))
    *
    * @param basicStats
    *   Basic stats
    * @param coOccurLeftAggregate
    *   Left Co Occurrence
    * @param coOccurRightAggregate
    *   Right Co Occurrence
    * @return
    *   Tokens with their computed statistics
    */
  def calculateTokenScores(
      basicStats: Array[(String, Int)],
      coOccurLeftAggregate: mutable.Map[String, mutable.Map[String, Int]],
      coOccurRightAggregate: mutable.Map[String, mutable.Map[String, Int]])
      : immutable.Iterable[Token] = {
    if (basicStats.isEmpty) {
      immutable.Iterable.empty[Token]
    } else {
      val tags = assignTags(basicStats)
      val avg = basicStats
        .groupBy(x => x._1.toLowerCase)
        .mapValues(_.length)
        .foldLeft(0)(_ + _._2)
        .toDouble /
        basicStats
          .groupBy(x => x._1.toLowerCase)
          .mapValues(_.length)
          .size
          .toDouble
      val std = sqrt(
        basicStats
          .groupBy(x => x._1.toLowerCase)
          .mapValues(_.length)
          .map(_._2.toDouble)
          .map(a => math.pow(a - avg, 2))
          .sum /
          basicStats
            .groupBy(x => x._1.toLowerCase)
            .mapValues(_.length)
            .size
            .toDouble)
      val maxTF =
        basicStats.groupBy(x => x._1.toLowerCase).mapValues(_.length).map(_._2.toDouble).max
      val nsent = basicStats.map(_._2).max + 1
      val tokens = basicStats
        .groupBy(x => x._1.toLowerCase)
        .mapValues(_.length)
        .map(x =>
          new Token(
            x._1.toLowerCase,
            x._2,
            nsent,
            avg,
            std,
            maxTF,
            coOccurLeftAggregate.getOrElse(x._1.toLowerCase, mutable.HashMap[String, Int]()),
            coOccurRightAggregate.getOrElse(x._1.toLowerCase, mutable.HashMap[String, Int]())))
      tags
        .filter(x => x._4 == "n")
        .groupBy(x => x._1.toLowerCase)
        .mapValues(_.length)
        .foreach(x => tokens.filter(y => y.token == x._1).head.nCount = x._2)
      tags
        .filter(x => x._4 == "a")
        .groupBy(x => x._1.toLowerCase)
        .mapValues(_.length)
        .foreach(x => tokens.filter(y => y.token == x._1).head.aCount = x._2)
      tags
        .groupBy(x => x._1.toLowerCase)
        .mapValues(x => medianCalculator(x.map(y => y._2)))
        .foreach(x => tokens.filter(y => y.token == x._1).head.medianSentenceOffset = x._2)
      tags
        .groupBy(x => x._1.toLowerCase)
        .mapValues(x => x.map(y => y._2).length)
        .foreach(x => tokens.filter(y => y.token == x._1).head.numberOfSentences = x._2)
      tokens
    }
  }
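
  // Illustration with hypothetical feature values for a single token: for TPos = 1.1, TRel = 1.5,
  // TCase = 0.7, TFNorm = 0.9 and TSent = 0.4, the combined score is
  // TS = (TPos * TRel) / (TCase + (TFNorm + TSent) / TRel) = 1.65 / (0.7 + 0.87) ≈ 1.05;
  // lower TS values mark more relevant tokens.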

  /** Separates sentences given tokens with sentence metadata
    *
    * @param tokenizedArray
    *   Tokens with sentence metadata
    * @return
    *   Separated sentences
    */
  def getSentences(tokenizedArray: Array[Annotation]): ListBuffer[ListBuffer[String]] = {
    val sentences: ListBuffer[ListBuffer[String]] = ListBuffer(ListBuffer())
    var snt = 0
    tokenizedArray.map(x => {
      if (x.metadata.getOrElse("sentence", null).toInt == snt) {
        sentences(snt) += x.result.toLowerCase
      } else {
        snt += 1
        sentences += ListBuffer()
        sentences(snt) += x.result.toLowerCase
      }
    })
    sentences
  }

  /** Generates candidate keywords
    *
    * @param sentences
    *   Tagged tokens (token, sentence index, position, tag)
    * @return
    *   Candidate keywords with their frequencies
    */
  def getCandidateKeywords(
      sentences: Array[(String, Int, Int, String)]): mutable.Map[String, Int] = {
    val candidate = mutable.HashMap[String, Int]().withDefaultValue(0)
    sentences
      .groupBy(_._2)
      .map(row => {
        val ngrams = (for (i <- $(minNGrams) to $(maxNGrams))
          yield row._2.sliding(i).map(p => p.toList)).flatten
        ngrams
          .filter(y => (!y.map(x => x._4).contains("u")) && (!y.map(x => x._4).contains("d")))
          .map(x => {
            val firstWord = x.head._1.toLowerCase
            val lastWord = x.last._1.toLowerCase
            if (! $(stopWords).contains(firstWord) && ! $(stopWords).contains(lastWord)) {
              candidate(x.map(_._1).mkString(",").toLowerCase) += 1
            }
          })
      })
    candidate
  }
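
  // Example (hypothetical): for the tagged tokens "machine", "learning", "competitions" the
  // candidate map gains keys such as "machine", "machine,learning" and
  // "machine,learning,competitions" (tokens joined by commas, lowercased), while n-grams that
  // start or end with a stop word, or contain a token tagged "u" or "d", are skipped.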

  /** Extracts keywords
    *
    * @param candidate
    *   Candidate keywords
    * @param tokens
    *   Tokens with scores
    * @return
    *   Keywords with their scores
    */
  def getKeywords(
      candidate: mutable.Map[String, Int],
      tokens: immutable.Iterable[Token]): ListMap[String, Double] = {
    val keywords = candidate.map { case (x, kf) =>
      var prod_s: Double = 1
      var sum_s: Double = 0
      val xi = x.split(",")
      xi.zipWithIndex.foreach { case (y, ind) =>
        val word = tokens.filter(k => k.token == y)
        if (! $(stopWords).contains(y) && word.nonEmpty) {
          prod_s *= word.head.TScore
          sum_s += word.head.TScore
        } else {
          val prev_token = tokens.filter(k => k.token == xi(ind - 1))
          var prev = 0.0
          var prev_prob = 0.0
          if (prev_token.nonEmpty) {
            prev = prev_token.head.rightCO.getOrElse(y, 0).toDouble
            prev_prob = prev / prev_token.head.termFrequency
          }
          val next_token = tokens.filter(k => k.token == y)
          var next = 0.0
          var next_prob = 0.0
          if (next_token.nonEmpty) {
            next = next_token.head.rightCO.getOrElse(xi(ind + 1), 0).toDouble
            next_prob = next / next_token.head.termFrequency
          }
          val bi_probability = prev_prob * next_prob
          prod_s = prod_s * (1 + (1 - bi_probability))
          sum_s -= (1 - bi_probability)
        }
      }
      val S_kw = prod_s / (kf * (1 + sum_s))
      (xi.mkString(" ").toLowerCase, S_kw)
    }
    var topn = ListMap(keywords.toSeq.sortWith(_._2 < _._2): _*)
    topn = topn.slice(0, $(nKeywords))
    if ($(threshold) != -1) {
      topn = topn.filter { case (_, score) => score <= $(threshold) }
    }
    topn
  }

  /** Executes the YAKE algorithm for each sentence
    *
    * @param annotations
    *   Token array to annotate
    * @return
    *   Annotated token array
    */
  def processSentences(annotations: Seq[Annotation]): Seq[Annotation] = {
    val basicStat = getBasicStats(annotations.toArray)
    val sentences = getSentences(annotations.toArray)
    val coOccurMatLeft = getCoOccurrence(sentences, left = true)
    val coOccurMatRight = getCoOccurrence(sentences, left = false)
    val tokens = calculateTokenScores(basicStat, coOccurMatLeft, coOccurMatRight)
    val taggedSentence = assignTags(basicStat)
    val candidateKeywords = getCandidateKeywords(taggedSentence)
    val keywords = getKeywords(candidateKeywords, tokens)
    val annotatedKeywords: ListBuffer[Annotation] = new ListBuffer()
    val annotationNGram = (for (i <- $(minNGrams) to $(maxNGrams))
      yield annotations.sliding(i).map(p => p.toList)).flatten
    annotationNGram.foreach(annotation => {
      val key: String = annotation.map(_.result.toLowerCase()).mkString(" ").toLowerCase
      if (keywords.isDefinedAt(key)) {
        annotatedKeywords += Annotation(
          outputAnnotatorType,
          annotation.head.begin,
          annotation.last.end,
          key,
          Map(
            "score" -> keywords.getOrElse(key, "").toString,
            "sentence" -> annotation.head.metadata.getOrElse("sentence", 0).toString))
      }
    })
    annotatedKeywords
  }

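  /** Extracts the keywords from the given token annotations
    *
    * @param annotations
    *   Annotations of type TOKEN, with the sentence index in their metadata
    * @return
    *   Annotations of type CHUNK containing the keywords, with their score in the metadata
    */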
  override def annotate(annotations: Seq[Annotation]): Seq[Annotation] = {
    val keywords = processSentences(annotations)
    keywords
  }
}

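/** This is the companion object of [[YakeKeywordExtraction]]. Please refer to that class for
  * documentation.
  */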
object YakeKeywordExtraction extends ParamsAndFeaturesReadable[YakeKeywordExtraction]