
JohnSnowLabs / spark-nlp, build 13883000244

16 Mar 2025 11:44AM UTC. Coverage: 59.034% (-1.0%) from 60.072%.

Pull Request #14444: Sparknlp 1060 implement phi 3.5 vision
Merge 6d717703b into 05000ab4a

0 of 292 new or added lines in 5 files covered. (0.0%)
20 existing lines in 14 files now uncovered.
9413 of 15945 relevant lines covered (59.03%).
0.59 hits per line.

Source file (0.0% of new lines covered):
/src/main/scala/com/johnsnowlabs/nlp/annotators/cv/Phi3Vision.scala

/*
 * Copyright 2017-2024 John Snow Labs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.johnsnowlabs.nlp.annotators.cv

import com.johnsnowlabs.ml.ai.util.Generation.GenerationConfig
import com.johnsnowlabs.ml.ai.Phi3V
import com.johnsnowlabs.ml.onnx.OnnxWrapper.DecoderWrappers
import com.johnsnowlabs.ml.util.LoadExternalModel.{
  loadJsonStringAsset,
  loadTextAsset,
  modelSanityCheck,
  notSupportedEngineError
}
import com.johnsnowlabs.ml.util.Openvino
import com.johnsnowlabs.nlp.AnnotatorType.{DOCUMENT, IMAGE}
import com.johnsnowlabs.nlp._
import org.json4s.{DefaultFormats, JValue}
import org.json4s.jackson.JsonMethods.parse
import com.johnsnowlabs.ml.openvino.{OpenvinoWrapper, ReadOpenvinoModel, WriteOpenvinoModel}
import com.johnsnowlabs.ml.openvino.OpenvinoWrapper.Phi3VWrappers
import com.johnsnowlabs.nlp.serialization.{MapFeature, StructFeature}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.ml.param.IntArrayParam
import org.apache.spark.ml.util.Identifiable
import org.apache.spark.sql.SparkSession

/** Phi3Vision can load Phi3 Vision models for visual question answering. The model consists of a
  * vision encoder, a text encoder as well as a text decoder. The vision encoder will encode the
  * input image, the text encoder will encode the input question together with the encoding of the
  * image, and the text decoder will output the answer to the question.
  *
  * Pretrained models can be loaded with `pretrained` of the companion object:
  * {{{
  * val visualQA = Phi3Vision.pretrained()
  *   .setInputCols("image_assembler")
  *   .setOutputCol("answer")
  * }}}
  * The default model is `"phi_3_vision_128k_instruct"`, if no name is provided.
  *
  * For available pretrained models please see the
  * [[https://sparknlp.org/models?task=Question+Answering Models Hub]].
  *
  * Models from the HuggingFace 🤗 Transformers library are also compatible with Spark NLP 🚀. To
  * see which models are compatible and how to import them see
  * [[https://github.com/JohnSnowLabs/spark-nlp/discussions/5669]] and to see more extended
  * examples, see
  * [[https://github.com/JohnSnowLabs/spark-nlp/blob/master/src/test/scala/com/johnsnowlabs/nlp/annotators/cv/Phi3VisionTest.scala]].
  *
  * ==Example==
  * {{{
  * import spark.implicits._
  * import com.johnsnowlabs.nlp.base._
  * import com.johnsnowlabs.nlp.annotator._
  * import org.apache.spark.ml.Pipeline
  *
  * val imageDF: DataFrame = ResourceHelper.spark.read
  *  .format("image")
  *  .option("dropInvalid", value = true)
  *  .load(imageFolder)
  *
  * val testDF: DataFrame = imageDF.withColumn("text", lit("<|user|> \n <|image_1|> \nWhat is unusual on this picture? <|end|>\n <|assistant|>\n"))
  *
  * val imageAssembler: ImageAssembler = new ImageAssembler()
  *   .setInputCol("image")
  *   .setOutputCol("image_assembler")
  *
  * val visualQAClassifier = Phi3Vision.pretrained("phi_3_vision_128k_instruct","en")
  *   .setInputCols("image_assembler")
  *   .setOutputCol("answer")
  *
  * val pipeline = new Pipeline().setStages(Array(
  *   imageAssembler,
  *   visualQAClassifier
  * ))
  *
  * val result = pipeline.fit(testDF).transform(testDF)
  *
  * result.select("image_assembler.origin", "answer.result").show(false)
  * +--------------------------------------+------+
  * |origin                                |result|
  * +--------------------------------------+------+
  * |[file:///content/images/cat_image.jpg]|[The unusual aspect of this picture is the presence of two cats lying on a pink couch]|
  * +--------------------------------------+------+
  * }}}
  *
  * @see
  *   [[CLIPForZeroShotClassification]] for Zero Shot Image Classifier
  * @see
  *   [[https://sparknlp.org/docs/en/annotators Annotators Main Page]] for a list of transformer
  *   based classifiers
  * @param uid
  *   required uid for storing annotator to disk
  * @groupname anno Annotator types
  * @groupdesc anno
  *   Required input and expected output annotator types
  * @groupname Ungrouped Members
  * @groupname param Parameters
  * @groupname setParam Parameter setters
  * @groupname getParam Parameter getters
  * @groupprio param  1
  * @groupprio anno  2
  * @groupprio Ungrouped 3
  * @groupprio setParam  4
  * @groupprio getParam  5
  * @groupdesc param
  *   A list of (hyper-)parameter keys this annotator can take. Users can set and get the
  *   parameter values through setters and getters, respectively.
  */

class Phi3Vision(override val uid: String)
    extends AnnotatorModel[Phi3Vision]
    with HasBatchedAnnotateImage[Phi3Vision]
    with HasImageFeatureProperties
    with WriteOpenvinoModel
    with HasGeneratorProperties
    with HasEngine {

  /** Annotator reference id. Used to identify elements in metadata or to refer to this annotator
    * type
    */
  def this() = this(Identifiable.randomUID("Phi3Vision"))

  /** Input annotator type: IMAGE
    *
    * @group anno
    */
  override val inputAnnotatorTypes: Array[AnnotatorType] = Array(IMAGE)

  /** Output annotator type: DOCUMENT
    *
    * @group anno
    */
  override val outputAnnotatorType: AnnotatorType = DOCUMENT

  /** @group setParam */
  def setRandomSeed(value: Int): Phi3Vision.this.type = {
    if (randomSeed.isEmpty) {
      this.randomSeed = Some(value)
    }
    this
  }

  /** A list of token ids which are ignored in the decoder's output (Default: `Array()`)
    *
    * @group param
    */
  var ignoreTokenIds = new IntArrayParam(
    this,
    "ignoreTokenIds",
    "A list of token ids which are ignored in the decoder's output")

  /** @group setParam */
  def setIgnoreTokenIds(tokenIds: Array[Int]): Phi3Vision.this.type = {
    set(ignoreTokenIds, tokenIds)
  }

  /** @group getParam */
  def getIgnoreTokenIds: Array[Int] = $(ignoreTokenIds)

  /** Vocabulary used to encode the words to ids with bpeTokenizer.encode
    *
    * @group param
    */
  val vocabulary: MapFeature[String, Int] = new MapFeature(this, "vocabulary").setProtected()

  /** @group setParam */
  def setVocabulary(value: Map[String, Int]): this.type = set(vocabulary, value)

  /** Holds the contents of merges.txt, the byte-pair merge ranks used by the BPE tokenizer
    *
    * @group param
    */
  val merges: MapFeature[(String, String), Int] = new MapFeature(this, "merges").setProtected()

  /** @group setParam */
  def setMerges(value: Map[(String, String), Int]): this.type = set(merges, value)

  /** Additional tokens to be added to the vocabulary
    *
    * @group param
    */
  val addedTokens: MapFeature[String, Int] = new MapFeature(this, "addedTokens").setProtected()

  /** @group setParam */
  def setAddedTokens(value: Map[String, Int]): this.type = set(addedTokens, value)

  /** Stop tokens to terminate the generation
    *
    * @group param
    */
  override val stopTokenIds =
    new IntArrayParam(this, "stopTokenIds", "Stop tokens to terminate the generation")

  /** @group setParam */
  override def setStopTokenIds(value: Array[Int]): this.type = {
    set(stopTokenIds, value)
  }

  /** @group getParam */
  override def getStopTokenIds: Array[Int] = $(stopTokenIds)

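  // Engine holder: the Phi3V wrapper is broadcast once by setModelIfNotSet and
  // shared across executors afterwards.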
  private var _model: Option[Broadcast[Phi3V]] = None
  val generationConfig: StructFeature[GenerationConfig] =
    new StructFeature(this, "generationConfig").setProtected()

  def setGenerationConfig(value: GenerationConfig): this.type =
    set(generationConfig, value)

  def getGenerationConfig: GenerationConfig = $$(generationConfig)

  /** @group setParam */
  def setModelIfNotSet(
      spark: SparkSession,
      onnxWrappers: Option[DecoderWrappers],
      openvinoWrapper: Option[Phi3VWrappers]): this.type = {
    if (_model.isEmpty) {
      _model = Some(
        spark.sparkContext.broadcast(
          new Phi3V(
            onnxWrappers,
            openvinoWrapper,
            $$(merges),
            $$(vocabulary),
            $$(addedTokens),
            generationConfig = getGenerationConfig)))
    }
    this
  }

  /** @group getParam */
  def getModelIfNotSet: Phi3V = _model.get.value

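  // Generation defaults: greedy decoding (doSample = false, beamSize = 1),
  // outputs capped at 20 tokens, generation stopped at token id 128001.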
  setDefault(
    minOutputLength -> 0,
    maxOutputLength -> 20,
    doSample -> false,
    temperature -> 0.6,
    topK -> -1,
    topP -> 0.9,
    repetitionPenalty -> 1.0,
    noRepeatNgramSize -> 3,
    ignoreTokenIds -> Array(),
    batchSize -> 1,
    beamSize -> 1,
    maxInputLength -> 4096,
    stopTokenIds -> Array(128001))

  /** takes a document and annotations and produces new annotations of this annotator's annotation
    * type
    *
    * @param batchedAnnotations
    *   Annotations in batches that correspond to inputAnnotationCols generated by previous
    *   annotators if any
    * @return
    *   any number of annotations processed for every batch of input annotations. Not necessarily a
    *   one-to-one relationship
    */
  override def batchAnnotate(
      batchedAnnotations: Seq[Array[AnnotationImage]]): Seq[Seq[Annotation]] = {

    batchedAnnotations
      .map { cleanAnnotationImages =>
        val validImages = cleanAnnotationImages.filter(_.result.nonEmpty)
        val questionAnnotations = extractInputAnnotation(validImages)

        getModelIfNotSet.predict(
          questionAnnotations,
          validImages.toSeq,
          batchSize = $(batchSize),
          minOutputLength = $(minOutputLength),
          maxOutputLength = $(maxOutputLength),
          doSample = $(doSample),
          temperature = $(temperature),
          topK = $(topK),
          topP = $(topP),
          repetitionPenalty = $(repetitionPenalty),
          noRepeatNgramSize = $(noRepeatNgramSize),
          randomSeed = this.randomSeed,
          ignoreTokenIds = $(ignoreTokenIds),
          beamSize = $(beamSize),
          maxInputLength = $(maxInputLength))
      }
  }

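  // Builds one DOCUMENT annotation per image from the prompt attached to the
  // image row; images without a prompt get a generic default question.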
  private def extractInputAnnotation(
      annotationImages: Array[AnnotationImage]): Seq[Annotation] = {
    val questions = annotationImages.map { annotationImage =>
      val imageText =
        if (annotationImage.text.nonEmpty) annotationImage.text
        else
          "<|user|> \n <|image_1|> This is an image\n <|end|>\n <|assistant|>\n" // default question
      Annotation(imageText)
    }

    questions
  }

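  // Serializes the three OpenVINO sub-models (reshape, wte, language model)
  // that make up the exported Phi-3 Vision graph.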
  override def onWrite(path: String, spark: SparkSession): Unit = {
    super.onWrite(path, spark)
    getEngine match {
      case Openvino.name =>
        val wrappers = getModelIfNotSet.openvinoWrapper
        writeOpenvinoModels(
          path,
          spark,
          Seq((wrappers.get.reshape, "reshape_model.xml")),
          Phi3Vision.suffix)

        writeOpenvinoModels(
          path,
          spark,
          Seq((wrappers.get.wte, "wte_model.xml")),
          Phi3Vision.suffix)

        writeOpenvinoModels(
          path,
          spark,
          Seq((wrappers.get.languageModel, "language_model.xml")),
          Phi3Vision.suffix)
      case _ =>
        throw new Exception(notSupportedEngineError)
    }
  }

}

trait ReadablePretrainedPhi3Vision
    extends ParamsAndFeaturesReadable[Phi3Vision]
    with HasPretrained[Phi3Vision] {

  override val defaultModelName: Some[String] = Some("phi_3_vision_128k_instruct")

  /** Java-compliant overrides */
  override def pretrained(): Phi3Vision = super.pretrained()

  override def pretrained(name: String): Phi3Vision =
    super.pretrained(name)

  override def pretrained(name: String, lang: String): Phi3Vision =
    super.pretrained(name, lang)

  override def pretrained(name: String, lang: String, remoteLoc: String): Phi3Vision =
    super.pretrained(name, lang, remoteLoc)

}

trait ReadPhi3VisionDLModel extends ReadOpenvinoModel {
  this: ParamsAndFeaturesReadable[Phi3Vision] =>
  val suffix: String = "_phi3v"
  override val openvinoFile: String = "phi3v_openvino"

  def readModel(instance: Phi3Vision, path: String, spark: SparkSession): Unit = {
    instance.getEngine match {
      case Openvino.name =>
        val reshapeWrappers =
          readOpenvinoModels(path, spark, Seq("reshape_model.xml"), suffix)
        val wteWrappers =
          readOpenvinoModels(path, spark, Seq("wte_model.xml"), suffix)

        val languageModelWrappers =
          readOpenvinoModels(path, spark, Seq("language_model.xml"), suffix)

        val ovWrapper = Phi3VWrappers(
          wte = wteWrappers("wte_model.xml"),
          languageModel = languageModelWrappers("language_model.xml"),
          reshape = reshapeWrappers("reshape_model.xml"))
        instance.setModelIfNotSet(spark, None, Some(ovWrapper))
      case _ =>
        throw new Exception(notSupportedEngineError)
    }
  }

  addReader(readModel)

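  // Imports an externally exported model directory: generation settings come
  // from config.json, the BPE vocabulary and merges from tokenizer.json (or
  // from vocab.txt/merges.txt assets), and the weights from OpenVINO graphs.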
  def loadSavedModel(
      modelPath: String,
      spark: SparkSession,
      useOpenvino: Boolean = false): Phi3Vision = {
    implicit val formats: DefaultFormats.type = DefaultFormats // for json4s
    val (localModelPath, detectedEngine) =
      modelSanityCheck(
        modelPath,
        isDecoder = false,
        custom = Some(List("reshape_model", "wte_model", "language_model")))
    val modelConfig: JValue =
      parse(loadJsonStringAsset(localModelPath, "config.json"))

    val beginSuppressTokens: Array[Int] =
      (modelConfig \ "begin_suppress_tokens").extract[Array[Int]]

    val suppressTokenIds: Array[Int] =
      (modelConfig \ "suppress_tokens").extract[Array[Int]]

    val forcedDecoderIds: Array[(Int, Int)] =
      (modelConfig \ "forced_decoder_ids").extract[Array[Array[Int]]].map {
        case idxWithTokenId: Array[Int] if idxWithTokenId.length == 2 =>
          (idxWithTokenId(0), idxWithTokenId(1))
        case _ =>
          throw new Exception(
            "Could not extract forced_decoder_ids. Should be a list of tuples with 2 entries.")
      }

    def arrayOrNone[T](array: Array[T]): Option[Array[T]] =
      if (array.nonEmpty) Some(array) else None

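    // Note: padTokenId is read from the "eos_token_id" config field.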
    val bosTokenId = (modelConfig \ "bos_token_id").extract[Int]
    val eosTokenId = (modelConfig \ "eos_token_id").extract[Int]
    val padTokenId = (modelConfig \ "eos_token_id").extract[Int]
    val vocabSize = (modelConfig \ "vocab_size").extract[Int]

    // Check if tokenizer.json exists
    val tokenizerPath = s"$localModelPath/assets/tokenizer.json"
    val tokenizerExists = new java.io.File(tokenizerPath).exists()
    val (vocabs, addedTokens, bytePairs) = if (tokenizerExists) {
      val tokenizerConfig: JValue = parse(loadJsonStringAsset(localModelPath, "tokenizer.json"))
      // extract vocab from tokenizer.json (model -> vocab)
      var vocabs: Map[String, Int] =
        (tokenizerConfig \ "model" \ "vocab").extract[Map[String, Int]]

      // extract merges from tokenizer.json (model -> merges)
      val bytePairs = (tokenizerConfig \ "model" \ "merges")
        .extract[List[String]]
        .map(_.split(" "))
        .filter(w => w.length == 2)
        .map { case Array(c1, c2) => (c1, c2) }
        .zipWithIndex
        .toMap

      // extract added_tokens from tokenizer.json (added_tokens)
      // "added_tokens": [
      //    {
      //      "id": 128000,
      //      "content": "<|begin_of_text|>",
      //      "single_word": false,
      //      "lstrip": false,
      //      "rstrip": false,
      //      "normalized": false,
      //      "special": true
      //    }, ...
      //  ]
      val addedTokens = (tokenizerConfig \ "added_tokens")
        .extract[List[Map[String, Any]]]
        .map { token =>
          val id = token("id").asInstanceOf[BigInt].intValue()
          val content = token("content").asInstanceOf[String]
          (content, id)
        }
        .toMap

      // update vocab with added tokens
      addedTokens.foreach { case (content, id) =>
        vocabs += (content -> id)
      }
      (vocabs, addedTokens, bytePairs)
    } else {
      val vocabs = loadTextAsset(localModelPath, "vocab.txt").zipWithIndex.toMap
      val addedTokens = loadTextAsset(localModelPath, "added_tokens.txt").zipWithIndex.toMap
      val bytePairs = loadTextAsset(localModelPath, "merges.txt")
        .map(_.split(" "))
        .filter(w => w.length == 2)
        .map { case Array(c1, c2) => (c1, c2) }
        .zipWithIndex
        .toMap
      (vocabs, addedTokens, bytePairs)
    }

    val annotatorModel = new Phi3Vision()
      .setGenerationConfig(
        GenerationConfig(
          bosTokenId,
          padTokenId,
          eosTokenId,
          vocabSize,
          arrayOrNone(beginSuppressTokens),
          arrayOrNone(suppressTokenIds),
          arrayOrNone(forcedDecoderIds)))
      .setVocabulary(vocabs)
      .setMerges(bytePairs)
      .setAddedTokens(addedTokens)

    val modelEngine =
      if (useOpenvino)
        Openvino.name
      else
        detectedEngine
    annotatorModel.set(annotatorModel.engine, modelEngine)

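    // Read the three OpenVINO graphs from the unzipped export bundle and
    // attach them to the annotator.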
    detectedEngine match {
      case Openvino.name =>
        val reshapeWrappers =
          OpenvinoWrapper.read(
            spark,
            localModelPath,
            zipped = false,
            useBundle = true,
            detectedEngine = detectedEngine,
            modelName = "reshape_model")
        val wteWrappers =
          OpenvinoWrapper.read(
            spark,
            localModelPath,
            zipped = false,
            useBundle = true,
            detectedEngine = detectedEngine,
            modelName = "wte_model")
        val languageModelWrappers =
          OpenvinoWrapper.read(
            spark,
            localModelPath,
            zipped = false,
            useBundle = true,
            detectedEngine = detectedEngine,
            modelName = "language_model")
        val openvinoWrapper = Phi3VWrappers(
          wte = wteWrappers,
          languageModel = languageModelWrappers,
          reshape = reshapeWrappers)
        annotatorModel.setModelIfNotSet(spark, None, Some(openvinoWrapper))
      case _ =>
        throw new Exception(notSupportedEngineError)
    }

    annotatorModel
  }
}

object Phi3Vision extends ReadablePretrainedPhi3Vision with ReadPhi3VisionDLModel
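
For reference, a minimal usage sketch of the import path this PR adds: loading a locally exported OpenVINO model directory with loadSavedModel and saving the resulting annotator. The Spark session setup, export directory, and parameter values below are placeholder assumptions for illustration, not part of this change.

  import com.johnsnowlabs.nlp.annotators.cv.Phi3Vision
  import org.apache.spark.sql.SparkSession

  val spark: SparkSession = SparkSession.builder()
    .appName("phi3-vision-import")
    .master("local[*]")
    .getOrCreate()

  // Hypothetical export directory; loadSavedModel expects config.json, the
  // tokenizer assets, and the reshape/wte/language_model OpenVINO graphs here.
  val exportDir = "/tmp/phi3_vision_openvino"

  val visualQA = Phi3Vision
    .loadSavedModel(exportDir, spark, useOpenvino = true)
    .setInputCols("image_assembler")
    .setOutputCol("answer")
    .setMaxOutputLength(50)

  // Saving triggers onWrite, which writes the three sub-models separately.
  visualQA.write.overwrite().save("/tmp/phi3_vision_spark_nlp")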