askomics / askomics / 746

28 Jun 2019 - 14:02: coverage increased (+0.003%) to 80.28%

Pull Request #354: some fixes ("don't logout user after 10h"), built by travis-ci

18 of 24 new or added lines in 4 files covered. (75.0%)

1 existing line in 1 file now uncovered.

4653 of 5796 relevant lines covered (80.28%)

1.61 hits per line

Source File: /askomics/libaskomics/source_file/SourceFileTsv.py (65.57% covered)
#!/usr/bin/python3
# -*- coding: utf-8 -*-

"""
Classes to import data from a TSV source file
"""

import re
import csv
import uuid
import json

from collections import defaultdict
from pkg_resources import get_distribution

from askomics.libaskomics.source_file.SourceFile import SourceFile, SourceFileSyntaxError
from askomics.libaskomics.integration.AbstractedEntity import AbstractedEntity
from askomics.libaskomics.integration.AbstractedRelation import AbstractedRelation
from askomics.libaskomics.utils import cached_property, HaveCachedProperties, pformat_generic_object

class SourceFileTsv(SourceFile):
    """
    Class representing a TSV source file
    """

    def __init__(self, settings, session, path, preview_limit, uri_set=None):
        SourceFile.__init__(self, settings, session, path, uri_set=uri_set)
        self.type = 'tsv'

        self.preview_limit = preview_limit

        self.forced_column_types = ['entity']
        self.disabled_columns = []
        self.key_columns = []
        self.headers = self.get_headers_by_file

        self.category_values = defaultdict(set)

        self.type_dict = {
            'numeric'     : 'xsd:decimal',
            'text'        : 'xsd:string',
            'category'    : ':',
            'taxon'       : ':',
            'ref'         : ':',
            'strand'      : ':',
            'start'       : 'xsd:decimal',
            'end'         : 'xsd:decimal',
            'entity'      : ':',
            'entitySym'   : ':',
            'entity_start': ':',
            'goterm'      : '',
            'date'        : 'xsd:dateTime'
            }

        self.delims = {
            'numeric'     : ('', ''),
            'text'        : ('', '^^xsd:string'),
            'category'    : ('', ''),
            'taxon'       : ('', ''),
            'ref'         : ('', ''),
            'strand'      : ('', ''),
            'start'       : ('', ''),
            'end'         : ('', ''),
            'entity'      : ('', ''),
            'entitySym'   : ('', ''),
            'entity_start': ('', ''),
            'goterm'      : ('<http://purl.obolibrary.org/obo/GO_', '>'),
            'date'        : ('', '^^xsd:dateTime')
            }

    @cached_property
    def dialect(self):
        """
        Use csv.Sniffer to predict the CSV/TSV dialect
        """
        with open(self.path, 'r', encoding="utf-8", errors="ignore") as tabfile:
            # The sniffer needs to have enough data to guess,
            # and we restrict to a list of allowed delimiters to avoid strange results
            contents = tabfile.readline()
            dialect = csv.Sniffer().sniff(contents, delimiters=';,\t ')
            self.log.debug('CSV dialect in ' + str(self.path) + ': ' + str(pformat_generic_object(dialect)))
            return dialect

    @cached_property
    def get_headers_by_file(self):
        """
        Read and return the column headers.

        :return: a List of column headers
        :rtype: List
        """

        headers = []
        with open(self.path, 'r', encoding="utf-8", errors="ignore") as tabfile:
            # Load the file with reader
            tabreader = csv.reader(tabfile, dialect=self.dialect)

            # first line is the header
            headers = next(tabreader)
            headers = [h.strip() for h in headers]

        return headers

    def set_headers(self, headers):
        """Set the headers

        :param headers: the headers list
        :type headers: list
        """

        self.headers = headers

    def get_preview_data(self):
        """
        Read and return the values from the first lines of the file.

        :return: a List of List of column values
        :rtype: List
        """

        with open(self.path, 'r', encoding="utf-8", errors="ignore") as tabfile:
            # Load the file with reader
            tabreader = csv.reader(tabfile, dialect=self.dialect)

            count = 0

            header = next(tabreader)  # Skip header

            # Loop on lines
            data = [[] for x in range(len(header))]
            for row in tabreader:

                # Fill data lists
                for i, val in enumerate(row):
                    data[i].append(val)

                # Stop after x lines
                count += 1
                if count > self.preview_limit:
                    break

        return data

    def guess_values_type(self, values, header):
        """
        From a list of values, guess the data type

        :param values: a List of values to evaluate
        :param header: the column header
        :return: the guessed type ('taxon', 'ref', 'strand', 'start', 'end', 'numeric', 'text', 'category' or 'goterm')
        """

        # check if it is a relationship with another local entity
        if header.find("@") > 0:
            # general relation by default
            return "entity"

        types = {
            'ref': ('chrom', ),
            'taxon': ('taxon', 'species', 'organism'),
            'strand': ('strand', ),
            'start': ('start', 'begin'),
            'end': ('end', 'stop'),
            'date': ('date', 'time', 'datetime', 'birthday')
        }

        date_regex = re.compile(r'^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}')

        # First check if it is a specific type
        self.log.debug('header: ' + header)
        for typ, expressions in types.items():
            for expression in expressions:
                regexp = '.*' + expression + '.*'
                if re.match(regexp, header, re.IGNORECASE) is not None:
                    # Test if start and end values are numeric
                    if typ in ('start', 'end') and not all(self.is_decimal(val) for val in values):
                        self.log.debug('ERROR! ' + typ + ' is not decimal!')
                        break
                    # Test if strand is a category with only 2 elements max
                    if typ == 'strand' and len(set(values)) != 2:
                        break
                    # Test if date respects the datetime regexp
                    if typ == 'date' and not all(date_regex.match(val) for val in values):
                        break
                    return typ

        # check goterm
        if all((val.startswith("GO:") and val[3:].isdigit()) for val in values):
            return 'goterm'

        # Then, check if category
        threshold = 10
        if len(values) < 30:
            threshold = 5

        # if all(re.match(r'^\w+$', val) for val in values):  # check if no escape character
        if all(self.is_decimal(val) for val in values):  # Then numeric
            if all(val == '' for val in values):
                return 'text'
            return 'numeric'
        elif len(set(values)) < threshold:
            return 'category'

        # default is text
        return 'text'

    @staticmethod
    def is_decimal(value):
        """
        Determine if a given value is a decimal (integer or float) or not

        :param value: the value to evaluate
        :return: True if the value is decimal
        """

        if value == "":
            return True
        if value.isdigit():
            return True
        else:
            try:
                float(value)
                return True
            except ValueError:
                return False

    def set_forced_column_types(self, types):
        """
        Set manually curated types for the columns

        :param types: a List of column types ('entity', 'entity_start', 'numeric', 'text' or 'category')
        """

        self.forced_column_types = types

        if len(self.forced_column_types) != len(self.headers):
            raise ValueError("forced_column_types has a different size than headers! forced_column_types:" + str(self.forced_column_types) + " headers:" + str(self.headers))

        for typ in self.forced_column_types:
            if typ not in self.delims:
                raise ValueError("Bad init of forced_column_types, unknown type: " + typ)

    def set_disabled_columns(self, disabled_columns):
        """
        Set the columns that should not be imported

        :param disabled_columns: a List of column ids (0 based) that should not be imported
        """

        self.disabled_columns = disabled_columns

    def set_key_columns(self, key_columns):
        """
        Set the columns used to build a unique ID

        :param key_columns: a List of column ids (0 based) used to build the unique ID
        """

        self.key_columns = key_columns

    def key_id(self, row):
        """
        Get the key id by concatenating all the selected key columns
        """

        retval = None

        for key in self.key_columns:
            if retval is None:
                retval = row[int(key)]
            else:
                retval += "_" + row[int(key)]

        # By default the first element is the index
        if retval is None:
            retval = row[0]

        return retval

    @staticmethod
    def get_strand(strand):
        """
        Get the askomics strand value from the strand string
        """

        if strand is None:
            return "askomics:none"

        if strand.lower() == "plus" or strand.startswith("+"):
            return "askomics:plus"

        if strand.lower() == "minus" or strand.startswith("-"):
            return "askomics:minus"

        return "askomics:none"

    @staticmethod
    def get_strand_faldo(strand):
        """
        Get the faldo strand value from the strand string
        """

        if strand is None:
            return "faldo:BothStrandPosition"

        if strand.lower() == "plus" or strand.startswith("+"):
            return "faldo:ForwardStrandPosition"

        if strand.lower() == "minus" or strand.startswith("-"):
            return "faldo:ReverseStrandPosition"

        return "faldo:BothStrandPosition"

    def get_abstraction(self):
        """
        Get the abstraction representing the source file in ttl format

        :return: ttl content for the abstraction
        """

        # TODO use rdflib or other abstraction layer to create rdf

        if len(self.forced_column_types) <= 0:
            raise ValueError("forced_column_types is not defined!")

        ttl = ''
        ref_entity = self.headers[0]

        # Store the main entity
        ttl += AbstractedEntity(ref_entity, self.uri[0]).get_turtle()

        # Store all the relations
        for key, key_type in enumerate(self.forced_column_types):
            if key > 0 and key in self.disabled_columns:
                continue

            # *** CAREFULLY ***
            # We keep the definition of the position_ attributes to stay compatible with the IHM,
            # but we don't define a value for each entity.
            # Positions are defined inside a faldo:Location object.
            #
            # ==> The IHM detects the position_ attributes and transforms all queries with faldo:location/faldo:begin/faldo:reference
            #

            # encode the uri if the header does not contain ":" (otherwise it's an RDF uri)

            headkey = self.encode_to_rdf_uri(self.headers[key], prefix="askomics:")

            if key > 0 and not key_type.startswith('entity'):
                if key_type in ('taxon', 'ref', 'strand', 'start', 'end'):
                    uri = self.encode_to_rdf_uri('askomics:position_' + key_type)
                # elif key_type == 'taxon':
                #     uri = 'position_' + key_type
                else:
                    uri = self.encode_to_rdf_uri(self.headers[key], prefix="askomics:")
                ttl += uri + ' askomics:attribute "true"^^xsd:boolean .\n'
                # store the order of the attributes so they can be displayed in the right order
                ttl += uri + ' askomics:attributeOrder "' + str(key+1) + '"^^xsd:decimal .\n'
            elif key == 0:
                uri_pref = self.get_param("askomics.prefix")

                if key in self.uri:
                    uri_pref = self.uri[key]

                headkey = self.encode_to_rdf_uri(self.headers[key], prefix=self.uri[0])

                ttl += headkey + ' askomics:prefixUri "' + uri_pref + '"^^xsd:string .\n\n'

            if key > 0:
                uriPref = "askomics:"
                rangeRdfs = self.encode_to_rdf_uri(self.type_dict[key_type], prefix="askomics:")

                if key_type.startswith('entity'):
                    uriPref = self.uri[key]
                    idx = self.headers[key].find("@")

                    if idx > 0:
                        rangeRdfs = self.headers[key][idx+1:]
                    else:
                        rangeRdfs = self.headers[key]

                    rangeRdfs = self.encode_to_rdf_uri(rangeRdfs, prefix=self.uri[key])

                relation_uri = self.headers[key]
                labelIdx = ""

                if key_type in ('taxon', 'ref', 'strand', 'start', 'end'):
                    relation_uri = 'position_' + key_type
                    labelIdx = self.headers[key]

                    if key_type == 'taxon' or key_type == 'ref' or key_type == 'strand':
                        rangeRdfs = self.encode_to_rdf_uri(key_type + "Category", prefix="askomics:")

                elif key_type == 'category':
                    rangeRdfs = self.encode_to_rdf_uri(self.headers[key] + "Category", prefix="askomics:")

                ttl += AbstractedRelation(key_type, relation_uri, labelIdx, uriPref, ref_entity, self.uri[0], rangeRdfs, "askomics:").get_turtle()

        # Store the startpoint status
        if self.forced_column_types[0] == 'entity_start':
            ttl += self.encode_to_rdf_uri(ref_entity, prefix=self.uri[0]) + ' askomics:startPoint "true"^^xsd:boolean .\n'

        return ttl

    def get_domain_knowledge(self):
2×
405
        """
406
        Get the domain knowledge representing the source file in ttl format
407

408
        :return: ttl content for the domain knowledge
409
        """
410

411
        #TODO use rdflib or other abstraction layer to create rdf
412

413
        ttl = ''
2×
414

415
        if all(types in self.forced_column_types for types in ('start', 'end')): # a positionable entity have to have a start and a end
2×
416
            ttl += self.encode_to_rdf_uri(self.headers[0],prefix=self.uri[0]) + ' askomics:is_positionable "true"^^xsd:boolean .\n'
!
417
            ttl += "askomics:is_positionable rdfs:label 'is_positionable'^^xsd:string .\n"
!
418
            ttl += "askomics:is_positionable rdf:type owl:ObjectProperty .\n"
!
419

420
        for header, categories in self.category_values.items():
2×
421
            ic=self.headers.index(header)
2×
422
            prefUri = self.uri[ic]
2×
423
            if self.forced_column_types[ic] in ('category', 'taxon', 'ref', 'strand'):
2×
424
                prefUri = "askomics:"
2×
425

426
            indent = len(header) * " " + len("askomics:category") * " " + 3 * " "
2×
427

428
            if self.forced_column_types[ic] in ('taxon', 'ref', 'strand'):
2×
429
                ttl += self.encode_to_rdf_uri(self.forced_column_types[ic]+"Category",prefix="askomics:") + " askomics:category "
!
430
            else:
431
                ttl += self.encode_to_rdf_uri(header+"Category",prefix="askomics:") + " askomics:category "
2×
432

433
            ttl += (" , \n" + indent ).join(map(lambda category : self.encode_to_rdf_uri(category,prefix="askomics:"), categories)) + " .\n"
2×
434
            for item in categories:
2×
435
                if item.strip() != "":
2×
436
                    ttl += self.encode_to_rdf_uri(item,prefix="askomics:") + " rdf:type " + self.encode_to_rdf_uri(header+"CategoryValue",prefix="askomics:") + " ;\n" + len(item) * " " + "  rdfs:label " + self.escape['text'](item,'') + "^^xsd:string .\n"
2×
437

438
        return ttl
2×
439

440

441
    @cached_property
2×
442
    def category_values(self):
443
        """
444
        A (lazily cached) dictionary mapping from column name (header) to the set of unique values.
445
        """
446

447
        self.log.warning("category_values will be computed independently, get_turtle should be used to generate both at once (better performances)")
!
448
        category_values = defaultdict(set) # key=name of a column of 'category' type -> list of found values
!
449

450
        with open(self.path, 'r', encoding="utf-8", errors="ignore") as tabfile:
!
451
            # Load the file with reader
452
            tabreader = csv.reader(tabfile, dialect=self.dialect)
!
453
            next(tabreader) # Skip header
!
454

455
            # Loop on lines
456
            for row_number, row in enumerate(tabreader):
!
457
                #blanck line
458
                if not row:
!
459
                    continue
!
460
                if len(row) != len(self.headers):
!
461
                    exc = SourceFileSyntaxError('Invalid line found: ' + str(self.headers) +
!
462
                                                ' columns expected, found ' + str(len(row)) +
463
                                                " - (last valid entity " + entity_label + ")")
464
                    exc.filename = self.path
!
465
                    exc.lineno = row_number
!
466
                    self.log.error(repr(exc))
!
467
                    raise exc
!
468

469
                for i, (header, current_type) in enumerate(zip(self.headers, self.forced_column_types)):
!
470
                    if current_type in ('category', 'taxon', 'ref', 'strand'):
!
471
                        if len(row[i].strip()) != 0:
!
472
                            # This is a category, keep track of allowed values for this column
473
                            self.category_values.setdefault(header, set()).add(row[i])
!
474

475
        return category_values
!
476

    def get_turtle(self, preview_only=False):
        """
        Get the turtle string of a tsv file
        """

        # TODO use rdflib or other abstraction layer to create rdf

        self.category_values = defaultdict(set)  # key=name of a column of 'category' type -> set of found values

        with open(self.path, 'r', encoding="utf-8", errors="ignore") as tabfile:
            # Load the file with reader
            tabreader = csv.reader(tabfile, dialect=self.dialect)

            next(tabreader)  # Skip header

            # Loop on lines
            for row_number, row in enumerate(tabreader):
                ttl = ""
                ttl_sym = ""
                # if len(row) > 0:
                #     self.log.debug(row[0] + ' ' + str(row_number))
                # blank line
                if not row:
                    continue

                # Create the entity (first column)
                entity_label = row[0]

                if len(row) != len(self.headers):
                    self.log.warning("*" + ', '.join(row) + "*")
                    raise Exception('Invalid line found: ' + str(len(self.headers))
                                    + ' columns expected, found ' + str(len(row))
                                    + " - (last valid entity " + entity_label + ")")
                entity_id = self.key_id(row)

                indent = 2 * " "
                ttl += self.encode_to_rdf_uri(entity_id, prefix=self.uri[0]) + " rdf:type " + self.encode_to_rdf_uri(self.headers[0], prefix=self.uri[0]) + " ;\n"
                ttl += indent + " rdfs:label " + self.escape['text'](entity_label, "") + "^^xsd:string ;\n"
                start_faldo = None
                end_faldo = None
                reference_faldo = None
                strand_faldo = None

                # check positionable
                positionable = False
                if 'start' in self.forced_column_types and 'end' in self.forced_column_types:
                    # it's a positionable entity
                    positionable = True
                # Add data from the other columns
                for i, header in enumerate(self.headers):  # Skip the first column
                    if i > 0 and i not in self.disabled_columns:
                        current_type = self.forced_column_types[i]
                        cur_prefix_uri = self.uri[i]
                        relation_name = self.encode_to_rdf_uri(header, prefix="askomics:")

                        if current_type.startswith('entity'):
                            idx = header.find("@")
                            if idx > 0:
                                relation_name = self.encode_to_rdf_uri(header[0:idx], prefix="askomics:")

                        elif current_type in ('category', 'taxon', 'ref', 'strand'):
                            # This is a category, keep track of allowed values for this column
                            if len(row[i].strip()) != 0:
                                self.category_values[header].add(row[i].strip())
                            cur_prefix_uri = "askomics:"

                        # Create link to value
                        if row[i]:  # Empty values are just ignored

                            # check numeric type
                            # if current_type in ('numeric', 'start', 'end'):
                            #     if not row[i].isnumeric():
                            #         raise Exception("Type Error: Value \"" + row[i] +
                            #         "\" (Entity :" + entity_id + ", Line " + str(row_number) +
                            #         ") is not a numeric value.\n")

                            if positionable:
                                value = row[i]
                                # positionable attributes
                                if current_type == 'start':
                                    start_faldo = row[i]
                                    value = row[i]
                                    relation_name = 'askomics:position_start'
                                elif current_type == 'end':
                                    end_faldo = row[i]
                                    relation_name = 'askomics:position_end'
                                    value = row[i]
                                elif current_type == 'taxon':
                                    relation_name = 'askomics:position_taxon'
                                    value = self.encode_to_rdf_uri(row[i], prefix=":")
                                elif current_type == 'ref':
                                    relation_name = 'askomics:position_ref'
                                    reference_faldo = row[i]
                                    value = self.encode_to_rdf_uri(row[i], prefix=":")
                                elif current_type == 'strand':
                                    strand_faldo = row[i]
                                    relation_name = 'askomics:position_strand'
                                    value = self.encode_to_rdf_uri(self.get_strand(row[i]), prefix=":")

                                ttl += indent + " " + relation_name + " " + self.delims[current_type][0] + self.escape[current_type](value, cur_prefix_uri) + self.delims[current_type][1] + " ;\n"
                            else:
                                ttl += indent + " " + relation_name + " " + self.delims[current_type][0] + self.escape[current_type](row[i], cur_prefix_uri) + self.delims[current_type][1] + " ;\n"

                        if current_type == 'entitySym':
                            pref = self.delims[current_type][0]
                            suf = self.delims[current_type][1]
                            ttl_sym += pref +\
                                       self.escape[current_type](row[i], self.uri[i]) +\
                                       suf + " " + relation_name + " " +\
                                       self.encode_to_rdf_uri(entity_label, prefix=self.uri[i]) + " .\n"

                # Faldo position management
                if positionable:
                    blockbase = 10000
                    block_idxstart = int(start_faldo) // blockbase
                    block_idxend = int(end_faldo) // blockbase

                    ttl += indent + ' askomics:blockstart ' + str(block_idxstart * blockbase) + ';\n'
                    ttl += indent + ' askomics:blockend ' + str(block_idxend * blockbase) + ';\n'

                    for sliceb in range(block_idxstart, block_idxend + 1):
                        if reference_faldo:
                            uriFaldoReferenceSlice = self.encode_to_rdf_uri(":" + reference_faldo + "_" + str(sliceb))
                            uriFaldoReference = self.encode_to_rdf_uri(":" + reference_faldo)
                            ttl += indent + ' askomics:IsIncludeInRef ' + uriFaldoReferenceSlice + ' ;\n'

                        ttl += indent + ' askomics:IsIncludeIn ' + str(sliceb) + ' ;\n'

                    faldo_strand = self.get_strand_faldo(strand_faldo)

                    ttl += indent + " faldo:location [ a faldo:Region ;\n" +\
                           indent + "                  faldo:begin [ a faldo:ExactPosition;\n" +\
                           indent + "                                a " + faldo_strand + ";\n" +\
                           indent + "                                faldo:position " + str(start_faldo) + ";\n"
                    if reference_faldo:
                        ttl += indent + "                                faldo:reference " + uriFaldoReference + " ;\n"

                    ttl += indent + "                                    ];\n"

                    ttl += indent + "                  faldo:end [ a faldo:ExactPosition;\n" +\
                           indent + "                              a " + faldo_strand + ";\n" +\
                           indent + "                              faldo:position " + str(end_faldo) + ";\n"
                    if reference_faldo:
                        ttl += indent + "                              faldo:reference " + uriFaldoReference + ";\n"
                    ttl += indent + "                                  ]]   "

                ttl = ttl[:-2] + "."

                # manage symmetric relations
                if ttl_sym != "":
                    yield ttl_sym

                yield ttl
                # Stop after x lines
                if preview_only and row_number > self.preview_limit:
                    return
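
For context, a minimal sketch of a plausible call sequence for this class, based only on the methods listed above. The settings and session objects, the file path, and the load_into_triplestore helper are placeholders for illustration, not part of the AskOmics code shown in this file:

    # Illustrative sketch only: 'settings', 'session', the path and 'load_into_triplestore'
    # are placeholders, not values taken from this report.
    source = SourceFileTsv(settings, session, '/data/genes.tsv', preview_limit=30)

    headers = source.headers                      # column names read from the first line
    preview = source.get_preview_data()           # column-wise preview of the first rows

    # The first column is the entity itself; guess the remaining column types from the preview
    types = ['entity_start'] + [source.guess_values_type(col, head)
                                for head, col in zip(headers[1:], preview[1:])]
    source.set_forced_column_types(types)

    abstraction_ttl = source.get_abstraction()    # entity/relation abstraction triples

    for ttl_chunk in source.get_turtle():         # one turtle block per data row
        load_into_triplestore(ttl_chunk)          # hypothetical loader

    domain_ttl = source.get_domain_knowledge()    # category values collected during get_turtle()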