• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

evolvedbinary / elemental / 982

29 Apr 2025 08:34PM UTC coverage: 56.409% (+0.007%) from 56.402%
982

push

circleci

adamretter
[feature] Improve README.md badges

28451 of 55847 branches covered (50.94%)

Branch coverage included in aggregate %.

77468 of 131924 relevant lines covered (58.72%)

0.59 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

73.7
/exist-core/src/main/java/org/exist/storage/NativeBroker.java
1
/*
2
 * Elemental
3
 * Copyright (C) 2024, Evolved Binary Ltd
4
 *
5
 * admin@evolvedbinary.com
6
 * https://www.evolvedbinary.com | https://www.elemental.xyz
7
 *
8
 * Use of this software is governed by the Business Source License 1.1
9
 * included in the LICENSE file and at www.mariadb.com/bsl11.
10
 *
11
 * Change Date: 2028-04-27
12
 *
13
 * On the date above, in accordance with the Business Source License, use
14
 * of this software will be governed by the Apache License, Version 2.0.
15
 *
16
 * Additional Use Grant: Production use of the Licensed Work for a permitted
17
 * purpose. A Permitted Purpose is any purpose other than a Competing Use.
18
 * A Competing Use means making the Software available to others in a commercial
19
 * product or service that: substitutes for the Software; substitutes for any
20
 * other product or service we offer using the Software that exists as of the
21
 * date we make the Software available; or offers the same or substantially
22
 * similar functionality as the Software.
23
 *
24
 * NOTE: Parts of this file contain code from 'The eXist-db Authors'.
25
 *       The original license header is included below.
26
 *
27
 * =====================================================================
28
 *
29
 * eXist-db Open Source Native XML Database
30
 * Copyright (C) 2001 The eXist-db Authors
31
 *
32
 * info@exist-db.org
33
 * http://www.exist-db.org
34
 *
35
 * This library is free software; you can redistribute it and/or
36
 * modify it under the terms of the GNU Lesser General Public
37
 * License as published by the Free Software Foundation; either
38
 * version 2.1 of the License, or (at your option) any later version.
39
 *
40
 * This library is distributed in the hope that it will be useful,
41
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
42
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
43
 * Lesser General Public License for more details.
44
 *
45
 * You should have received a copy of the GNU Lesser General Public
46
 * License along with this library; if not, write to the Free Software
47
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
48
 */
49
package org.exist.storage;
50

51
import com.evolvedbinary.j8fu.function.FunctionE;
52
import it.unimi.dsi.fastutil.objects.ObjectLinkedOpenHashSet;
53
import org.apache.logging.log4j.LogManager;
54
import org.apache.logging.log4j.Logger;
55
import org.exist.Database;
56
import org.exist.collections.*;
57
import org.exist.collections.Collection;
58
import org.exist.dom.memtree.DOMIndexer;
59
import org.exist.dom.persistent.*;
60
import org.exist.dom.QName;
61
import org.exist.EXistException;
62
import org.exist.Indexer;
63
import org.exist.backup.RawDataBackup;
64
import org.exist.collections.Collection.SubCollectionEntry;
65
import org.exist.collections.triggers.*;
66
import org.exist.indexing.Index;
67
import org.exist.indexing.IndexController;
68
import org.exist.indexing.StreamListener;
69
import org.exist.indexing.StreamListener.ReindexMode;
70
import org.exist.indexing.StructuralIndex;
71
import org.exist.numbering.NodeId;
72
import org.exist.security.*;
73
import org.exist.security.internal.aider.ACEAider;
74
import org.exist.stax.EmbeddedXMLStreamReader;
75
import org.exist.stax.IEmbeddedXMLStreamReader;
76
import org.exist.storage.blob.BlobId;
77
import org.exist.storage.blob.BlobStore;
78
import org.exist.storage.btree.*;
79
import org.exist.storage.dom.DOMFile;
80
import org.exist.storage.dom.DOMTransaction;
81
import org.exist.storage.dom.NodeIterator;
82
import org.exist.storage.dom.RawNodeIterator;
83
import org.exist.storage.index.BFile;
84
import org.exist.storage.index.CollectionStore;
85
import org.exist.storage.io.VariableByteInput;
86
import org.exist.storage.io.VariableByteOutputStream;
87
import org.exist.storage.lock.*;
88
import org.exist.storage.lock.Lock.LockMode;
89
import org.exist.storage.lock.Lock.LockType;
90
import org.exist.storage.serializers.NativeSerializer;
91
import org.exist.storage.serializers.Serializer;
92
import org.exist.storage.serializers.XmlSerializerPool;
93
import org.exist.storage.sync.Sync;
94
import org.exist.storage.txn.TransactionException;
95
import org.exist.storage.txn.TransactionManager;
96
import org.exist.storage.txn.Txn;
97
import org.exist.util.*;
98
import org.exist.util.crypto.digest.DigestType;
99
import org.exist.util.crypto.digest.MessageDigest;
100
import org.apache.commons.io.input.UnsynchronizedByteArrayInputStream;
101
import org.apache.commons.io.output.UnsynchronizedByteArrayOutputStream;
102
import org.exist.util.io.InputStreamUtil;
103
import org.exist.xmldb.XmldbURI;
104
import org.exist.xquery.TerminatedException;
105
import org.exist.xquery.value.Type;
106
import org.w3c.dom.Document;
107
import org.w3c.dom.DocumentType;
108
import org.w3c.dom.Node;
109
import org.w3c.dom.NodeList;
110

111
import javax.annotation.Nullable;
112
import javax.xml.stream.XMLStreamException;
113
import java.io.*;
114
import java.net.URI;
115
import java.nio.file.Files;
116
import java.nio.file.Path;
117
import java.nio.file.Paths;
118
import java.text.NumberFormat;
119
import java.util.*;
120
import java.util.function.Function;
121
import java.util.concurrent.locks.ReentrantLock;
122
import java.util.regex.Matcher;
123
import java.util.regex.Pattern;
124

125
import org.exist.storage.dom.INodeIterator;
126
import com.evolvedbinary.j8fu.tuple.Tuple2;
127
import org.xml.sax.InputSource;
128
import org.xml.sax.SAXException;
129
import org.xml.sax.XMLReader;
130

131
import static java.nio.charset.StandardCharsets.UTF_8;
132
import static org.exist.security.Permission.DEFAULT_TEMPORARY_COLLECTION_PERM;
133
import static org.exist.util.io.InputStreamUtil.copy;
134

135
/**
136
 * Main class for the native XML storage backend.
137
 * By "native" it is meant file-based, embedded backend.
138
 *
139
 * Provides access to all low-level operations required by
140
 * the database. Extends {@link DBBroker}.
141
 *
142
 * This class dispatches the various events (defined by the methods
143
 * of {@link org.exist.storage.ContentLoadingObserver}) to indexing classes.
144
 *
145
 * @author Wolfgang Meier
146
 */
147
public class NativeBroker implements DBBroker {
148

149
    private static final Logger LOG = LogManager.getLogger(DBBroker.class);
1✔
150

151
    public static final String EXIST_STATISTICS_LOGGER = "org.exist.statistics";
152
    private static final Logger LOG_STATS = LogManager.getLogger(EXIST_STATISTICS_LOGGER);
1✔
153

154
    public static final byte COLLECTIONS_DBX_ID = 0;
155
    public static final byte VALUES_DBX_ID = 2;
156
    public static final byte DOM_DBX_ID = 3;
157
    //Note : no ID for symbols ? Too bad...
158

159
    public static final String PAGE_SIZE_ATTRIBUTE = "pageSize";
160
    public static final String INDEX_DEPTH_ATTRIBUTE = "index-depth";
161

162
    public static final String PROPERTY_INDEX_DEPTH = "indexer.index-depth";
163
    private static final byte[] ALL_STORAGE_FILES = {
1✔
164
        COLLECTIONS_DBX_ID, VALUES_DBX_ID, DOM_DBX_ID
1✔
165
    };
166

167
    private static final String EXCEPTION_DURING_REINDEX = "exception during reindex";
168
    private static final String DATABASE_IS_READ_ONLY = "Database is read-only";
169

170
    public static final String DEFAULT_DATA_DIR = "data";
171
    public static final int DEFAULT_INDEX_DEPTH = 1;
172

173
    /** check available memory after storing DEFAULT_NODES_BEFORE_MEMORY_CHECK nodes */
174
    public static final int DEFAULT_NODES_BEFORE_MEMORY_CHECK = 500;
175

176
    public static final int FIRST_COLLECTION_ID = 1;
177

178
    public static final int OFFSET_COLLECTION_ID = 0;
179

180
    public final static String INIT_COLLECTION_CONFIG = CollectionConfiguration.DEFAULT_COLLECTION_CONFIG_FILE + ".init";
181

182
    /** in-memory buffer size to use when copying binary resources */
183
    private final static int BINARY_RESOURCE_BUF_SIZE = 65536;
184

185
    private final static DigestType BINARY_RESOURCE_DIGEST_TYPE = DigestType.BLAKE_256;
1✔
186

187
    private Configuration config;
188

189
    private BrokerPool pool;
190

191
    private Deque<Subject> subject = new ArrayDeque<>();
1✔
192

193
    /**
194
     * Used when TRACE level logging is enabled
195
     * to provide a history of {@link Subject} state
196
     * changes
197
     *
198
     * This can be written to a log file by calling
199
     * {@link DBBroker#traceSubjectChanges()}
200
     */
201
    private TraceableStateChanges<Subject, TraceableSubjectChange.Change> subjectChangeTrace = LOG.isTraceEnabled() ? new TraceableStateChanges<>() : null;
1!
202

203
    private int referenceCount = 0;
1✔
204

205
    private String id;
206

207
    private final TimestampedReference<IndexController> indexController = new TimestampedReference<>();
1✔
208

209
    private final PreserveType preserveOnCopy;
210

211
    private boolean triggersEnabled = true;
1✔
212

213
    /** the database files */
214
    private final CollectionStore collectionsDb;
215
    private final DOMFile domDb;
216

217
    /** the index processors */
218
    private NativeValueIndex valueIndex;
219

220
    private final IndexSpec indexConfiguration;
221

222
    private int defaultIndexDepth;
223

224
    private final XmlSerializerPool xmlSerializerPool;
225

226
    /** used to count the nodes inserted after the last memory check */
227
    private int nodesCount = 0;
1✔
228

229
    private int nodesCountThreshold = DEFAULT_NODES_BEFORE_MEMORY_CHECK;
1✔
230

231
    private final Path dataDir;
232

233
    private final Runtime run = Runtime.getRuntime();
1✔
234

235
    private final NodeProcessor nodeProcessor = new NodeProcessor();
1✔
236

237
    private IEmbeddedXMLStreamReader streamReader;
238

239
    private final LockManager lockManager;
240

241
    /**
242
     * Observer Design Pattern: List of ContentLoadingObserver objects
243
     */
244
    private List<ContentLoadingObserver> contentLoadingObservers = new ArrayList<>();
1✔
245

246
    private ObjectLinkedOpenHashSet<Txn> currentTransactions = new ObjectLinkedOpenHashSet(4);  // 4 - we don't expect many concurrent transactions per-broker!
1✔
247

248
    /**
     * Creates a broker attached to the given pool.
     *
     * Opens (or adopts, when pre-configured via {@link Configuration}) the DOM
     * storage file, the collections store and the native value index, switching
     * the whole pool to read-only mode if any mandatory file is read-only.
     * Runs under the SYSTEM subject for the duration of initialisation.
     *
     * @param pool   the owning {@link BrokerPool}
     * @param config the database configuration to read settings from
     * @throws EXistException if one of the underlying storage files cannot be opened
     */
    public NativeBroker(final BrokerPool pool, final Configuration config) throws EXistException {
        this.config = config;
        this.pool = pool;
        this.preserveOnCopy = config.getProperty(PRESERVE_ON_COPY_PROPERTY, PreserveType.NO_PRESERVE);

        this.lockManager = pool.getLockManager();
        LOG.debug("Initializing broker {}", hashCode());

        this.dataDir = config.getProperty(BrokerPool.PROPERTY_DATA_DIR, Paths.get(DEFAULT_DATA_DIR));

        // configured value is in thousands of nodes; <= 0 keeps the compiled-in default
        nodesCountThreshold = config.getInteger(BrokerPool.PROPERTY_NODES_BUFFER);
        if(nodesCountThreshold > 0) {
            nodesCountThreshold = nodesCountThreshold * 1000;
        }

        defaultIndexDepth = config.getInteger(PROPERTY_INDEX_DEPTH);
        if(defaultIndexDepth < 0) {
            defaultIndexDepth = DEFAULT_INDEX_DEPTH;
        }

        this.indexConfiguration = (IndexSpec) config.getProperty(Indexer.PROPERTY_INDEXER_CONFIG);
        this.xmlSerializerPool = new XmlSerializerPool(this, config, 5);

        // initialisation must run with SYSTEM privileges; restored in the finally block
        pushSubject(pool.getSecurityManager().getSystemSubject());
        try {
            //TODO : refactor so that we can,
            //1) customize the different properties (file names, cache settings...)
            //2) have a consistent READ-ONLY behaviour (based on *mandatory* files ?)
            //3) have consistent file creation behaviour (we can probably avoid some unnecessary files)
            //4) use... *customized* factories for a better index extensibility ;-)
            // Initialize DOM storage
            final DOMFile configuredDomFile = (DOMFile) config.getProperty(DOMFile.getConfigKeyForFile());
            if(configuredDomFile != null) {
                this.domDb = configuredDomFile;
            } else {
                this.domDb = new DOMFile(pool, DOM_DBX_ID, dataDir, config);
            }
            if(domDb.isReadOnly()) {
                LOG.warn("{} is read-only!", FileUtils.fileName(domDb.getFile()));
                pool.setReadOnly();
            }

            //Initialize collections storage
            final CollectionStore configuredCollectionsDb = (CollectionStore) config.getProperty(CollectionStore.getConfigKeyForFile());
            if(configuredCollectionsDb != null) {
                this.collectionsDb = configuredCollectionsDb;
            } else {
                this.collectionsDb = new CollectionStore(pool, COLLECTIONS_DBX_ID, dataDir, config);
            }
            if(collectionsDb.isReadOnly()) {
                LOG.warn("{} is read-only!", FileUtils.fileName(collectionsDb.getFile()));
                pool.setReadOnly();
            }

            this.valueIndex = new NativeValueIndex(this, VALUES_DBX_ID, dataDir, config);
            if(isReadOnly()) {
                LOG.warn(DATABASE_IS_READ_ONLY);
            }
        } catch(final DBException e) {
            LOG.debug(e.getMessage(), e);
            throw new EXistException(e);
        } finally {
            popSubject();
        }
    }
314

315
    /**
     * @return the id assigned via {@link #setId(String)}, or {@code null} if none was set.
     */
    @Override
    public String getId() {
        return id;
    }
319

320
    @Override
321
    public void setId(final String id) {
322
                this.id = id;
1✔
323
        }
1✔
324

325
    /**
     * @return the broker id (see {@link #getId()}); may be {@code null} before an id is set.
     */
    @Override
    public String toString() {
        return getId();
    }
329

330
    @Override
331
    public void prepare() {
332
        /**
333
         * Index modules should always be re-loaded in case
334
         * {@link org.exist.indexing.IndexManager#registerIndex(Index)} or
335
         * {@link org.exist.indexing.IndexManager#unregisterIndex(Index)}
336
         * has been called since the previous lease of this broker.
337
         */
338
        loadIndexModules();
1✔
339
    }
1✔
340

341
    /**
342
     * Loads the index modules via an IndexController
343
     */
344
    private void loadIndexModules() {
345
        indexController.setIfExpiredOrNull(getBrokerPool().getIndexManager().getConfigurationTimestamp(), () -> new IndexController(this));
1✔
346
    }
1✔
347

348
    @Override
349
    public void pushSubject(final Subject subject) {
350
        if(LOG.isTraceEnabled()) {
1!
351
            subjectChangeTrace.add(TraceableSubjectChange.push(subject, getId()));
×
352
        }
353
        if (subject == null) {
1!
354
            //TODO (AP) this is a workaround - what is the root cause ?
355
            LOG.warn("Attempt to push null subject ignored.");
×
356
        } else {
×
357
            this.subject.addFirst(subject);
1✔
358
        }
359
    }
1✔
360

361
    @Override
362
    public Subject popSubject() {
363
        final Subject subject = this.subject.removeFirst();
1✔
364
        if(LOG.isTraceEnabled()) {
1!
365
            subjectChangeTrace.add(TraceableSubjectChange.pop(subject, getId()));
×
366
        }
367
        return subject;
1✔
368
    }
369

370
    /**
     * @return the subject on top of the stack, or {@code null} if the stack is empty.
     */
    @Override
    public Subject getCurrentSubject() {
        return subject.peekFirst();
    }
374

375
    @Override
376
    public void traceSubjectChanges() {
377
        subjectChangeTrace.logTrace(LOG);
×
378
    }
×
379

380
    @Override
381
    public void clearSubjectChangesTrace() {
382
        if(!LOG.isTraceEnabled()) {
×
383
            throw new IllegalStateException("This is only enabled at TRACE level logging");
×
384
        }
385

386
        subjectChangeTrace.clear();
×
387
    }
×
388

389
    /**
     * @return the index controller for this broker (refreshed by {@link #prepare()}).
     */
    @Override
    public IndexController getIndexController() {
        return indexController.get();
    }
393

394
    /**
     * Always returns {@code null}: this broker has no separate element index
     * (structural queries go through {@link #getStructuralIndex()}).
     */
    @Override
    public ElementIndex getElementIndex() {
        return null;
    }
398

399
    /**
     * Removes all registered {@link ContentLoadingObserver}s.
     */
    @Override
    public void clearContentLoadingObservers() {
        contentLoadingObservers.clear();
    }
403

404
    @Override
405
    public void addContentLoadingObserver(final ContentLoadingObserver observer) {
406
        if (!contentLoadingObservers.contains(observer)) {
1!
407
            contentLoadingObservers.add(observer);
1✔
408
        }
409
    }
1✔
410

411
    @Override
412
    public void removeContentLoadingObserver(final ContentLoadingObserver observer) {
413
        if (contentLoadingObservers.contains(observer)) {
×
414
            contentLoadingObservers.remove(observer);
×
415
        }
416
    }
×
417

418
    /**
     * @return the database configuration this broker was created with.
     */
    @Override
    public Configuration getConfiguration() {
        return config;
    }
422

423
    // ============ dispatch the various events to indexing classes ==========
424

425
    private void notifyRemoveNode(final NodeHandle node, final NodePath currentPath, final String content) {
426
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
427
            observer.removeNode(node, currentPath, content);
1✔
428
        }
429
    }
1✔
430

431
    //private void notifyStoreAttribute(AttrImpl attr, NodePath currentPath, int indexingHint, RangeIndexSpec spec, boolean remove) {
432
    //    for (int i = 0; i < contentLoadingObservers.size(); i++) {
433
    //        ContentLoadingObserver observer = (ContentLoadingObserver) contentLoadingObservers.get(i);
434
    //        observer.storeAttribute(attr, currentPath, indexingHint, spec, remove);
435
    //    }        
436
    //}        
437

438
    private void notifyStoreText(final TextImpl text, final NodePath currentPath) {
439
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
440
            observer.storeText(text, currentPath);
1✔
441
        }
442
    }
1✔
443

444
    private void notifyDropIndex(final Collection collection) {
445
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
446
            observer.dropIndex(collection);
1✔
447
        }
448
    }
1✔
449

450
    private void notifyDropIndex(final DocumentImpl doc) {
451
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
452
            observer.dropIndex(doc);
1✔
453
        }
454
    }
1✔
455

456
    private void notifyRemove() {
457
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
458
            observer.remove();
1✔
459
        }
460
    }
1✔
461

462
    private void notifySync() {
463
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
464
            observer.sync();
1✔
465
        }
466
    }
1✔
467

468
    private void notifyFlush() {
469
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
470
            try {
471
                observer.flush();
1✔
472
            } catch(final DBException e) {
1✔
473
                LOG.error(e);
×
474
                //Ignore the exception ; try to continue on other files
475
            }
476
        }
477
    }
1✔
478

479
    private void notifyPrintStatistics() {
480
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
481
            observer.printStatistics();
1✔
482
        }
483
    }
1✔
484

485
    private void notifyClose() throws DBException {
486
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
487
            observer.close();
1✔
488
        }
489
        clearContentLoadingObservers();
1✔
490
    }
1✔
491

492
    private void notifyCloseAndRemove() throws DBException {
493
        for(final ContentLoadingObserver observer : contentLoadingObservers) {
1✔
494
            observer.closeAndRemove();
1✔
495
        }
496
        clearContentLoadingObservers();
1✔
497
    }
1✔
498

499
    /**
     * Convenience overload of {@link #endElement(IStoredNode, NodePath, String, boolean)}
     * with {@code remove} set to {@code false}.
     */
    @Override
    public <T extends IStoredNode> void endElement(final IStoredNode<T> node, final NodePath currentPath, final String content) {
        endElement(node, currentPath, content, false);
    }
503

504
    /**
505
     * Update indexes for the given element node. This method is called when the indexer
506
     * encounters a closing element tag. It updates any range indexes defined on the
507
     * element value and adds the element id to the structural index.
508
     *
509
     * @param node        the current element node
510
     * @param currentPath node path leading to the element
511
     * @param content     contains the string value of the element. Needed if a range index
512
     *                    is defined on it.
513
     */
514
    @Override
515
    public <T extends IStoredNode> void endElement(final IStoredNode<T> node, final NodePath currentPath, String content, final boolean remove) {
516
        final int indexType = ((ElementImpl) node).getIndexType();
1✔
517
        //TODO : do not care about the current code redundancy : this will move in the (near) future
518
        // TODO : move to NativeValueIndex
519
        if(RangeIndexSpec.hasRangeIndex(indexType)) {
1✔
520
            node.setQName(new QName(node.getQName(), ElementValue.ELEMENT));
1✔
521
            if(content == null) {
1!
522
                //NodeProxy p = new NodeProxy(node);
523
                //if (node.getOldInternalAddress() != StoredNode.UNKNOWN_NODE_IMPL_ADDRESS)
524
                //    p.setInternalAddress(node.getOldInternalAddress());
525
                content = getNodeValue(node, false);
1✔
526
                //Curious... I assume getNodeValue() needs the old address
527
                //p.setInternalAddress(node.getInternalAddress());
528
            }
529
            valueIndex.setDocument(node.getOwnerDocument());
1✔
530
            valueIndex.storeElement((ElementImpl) node, content, RangeIndexSpec.indexTypeToXPath(indexType),
1✔
531
                NativeValueIndex.IndexType.GENERIC, remove);
1✔
532
        }
533

534
        // TODO : move to NativeValueIndexByQName 
535
        if(RangeIndexSpec.hasQNameIndex(indexType)) {
1✔
536
            node.setQName(new QName(node.getQName(), ElementValue.ELEMENT));
1✔
537
            if(content == null) {
1✔
538
                //NodeProxy p = new NodeProxy(node);
539
                //if (node.getOldInternalAddress() != StoredNode.UNKNOWN_NODE_IMPL_ADDRESS)
540
                //    p.setInternalAddress(node.getOldInternalAddress());
541
                content = getNodeValue(node, false);
1✔
542
                //Curious... I assume getNodeValue() needs the old address
543
                //p.setInternalAddress(node.getInternalAddress());
544
            }
545
            valueIndex.setDocument(node.getOwnerDocument());
1✔
546
            valueIndex.storeElement((ElementImpl) node, content, RangeIndexSpec.indexTypeToXPath(indexType),
1✔
547
                NativeValueIndex.IndexType.QNAME, remove);
1✔
548
            //qnameValueIndex.setDocument((DocumentImpl) node.getOwnerDocument());
549
            //qnameValueIndex.endElement((ElementImpl) node, currentPath, content);
550
        }
551
    }
1✔
552

553
    /*
554
      private String getOldNodeContent(StoredNode node, long oldAddress) {
555
          NodeProxy p = new NodeProxy(node);
556
          if (oldAddress != StoredNode.UNKNOWN_NODE_IMPL_ADDRESS)
557
              p.setInternalAddress(oldAddress);
558
          String content = getNodeValue(node, false);
559
          //Curious... I assume getNodeValue() needs the old address
560
          p.setInternalAddress(node.getInternalAddress());
561
          return content;
562
      }
563
      */
564

565
    /**
     * Takes care of actually removing entries from the indices;
     * must be called after one or more call to {@link #removeNode(Txn, IStoredNode, NodePath, String)}.
     *
     * @param transaction the current transaction (unused here; removal is dispatched to the observers)
     */
    @Override
    public void endRemove(final Txn transaction) {
        notifyRemove();
    }
573

574
    /**
     * @return {@code true} when the owning pool is in read-only mode.
     */
    @Override
    public boolean isReadOnly() {
        return pool.isReadOnly();
    }
578

579
    /**
     * @return the DOM storage file backing this broker.
     */
    public DOMFile getDOMFile() {
        return domDb;
    }
582

583
    public BTree getStorage(final byte id) {
584
        //Notice that there is no entry for the symbols table
585
        return switch (id) {
1!
586
            case DOM_DBX_ID -> domDb;
1✔
587
            case COLLECTIONS_DBX_ID -> collectionsDb;
1✔
588
            case VALUES_DBX_ID -> valueIndex.dbValues;
1✔
589
            default -> null;
×
590
        };
591
    }
592

593
    public byte[] getStorageFileIds() {
594
        return ALL_STORAGE_FILES;
×
595
    }
596

597
    /**
     * @return the configured default index depth (see {@link #PROPERTY_INDEX_DEPTH}).
     */
    public int getDefaultIndexDepth() {
        return defaultIndexDepth;
    }
600

601
    @Override
602
    public void backupToArchive(final RawDataBackup backup) throws IOException, EXistException {
603
        for(final byte i : ALL_STORAGE_FILES) {
1✔
604
            final Paged paged = getStorage(i);
1✔
605
            if(paged == null) {
1!
606
                LOG.warn("Storage file is null: {}", i);
×
607
                continue;
×
608
            }
609

610
            // do not use try-with-resources here, closing the OutputStream will close the entire backup
611
//            try(final OutputStream os = backup.newEntry(FileUtils.fileName(paged.getFile()))) {
612
            try {
613
                final OutputStream os = backup.newEntry(FileUtils.fileName(paged.getFile()));
1✔
614
                paged.backupToStream(os);
1✔
615
            } finally {
1✔
616
                backup.closeEntry();
1✔
617
            }
618
        }
619
        pool.getSymbols().backupToArchive(backup);
1✔
620
        pool.getBlobStore().backupToArchive(backup);
1✔
621
        pool.getIndexManager().backupToArchive(backup);
1✔
622
        //TODO backup counters
623
        //TODO USE zip64 or tar to create snapshots larger then 4Gb
624
    }
1✔
625

626
    /**
     * @return the indexer configuration read from the database configuration.
     */
    @Override
    public IndexSpec getIndexConfiguration() {
        return indexConfiguration;
    }
630

631
    /**
     * @return the structural index worker obtained from the index controller.
     */
    @Override
    public StructuralIndex getStructuralIndex() {
        return (StructuralIndex) getIndexController().getWorkerByIndexName(StructuralIndex.STRUCTURAL_INDEX_ID);
    }
635

636
    /**
     * @return the native (range) value index created by this broker.
     */
    @Override
    public NativeValueIndex getValueIndex() {
        return valueIndex;
    }
640

641
    @Override
642
    public IEmbeddedXMLStreamReader getXMLStreamReader(final NodeHandle node, final boolean reportAttributes)
643
            throws IOException, XMLStreamException {
644
        if(streamReader == null) {
1✔
645
            final RawNodeIterator iterator = new RawNodeIterator(this, domDb, node);
1✔
646
            streamReader = new EmbeddedXMLStreamReader(this, node.getOwnerDocument(), iterator, node, reportAttributes);
1✔
647
        } else {
1✔
648
            streamReader.reposition(this, node, reportAttributes);
1✔
649
        }
650
        return streamReader;
1✔
651
    }
652

653
    @Override
654
    public IEmbeddedXMLStreamReader newXMLStreamReader(final NodeHandle node, final boolean reportAttributes)
655
            throws IOException, XMLStreamException {
656
        final RawNodeIterator iterator = new RawNodeIterator(this, domDb, node);
×
657
        return new EmbeddedXMLStreamReader(this, node.getOwnerDocument(), iterator, null, reportAttributes);
×
658
    }
659

660
    @Override
661
    public INodeIterator getNodeIterator(final NodeHandle node) {
662
        if(node == null) {
1!
663
            throw new IllegalArgumentException("The node parameter cannot be null.");
×
664
        }
665
        try {
666
            return new NodeIterator(this, domDb, node, false);
1✔
667
        } catch(final BTreeException | IOException e) {
×
668
            LOG.error("failed to create node iterator", e);
×
669
        }
670
        return null;
×
671
    }
672

673
    /**
     * Borrows a serializer from the pool; return it via {@link #returnSerializer(Serializer)}.
     */
    @Override
    public Serializer borrowSerializer() {
        return xmlSerializerPool.borrowObject();
    }
677

678
    /**
     * Returns a serializer previously obtained from {@link #borrowSerializer()} to the pool.
     *
     * @param serializer the serializer to return
     */
    @Override
    public void returnSerializer(final Serializer serializer) {
        xmlSerializerPool.returnObject(serializer);
    }
682

683
    /**
     * @return a new serializer; equivalent to {@link #newSerializer()}.
     * @deprecated use {@link #borrowSerializer()} / {@link #returnSerializer(Serializer)} instead
     */
    @Override
    @Deprecated
    public Serializer getSerializer() {
        return newSerializer();
    }
688

689
    /**
     * @return a freshly constructed, unpooled serializer for this broker.
     * @deprecated use {@link #borrowSerializer()} / {@link #returnSerializer(Serializer)} instead
     */
    @Override
    @Deprecated
    public Serializer newSerializer() {
        return new NativeSerializer(this, getConfiguration());
    }
694

695
    /**
     * @param chainOfReceivers class names of receivers to chain into the serializer
     * @return a freshly constructed, unpooled serializer with the given receiver chain.
     * @deprecated use {@link #borrowSerializer()} / {@link #returnSerializer(Serializer)} instead
     */
    @Override
    @Deprecated
    public Serializer newSerializer(final List<String> chainOfReceivers) {
        return new NativeSerializer(this, getConfiguration(), chainOfReceivers);
    }
700

701
    /**
702
     * Prepends '/db' to the URI if it is missing
703
     *
704
     * @param uri the URI
705
     *
706
     * @return the database URI
707
     *
708
     * @deprecated This is used inconsistently in NativeBroker, instead the caller should if necessary enforce the correct URI
709
     */
710
    @Deprecated
711
    private XmldbURI prepend(final XmldbURI uri) {
712
        if (uri.startsWith(XmldbURI.ROOT_COLLECTION_URI)) {
1✔
713
            return uri;
1✔
714
        }
715
        return XmldbURI.ROOT_COLLECTION_URI.append(uri);
1✔
716
    }
717

718
    /**
     * Creates (or fetches) the temporary collection, running as SYSTEM.
     *
     * @param transaction The transaction, which registers the acquired write locks.
     *                    The locks should be released on commit/abort.
     * @return a tuple whose first item indicates whether the collection was just
     *         created (permissions are only initialised in that case), and whose
     *         second item is the temporary collection itself (NOT locked)
     * @throws LockException if locking the collection fails
     * @throws PermissionDeniedException if the subject lacks the required permissions
     * @throws IOException on storage errors
     * @throws TriggerException if a collection trigger fails
     */
    private @EnsureUnlocked Tuple2<Boolean, Collection> getOrCreateTempCollection(final Txn transaction)
        throws LockException, PermissionDeniedException, IOException, TriggerException {
        try {
            // temp collection maintenance requires SYSTEM privileges; restored below
            pushSubject(pool.getSecurityManager().getSystemSubject());
            final Tuple2<Boolean, Collection> temp = getOrCreateCollectionExplicit(transaction, XmldbURI.TEMP_COLLECTION_URI, Optional.empty(), true);
            if (temp._1) {
                // newly created: apply the default temp-collection permissions and persist
                temp._2.setPermissions(this, DEFAULT_TEMPORARY_COLLECTION_PERM);
                saveCollection(transaction, temp._2);
            }
            return temp;
        } finally {
            popSubject();
        }
    }
743

744
    private @Nullable String readInitCollectionConfig() {
745
        try {
746

747
            // 1) try and load from etc/ dir
748
            final Path fInitCollectionConfig = pool.getConfiguration().getExistHome()
1✔
749
                    .map(h -> h.resolve("etc").resolve(INIT_COLLECTION_CONFIG))
1✔
750
                    .orElse(Paths.get("etc").resolve(INIT_COLLECTION_CONFIG));
1✔
751
            if (Files.exists(fInitCollectionConfig)) {
1!
752
                return new String(Files.readAllBytes(fInitCollectionConfig), UTF_8);
×
753
            }
754

755
            // 2) fallback to attempting to load from classpath
756
            try (final InputStream is = pool.getClassLoader().getResourceAsStream(INIT_COLLECTION_CONFIG)) {
1✔
757
                if (is == null) {
1!
758
                    return null;
×
759
                }
760

761
                return InputStreamUtil.readString(is, UTF_8);
1✔
762
            }
763
        } catch(final IOException ioe) {
×
764
            LOG.error(ioe.getMessage(), ioe);
×
765
        }
766

767
        // 3) could not load!
768
        return null;
×
769
    }
770

771
    @Override
772
    public Collection getOrCreateCollection(final Txn transaction, final XmldbURI name) throws PermissionDeniedException, IOException, TriggerException {
773
        return getOrCreateCollectionExplicit(transaction, name, Optional.empty(), true)._2;
1✔
774
    }
775

776
    @Override
777
    public Collection getOrCreateCollection(final Txn transaction, final XmldbURI name, final Optional<Tuple2<Permission, Long>> creationAttributes) throws PermissionDeniedException, IOException, TriggerException {
778
        return getOrCreateCollectionExplicit(transaction, name, creationAttributes, true)._2;
1✔
779
    }
780

781
    /**
782
     * Gets the database Collection identified by the specified path.
783
     * If the Collection does not yet exist, it is created - including all ancestors.
784
     * The Collection is identified by its absolute path, e.g. /db/shakespeare.
785
     * The returned Collection will NOT HAVE a lock.
786
     *
787
     * The caller should take care to release any associated resource by
788
     * calling {@link Collection#close()}
789
     *
790
     * @param transaction The current transaction
791
     * @param path The Collection's URI
792
     * @param creationAttributes the attributes to use if the collection needs to be created,
793
     *                           the first item is a Permission (or null for default),
794
     *                           the second item is a Creation Date.
795
     * @param fireTrigger Indicates whether the CollectionTrigger should be fired.
796
     *                    Typically true, but can be set to false when creating a collection is
797
     *                    part of a composite operation like `copy`.
798
     *
799
     * @return A tuple whose first boolean value is set to true if the
800
     * collection was created, or false if the collection already existed. The
801
     * second value is the existing or created Collection
802
     *
803
     * @throws PermissionDeniedException If the current user does not have appropriate permissions
804
     * @throws IOException If an error occurs whilst reading (get) or writing (create) a Collection to disk
805
     * @throws TriggerException If a CollectionTrigger throws an exception
806
     */
807
    private Tuple2<Boolean, Collection> getOrCreateCollectionExplicit(final Txn transaction, final XmldbURI path, final Optional<Tuple2<Permission, Long>> creationAttributes, final boolean fireTrigger) throws PermissionDeniedException, IOException, TriggerException {
808
        final XmldbURI collectionUri = prepend(path.toCollectionPathURI().normalizeCollectionPath());
1✔
809
        final XmldbURI parentCollectionUri = collectionUri.removeLastSegment();
1✔
810

811
        final CollectionCache collectionsCache = pool.getCollectionsCache();
1✔
812

813
        try {
814

815
            // 1) optimize for the existence of the Collection in the cache
816
            try (final ManagedCollectionLock collectionLock = readLockCollection(collectionUri)) {
1✔
817
                final Collection collection = collectionsCache.getIfPresent(collectionUri);
1✔
818
                if (collection != null) {
1✔
819
                    return new Tuple2<>(false, collection);
1✔
820
                }
821
            }
822

823
            // 2) try and read the Collection from disk, if not on disk then create it
824
            try (final ManagedCollectionLock parentCollectionLock = writeLockCollection(parentCollectionUri.numSegments() == 0 ? XmldbURI.ROOT_COLLECTION_URI : parentCollectionUri)) {       // we write lock the parent (as we may need to add a new Collection to it)
1✔
825

826
                // check for preemption between READ -> WRITE lock, is the Collection now in the cache?
827
                final Collection collection = collectionsCache.getIfPresent(collectionUri);
1✔
828
                if (collection != null) {
1!
829
                    return new Tuple2<>(false, collection);
×
830
                }
831

832
                // is the parent Collection in the cache?
833
                if (parentCollectionUri == XmldbURI.EMPTY_URI) {
1✔
834
                    // no parent... so, this is the root collection!
835
                    return getOrCreateCollectionExplicit_rootCollection(transaction, collectionUri, collectionsCache, fireTrigger);
1✔
836
                } else {
837
                    final Collection parentCollection = collectionsCache.getIfPresent(parentCollectionUri);
1✔
838
                    if (parentCollection != null) {
1✔
839
                        // parent collection is in cache, is our Collection present on disk?
840
                        final Collection loadedCollection = loadCollection(collectionUri);
1✔
841

842
                        if (loadedCollection != null) {
1✔
843
                            // loaded it from disk
844

845
                            // add it to the cache and return it
846
                            collectionsCache.put(loadedCollection);
1✔
847
                            return new Tuple2<>(false, loadedCollection);
1✔
848

849
                        } else {
850
                            // not on disk, create the collection
851
                            return new Tuple2<>(true, createCollection(transaction, parentCollection, collectionUri, collectionsCache, creationAttributes, fireTrigger));
1✔
852
                        }
853

854
                    } else {
855
                        /*
856
                         * No parent Collection in the cache so that needs to be loaded/created
857
                         * (or will be read from cache if we are pre-empted) before we can create this Collection.
858
                         * However to do this, we need to yield the collectionLock, so we will continue outside
859
                         * the ManagedCollectionLock at (3)
860
                         */
861
                    }
862
                }
863
            }
864

865
            //TODO(AR) below, should we just fall back to recursive descent creating the collection hierarchy in the same manner that getOrCreateCollection used to do?
866

867
            // 3) No parent collection was previously found in cache so we need to call this function for the parent Collection and then ourselves
868
            final Tuple2<Boolean, Collection> newOrExistingParentCollection = getOrCreateCollectionExplicit(transaction, parentCollectionUri, creationAttributes, fireTrigger);
1✔
869
            return getOrCreateCollectionExplicit(transaction, collectionUri, creationAttributes, fireTrigger);
1✔
870

871
        } catch(final ReadOnlyException e) {
×
872
            throw new PermissionDeniedException(DATABASE_IS_READ_ONLY);
×
873
        } catch(final LockException e) {
×
874
            throw new IOException(e);
×
875
        }
876
    }
877

878
    private Tuple2<Boolean, Collection> getOrCreateCollectionExplicit_rootCollection(final Txn transaction, final XmldbURI collectionUri, final CollectionCache collectionsCache, final boolean fireTrigger) throws PermissionDeniedException, IOException, LockException, ReadOnlyException, TriggerException {
879
        // this is the root collection, so no parent, is the Collection present on disk?
880

881
        final Collection loadedRootCollection = loadCollection(collectionUri);
1✔
882

883
        if (loadedRootCollection != null) {
1!
884
            // loaded it from disk
885

886
            // add it to the cache and return it
887
            collectionsCache.put(loadedRootCollection);
×
888
            return new Tuple2<>(false, loadedRootCollection);
×
889
        } else {
890
            // not on disk, create the root collection
891
            final Collection rootCollection = createCollection(transaction, null, collectionUri, collectionsCache, Optional.empty(), fireTrigger);
1✔
892

893
            //import an initial collection configuration
894
            try {
895
                final String initCollectionConfig = readInitCollectionConfig();
1✔
896
                if(initCollectionConfig != null) {
1!
897
                    CollectionConfigurationManager collectionConfigurationManager = pool.getConfigurationManager();
1✔
898
                    if(collectionConfigurationManager == null) {
1!
899
                        if(pool.getConfigurationManager() == null) {
×
900
                            throw new IllegalStateException();
×
901
                            //might not yet have been initialised
902
                            //pool.initCollectionConfigurationManager(this, transaction);
903
                        }
904
                        collectionConfigurationManager = pool.getConfigurationManager();
×
905
                    }
906

907
                    if(collectionConfigurationManager != null) {
1!
908
                        collectionConfigurationManager.addConfiguration(transaction, this, rootCollection, initCollectionConfig);
1✔
909
                    }
910
                }
911
            } catch(final CollectionConfigurationException cce) {
1✔
912
                LOG.error("Could not load initial collection configuration for /db: {}", cce.getMessage(), cce);
×
913
            }
914

915
            return new Tuple2<>(true, rootCollection);
1✔
916
        }
917
    }
918

919
    /**
     * Creates a new Collection, persists it, links it into its parent (if any),
     * adds it to the Collection cache, and fires CollectionTriggers when requested.
     *
     * NOTE - When this is called there must be a WRITE_LOCK on collectionUri
     * and a WRITE_LOCK on parentCollection (if it is not null)
     *
     * @param transaction the current transaction
     * @param parentCollection the parent of the new collection, or null when creating the root collection
     * @param collectionUri the URI of the collection to create
     * @param collectionCache the cache to register the new collection with
     * @param creationAttributes optional (permission, creation-date) to apply to the new collection
     * @param fireTrigger whether before/afterCreateCollection triggers should fire
     *
     * @return the newly created collection
     *
     * @throws PermissionDeniedException if the current subject lacks WRITE on the parent collection
     */
    private @EnsureUnlocked Collection createCollection(final Txn transaction,
            @Nullable @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection parentCollection,
            @EnsureLocked(mode=LockMode.WRITE_LOCK, type=LockType.COLLECTION) final XmldbURI collectionUri,
            final CollectionCache collectionCache, final Optional<Tuple2<Permission, Long>> creationAttributes, final boolean fireTrigger)
            throws TriggerException, ReadOnlyException, PermissionDeniedException, LockException, IOException {

        // creating a sub-collection modifies the parent, so WRITE is required on the parent
        if(parentCollection != null && !parentCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE)){
            throw new PermissionDeniedException("No write permissions for " + parentCollection.getURI().getCollectionPath());
        }

        // set up and fire the "before" trigger first; trigger stays null when triggers are suppressed
        final CollectionTrigger trigger;
        if (fireTrigger) {
            if (parentCollection == null) {
                trigger = new CollectionTriggers(this, transaction);
            } else {
                trigger = new CollectionTriggers(this, transaction, parentCollection);
            }
            trigger.beforeCreateCollection(this, transaction, collectionUri);
        } else {
            trigger = null;
        }

        // build the in-memory collection object and persist it
        final Collection collectionObj = createCollectionObject(transaction, parentCollection, collectionUri, creationAttributes);
        saveCollection(transaction, collectionObj);

        // link the new collection into its parent and persist the updated parent
        if(parentCollection != null) {
            parentCollection.addCollection(this, collectionObj);
            saveCollection(transaction, parentCollection);
        }

        collectionCache.put(collectionObj);

        // "after" trigger fires only once the collection is fully persisted and cached
        if (fireTrigger) {
            trigger.afterCreateCollection(this, transaction, collectionObj);
        }

        return collectionObj;
    }
961

962
    /**
963
     * NOTE - When this is called there must be a WRITE_LOCK on collectionUri
964
     * and at least a READ_LOCK on parentCollection (if it is not null)
965
     */
966
    private Collection createCollectionObject(final Txn transaction,
967
            @Nullable @EnsureLocked(mode=LockMode.READ_LOCK) final Collection parentCollection,
968
            @EnsureLocked(mode=LockMode.WRITE_LOCK, type=LockType.COLLECTION) final XmldbURI collectionUri,
969
            final Optional<Tuple2<Permission, Long>> creationAttributes)
970
            throws ReadOnlyException, PermissionDeniedException, LockException {
971

972
        final int collectionId = getNextCollectionId(transaction);
1✔
973
        final Collection collection = creationAttributes.map(attrs -> new MutableCollection(this, collectionId, collectionUri, attrs._1, attrs._2)).orElseGet(() -> new MutableCollection(this, collectionId, collectionUri));
1✔
974

975
        //inherit the group to collection if parent-collection is setGid
976
        if(parentCollection != null) {
1✔
977
            final Permission parentPermissions = parentCollection.getPermissionsNoLock();
1✔
978
            if(parentPermissions.isSetGid()) {
1✔
979
                final Permission collectionPermissions = collection.getPermissionsNoLock();
1✔
980
                collectionPermissions.setGroupFrom(parentPermissions); //inherit group
1✔
981
                collectionPermissions.setSetGid(true); //inherit setGid bit
1✔
982
            }
983
        }
984

985
        return collection;
1✔
986
    }
987

988
    /**
989
     * Loads a Collection from disk
990
     *
991
     * @param collectionUri The URI of the Collection to load
992
     *
993
     * @return The Collection object loaded from disk, or null if the record does not exist on disk
994
     */
995
    private @Nullable @EnsureLocked(mode=LockMode.READ_LOCK, type=LockType.COLLECTION) Collection loadCollection(
996
            @EnsureLocked(mode=LockMode.READ_LOCK, type=LockType.COLLECTION) final XmldbURI collectionUri)
997
            throws PermissionDeniedException, LockException, IOException {
998
        try (final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeReadLock(collectionsDb.getLockName())) {
1✔
999
            final Value key = new CollectionStore.CollectionKey(collectionUri.toString());
1✔
1000
            final VariableByteInput is = collectionsDb.getAsStream(key);
1✔
1001
            return is == null ? null : MutableCollection.load(this, collectionUri, is);
1✔
1002
        }
1003
    }
1004

1005
    @Override
    public Collection getCollection(final XmldbURI uri) throws PermissionDeniedException {
        // convenience form of openCollection: the returned Collection carries NO lock
        return openCollection(uri, LockMode.NO_LOCK);
    }
1009

1010
    /**
     * Opens the Collection at the given URI, acquiring the requested lock.
     *
     * Performs the Permission.EXECUTE security check on the target Collection
     * AND on every ancestor Collection up to the root. On any failure the
     * acquired lock is released before returning/throwing.
     *
     * @param uri the Collection URI (made absolute and normalized internally)
     * @param lockMode WRITE_LOCK, READ_LOCK, or any other mode for no lock
     *
     * @return the Collection wrapped with its lock, or null if it does not exist
     *         or a lock could not be acquired
     *
     * @throws PermissionDeniedException if EXECUTE is denied on the Collection or an ancestor
     */
    @Override
    public Collection openCollection(final XmldbURI uri, final LockMode lockMode) throws PermissionDeniedException {
        final XmldbURI collectionUri = prepend(uri.toCollectionPathURI().normalizeCollectionPath());

        final ManagedCollectionLock collectionLock;
        final Runnable unlockFn;    // we unlock on error, or if there is no Collection
        try {
            // acquire the requested lock; unlockFn releases exactly what was acquired
            unlockFn = switch (lockMode) {
                case WRITE_LOCK -> {
                    collectionLock = writeLockCollection(collectionUri);
                    yield collectionLock::close;
                }
                case READ_LOCK -> {
                    collectionLock = readLockCollection(collectionUri);
                    yield collectionLock::close;
                }
                default -> {
                    // no locking requested: a no-op lock and a no-op release
                    collectionLock = ManagedCollectionLock.notLocked(collectionUri);
                    yield () -> {
                    };
                }
            };
        } catch(final LockException e) {
            LOG.error("Failed to acquire lock on Collection: {}", collectionUri);
            return null;
        }

        final CollectionCache collectionsCache = pool.getCollectionsCache();
        final Collection collection;
        try {
            // NOTE: getCollectionForOpen will perform the Permission.EXECUTE security check on Collection at collectionUri
            collection = getCollectionForOpen(collectionsCache, collectionUri);
            if (collection == null) {
                // no such Collection: release the lock before returning
                unlockFn.run();
                return null;
            }
        } catch (final IllegalStateException | PermissionDeniedException e) {
            // release the lock before propagating
            unlockFn.run();
            throw e;
        }

        // Must ALSO perform a security check up the collection hierarchy to ensure that we have Permission.EXECUTE all the way
        try {
            checkCollectionAncestorPermissions(collectionsCache, collection);
        } catch (final IllegalStateException | PermissionDeniedException e) {
            unlockFn.run();
            throw e;
        } catch (final LockException e) {
            unlockFn.run();
            LOG.error("Failed to acquire lock on Collection: {}", collectionUri);
            return null;
        }

        // success: the caller takes ownership of the lock via the LockedCollection wrapper
        return new LockedCollection(collectionLock, collection);
    }
1065

1066
    // NOTE: READ_LOCK in the @EnsureLocked parameter annotation here means "at least" READ
    /**
     * Fetches the Collection at collectionUri from the cache, or loads it from
     * disk (caching it) when absent, performing the Permission.EXECUTE check.
     *
     * @param collectionsCache the collections cache to consult/populate
     * @param collectionUri the URI of the Collection to fetch (caller holds at least a READ lock)
     *
     * @return the Collection, or null if it does not exist on disk or loading failed
     *
     * @throws IllegalStateException if the cache returns a Collection whose URI does not match
     * @throws PermissionDeniedException if the current subject lacks EXECUTE on the Collection
     */
    private @Nullable Collection getCollectionForOpen(final CollectionCache collectionsCache,
            @EnsureLocked(type=LockType.COLLECTION, mode=LockMode.READ_LOCK) final XmldbURI collectionUri)
            throws IllegalStateException, PermissionDeniedException {

        // 1) optimize for reading from the Collection from the cache
        final Collection collection = collectionsCache.getIfPresent(collectionUri);
        if (collection != null) {

            // sanity check
            if(!collection.getURI().equalsInternal(collectionUri)) {
                LOG.error("openCollection: The Collection received from the cache: {} is not the requested: {}", collection.getURI(), collectionUri);
                throw new IllegalStateException();
            }

            // does the user have permission to access THIS Collection
            if(!collection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.EXECUTE)) {
                throw new PermissionDeniedException("Permission denied to open collection: " + collection.getURI().toString() + " by " + getCurrentSubject().getName());
            }

            return collection;

        } else {

            // 2) if not in the cache, load from disk
            final Collection loadedCollection;
            try {
                // NOTE: loadCollection via. MutableCollection's constructor will perform the Permission.EXECUTE security check
                loadedCollection = loadCollection(collectionUri);
            } catch (final IOException e) {
                // read error is reported as "no collection" rather than propagated
                LOG.error(e.getMessage(), e);
                return null;
            } catch (final LockException e) {
                LOG.error("Failed to acquire lock on: {}", FileUtils.fileName(collectionsDb.getFile()));
                return null;
            }

            // if we loaded a Collection add it to the cache (if it isn't already there)
            if (loadedCollection != null) {
                // getOrCreate keeps whichever instance won a concurrent race to populate the cache
                return collectionsCache.getOrCreate(collectionUri, key -> loadedCollection);
            } else {
                return null;
            }
        }
    }
1111

1112
    // NOTE: READ_LOCK in the @EnsureLocked parameter annotation here means "at least" READ
1113
    private void checkCollectionAncestorPermissions(final CollectionCache collectionsCache,
1114
            @EnsureLocked(type=LockType.COLLECTION, mode=LockMode.READ_LOCK) final Collection collection)
1115
            throws IllegalStateException, PermissionDeniedException, LockException {
1116

1117
        /*
1118
            When we are called we hold either a READ or WRITE Lock on the Collection.
1119
            As we are using hierarchical locking for Collections we can
1120
            assume that we also hold either an INTENTION_READ or INTENTION_WRITE Lock
1121
            on each ancestor Collection up to the root,
1122
            therefore we don't really need to acquire any more locks.
1123

1124
            The permissions are checked bottom-up on the Collection hierarchy as we
1125
            assume that the more specific/restrictive permissions are likely to be
1126
            closer to the target Collection.
1127
         */
1128

1129
        Collection c = collection;
1✔
1130
        XmldbURI parentUri = c.getParentURI();
1✔
1131
        while (parentUri != null) {
1✔
1132
            // this will throw a PermissionDeniedException if the user does not have Permission.EXECUTE on the Collection at the parentUri
1133
            c = getCollectionForOpen(collectionsCache, parentUri);
1✔
1134
            if (c == null) {
1!
1135
                LOG.error("Parent collection {} was null for collection {} ", parentUri, collection.getURI());
×
1136
                throw new IllegalStateException();
×
1137
            }
1138

1139
            parentUri = c.getParentURI();
1✔
1140
        }
1141
    }
1✔
1142

1143
    @Override
1144
    public List<String> findCollectionsMatching(final String regexp) {
1145

1146
        final List<String> collections = new ArrayList<>();
×
1147

1148
        final Pattern p = Pattern.compile(regexp);
×
1149
        final Matcher m = p.matcher("");
×
1150

1151
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeReadLock(collectionsDb.getLockName())) {
×
1152

1153
            //TODO write a regexp lookup for key data in BTree.query
1154
            //final IndexQuery idxQuery = new IndexQuery(IndexQuery.REGEXP, regexp);
1155
            //List<Value> keys = collectionsDb.findKeysByCollectionName(idxQuery);
1156

1157
            final List<Value> keys = collectionsDb.getKeys();
×
1158
            for(final Value key : keys) {
×
1159
                final byte[] data = key.getData();
×
1160
                if(data[0] == CollectionStore.KEY_TYPE_COLLECTION) {
×
1161
                    final String collectionName = UTF8.decode(data, 1, data.length - 1).toString();
×
1162
                    m.reset(collectionName);
×
1163

1164
                    if (m.matches()) {
×
1165
                        collections.add(collectionName);
×
1166
                    }
1167
                }
1168
            }
1169
        } catch(final UnsupportedEncodingException e) {
×
1170
            //LOG.error("Unable to encode '" + uri + "' in UTF-8");
1171
            //return null;
1172
        } catch(final LockException e) {
×
1173
            LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()));
×
1174
            //return null;
1175
        } catch(final TerminatedException | IOException | BTreeException e) {
×
1176
            LOG.error(e.getMessage(), e);
×
1177
            //return null;
1178
        }
1179

1180
        return collections;
×
1181
    }
1182

1183
    @Override
1184
    public void readCollectionEntry(final SubCollectionEntry entry) throws IOException, LockException {
1185
        final XmldbURI uri = prepend(entry.getUri().toCollectionPathURI());
1✔
1186

1187
        final CollectionCache collectionsCache = pool.getCollectionsCache();
1✔
1188
        final Collection collection = collectionsCache.getIfPresent(uri);
1✔
1189
        if(collection == null) {
1!
1190
            try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeReadLock(collectionsDb.getLockName())) {
×
1191

1192
                final Value key = new CollectionStore.CollectionKey(uri.toString());
×
1193
                final VariableByteInput is = collectionsDb.getAsStream(key);
×
1194
                if(is == null) {
×
1195
                    throw new IOException("Could not find collection entry for: " + uri);
×
1196
                }
1197

1198
                //read the entry details
1199
                entry.read(is);
×
1200
            }
1201
        } else {
1202

1203
            if(!collection.getURI().equalsInternal(uri)) {
1!
1204
                throw new IOException(String.format("readCollectionEntry: The Collection received from the cache: %s is not the requested: %s", collection.getURI(), uri));
×
1205
            }
1206

1207
            entry.read(collection);
1✔
1208
        }
1209
    }
1✔
1210

1211
    @Override
    public void copyCollection(final Txn transaction, final Collection collection, final Collection destination, final XmldbURI newName) throws PermissionDeniedException, LockException, IOException, TriggerException, EXistException {
        // delegate with the default preservation behaviour for copied metadata
        copyCollection(transaction, collection, destination, newName, PreserveType.DEFAULT);
    }
1✔
1215

1216
    /**
     * Copies sourceCollection (and its documents) into targetCollection under newName.
     *
     * Preconditions established by the caller (see the in-body comment):
     * a READ_LOCK is held on sourceCollection and a WRITE_LOCK on targetCollection.
     *
     * @param transaction the current transaction
     * @param sourceCollection the Collection to copy (READ locked by the caller)
     * @param targetCollection the Collection to copy into (WRITE locked by the caller)
     * @param newName single-segment name for the copy within targetCollection
     * @param preserve whether to preserve attributes (ownership/mode/timestamps) of the copied resources
     *
     * @throws PermissionDeniedException if any required permission is missing, or the copy would nest a collection inside itself
     * @throws IOException if the database is read-only, newName is not a single segment, or a storage error occurs
     */
    @Override
    public void copyCollection(final Txn transaction, final Collection sourceCollection, final Collection targetCollection, final XmldbURI newName, final PreserveType preserve) throws PermissionDeniedException, LockException, IOException, TriggerException, EXistException {
        assert(sourceCollection != null);
        assert(targetCollection != null);
        assert(newName != null);

        if(isReadOnly()) {
            throw new IOException(DATABASE_IS_READ_ONLY);
        }

        // newName must be a simple name, not a path
        if(newName.numSegments() != 1) {
            throw new IOException("newName name must be just a name i.e. an XmldbURI with one segment!");
        }

        final XmldbURI sourceCollectionUri = sourceCollection.getURI();
        final XmldbURI targetCollectionUri = targetCollection.getURI();
        final XmldbURI destinationCollectionUri = targetCollectionUri.append(newName);

        // reject self-copies (by id and by resolved URI) and copies into a descendant
        if(sourceCollection.getId() == targetCollection.getId()) {
            throw new PermissionDeniedException("Cannot copy collection to itself '" + sourceCollectionUri + "'.");
        }
        if(sourceCollectionUri.equals(destinationCollectionUri)) {
            throw new PermissionDeniedException("Cannot copy collection to itself '" + sourceCollectionUri + "'.");
        }
        if(isSubCollection(sourceCollectionUri, targetCollectionUri)) {
            throw new PermissionDeniedException("Cannot copy collection '" + sourceCollectionUri + "' inside itself  '" + targetCollectionUri + "'.");
        }

        // READ on source and WRITE+EXECUTE on target are required before anything else happens
        if(!sourceCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.READ)) {
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " has insufficient privileges on collection to copy collection " + sourceCollectionUri);
        }
        if(!targetCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE | Permission.EXECUTE)) {
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " has insufficient privileges on target collection " + targetCollectionUri + " to copy collection " + sourceCollectionUri);
        }

        /*
         * At this point this thread should hold:
         *   READ_LOCK on:
         *     1) sourceCollection
         *
         *   WRITE_LOCK on:
         *     1) targetCollection
         *
         *  Remember a lock on a node in the Collection tree,
         *  implies locking the entire sub-tree, therefore
         *  we don't need to explicitly lock sub-collections (just documents).
         */

        // register the copy with the process monitor so it is visible/cancellable; endJob in finally
        pool.getProcessMonitor().startJob(ProcessMonitor.ACTION_COPY_COLLECTION, sourceCollection.getURI());
        try {

            final XmldbURI sourceCollectionParentUri = sourceCollection.getParentURI();
            // READ_LOCK the parent of the source Collection for the triggers
            try(final Collection sourceCollectionParent = sourceCollectionParentUri == null ? sourceCollection : openCollection(sourceCollectionParentUri, LockMode.READ_LOCK)) {
                // fire before copy collection triggers
                final CollectionTrigger trigger = new CollectionTriggers(this, transaction, sourceCollectionParent);
                trigger.beforeCopyCollection(this, transaction, sourceCollection, destinationCollectionUri);

                final DocumentTrigger docTrigger = new DocumentTriggers(this, transaction);

                // pessimistically obtain READ_LOCKs on all descendant documents of sourceCollection, and WRITE_LOCKs on all target documents
                final Collection newCollection;
                try(final ManagedLocks<ManagedDocumentLock> sourceDocLocks = new ManagedLocks(lockDescendantDocuments(sourceCollection, lockManager::acquireDocumentReadLock));
                        final ManagedLocks<ManagedDocumentLock> targetDocLocks = new ManagedLocks(lockTargetDocuments(sourceCollectionUri, sourceDocLocks, destinationCollectionUri, lockManager::acquireDocumentWriteLock))) {

                    // check all permissions in the tree to ensure a copy operation will succeed before starting copying
                    checkPermissionsForCopy(sourceCollection, targetCollection, newName);
                    newCollection = doCopyCollection(transaction, docTrigger, sourceCollection, targetCollection, destinationCollectionUri, true, preserve);
                }
                // fire after copy collection triggers
                trigger.afterCopyCollection(this, transaction, newCollection, sourceCollectionUri);
            }

        } finally {
            // always clear the process-monitor job, even when the copy fails
            pool.getProcessMonitor().endJob();
        }
    }
1✔
1293

1294
    /**
     * Checks all permissions in the tree to ensure that a copy operation
     * will not fail due to a lack of rights.
     *
     * The check recursively descends the source Collection tree: each source
     * resource must be readable, the target must be writable, and any
     * resource that would be overwritten in the destination must itself be
     * writable (following the Linux convention for overwrite).
     *
     * @param sourceCollection The Collection to copy
     * @param targetCollection The target Collection to copy the sourceCollection into,
     *     or null (in which case only source readability is checked)
     * @param newName The new name the sourceCollection should have in the targetCollection
     *
     * @throws PermissionDeniedException If the current user does not have appropriate permissions
     * @throws LockException If an exception occurs whilst acquiring locks
     */
    void checkPermissionsForCopy(@EnsureLocked(mode=LockMode.READ_LOCK) final Collection sourceCollection,
            @EnsureLocked(mode=LockMode.READ_LOCK) @Nullable final Collection targetCollection, final XmldbURI newName)
            throws PermissionDeniedException, LockException {

        // to copy a collection we must be able to traverse (EXECUTE) and read it
        if(!sourceCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.EXECUTE | Permission.READ)) {
            throw new PermissionDeniedException("Permission denied to copy collection " + sourceCollection.getURI() + " by " + getCurrentSubject().getName());
        }

        final XmldbURI destinationCollectionUri = targetCollection == null ? null : targetCollection.getURI().append(newName);
        final Collection destinationCollection = destinationCollectionUri == null ? null : getCollection(destinationCollectionUri);  // NOTE: we already have a WRITE_LOCK on destinationCollectionUri

        if(targetCollection != null) {
            // we must be able to traverse and write into the target collection
            if(!targetCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.EXECUTE | Permission.WRITE)) {
                throw new PermissionDeniedException("Permission denied to copy collection " + sourceCollection.getURI() + " to " + targetCollection.getURI() + " by " + getCurrentSubject().getName());
            }

            // if the destination already exists, we will be writing into it, so it must also be writable
            if(destinationCollection != null) {
                if(!destinationCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.EXECUTE | Permission.WRITE)) {
                    throw new PermissionDeniedException("Permission denied to copy collection " + sourceCollection.getURI() + " to " + destinationCollection.getURI() + " by " + getCurrentSubject().getName());
                }
            }
        }

        // check document permissions
        for(final Iterator<DocumentImpl> itSrcSubDoc = sourceCollection.iteratorNoLock(this); itSrcSubDoc.hasNext(); ) {        // NOTE: we already have a READ lock on sourceCollection implicitly
            final DocumentImpl srcSubDoc = itSrcSubDoc.next();
            if(!srcSubDoc.getPermissions().validate(getCurrentSubject(), Permission.READ)) {
                throw new PermissionDeniedException("Permission denied to copy collection " + sourceCollection.getURI() + " for resource " + srcSubDoc.getURI() + " by " + getCurrentSubject().getName());
            }

            //if the destination resource exists, we must have write access to replace it's metadata etc. (this follows the Linux convention)
            if(destinationCollection != null && !destinationCollection.isEmpty(this)) {
                final DocumentImpl newDestSubDoc = destinationCollection.getDocument(this, srcSubDoc.getFileURI()); //TODO check this uri is just the filename!
                if(newDestSubDoc != null) {
                    if(!newDestSubDoc.getPermissions().validate(getCurrentSubject(), Permission.WRITE)) {
                        throw new PermissionDeniedException("Permission denied to copy collection " + sourceCollection.getURI() + " for resource " + newDestSubDoc.getURI() + " by " + getCurrentSubject().getName());
                    }
                }
            }
        }

        // descend into sub-collections, checking each against the (possibly null) destination
        for(final Iterator<XmldbURI> itSrcSubColUri = sourceCollection.collectionIteratorNoLock(this); itSrcSubColUri.hasNext(); ) {        // NOTE: we already have a READ lock on sourceCollection implicitly
            final XmldbURI srcSubColUri = itSrcSubColUri.next();
            final Collection srcSubCol = getCollection(sourceCollection.getURI().append(srcSubColUri));  // NOTE: we already have a READ_LOCK on destinationCollectionUri

            checkPermissionsForCopy(srcSubCol, destinationCollection, srcSubColUri);
        }
    }
1354

1355

1356
    /**
     * Copy a collection and all its sub-Collections.
     *
     * Creates (or re-uses) the destination Collection, optionally preserving
     * the source's mode/ACL, owner/group (DBA only) and created timestamp,
     * then copies the documents and recurses into each sub-Collection.
     *
     * @param transaction The current transaction
     * @param documentTrigger The trigger to use for document events
     * @param sourceCollection The Collection to copy
     * @param destinationParentCollection The parent of the destination Collection, may be null
     * @param destinationCollectionUri The destination Collection URI for the sourceCollection copy
     * @param copyCollectionMode false on the first call, true on recursive calls
     * @param preserve whether to preserve attributes (mode, ACL, owner, created) on copy
     *
     * @return A reference to the Collection, no additional locks are held on the Collection
     *
     * @throws PermissionDeniedException If the current user does not have appropriate permissions
     * @throws LockException If an exception occurs whilst acquiring locks
     * @throws IOException If an error occurs whilst copying the Collection on disk
     * @throws TriggerException If a CollectionTrigger throws an exception
     * @throws EXistException If no more Document IDs are available
     */
    private Collection doCopyCollection(final Txn transaction, final DocumentTrigger documentTrigger,
            @EnsureLocked(mode=LockMode.READ_LOCK) final Collection sourceCollection,
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection destinationParentCollection, 
            @EnsureLocked(mode=LockMode.WRITE_LOCK, type=LockType.COLLECTION) final XmldbURI destinationCollectionUri,
            final boolean copyCollectionMode, final PreserveType preserve)
            throws PermissionDeniedException, IOException, EXistException, TriggerException, LockException {
        if(LOG.isDebugEnabled()) {
            LOG.debug("Copying collection to '{}'", destinationCollectionUri);
        }

        // permissions and attributes for the destCollection (if we have to create it)
        final Permission createCollectionPerms = PermissionFactory.getDefaultCollectionPermission(getBrokerPool().getSecurityManager());
        copyModeAndAcl(sourceCollection.getPermissions(), createCollectionPerms);
        final long created;
        if (preserveOnCopy(preserve)) {
            // only copy the owner and group from the source if we are creating a new collection and we are the DBA
            if (getCurrentSubject().hasDbaRole()) {
                PermissionFactory.chown(this, createCollectionPerms, Optional.of(sourceCollection.getPermissions().getOwner().getName()), Optional.of(sourceCollection.getPermissions().getGroup().getName()));
            }

            created = sourceCollection.getCreated();
        } else {
            // a created time of 0 causes the creation timestamp to be assigned fresh
            created = 0;
        }

        // _1 indicates whether the destination Collection was newly created, _2 is the Collection itself
        final Tuple2<Boolean, Collection> destinationCollection = getOrCreateCollectionExplicit(transaction, destinationCollectionUri, Optional.of(new Tuple2<>(createCollectionPerms, created)), false);

        // if we didn't create destCollection but we need to preserve the attributes
        if((!destinationCollection._1) && preserveOnCopy(preserve)) {
            copyModeAndAcl(sourceCollection.getPermissions(), destinationCollection._2.getPermissions());
        }

        // inherit the group to the destinationCollection if parent is setGid
        if (destinationParentCollection != null && destinationParentCollection.getPermissions().isSetGid()) {
            destinationCollection._2.getPermissions().setGroupFrom(destinationParentCollection.getPermissions()); //inherit group
            destinationCollection._2.getPermissions().setSetGid(true); //inherit setGid bit
        }

        // copy the documents of this collection level (non-recursive)
        doCopyCollectionDocuments(transaction, documentTrigger, sourceCollection, destinationCollection._2, preserve);

        // recurse into each sub-collection of the source
        final XmldbURI sourceCollectionUri = sourceCollection.getURI();
        for(final Iterator<XmldbURI> i = sourceCollection.collectionIterator(this); i.hasNext(); ) {
            final XmldbURI childName = i.next();
            final XmldbURI childUri = sourceCollectionUri.append(childName);
            try (final Collection child = getCollection(childUri)) {        // NOTE: we already have a READ lock on child implicitly
                if (child == null) {
                    throw new IOException("Child collection " + childUri + " not found");
                } else {
                    doCopyCollection(transaction, documentTrigger, child, destinationCollection._2, destinationCollection._2.getURI().append(childName), true, preserve);
                }
            }
        }

        return destinationCollection._2;
    }
1428

1429
    /**
1430
     * Copy the documents in one Collection to another (non-recursive)
1431
     *
1432
     * @param transaction The current transaction
1433
     * @param documentTrigger The trigger to use for document events
1434
     * @param sourceCollection The Collection to copy documents from
1435
     * @param targetCollection The Collection to copy documents to
1436
     *
1437
     * @throws PermissionDeniedException If the current user does not have appropriate permissions
1438
     * @throws LockException If an exception occurs whilst acquiring locks
1439
     * @throws IOException If an error occurs whilst copying the Collection on disk
1440
     * @throws TriggerException If a CollectionTrigger throws an exception
1441
     * @throws EXistException If no more Document IDs are available
1442
     */
1443
    private void doCopyCollectionDocuments(final Txn transaction, final DocumentTrigger documentTrigger,
1444
            @EnsureLocked(mode=LockMode.READ_LOCK) final Collection sourceCollection,
1445
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection targetCollection,
1446
            final PreserveType preserve)
1447
            throws LockException, PermissionDeniedException, IOException, TriggerException, EXistException {
1448
        for (final Iterator<DocumentImpl> i = sourceCollection.iterator(this); i.hasNext(); ) {
1✔
1449
            final DocumentImpl sourceDocument = i.next();
1✔
1450

1451
            if(LOG.isDebugEnabled()) {
1!
1452
                LOG.debug("Copying resource: '{}'", sourceDocument.getURI());
×
1453
            }
1454

1455
            final XmldbURI newDocName = sourceDocument.getFileURI();
1✔
1456
            final XmldbURI targetCollectionUri = targetCollection.getURI();
1✔
1457

1458
            try(final LockedDocument oldLockedDoc = targetCollection.getDocumentWithLock(this, newDocName, LockMode.WRITE_LOCK)) {
1✔
1459
                final DocumentImpl oldDoc = oldLockedDoc == null ? null : oldLockedDoc.getDocument();
1✔
1460
                doCopyDocument(transaction, documentTrigger, sourceDocument, targetCollection, newDocName, oldDoc, preserve);
1✔
1461
            }
1462
        }
1463
    }
1✔
1464

1465
    /**
1466
     * Copies just the mode and ACL from the src to the dest
1467
     *
1468
     * @param srcPermission The source to copy from
1469
     * @param destPermission The destination to copy to
1470
     */
1471
    private void copyModeAndAcl(final Permission srcPermission, final Permission destPermission) throws PermissionDeniedException {
1472
        final List<ACEAider> aces = new ArrayList<>();
1✔
1473
        if(srcPermission instanceof SimpleACLPermission srcAclPermission && destPermission instanceof SimpleACLPermission) {
1!
1474
            for (int i = 0; i < srcAclPermission.getACECount(); i++) {
1!
1475
                aces.add(new ACEAider(srcAclPermission.getACEAccessType(i), srcAclPermission.getACETarget(i), srcAclPermission.getACEWho(i), srcAclPermission.getACEMode(i)));
×
1476
            }
1477
        }
1478
        PermissionFactory.chmod(this, destPermission, Optional.of(srcPermission.getMode()), Optional.of(aces));
1✔
1479
    }
1✔
1480

1481
    @Override
1482
    public boolean preserveOnCopy(final PreserveType preserve) {
1483
        Objects.requireNonNull(preserve);
1✔
1484

1485
        return PreserveType.PRESERVE == preserve ||
1✔
1486
                (PreserveType.DEFAULT == preserve && PreserveType.PRESERVE == this.preserveOnCopy);
1!
1487
    }
1488

1489
    private boolean isSubCollection(@EnsureLocked(mode=LockMode.READ_LOCK) final Collection col,
1490
            @EnsureLocked(mode=LockMode.READ_LOCK) final Collection sub) {
1491
        return isSubCollection(col.getURI(), sub.getURI());
×
1492
    }
1493

1494
    private boolean isSubCollection(final XmldbURI col, final XmldbURI sub) {
1495
        return sub.startsWith(col);
1✔
1496
    }
1497

1498
    @Override
    public void moveCollection(final Txn transaction, final Collection sourceCollection,
            final Collection targetCollection, final XmldbURI newName)
            throws PermissionDeniedException, LockException, IOException, TriggerException {
        assert(sourceCollection != null);
        assert(targetCollection != null);
        assert(newName != null);

        if(isReadOnly()) {
            throw new IOException(DATABASE_IS_READ_ONLY);
        }

        // the new name must be a single path segment, not a multi-segment path
        if(newName.numSegments() != 1) {
            throw new IOException("newName name must be just a name i.e. an XmldbURI with one segment!");
        }

        final XmldbURI sourceCollectionUri = sourceCollection.getURI();
        final XmldbURI targetCollectionUri = targetCollection.getURI();
        final XmldbURI destinationCollectionUri = targetCollectionUri.append(newName);

        // sanity checks: moving a collection onto itself, moving the root
        // collection, or moving a collection into its own sub-tree are all forbidden
        if(sourceCollection.getId() == targetCollection.getId()) {
            throw new PermissionDeniedException("Cannot move collection to itself '" + sourceCollectionUri + "'.");
        }
        if(sourceCollectionUri.equals(destinationCollectionUri)) {
            throw new PermissionDeniedException("Cannot move collection to itself '" + sourceCollectionUri + "'.");
        }
        if(sourceCollectionUri.equals(XmldbURI.ROOT_COLLECTION_URI)) {
            throw new PermissionDeniedException("Cannot move the db root collection /db");
        }
        if(isSubCollection(sourceCollectionUri, targetCollectionUri)) {
            throw new PermissionDeniedException("Cannot move collection '" + sourceCollectionUri + "' inside itself '" + targetCollectionUri + "'.");
        }

        // the source must be writable (its path changes) and the target traversable + writable
        if(!sourceCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE)) {
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " has insufficient privileges on collection to move collection " + sourceCollectionUri);
        }
        if(!targetCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE | Permission.EXECUTE)) {
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " has insufficient privileges on destination collection " + destinationCollectionUri + " to move collection " + sourceCollectionUri);
        }



        // WRITE LOCK the parent of the sourceCollection (as we will want to remove the sourceCollection from it eventually)
        final XmldbURI sourceCollectionParentUri = sourceCollectionUri.removeLastSegment();
        try (final Collection sourceCollectionParent = openCollection(sourceCollectionParentUri, LockMode.WRITE_LOCK)) {

            // removing the source from its parent requires write + execute on the parent
            if(!sourceCollectionParent.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE | Permission.EXECUTE)) {
                throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " have insufficient privileges on collection " + sourceCollectionParentUri + " to move collection " + sourceCollectionUri);
            }

            /*
             * If replacing another collection in the move
             * i.e. sourceCollection=/db/col1/A, targetCollection=/db/col2, newName=A
             * where /db/col2/A already exists we have to make sure the permissions to
             * remove /db/col2/A are okay!
             *
             * So we must call removeCollection on /db/col2/A
             * Which will ensure that collection can be removed and then remove it.
             */
            try(final Collection existingDestinationCollection = getCollection(destinationCollectionUri)) { // NOTE: we already have a WRITE lock on destinationCollection (implicitly as targetCollection is locked)
                if(existingDestinationCollection != null) {
                    if (!removeCollection(transaction, existingDestinationCollection)) {
                        throw new IOException("Destination collection '" + destinationCollectionUri + "' already exists and cannot be removed");
                    }
                }
            }

            /*
             * At this point this thread should hold WRITE_LOCKs on:
             *   1) parent of sourceCollection
             *   2) sourceCollection
             *   3) targetCollection
             *
             *  Remember a lock on a node in the Collection tree,
             *  implies locking the entire sub-tree, therefore
             *  we don't need to explicitly lock sub-collections (just documents).
             */

            pool.getProcessMonitor().startJob(ProcessMonitor.ACTION_MOVE_COLLECTION, sourceCollection.getURI());
            try {
                final CollectionTrigger trigger = new CollectionTriggers(this, transaction, sourceCollectionParent);
                trigger.beforeMoveCollection(this, transaction, sourceCollection, destinationCollectionUri);

                // pessimistically obtain WRITE_LOCKs on all descendant documents of sourceCollection, and WRITE_LOCKs on all target documents
                // we do this as whilst the document objects won't change, their method getURI() will return a different URI after the move
                try(final ManagedLocks<ManagedDocumentLock> sourceDocLocks = new ManagedLocks(lockDescendantDocuments(sourceCollection, lockManager::acquireDocumentWriteLock));
                        final ManagedLocks<ManagedDocumentLock> targetDocLocks = new ManagedLocks(lockTargetDocuments(sourceCollectionUri, sourceDocLocks, destinationCollectionUri, lockManager::acquireDocumentWriteLock))) {

                    // Need to move each collection in the source tree individually, so recurse.
                    moveCollectionRecursive(transaction, trigger, sourceCollectionParent, sourceCollection, targetCollection, newName, false);

                }
                trigger.afterMoveCollection(this, transaction, sourceCollection, sourceCollectionUri);
            } finally {
                pool.getProcessMonitor().endJob();
            }
        }
    }
1596

1597
    /**
     * Acquires locks on all documents within a Collection and its
     * descendant Collections.
     *
     * Locks are acquired in a top-down, left-to-right order: first the
     * documents of {@code collection} itself, then (recursively) those of
     * each sub-collection.
     *
     * NOTE: It is assumed that the caller holds a lock on the
     *     `collection` of the same mode as those that we should acquire on the descendants
     *
     * @param collection The Collection whose descendant document locks should be acquired
     * @param lockFn A function for acquiring a lock on a document URI
     *
     * @return A list of locks in acquisition order. Note that these should be released in reverse order
     *
     * @throws LockException if a lock cannot be acquired; any locks already
     *     taken by this call are released before re-throwing
     * @throws PermissionDeniedException if a descendant cannot be accessed
     */
    private List<ManagedDocumentLock> lockDescendantDocuments(final Collection collection, final FunctionE<XmldbURI, ManagedDocumentLock, LockException> lockFn) throws LockException, PermissionDeniedException {
        final List<ManagedDocumentLock> locks = new ArrayList<>();

        try {
            // lock the documents at this level first
            final Iterator<DocumentImpl> itDoc = collection.iteratorNoLock(this);
            while(itDoc.hasNext()) {
                final DocumentImpl doc = itDoc.next();
                final ManagedDocumentLock docLock = lockFn.apply(doc.getURI());
                locks.add(docLock);
            }

            // then recurse into each sub-collection, left-to-right
            final XmldbURI collectionUri = collection.getURI();
            final Iterator<XmldbURI> it = collection.collectionIteratorNoLock(this);    // NOTE: we should already have a lock on collection
            while (it.hasNext()) {
                final XmldbURI childCollectionName = it.next();
                final XmldbURI childCollectionUri = collectionUri.append(childCollectionName);
                final Collection childCollection = getCollection(childCollectionUri);  // NOTE: we don't need to lock the collection as we should already implicitly have a lock on the collection sub-tree
                final List<ManagedDocumentLock> descendantLocks = lockDescendantDocuments(childCollection, lockFn);
                locks.addAll(descendantLocks);
            }
        } catch (final PermissionDeniedException | LockException e) {
            // unlock in reverse order
            try {
                ManagedLocks.closeAll(locks);
            } catch (final RuntimeException re) {
                LOG.error(re);
            }

            throw e;
        }

        return locks;
    }
1643

1644
    /**
1645
     * Locks target documents (useful for copy/move operations).
1646
     *
1647
     * @param sourceCollectionUri The source collection URI root of the copy/move operation
1648
     * @param sourceDocumentLocks Locks on the source documents, for which target document locks should be acquired
1649
     * @param targetCollectionUri The target collection URI root of the copy/move operation
1650
     * @param lockFn The function for locking the target document.
1651
     *
1652
     * @return A list of locks on the target documents.
1653
     */
1654
    private List<ManagedDocumentLock> lockTargetDocuments(final XmldbURI sourceCollectionUri, final ManagedLocks<ManagedDocumentLock> sourceDocumentLocks, final XmldbURI targetCollectionUri, final FunctionE<XmldbURI, ManagedDocumentLock, LockException> lockFn) throws LockException {
1655
        final List<ManagedDocumentLock> locks = new ArrayList<>();
1✔
1656
        try {
1657
            for (final ManagedDocumentLock sourceDocumentLock : sourceDocumentLocks) {
1✔
1658
                final XmldbURI sourceDocumentUri = sourceDocumentLock.getPath();
1✔
1659
                final URI relativeDocumentUri = sourceCollectionUri.relativizeCollectionPath(sourceDocumentUri.getURI());
1✔
1660
                final XmldbURI targetDocumentUri = XmldbURI.create(targetCollectionUri.resolveCollectionPath(relativeDocumentUri));
1✔
1661

1662

1663
                final ManagedDocumentLock documentLock = lockFn.apply(targetDocumentUri);
1✔
1664
                locks.add(documentLock);
1✔
1665

1666
            }
1667
        } catch(final LockException e) {
1✔
1668
            // unlock in reverse order
1669
            try {
1670
                ManagedLocks.closeAll(locks);
×
1671
            } catch (final RuntimeException re) {
×
1672
                LOG.error(re);
×
1673
            }
1674

1675
            throw e;
×
1676
        }
1677

1678
        return locks;
1✔
1679
    }
1680

1681

1682
    //TODO bug the trigger param is reused as this is a recursive method, but in the current design triggers are only meant to be called once for each action and then destroyed!
1683
    /**
1684
     * Recursive-descent Collection move, only meant to be
1685
     * called from {@link #moveCollection(Txn, Collection, Collection, XmldbURI)}
1686
     *
1687
     * @param transaction The current transaction
1688
     * @param trigger The trigger to fire on Collection events
1689
     * @param sourceCollection The Collection to move
1690
     * @param targetCollection The target Collection to move the sourceCollection into
1691
     * @param newName The new name the sourceCollection should have in the targetCollection
1692
     * @param fireTrigger Indicates whether the CollectionTrigger should be fired
1693
     *     on the Collection the first time this function is called. Triggers will always
1694
     *     be fired for recursive calls of this function.
1695
     */
1696
    private void moveCollectionRecursive(final Txn transaction, final CollectionTrigger trigger,
1697
            @Nullable @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection sourceCollectionParent,
1698
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection sourceCollection,
1699
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection targetCollection, final XmldbURI newName,
1700
            final boolean fireTrigger) throws PermissionDeniedException, IOException, LockException, TriggerException {
1701
        final XmldbURI sourceCollectionUri = sourceCollection.getURI();
1✔
1702
        final XmldbURI destinationCollectionUri = targetCollection.getURI().append(newName);
1✔
1703

1704
        if(fireTrigger) {
1✔
1705
            trigger.beforeMoveCollection(this, transaction, sourceCollection, destinationCollectionUri);
1✔
1706
        }
1707

1708
        // de-reference any existing binaries in the destination from the blob store
1709
        try (final Collection dst = openCollection(destinationCollectionUri, LockMode.WRITE_LOCK)) {
1✔
1710
            if (dst != null) {
1!
1711
                final Iterator<DocumentImpl> itDoc = dst.iterator(this);
×
1712
                while (itDoc.hasNext()) {
×
1713
                    final DocumentImpl dstDoc = itDoc.next();
×
1714
                    if (dstDoc instanceof BinaryDocument binDstDoc) {
×
1715
                        try (final ManagedDocumentLock dstDocLock = lockManager.acquireDocumentWriteLock(dstDoc.getURI())) {
×
1716
                            removeBinaryResource(transaction, binDstDoc);
×
1717
                            binDstDoc.setBlobId(null);
×
1718
                        }
1719
                    }
1720
                }
1721
            }
1722
        }
1723

1724
        // remove source from parent
1725
        if (sourceCollectionParent != null) {
1✔
1726
            final XmldbURI sourceCollectionName = sourceCollectionUri.lastSegment();
1✔
1727
            sourceCollectionParent.removeCollection(this, sourceCollectionName);
1✔
1728

1729
            // if this is a rename, the save will happen after we "add the destination to the target" below...
1730
            if (!sourceCollectionParent.getURI().equals(targetCollection)) {
1!
1731
                saveCollection(transaction, sourceCollectionParent);
1✔
1732
            }
1733
        }
1734

1735
        // remove source from cache
1736
        final CollectionCache collectionsCache = pool.getCollectionsCache();
1✔
1737
        collectionsCache.invalidate(sourceCollection.getURI());
1✔
1738

1739
        // remove source from disk
1740
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
1741
            final Value key = new CollectionStore.CollectionKey(sourceCollectionUri.toString());
1✔
1742
            collectionsDb.remove(transaction, key);
1✔
1743
        }
1744

1745
        // set source path to destination... source is now the destination
1746
        sourceCollection.setPath(destinationCollectionUri, true);
1✔
1747
        saveCollection(transaction, sourceCollection);
1✔
1748

1749
        // add destination to target
1750
        targetCollection.addCollection(this, sourceCollection);
1✔
1751
        saveCollection(transaction, targetCollection);
1✔
1752

1753
        if(fireTrigger) {
1✔
1754
            trigger.afterMoveCollection(this, transaction, sourceCollection, sourceCollectionUri);
1✔
1755
        }
1756

1757
        // move the descendants
1758
        for(final Iterator<XmldbURI> i = sourceCollection.collectionIteratorNoLock(this); i.hasNext(); ) {  // NOTE: we already have a WRITE lock on sourceCollection
1✔
1759
            final XmldbURI childName = i.next();
1✔
1760
            final XmldbURI childUri = sourceCollectionUri.append(childName);
1✔
1761

1762
            final Collection child = getCollectionForOpen(collectionsCache, childUri);        // NOTE: we have a write lock on the sourceCollection, which means we don't need to lock sub-collections in the tree
1✔
1763
            if (child == null) {
1!
1764
                throw new IOException("Child collection " + childUri + " not found");
×
1765
            } else {
1766
                moveCollectionRecursive(transaction, trigger, null, child, sourceCollection, childName, true);
1✔
1767
            }
1768
        }
1769
    }
1✔
1770

1771
    @Override
1772
    public boolean removeCollection(final Txn transaction, final Collection collection) throws PermissionDeniedException, IOException, TriggerException {
1773
        if(isReadOnly()) {
1!
1774
            throw new IOException(DATABASE_IS_READ_ONLY);
×
1775
        }
1776

1777
        // WRITE LOCK the collection's parent (as we will remove this collection from it)
1778
        final XmldbURI parentCollectionUri = collection.getParentURI() == null ? XmldbURI.ROOT_COLLECTION_URI : collection.getParentURI();
1✔
1779
        try(final ManagedCollectionLock parentCollectionLock = writeLockCollection(parentCollectionUri)) {
1✔
1780
            return _removeCollection(transaction, collection);
1✔
1781
        } catch(final LockException e) {
×
1782
            LOG.error("Unable to lock Collection: {}", collection.getURI(), e);
×
1783
            return false;
×
1784
        }
1785
    }
1786

1787
    private boolean _removeCollection(final Txn transaction, @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection collection) throws PermissionDeniedException, TriggerException, IOException {
1788
        final XmldbURI collectionUri = collection.getURI();
1✔
1789

1790
        getBrokerPool().getProcessMonitor().startJob(ProcessMonitor.ACTION_REMOVE_COLLECTION, collectionUri);
1✔
1791

1792
        try {
1793

1794
            @Nullable final Collection parentCollection = collection.getParentURI() == null ? null : getCollection(collection.getParentURI());  // NOTE: we already have a WRITE lock on the parent of the Collection we set out to remove
1✔
1795

1796
            // 1) check the current user has permission to delete the Collection
1797
            //TODO(AR) the below permissions check could be optimised when descending the tree so we don't check the same collection(s) twice in some cases
1798
            if(!checkRemoveCollectionPermissions(parentCollection, collection)) {
1✔
1799
                throw new PermissionDeniedException("Account '" + getCurrentSubject().getName() + "' is not allowed to remove collection '" + collection.getURI() + "'");
1✔
1800
            }
1801

1802
            final CollectionTrigger colTrigger = new CollectionTriggers(this, transaction, parentCollection == null ? collection : parentCollection);
1✔
1803
            colTrigger.beforeDeleteCollection(this, transaction, collection);
1✔
1804

1805
            // 2) remove descendant collections
1806
            for (final Iterator<XmldbURI> subCollectionName = collection.collectionIteratorNoLock(this); subCollectionName.hasNext(); ) {   // NOTE: we already have a WRITE lock on the parent of the Collection we set out to remove
1✔
1807
                final XmldbURI subCollectionUri = collectionUri.append(subCollectionName.next());
1✔
1808
                final boolean removedSubCollection = _removeCollection(transaction, getCollection(subCollectionUri));   // NOTE: we already have a WRITE lock on the parent of the Collection we set out to remove
1✔
1809
                if(!removedSubCollection) {
1!
1810
                    LOG.error("Unable to remove Collection: {}", subCollectionUri);
×
1811
                    return false;
×
1812
                }
1813
            }
1814

1815
            //TODO(AR) this can be executed asynchronously as a task, Do we need to await the completion before unlocking the collection? or just await completion before returning from the first call to _removeCollection?
1816
            // 3) drop indexes for this Collection
1817
            notifyDropIndex(collection);
1✔
1818
            getIndexController().removeCollection(collection, this, false);
1✔
1819

1820
            // 4) remove this Collection from the parent Collection
1821
            if(parentCollection != null) {
1✔
1822
                parentCollection.removeCollection(this, collectionUri.lastSegment());
1✔
1823
                saveCollection(transaction, parentCollection);
1✔
1824
            }
1825

1826
            // 5) remove Collection from collections.dbx
1827
            if(parentCollection != null) {
1✔
1828
                try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
1829
                    final Value key = new CollectionStore.CollectionKey(collectionUri.getRawCollectionPath());
1✔
1830
                    collectionsDb.remove(transaction, key);
1✔
1831

1832
                    //TODO(AR) is this the correct place to invalidate the config?
1833
                    // Notify the collection configuration manager
1834
                    final CollectionConfigurationManager manager = pool.getConfigurationManager();
1✔
1835
                    if(manager != null) {
1!
1836
                        manager.invalidate(collectionUri, getBrokerPool());
1✔
1837
                    }
1838
                }
1839

1840
                // invalidate the cache entry
1841
                final CollectionCache collectionsCache = pool.getCollectionsCache();
1✔
1842
                collectionsCache.invalidate(collection.getURI());
1✔
1843
            } else {
1✔
1844
                // if this is the root collection we just have to save
1845
                // it to persist the removal of any subCollections to collections.dbx
1846
                saveCollection(transaction, collection);
1✔
1847
            }
1848

1849
            //TODO(AR) this could possibly be executed asynchronously as a task, we don't need to know when it completes (this is because access to documents is through a Collection, and the Collection was removed above), however we cannot recycle the collectionId until all docs are gone
1850
            // 6) unlink all documents from the Collection
1851
            try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
1852
                final Value docKey = new CollectionStore.DocumentKey(collection.getId());
1✔
1853
                final IndexQuery query = new IndexQuery(IndexQuery.TRUNC_RIGHT, docKey);
1✔
1854
                collectionsDb.removeAll(transaction, query);
1✔
1855
                if(parentCollection != null) {  // we must not free the root collection id!
1✔
1856
                    collectionsDb.freeCollectionId(collection.getId());
1✔
1857
                }
1858
            } catch(final BTreeException | IOException e) {
×
1859
                LOG.error("Unable to unlink documents from the Collection: {}", collectionUri, e);
×
1860
            }
1861

1862
            //TODO(AR) this can be executed asynchronously as a task, we need to await the completion before unlocking the collection
1863
            // 7) remove the documents nodes and binary documents of the Collection from dom.dbx
1864
            removeCollectionsDocumentNodes(transaction, collection);
1✔
1865

1866
            colTrigger.afterDeleteCollection(this, transaction, collectionUri);
1✔
1867

1868
            return true;
1✔
1869

1870
        } catch(final LockException e) {
×
1871
            LOG.error("Unable to lock Collection: {}", collectionUri, e);
×
1872
            return false;
×
1873
        } finally {
1874
            getBrokerPool().getProcessMonitor().endJob();
1✔
1875
        }
1876
    }
1877

1878
    /**
     * Removes all document nodes and binary content belonging to the documents
     * of a Collection, firing the before/after delete document triggers for
     * each document and finally recycling each document id.
     *
     * NOTE: the caller must already hold a WRITE lock on the Collection,
     * hence the use of {@code iteratorNoLock}.
     *
     * @param transaction the current transaction
     * @param collection the (WRITE locked) Collection whose documents are removed
     *
     * @throws TriggerException if a document trigger fails
     * @throws PermissionDeniedException if a trigger denies the operation
     * @throws LockException if the dom.dbx btree lock cannot be acquired
     * @throws IOException if removing binary content from the blob store fails
     */
    private void removeCollectionsDocumentNodes(final Txn transaction,
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection collection)
            throws TriggerException, PermissionDeniedException, LockException, IOException {
        final DocumentTrigger docTrigger = new DocumentTriggers(this, transaction, collection);

        for (final Iterator<DocumentImpl> itDocument = collection.iteratorNoLock(this); itDocument.hasNext(); ) {       // NOTE: we already have a WRITE_LOCK on the collection
            final DocumentImpl doc = itDocument.next();

            docTrigger.beforeDeleteDocument(this, transaction, doc);

            //Remove doc's metadata
            // WM: now removed in one step. see above.
            //removeResourceMetadata(transaction, doc);

            // Remove the document's node index entries from dom.dbx:
            // a TRUNC_RIGHT query on the doc id prefix matches every node of the document.
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
                @Override
                public Object start() {
                    try {
                        final Value ref = new NodeRef(doc.getDocId());
                        final IndexQuery query = new IndexQuery(IndexQuery.TRUNC_RIGHT, ref);
                        domDb.remove(transaction, query, null);
                    } catch (final BTreeException e) {
                        LOG.error("btree error while removing document", e);
                    } catch (final IOException e) {
                        LOG.error("io error while removing document", e);
                    } catch (final TerminatedException e) {
                        LOG.error("method terminated", e);
                    }
                    return null;
                }
            }.run();

            // Remove the persisted nodes themselves; only XML documents have a
            // node tree stored in dom.dbx (binary documents are handled below).
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
                @Override
                public Object start() {
                    if (doc.getResourceType() == DocumentImpl.XML_FILE) {
                        final NodeHandle node = (NodeHandle) doc.getFirstChild();
                        domDb.removeAll(transaction, node.getInternalAddress());
                    }
                    return null;
                }
            }.run();

            // if it is a binary document remove the content from disk
            if (doc instanceof BinaryDocument) {
                removeCollectionBinary(transaction, (BinaryDocument)doc);
            }

            docTrigger.afterDeleteDocument(this, transaction, doc.getURI());

            //Make doc's id available again
            collectionsDb.freeResourceId(doc.getDocId());
        }
    }
1933

1934
    private void removeCollectionBinary(final Txn transaction, final BinaryDocument doc) throws IOException {
1935
        final BlobStore blobStore = pool.getBlobStore();
1✔
1936
        blobStore.remove(transaction, doc.getBlobId());
1✔
1937
    }
1✔
1938

1939
    /**
1940
     * Checks that the current user has permissions to remove the Collection
1941
     *
1942
     * @param parentCollection The parent Collection or null if we are testing the root Collection
1943
     * @param collection The Collection to check permissions for removal
1944
     *
1945
     * @return true if the current user is allowed to remove the Collection
1946
     */
1947
    private boolean checkRemoveCollectionPermissions(
1948
            @Nullable @EnsureLocked(mode=LockMode.READ_LOCK) final Collection parentCollection,
1949
            @EnsureLocked(mode=LockMode.READ_LOCK) final Collection collection) throws PermissionDeniedException {
1950
        // parent collection permissions
1951
        if(parentCollection != null) {
1✔
1952
            if (!parentCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE)) {
1✔
1953
                return false;
1✔
1954
            }
1955
            if (!parentCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.EXECUTE)) {
1!
1956
                return false;
×
1957
            }
1958
        }
1959

1960
        // collection permissions
1961
        if(!collection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.READ)) {
1!
1962
            return false;
×
1963
        }
1964

1965
        if(!collection.isEmpty(this)) {
1✔
1966
            if(!collection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE)) {
1!
1967
                return false;
×
1968
            }
1969

1970
            if(!collection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.EXECUTE)) {
1!
1971
                return false;
×
1972
            }
1973
        }
1974

1975
        return true;
1✔
1976
    }
1977

1978
    /**
     * Acquires a WRITE lock on a Collection.
     *
     * The lock is managed; it is released when the returned handle is closed.
     *
     * @param collectionUri The uri of the collection to lock
     *
     * @return A managed lock for the Collection
     *
     * @throws LockException if the lock cannot be acquired
     */
    private ManagedCollectionLock writeLockCollection(final XmldbURI collectionUri) throws LockException {
        return lockManager.acquireCollectionWriteLock(collectionUri);
    }
1988

1989
    /**
     * Acquires a READ lock on a Collection.
     *
     * The lock is managed; it is released when the returned handle is closed.
     *
     * @param collectionUri The uri of the collection to lock
     *
     * @return A managed lock for the Collection
     *
     * @throws LockException if the lock cannot be acquired
     */
    private ManagedCollectionLock readLockCollection(final XmldbURI collectionUri) throws LockException {
        return lockManager.acquireCollectionReadLock(collectionUri);
    }
1999

2000
    @Override
2001
    public void saveCollection(final Txn transaction, final Collection collection) throws IOException {
2002
        if(collection == null) {
1!
2003
            LOG.error("NativeBroker.saveCollection called with collection == null! Aborting.");
×
2004
            return;
×
2005
        }
2006

2007
        if(isReadOnly()) {
1!
2008
            throw new IOException(DATABASE_IS_READ_ONLY);
×
2009
        }
2010

2011
        final CollectionCache collectionsCache = pool.getCollectionsCache();
1✔
2012
        collectionsCache.put(collection);
1✔
2013

2014
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
2015
            final Value name = new CollectionStore.CollectionKey(collection.getURI().toString());
1✔
2016
            try(final VariableByteOutputStream os = new VariableByteOutputStream(256)) {
1✔
2017
                collection.serialize(os);
1✔
2018
                final long address = collectionsDb.put(transaction, name, os.data(), true);
1✔
2019
                if (address == BFile.UNKNOWN_ADDRESS) {
1!
2020
                    throw new IOException("Could not store collection data for '" + collection.getURI() + "', address=BFile.UNKNOWN_ADDRESS");
×
2021
                }
2022
            }
2023
        } catch(final LockException e) {
×
2024
            throw new IOException(e);
×
2025
        }
2026
    }
1✔
2027

2028
    /**
2029
     * Get the next available unique collection id.
2030
     * @param transaction the transaction
2031
     * @return next available unique collection id
2032
     * @throws ReadOnlyException in response to an readonly error
2033
     * @throws LockException in case of a lock error
2034
     */
2035
    public int getNextCollectionId(final Txn transaction) throws ReadOnlyException, LockException {
2036
        int nextCollectionId = collectionsDb.getFreeCollectionId();
1✔
2037
        if(nextCollectionId != Collection.UNKNOWN_COLLECTION_ID) {
1✔
2038
            return nextCollectionId;
1✔
2039
        }
2040

2041
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
2042
            final Value key = new CollectionStore.CollectionKey(CollectionStore.NEXT_COLLECTION_ID_KEY);
1✔
2043
            final Value data = collectionsDb.get(key);
1✔
2044
            if(data != null) {
1✔
2045
                nextCollectionId = ByteConversion.byteToInt(data.getData(), OFFSET_COLLECTION_ID);
1✔
2046
                ++nextCollectionId;
1✔
2047
            } else {
1✔
2048
                nextCollectionId = FIRST_COLLECTION_ID;
1✔
2049
            }
2050
            final byte[] d = new byte[Collection.LENGTH_COLLECTION_ID];
1✔
2051
            ByteConversion.intToByte(nextCollectionId, d, OFFSET_COLLECTION_ID);
1✔
2052
            collectionsDb.put(transaction, key, d, true);
1✔
2053
            return nextCollectionId;
1✔
2054
        }
2055
    }
2056

2057
    @Override
2058
    public void reindexCollection(final Txn transaction, final XmldbURI collectionUri) throws PermissionDeniedException, IOException, LockException {
2059
        if(isReadOnly()) {
1!
2060
            throw new IOException(DATABASE_IS_READ_ONLY);
×
2061
        }
2062

2063
        final XmldbURI fqUri = prepend(collectionUri.toCollectionPathURI());
1✔
2064
        final long start = System.currentTimeMillis();
1✔
2065
        try(final Collection collection = openCollection(fqUri, LockMode.READ_LOCK)) {
1✔
2066
            if (collection == null) {
1!
2067
                LOG.warn("Collection {} not found!", fqUri);
×
2068
                return;
×
2069
            }
2070

2071
            LOG.info("Start indexing collection {}", collection.getURI().toString());
1✔
2072
            pool.getProcessMonitor().startJob(ProcessMonitor.ACTION_REINDEX_COLLECTION, collection.getURI());
1✔
2073
            reindexCollection(transaction, collection, IndexMode.STORE);
1✔
2074
        } catch(final PermissionDeniedException | IOException e) {
×
2075
            LOG.error("An error occurred during reindex: {}", e.getMessage(), e);
×
2076
        } finally {
2077
            pool.getProcessMonitor().endJob();
1✔
2078
            LOG.info("Finished indexing collection {} in {} ms.", fqUri, System.currentTimeMillis() - start);
1✔
2079
        }
2080
    }
1✔
2081

2082
    /**
     * Reindexes the documents of a Collection and recursively descends into
     * its child Collections.
     *
     * When {@code mode} is {@link IndexMode#STORE} the existing collection
     * index is dropped before reindexing.
     *
     * NOTE: a LockException during document or child-collection iteration is
     * logged and the remainder skipped, rather than aborting the whole reindex.
     *
     * @param transaction the current transaction
     * @param collection the (READ locked) Collection to reindex
     * @param mode the index mode to reindex with
     *
     * @throws PermissionDeniedException if the user lacks write access on the collection
     * @throws IOException if an index could not be dropped or a child collection is missing
     * @throws LockException if a required btree lock cannot be acquired
     */
    private void reindexCollection(final Txn transaction,
            @EnsureLocked(mode=LockMode.READ_LOCK) final Collection collection, final IndexMode mode)
            throws PermissionDeniedException, IOException, LockException {
        if(!collection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE)) {
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " have insufficient privileges on collection " + collection.getURI());
        }

        LOG.debug("Reindexing collection {}", collection.getURI());
        if(mode == IndexMode.STORE) {
            dropCollectionIndex(transaction, collection, true);
        }

        // reindex documents
        try {
            for (final Iterator<DocumentImpl> i = collection.iterator(this); i.hasNext(); ) {
                final DocumentImpl next = i.next();
                reindexXMLResource(transaction, next, mode);
            }
        } catch(final LockException e) {
            LOG.error("LockException while reindexing documents of collection '{}'. Skipping...", collection.getURI(), e);
        }

        // descend into child collections
        try {
            for (final Iterator<XmldbURI> i = collection.collectionIterator(this); i.hasNext(); ) {
                final XmldbURI childName = i.next();
                final XmldbURI childUri = collection.getURI().append(childName);
                try(final Collection child = openCollection(childUri, LockMode.READ_LOCK)) {
                    if (child == null) {
                        throw new IOException("Collection '" + childUri + "' not found");
                    } else {
                        reindexCollection(transaction, child, mode);
                    }
                }
            }
        } catch(final LockException e) {
            LOG.error("LockException while reindexing child collections of collection '{}'. Skipping...", collection.getURI(), e);
        }
    }
2121

2122
    /**
     * Drops the index of a Collection without scheduling a reindex.
     *
     * Convenience overload for {@link #dropCollectionIndex(Txn, Collection, boolean)}
     * with {@code reindex == false}.
     *
     * @param transaction the current transaction
     * @param collection the (WRITE locked) Collection whose index is dropped
     */
    private void dropCollectionIndex(final Txn transaction,
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection collection)
            throws PermissionDeniedException, IOException, LockException {
        dropCollectionIndex(transaction, collection, false);
    }
2127

2128
    /**
     * Drops the index entries of a Collection and of each of its documents
     * from dom.dbx.
     *
     * @param transaction the current transaction
     * @param collection the (WRITE locked) Collection whose index is dropped
     * @param reindex passed through to the index controller to indicate whether
     *     a reindex will follow
     *
     * @throws PermissionDeniedException if the user lacks write access on the collection
     * @throws IOException if the database is read-only
     * @throws LockException if the dom.dbx btree lock cannot be acquired
     */
    private void dropCollectionIndex(final Txn transaction,
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final Collection collection, final boolean reindex)
            throws PermissionDeniedException, IOException, LockException {
        if(isReadOnly()) {
            throw new IOException(DATABASE_IS_READ_ONLY);
        }
        if(!collection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE)) {
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " have insufficient privileges on collection " + collection.getURI());
        }
        notifyDropIndex(collection);
        getIndexController().removeCollection(collection, this, reindex);
        for (final Iterator<DocumentImpl> i = collection.iterator(this); i.hasNext(); ) {
            final DocumentImpl doc = i.next();
            LOG.debug("Dropping index for document {}", doc.getFileURI());
            // remove all node index entries of the document: a TRUNC_RIGHT query
            // on the doc id prefix matches every node of the document
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
                @Override
                public Object start() {
                    try {
                        final Value ref = new NodeRef(doc.getDocId());
                        final IndexQuery query =
                                new IndexQuery(IndexQuery.TRUNC_RIGHT, ref);
                        domDb.remove(transaction, query, null);
                        domDb.flush();
                    } catch (final TerminatedException | IOException | DBException e) {
                        LOG.error("Error while removing Document '{}' from Collection index: {}", doc.getURI().lastSegment(), collection.getURI(), e);
                    }
                    return null;
                }
            }.run();
        }
    }
2159

2160
    /**
     * Store into the temporary collection of the database a given in-memory Document.
     *
     * The in-memory Document is stored without a transaction and is not journalled,
     * if there is no temporary collection, this will first be created with a transaction.
     *
     * Runs with the system subject pushed onto the subject stack; the original
     * subject is restored on exit.
     *
     * @param doc The in-memory Document to store
     * @return The document stored in the temp collection, or null on failure
     */
    @Override
    public DocumentImpl storeTempResource(final org.exist.dom.memtree.DocumentImpl doc)
        throws EXistException, PermissionDeniedException, LockException {

        try {
            //elevate getUser() to DBA_USER
            pushSubject(pool.getSecurityManager().getSystemSubject());

            //start a transaction
            final TransactionManager transact = pool.getTransactionManager();
            //create a name for the temporary document
            // name is derived from the thread name + current time, so concurrent
            // callers produce distinct document names
            final XmldbURI docName = XmldbURI.create(MessageDigester.md5(Thread.currentThread().getName() + System.currentTimeMillis(), false) + ".xml");

            //get the temp collection
            try(final Txn transaction = transact.beginTransaction();
                    final ManagedCollectionLock tempCollectionLock = lockManager.acquireCollectionWriteLock(XmldbURI.TEMP_COLLECTION_URI)) {

                // if temp collection does not exist, creates temp collection (with write lock in Txn)
                final Tuple2<Boolean, Collection> createdOrExistingTemp = getOrCreateTempCollection(transaction);
                if (createdOrExistingTemp == null) {
                    LOG.error("Failed to create temporary collection");
                    transact.abort(transaction);
                    return null;
                }

                final Collection temp = createdOrExistingTemp._2;

                //create a temporary document
                try (final ManagedDocumentLock docLock = lockManager.acquireDocumentWriteLock(temp.getURI().append(docName))) {
                    final int tmpDocId = getNextResourceId(transaction);
                    final Permission permission = PermissionFactory.getDefaultResourcePermission(getBrokerPool().getSecurityManager());
                    permission.setMode(Permission.DEFAULT_TEMPORARY_DOCUMENT_PERM);
                    final DocumentImpl targetDoc = new DocumentImpl(null, pool, temp, tmpDocId, docName, permission, 0, null, System.currentTimeMillis(), null, null, null);

                    //index the temporary document
                    final DOMIndexer indexer = new DOMIndexer(this, transaction, doc, targetDoc);
                    indexer.scan();
                    indexer.store();
                    //store the temporary document
                    temp.addDocument(transaction, this, targetDoc);

                    storeXMLResource(transaction, targetDoc);

                    saveCollection(transaction, temp);

                    // NOTE: early release of Collection lock inline with Asymmetrical Locking scheme
                    temp.close();

                    flush();
                    closeDocument();
                    //commit the transaction
                    transact.commit(transaction);
                    return targetDoc;
                }
            } catch (final Exception e) {
                // best-effort: failures are logged and null is returned to the caller
                LOG.error("Failed to store temporary fragment: {}", e.getMessage(), e);
            }
        } finally {
            //restore the user
            popSubject();
        }

        return null;
    }
2233

2234
    /**
2235
     * remove all documents from temporary collection
2236
     *
2237
     * @param forceRemoval Should temporary resources be forcefully removed
2238
     */
2239
    @Override
2240
    public void cleanUpTempResources(final boolean forceRemoval) throws PermissionDeniedException {
2241
        try (final Collection temp = openCollection(XmldbURI.TEMP_COLLECTION_URI, LockMode.WRITE_LOCK)) {
1✔
2242
            if (temp == null) {
1!
2243
                return;
1✔
2244
            }
2245
            final TransactionManager transact = pool.getTransactionManager();
×
2246
            try (final Txn transaction = transact.beginTransaction()) {
×
2247
                removeCollection(transaction, temp);
×
2248
                transact.commit(transaction);
×
2249
            } catch (final Exception e) {
×
2250
                LOG.error("Failed to remove temp collection: {}", e.getMessage(), e);
×
2251
            }
2252
        }
2253
    }
×
2254

2255
    /**
     * Resolves a resource by (collectionId, resourceType, documentId) and
     * returns it with READ permission enforced.
     *
     * The collection uri is found by scanning collections.dbx for a collection
     * entry whose persisted id matches; the resource name is then read from
     * the document entry for that collection.
     *
     * @param collectionId the id of the Collection holding the resource
     * @param resourceType the resource type (XML or binary) byte
     * @param documentId the id of the document
     *
     * @return the document, or null if it could not be resolved
     */
    @Override
    public DocumentImpl getResourceById(final int collectionId, final byte resourceType, final int documentId) throws PermissionDeniedException {
        XmldbURI uri;
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeReadLock(collectionsDb.getLockName())) {

            //get the collection uri
            String collectionUri = null;
            if (collectionId == FIRST_COLLECTION_ID) {
                // the root collection id is fixed, no scan needed
                collectionUri = "/db";
            } else {
                // linear scan over all collection keys to find the matching id
                for(final Value collectionDbKey : collectionsDb.getKeys()) {
                    final byte[] data = collectionDbKey.data();
                    if (data[0] == CollectionStore.KEY_TYPE_COLLECTION) {
                        //Value collectionDbValue = collectionsDb.get(collectionDbKey);

                        final VariableByteInput vbi = collectionsDb.getAsStream(collectionDbKey);
                        final int id = vbi.readInt();
                        //check if the collection id matches (first 4 bytes)
                        if (collectionId == id) {
                            // the key data after the type byte is the collection path
                            collectionUri = new String(Arrays.copyOfRange(data, 1, data.length));
                            break;
                        }
                    }
                }
            }

            //get the resource uri
            final Value key = new CollectionStore.DocumentKey(collectionId, resourceType, documentId);
            final VariableByteInput vbi = collectionsDb.getAsStream(key);
            vbi.readInt(); //skip doc id
            final String resourceUri = vbi.readUTF();

            //get the resource
            uri = XmldbURI.createInternal(collectionUri + "/" + resourceUri);

        } catch(final TerminatedException te) {
            LOG.error("Query Terminated", te);
            return null;
        } catch(final BTreeException bte) {
            LOG.error("Problem reading btree", bte);
            return null;
        } catch(final LockException e) {
            LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()));
            return null;
        } catch(final IOException e) {
            LOG.error("IOException while reading resource data", e);
            return null;
        }

        return getResource(uri, Permission.READ);
    }
2306

2307
    /**
     * Stores a document from an InputSource into a Collection;
     * delegates to {@link Collection#storeDocument}.
     */
    @Override
    public void storeDocument(final Txn transaction, final XmldbURI name, final InputSource source, final @Nullable MimeType mimeType, final Collection collection) throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException, IOException {
        collection.storeDocument(transaction, this, name, source, mimeType);
    }
2311

2312
    /**
     * Stores a document from an InputSource into a Collection with explicit
     * metadata (dates, permission, doctype, reader); delegates to
     * {@link Collection#storeDocument}. Null metadata arguments fall back to defaults.
     */
    @Override
    public void storeDocument(final Txn transaction, final XmldbURI name, final InputSource source, final @Nullable MimeType mimeType, final @Nullable Date createdDate, final @Nullable Date lastModifiedDate, final @Nullable Permission permission, final @Nullable DocumentType documentType, final @Nullable XMLReader xmlReader, final Collection collection) throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException, IOException {
        collection.storeDocument(transaction, this, name, source, mimeType, createdDate, lastModifiedDate, permission, documentType, xmlReader);
    }
2316

2317
    /**
     * Stores a document from a DOM Node into a Collection;
     * delegates to {@link Collection#storeDocument}.
     */
    @Override
    public void storeDocument(final Txn transaction, final XmldbURI name, final Node node, final @Nullable MimeType mimeType, final Collection collection) throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException, IOException {
        collection.storeDocument(transaction, this, name, node, mimeType);
    }
2321

2322
    /**
     * Stores a document from a DOM Node into a Collection with explicit
     * metadata (dates, permission, doctype, reader); delegates to
     * {@link Collection#storeDocument}. Null metadata arguments fall back to defaults.
     */
    @Override
    public void storeDocument(final Txn transaction, final XmldbURI name, final Node node, final @Nullable MimeType mimeType, final @Nullable Date createdDate, final @Nullable Date lastModifiedDate, final @Nullable Permission permission, final @Nullable DocumentType documentType, final @Nullable XMLReader xmlReader, final Collection collection) throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException, IOException {
        collection.storeDocument(transaction, this, name, node, mimeType, createdDate, lastModifiedDate, permission, documentType, xmlReader);
    }
2326

2327
    /**
     * Stores a Document's entry (metadata) into its Collection's record in
     * collections.dbx, under the btree write lock.
     *
     * Lock and I/O failures are logged rather than propagated.
     *
     * @param transaction the current transaction
     * @param doc the document whose entry is persisted
     */
    @Override
    public void storeXMLResource(final Txn transaction, final DocumentImpl doc) {
        try(final VariableByteOutputStream os = new VariableByteOutputStream(256);
                final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
            doc.write(os);
            final Value key = new CollectionStore.DocumentKey(doc.getCollection().getId(), doc.getResourceType(), doc.getDocId());
            collectionsDb.put(transaction, key, os.data(), true);
        } catch(final LockException e) {
            LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()));
        } catch(final IOException e) {
            LOG.error("IOException while writing document data: {}", doc.getURI(), e);
        }
    }
2345

2346
    @Override
2347
    public void storeMetadata(final Txn transaction, final DocumentImpl doc) throws TriggerException {
2348
        final Collection col = doc.getCollection();
×
2349
        final DocumentTrigger trigger = new DocumentTriggers(this, transaction, col);
×
2350

2351
        trigger.beforeUpdateDocumentMetadata(this, transaction, doc);
×
2352

2353
        storeXMLResource(transaction, doc);
×
2354

2355
        trigger.afterUpdateDocumentMetadata(this, transaction, doc);
×
2356
    }
×
2357

2358
    @Deprecated
2359
    @Override
2360
    public void storeBinaryResource(final Txn transaction, final BinaryDocument blob, final byte[] data)
2361
            throws IOException {
2362
        try(final InputStream is = new UnsynchronizedByteArrayInputStream(data)) {
×
2363
                storeBinaryResource(transaction, blob, is);
×
2364
        }
2365
    }
×
2366

2367
    @Override
2368
    public void storeBinaryResource(final Txn transaction, final BinaryDocument blob, final InputStream is)
2369
            throws IOException {
2370
        final BlobStore blobStore = pool.getBlobStore();
1✔
2371
        final Tuple2<BlobId, Long> blobIdLen = blobStore.add(transaction, is);
1✔
2372

2373
        blob.setBlobId(blobIdLen._1);
1✔
2374
        blob.setContentLength(blobIdLen._2);
1✔
2375
    }
1✔
2376

2377
    /**
     * Retrieves a document by uri, enforcing READ permission;
     * delegates to {@link #getResource(XmldbURI, int)}.
     */
    @Override
    public Document getXMLResource(final XmldbURI fileName) throws PermissionDeniedException {
        return getResource(fileName, Permission.READ);
    }
2381

2382
    /**
     * Retrieves a document by uri, validating the given access type against
     * the document's permissions.
     *
     * Uses the asymmetrical locking scheme: the collection READ lock is
     * released early, once the document lock has been taken.
     *
     * NOTE(review): the document lock itself is released when the
     * try-with-resources closes {@code lockedDocument}, yet the document is
     * returned to the caller — presumably callers rely on this established
     * pattern; confirm against the broker's documented locking contract.
     *
     * @param fileName the uri of the document
     * @param accessType the Permission access type to validate (e.g. READ, WRITE)
     *
     * @return the document, or null if the collection or document does not exist
     *
     * @throws PermissionDeniedException if access is denied or a lock cannot be acquired
     */
    @Override
    public DocumentImpl getResource(XmldbURI fileName, final int accessType) throws PermissionDeniedException {
        fileName = prepend(fileName.toCollectionPathURI());
        //TODO : resolve URIs !!!
        final XmldbURI collUri = fileName.removeLastSegment();
        final XmldbURI docUri = fileName.lastSegment();
        try(final Collection collection = openCollection(collUri, LockMode.READ_LOCK)) {
            if (collection == null) {
                LOG.debug("collection '{}' not found!", collUri);
                return null;
            }

            //if(!collection.getPermissions().validate(getCurrentSubject(), Permission.READ)) {
            //throw new PermissionDeniedException("Permission denied to read collection '" + collUri + "' by " + getCurrentSubject().getName());
            //}

            try(final LockedDocument lockedDocument = collection.getDocumentWithLock(this, docUri, LockMode.READ_LOCK)) {

                // NOTE: early release of Collection lock inline with Asymmetrical Locking scheme
                collection.close();

                if (lockedDocument == null) {
                    LOG.debug("document '{}' not found!", fileName);
                    return null;
                }

                final DocumentImpl doc = lockedDocument.getDocument();
                if (!doc.getPermissions().validate(getCurrentSubject(), accessType)) {
                    throw new PermissionDeniedException("Account '" + getCurrentSubject().getName() + "' not allowed requested access to document '" + fileName + "'");
                }

                return doc;
            } catch(final LockException e) {
                throw new PermissionDeniedException(e);
            }
        }
    }
2419

2420
    @Override
2421
    public LockedDocument getXMLResource(XmldbURI fileName, final LockMode lockMode) throws PermissionDeniedException {
2422
        if(fileName == null) {
1!
2423
            return null;
×
2424
        }
2425
        fileName = prepend(fileName.toCollectionPathURI());
1✔
2426
        //TODO : resolve URIs !
2427
        final XmldbURI collUri = fileName.removeLastSegment();
1✔
2428
        final XmldbURI docUri = fileName.lastSegment();
1✔
2429
        final LockMode collectionLockMode = lockManager.relativeCollectionLockMode(LockMode.READ_LOCK, lockMode);
1✔
2430
        try(final Collection collection = openCollection(collUri, collectionLockMode)) {
1✔
2431
            if (collection == null) {
1✔
2432
                LOG.debug("Collection '{}' not found!", collUri);
1✔
2433
                return null;
1✔
2434
            }
2435
            try {
2436
                //if (!collection.getPermissions().validate(getCurrentSubject(), Permission.EXECUTE)) {
2437
                //    throw new PermissionDeniedException("Permission denied to read collection '" + collUri + "' by " + getCurrentSubject().getName());
2438
                //}
2439
                final LockedDocument lockedDocument = collection.getDocumentWithLock(this, docUri, lockMode);
1✔
2440

2441
                // NOTE: early release of Collection lock inline with Asymmetrical Locking scheme
2442
                collection.close();
1✔
2443

2444
                if (lockedDocument == null) {
1✔
2445
                    //LOG.debug("document '" + fileName + "' not found!");
2446
                    return null;
1✔
2447
                }
2448
                //if (!doc.getMode().validate(getUser(), Permission.READ))
2449
                //throw new PermissionDeniedException("not allowed to read document");
2450
                final DocumentImpl doc = lockedDocument.getDocument();
1✔
2451
                return lockedDocument;
1✔
2452
            } catch (final LockException e) {
×
2453
                LOG.error("Could not acquire lock on document {}", fileName, e);
×
2454
                //TODO : exception ? -pb
2455
            }
2456
        }
2457
        return null;
×
2458
    }
2459

2460
    @Override
2461
    public void readBinaryResource(final BinaryDocument blob, final OutputStream os)
2462
            throws IOException {
2463
        try (final Txn transaction = continueOrBeginTransaction()) {
1✔
2464
            readBinaryResource(transaction, blob, os);
1✔
2465
            transaction.commit();
1✔
2466
        } catch (final TransactionException e) {
×
2467
            throw new IOException(e.getMessage(), e);
×
2468
        }
2469
    }
1✔
2470

2471
    @Override
2472
    public void readBinaryResource(final Txn transaction, final BinaryDocument blob, final OutputStream os)
2473
            throws IOException {
2474
        final BlobStore blobStore = pool.getBlobStore();
1✔
2475
        try (final InputStream is = blobStore.get(transaction, blob.getBlobId())) {
1✔
2476
            if (is != null) {
1!
2477
                if (os instanceof UnsynchronizedByteArrayOutputStream) {
1✔
2478
                    ((UnsynchronizedByteArrayOutputStream)os).write(is);
1✔
2479
                } else {
1✔
2480
                    copy(is, os);
1✔
2481
                }
2482
            }
2483
        }
2484
    }
1✔
2485

2486
    @Override
2487
    public long getBinaryResourceSize(final BinaryDocument blob)
2488
            throws IOException {
2489
        return blob.getContentLength();
×
2490
    }
2491

2492
    @Override
2493
    public MessageDigest getBinaryResourceContentDigest(final Txn transaction, final BinaryDocument binaryDocument,
2494
        final DigestType digestType) throws IOException {
2495
        final BlobStore blobStore = pool.getBlobStore();
1✔
2496
        return blobStore.getDigest(transaction, binaryDocument.getBlobId(), digestType);
1✔
2497
    }
2498

2499
    @Override
2500
    public Path getBinaryFile(final BinaryDocument blob) {
2501
        throw new UnsupportedOperationException(
×
2502
                "No longer supported, use DBBroker#withBinaryFile(Txn, BinaryDocument, Function)");
×
2503
    }
2504

2505
    @Override
2506
    public <T> T withBinaryFile(final Txn transaction, final BinaryDocument binaryDocument,
2507
            final Function<Path, T> fnFile) throws IOException {
2508
        final BlobStore blobStore = pool.getBlobStore();
1✔
2509
        return blobStore.with(transaction, binaryDocument.getBlobId(), fnFile);
1✔
2510
    }
2511

2512
    @Override
2513
    public InputStream getBinaryResource(final BinaryDocument blob)
2514
            throws IOException {
2515
        // TODO(AR) how best to get the transaction?
2516
        try (final Txn transaction = continueOrBeginTransaction()) {
1✔
2517
            final InputStream is = getBinaryResource(transaction, blob);
1✔
2518

2519
            transaction.commit();
1✔
2520

2521
            return is;
1✔
2522
        } catch (final TransactionException e) {
×
2523
            throw new IOException(e.getMessage(), e);
×
2524
        }
2525
    }
2526

2527
    @Override
2528
    public InputStream getBinaryResource(final Txn transaction, final BinaryDocument blob)
2529
            throws IOException {
2530
        final BlobStore blobStore = pool.getBlobStore();
1✔
2531
        return blobStore.get(transaction, blob.getBlobId());
1✔
2532
    }
2533

2534
    //TODO : consider a better cooperation with Collection -pb
2535
    @Override
2536
    public void getCollectionResources(final Collection.InternalAccess collectionInternalAccess) {
2537
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeReadLock(collectionsDb.getLockName())) {
1✔
2538
            final Value key = new CollectionStore.DocumentKey(collectionInternalAccess.getId());
1✔
2539
            final IndexQuery query = new IndexQuery(IndexQuery.TRUNC_RIGHT, key);
1✔
2540

2541
            collectionsDb.query(query, new DocumentCallback(collectionInternalAccess));
1✔
2542
        } catch(final LockException e) {
×
2543
            LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()));
×
2544
        } catch(final IOException | BTreeException | TerminatedException e) {
×
2545
            LOG.error("Exception while reading document data", e);
×
2546
        }
2547
    }
1✔
2548

2549
    @Override
2550
    public void getResourcesFailsafe(final Txn transaction, final BTreeCallback callback, final boolean fullScan) throws TerminatedException {
2551
        assert(transaction != null && transaction.getState() == Txn.State.STARTED);
1!
2552
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeReadLock(collectionsDb.getLockName())) {
1✔
2553
            final Value key = new CollectionStore.DocumentKey();
1✔
2554
            final IndexQuery query = new IndexQuery(IndexQuery.TRUNC_RIGHT, key);
1✔
2555
            if(fullScan) {
1✔
2556
                collectionsDb.rawScan(query, callback);
1✔
2557
            } else {
1✔
2558
                collectionsDb.query(query, callback);
1✔
2559
            }
2560
        } catch(final LockException e) {
×
2561
            LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()));
×
2562
        } catch(final IOException | BTreeException e) {
×
2563
            LOG.error("Exception while reading document data", e);
×
2564
        }
2565
    }
1✔
2566

2567
    @Override
2568
    public void getCollectionsFailsafe(final Txn transaction, final BTreeCallback callback) throws TerminatedException {
2569
        assert(transaction != null && transaction.getState() == Txn.State.STARTED);
1!
2570
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeReadLock(collectionsDb.getLockName())) {
1✔
2571
            final Value key = new CollectionStore.CollectionKey();
1✔
2572
            final IndexQuery query = new IndexQuery(IndexQuery.TRUNC_RIGHT, key);
1✔
2573
            collectionsDb.query(query, callback);
1✔
2574
        } catch(final LockException e) {
×
2575
            LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()));
×
2576
        } catch(final IOException | BTreeException e) {
×
2577
            LOG.error("Exception while reading document data", e);
×
2578
        }
2579
    }
1✔
2580

2581
    @Override
2582
    public MutableDocumentSet getXMLResourcesByDoctype(final String doctypeName, final MutableDocumentSet result) throws PermissionDeniedException, LockException {
2583
        final MutableDocumentSet docs = getAllXMLResources(new DefaultDocumentSet());
×
2584
        for(final Iterator<DocumentImpl> i = docs.getDocumentIterator(); i.hasNext(); ) {
×
2585
            final DocumentImpl doc = i.next();
×
2586
            try(final ManagedDocumentLock documentLock = lockManager.acquireDocumentReadLock(doc.getURI())) {
×
2587
                final DocumentType doctype = doc.getDoctype();
×
2588
                if (doctype == null) {
×
2589
                    continue;
2590
                }
2591
                if (doctypeName.equals(doctype.getName())
×
2592
                        && doc.getCollection().getPermissionsNoLock().validate(getCurrentSubject(), Permission.READ)
×
2593
                        && doc.getPermissions().validate(getCurrentSubject(), Permission.READ)) {
×
2594
                    result.add(doc);
×
2595
                }
2596
            }
2597
        }
2598
        return result;
×
2599
    }
2600

2601
    @Override
2602
    public MutableDocumentSet getAllXMLResources(final MutableDocumentSet docs) throws PermissionDeniedException, LockException {
2603
        final long start = System.currentTimeMillis();
1✔
2604
        try(final Collection rootCollection = openCollection(XmldbURI.ROOT_COLLECTION_URI, LockMode.READ_LOCK)) {
1✔
2605
            rootCollection.allDocs(this, docs, true);
1✔
2606
            if(LOG.isDebugEnabled()) {
1!
2607
                LOG.debug("getAllDocuments(DocumentSet) - end - loading {} documents took {} ms.",
×
2608
                        docs.getDocumentCount(), (System.currentTimeMillis() - start));
×
2609
            }
2610
            return docs;
1✔
2611
        }
2612
    }
2613

2614
    @Override
2615
    public void copyResource(final Txn transaction, final DocumentImpl sourceDocument, final Collection targetCollection, final XmldbURI newName) throws PermissionDeniedException, LockException, IOException, TriggerException, EXistException {
2616
        copyResource(transaction, sourceDocument, targetCollection, newName, PreserveType.DEFAULT);
1✔
2617
    }
1✔
2618

2619
    @Override
2620
    public void copyResource(final Txn transaction, final DocumentImpl sourceDocument, final Collection targetCollection, final XmldbURI newDocName, final PreserveType preserve) throws PermissionDeniedException, LockException, IOException, TriggerException, EXistException {
2621
        assert(sourceDocument != null);
1!
2622
        assert(targetCollection != null);
1!
2623
        assert(newDocName != null);
1!
2624
        if(isReadOnly()) {
1!
2625
            throw new IOException(DATABASE_IS_READ_ONLY);
×
2626
        }
2627

2628
        if(newDocName.numSegments() != 1) {
1!
2629
            throw new IOException("newName name must be just a name i.e. an XmldbURI with one segment!");
×
2630
        }
2631

2632
        final XmldbURI sourceDocumentUri = sourceDocument.getURI();
1✔
2633
        final XmldbURI targetCollectionUri = targetCollection.getURI();
1✔
2634
        final XmldbURI targetDocumentUri = targetCollectionUri.append(newDocName);
1✔
2635

2636
        if(!sourceDocument.getPermissions().validate(getCurrentSubject(), Permission.READ)) {
1!
2637
            throw new PermissionDeniedException("Account '" + getCurrentSubject().getName() + "' has insufficient privileges to copy the resource '" + sourceDocumentUri + "'.");
×
2638
        }
2639

2640
        // we assume the caller holds a READ_LOCK (or better) on sourceDocument#getCollection()
2641
        final Collection sourceCollection = sourceDocument.getCollection();
1✔
2642
        if (!sourceCollection.getPermissions().validate(getCurrentSubject(), Permission.EXECUTE)) {
1!
2643
            throw new PermissionDeniedException("Account '" + getCurrentSubject().getName() + "' has insufficient privileges to copy the resource '" + sourceDocumentUri + "'.");
×
2644
        }
2645

2646
        if(!targetCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.EXECUTE)) {
1!
2647
            throw new PermissionDeniedException("Account '" + getCurrentSubject().getName() + "' does not have execute access on the destination collection '" + targetCollectionUri + "'.");
×
2648
        }
2649

2650
        if(targetCollection.hasChildCollection(this, newDocName.lastSegment())) {
1!
2651
            throw new EXistException("The collection '" + targetCollectionUri + "' already has a sub-collection named '" + newDocName.lastSegment() + "', you cannot create a Document with the same name as an existing collection.");
×
2652
        }
2653

2654
        try(final LockedDocument oldLockedDoc = targetCollection.getDocumentWithLock(this, newDocName, LockMode.WRITE_LOCK)) {
1✔
2655
            final DocumentTrigger trigger = new DocumentTriggers(this, transaction, targetCollection);
1✔
2656

2657
            final DocumentImpl oldDoc = oldLockedDoc == null ? null : oldLockedDoc.getDocument();
1✔
2658
            if (oldDoc == null) {
1✔
2659
                if (!targetCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE)) {
1!
2660
                    throw new PermissionDeniedException("Account '" + getCurrentSubject().getName() + "' does not have write access on the destination collection '" + targetCollectionUri + "'.");
×
2661
                }
2662
            } else {
2663
                //overwrite existing document
2664

2665
                if (sourceDocument.getDocId() == oldDoc.getDocId()) {
1!
2666
                    throw new PermissionDeniedException("Cannot copy resource to itself '" + sourceDocumentUri + "'.");
×
2667
                }
2668

2669
                if (!oldDoc.getPermissions().validate(getCurrentSubject(), Permission.WRITE)) {
1!
2670
                    throw new PermissionDeniedException("A resource with the same name already exists in the target collection '" + oldDoc.getURI() + "', and you do not have write access on that resource.");
×
2671
                }
2672

2673
                trigger.beforeDeleteDocument(this, transaction, oldDoc);
1✔
2674
                trigger.afterDeleteDocument(this, transaction, targetDocumentUri);
1✔
2675
            }
2676

2677
            doCopyDocument(transaction, trigger, sourceDocument, targetCollection, newDocName, oldDoc, preserve);
1✔
2678
        }
2679
    }
1✔
2680

2681
    /**
2682
     * Creates a new Document object for the destination document
2683
     * - copies the nodes from the source document to the destination document
2684
     * - if no existing document in the destination:
2685
     *      - adds the destination document to the destination collection
2686
     *   else, switches the existing document object for the new document in the destination collection
2687
     *
2688
     *   asynchronously deletes the nodes of the old existing document
2689
     */
2690
    private void doCopyDocument(final Txn transaction, final DocumentTrigger trigger,
2691
            final DocumentImpl sourceDocument, final Collection targetCollection, final XmldbURI newDocName,
2692
            @EnsureLocked(mode=LockMode.WRITE_LOCK) @Nullable final DocumentImpl oldDoc, final PreserveType preserve)
2693
            throws TriggerException, LockException, PermissionDeniedException, IOException, EXistException {
2694

2695
        final XmldbURI sourceDocumentUri = sourceDocument.getURI();
1✔
2696
        final XmldbURI targetCollectionUri = targetCollection.getURI();
1✔
2697
        final XmldbURI targetDocumentUri = targetCollectionUri.append(newDocName);
1✔
2698

2699
        trigger.beforeCopyDocument(this, transaction, sourceDocument, targetDocumentUri);
1✔
2700

2701
        final DocumentImpl newDocument;
2702
        final LockManager lockManager = getBrokerPool().getLockManager();
1✔
2703
        try (final ManagedDocumentLock newDocLock = lockManager.acquireDocumentWriteLock(targetDocumentUri)) {
1✔
2704
            final int copiedDocId = getNextResourceId(transaction);
1✔
2705
            if (sourceDocument.getResourceType() == DocumentImpl.BINARY_FILE) {
1✔
2706
                final BinaryDocument newDoc;
2707
                if (oldDoc != null) {
1✔
2708
                    newDoc = new BinaryDocument(null, copiedDocId, oldDoc);
1✔
2709
                } else {
1✔
2710
                    newDoc = new BinaryDocument(null, getBrokerPool(), targetCollection, copiedDocId, newDocName);
1✔
2711
                }
2712

2713
                newDoc.copyOf(this, sourceDocument, oldDoc);
1✔
2714

2715
                if (preserveOnCopy(preserve)) {
1✔
2716
                    copyResource_preserve(this, sourceDocument, newDoc, oldDoc != null);
1✔
2717
                }
2718

2719
                copyBinaryResource(transaction, (BinaryDocument)sourceDocument, newDoc);
1✔
2720
                newDocument = newDoc;
1✔
2721
            } else {
1✔
2722
                final DocumentImpl newDoc;
2723
                if (oldDoc != null) {
1✔
2724
                    newDoc = new DocumentImpl(null, copiedDocId, oldDoc);
1✔
2725
                } else {
1✔
2726
                    newDoc = new DocumentImpl(null, pool, targetCollection, copiedDocId, newDocName);
1✔
2727
                }
2728

2729
                newDoc.copyOf(this, sourceDocument, oldDoc);
1✔
2730

2731
                copyXMLResource(transaction, sourceDocument, newDoc);
1✔
2732
                if (preserveOnCopy(preserve)) {
1✔
2733
                    copyResource_preserve(this, sourceDocument, newDoc, oldDoc != null);
1✔
2734
                }
2735

2736
                newDocument = newDoc;
1✔
2737
            }
2738

2739
            /*
2740
             * Stores the document entry for newDstDoc,
2741
             * or overwrites the document entry for currentDstDoc with
2742
             * the entry for newDstDoc, in collections.dbx.
2743
             */
2744
            storeXMLResource(transaction, newDocument);
1✔
2745

2746
            // must be the last action (before cleanup), as this will make newDstDoc available to other threads!
2747
            targetCollection.addDocument(transaction, this, newDocument);
1✔
2748

2749
            // NOTE: copied document is now live!
2750

2751

2752
            // TODO (AR) this could be done asynchronously in future perhaps?
2753
            // cleanup the old destination doc (if present)
2754
            if (oldDoc != null) {
1✔
2755
                if (oldDoc.getResourceType() == DocumentImpl.XML_FILE) {
1✔
2756
                    // drop the index and dom nodes of the old document
2757
                    dropIndex(transaction, oldDoc);
1✔
2758
                    dropDomNodes(transaction, oldDoc);
1✔
2759

2760
                } else {
1✔
2761
                    // remove the blob of the old document
2762
                    final BlobStore blobStore = pool.getBlobStore();
1✔
2763
                    blobStore.remove(transaction, ((BinaryDocument)oldDoc).getBlobId());
1✔
2764
                }
2765

2766
                // remove oldDoc entry from collections.dbx
2767
                removeResourceMetadata(transaction, oldDoc);
1✔
2768

2769
                // TODO(AR) do we need a freeId flag to control this?
2770
                // recycle the id
2771
                collectionsDb.freeResourceId(oldDoc.getDocId());
1✔
2772

2773
                // The Collection object oldDstDoc is now an empty husk which is
2774
                // not available or referenced from anywhere, it will be subject
2775
                // to garbage collection
2776
            }
2777
        }
2778

2779
        trigger.afterCopyDocument(this, transaction, newDocument, sourceDocumentUri);
1✔
2780
    }
1✔
2781

2782
    /**
2783
     * Preserves attributes when copying a resource.
2784
     * e.g. `cp --preserve`
2785
     *
2786
     * @param broker the DBBroker
2787
     * @param srcDocument The source document.
2788
     * @param destDocument The destination document.
2789
     * @param overwrittingDest if true it overwrites the destination resource
2790
     * @throws PermissionDeniedException if user does not have sufficient rights
2791
     *
2792
     */
2793
    public static void copyResource_preserve(final DBBroker broker, final DocumentImpl srcDocument, final DocumentImpl destDocument, final boolean overwrittingDest) throws PermissionDeniedException {
2794
        final Permission srcPermissions = srcDocument.getPermissions();
1✔
2795
        final Permission destPermissions = destDocument.getPermissions();
1✔
2796

2797
        // only copy the owner and group from the source if we are creating a new file and we are the DBA
2798
        if ((!overwrittingDest) && broker.getCurrentSubject().hasDbaRole()) {
1✔
2799
            PermissionFactory.chown(broker, destPermissions, Optional.of(srcPermissions.getOwner().getName()), Optional.of(srcPermissions.getGroup().getName()));
1✔
2800
        }
2801

2802
        copyModeAcl(broker, srcPermissions, destPermissions);
1✔
2803

2804
        // btime (birth time)
2805
        if (!overwrittingDest) {
1✔
2806
            destDocument.setCreated(srcDocument.getLastModified());  // Indeed! ...the birth time of the dest file is the last modified time of the source file
1✔
2807
        }
2808

2809
        // mtime (modified time)
2810
        destDocument.setLastModified(srcDocument.getLastModified());
1✔
2811

2812
    }
1✔
2813

2814
    /**
2815
     * Copies the Mode and ACL (if present) from one
2816
     * object to another.
2817
     *
2818
     * @param srcPermissions The permissions of the source object.
2819
     * @param destPermissions The permissions of the destination object.
2820
     */
2821
    private static void copyModeAcl(final DBBroker broker, final Permission srcPermissions, final Permission destPermissions) throws PermissionDeniedException {
2822
        PermissionFactory.chmod(broker, destPermissions, Optional.of(srcPermissions.getMode()), Optional.empty());
1✔
2823
        if (srcPermissions instanceof SimpleACLPermission && destPermissions instanceof SimpleACLPermission) {
1!
2824
            PermissionFactory.chacl(destPermissions, newAcl ->
1✔
2825
                ((SimpleACLPermission)newAcl).copyAclOf((SimpleACLPermission)srcPermissions)
1✔
2826
            );
2827
        }
2828
    }
1✔
2829

2830
    private void copyXMLResource(final Txn transaction,
2831
            @EnsureLocked(mode=LockMode.READ_LOCK) final DocumentImpl oldDoc,
2832
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final DocumentImpl newDoc) throws IOException {
2833
        if (LOG.isDebugEnabled()) {
1!
2834
            LOG.debug("Copying document {} to {}", oldDoc.getFileURI(), newDoc.getURI());
×
2835
        }
2836
        final long start = System.currentTimeMillis();
1✔
2837
        final StreamListener listener = getIndexController().getStreamListener(newDoc, ReindexMode.STORE);
1✔
2838
        final NodeList nodes = oldDoc.getChildNodes();
1✔
2839
        for(int i = 0; i < nodes.getLength(); i++) {
1✔
2840
            final IStoredNode<?> node = (IStoredNode<?>) nodes.item(i);
1✔
2841
            try(final INodeIterator iterator = getNodeIterator(node)) {
1✔
2842
                iterator.next();
1✔
2843
                copyNodes(transaction, iterator, node, new NodePath2(), newDoc, false, listener);
1✔
2844
            }
2845
        }
2846
        flush();
1✔
2847
        closeDocument();
1✔
2848
        if (LOG.isDebugEnabled()) {
1!
2849
            LOG.debug("Copy took {} ms.", (System.currentTimeMillis() - start));
×
2850
        }
2851
    }
1✔
2852

2853
    private void copyBinaryResource(final Txn transaction, final BinaryDocument srcDoc, final BinaryDocument dstDoc) throws IOException {
2854
        final BlobStore blobStore = pool.getBlobStore();
1✔
2855
        final BlobId dstBlobId = blobStore.copy(transaction, srcDoc.getBlobId());
1✔
2856

2857
        dstDoc.setBlobId(dstBlobId);
1✔
2858
        dstDoc.setContentLength(srcDoc.getContentLength());
1✔
2859
    }
1✔
2860

2861

2862
    @Override
2863
    public void moveResource(final Txn transaction, final DocumentImpl sourceDocument, final Collection targetCollection, final XmldbURI newName) throws PermissionDeniedException, LockException, IOException, TriggerException {
2864
        assert(sourceDocument != null);
1!
2865
        assert(targetCollection != null);
1!
2866
        assert(newName != null);
1!
2867

2868
        if(isReadOnly()) {
1!
2869
            throw new IOException(DATABASE_IS_READ_ONLY);
×
2870
        }
2871

2872
        if(newName.numSegments() != 1) {
1!
2873
            throw new IOException("newName name must be just a name i.e. an XmldbURI with one segment!");
×
2874
        }
2875

2876
        final XmldbURI sourceDocumentUri = sourceDocument.getURI();
1✔
2877
        final XmldbURI targetCollectionUri = targetCollection.getURI();
1✔
2878
        final XmldbURI destinationDocumentUri = targetCollectionUri.append(newName);
1✔
2879

2880
        final Account docUser = sourceDocument.getUserLock();
1✔
2881
        if(docUser != null) {
1!
2882
            if(!getCurrentSubject().getName().equals(docUser.getName())) {
×
2883
                throw new PermissionDeniedException("Cannot move '" + sourceDocumentUri + " because is locked by getUser() '" + docUser.getName() + "'");
×
2884
            }
2885
        }
2886

2887
        /**
2888
         * As per the rules of Linux -
2889
         *
2890
         * mv is NOT a copy operation unless we are traversing filesystems.
2891
         * We consider eXist to be a single filesystem, so we only need
2892
         * WRITE and EXECUTE access on the source and destination collections
2893
         * as we are effectively just re-linking the file.
2894
         *
2895
         * - Adam 2013-03-26
2896
         */
2897

2898
        // we assume the caller holds a WRITE_LOCK on sourceDocument#getCollection()
2899
        final Collection sourceCollection = sourceDocument.getCollection();
1✔
2900
        if(!sourceCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE | Permission.EXECUTE)) {
1!
2901
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " have insufficient privileges on source Collection to move resource: " + sourceDocumentUri);
×
2902
        }
2903

2904
        if(!targetCollection.getPermissionsNoLock().validate(getCurrentSubject(), Permission.WRITE | Permission.EXECUTE)) {
1!
2905
            throw new PermissionDeniedException("Account " + getCurrentSubject().getName() + " have insufficient privileges on destination Collection '" + targetCollectionUri + "' to move resource: " + sourceDocumentUri);
×
2906
        }
2907

2908
        if(targetCollection.hasChildCollection(this, newName.lastSegment())) {
1!
2909
            throw new PermissionDeniedException(
×
2910
                "The Collection '" + targetCollectionUri + "' has a sub-collection '" + newName + "'; cannot create a Document with the same name!"
×
2911
            );
2912
        }
2913

2914
        final DocumentTrigger trigger = new DocumentTriggers(this, transaction, sourceCollection);
1✔
2915

2916
        // check if the move would overwrite a document
2917
        final DocumentImpl oldDoc = targetCollection.getDocument(this, newName);
1✔
2918
        if(oldDoc != null) {
1✔
2919

2920
            if(sourceDocument.getDocId() == oldDoc.getDocId()) {
1!
2921
                throw new PermissionDeniedException("Cannot move resource to itself '" + sourceDocumentUri + "'.");
×
2922
            }
2923

2924
            // GNU mv command would prompt for Confirmation here, you can say yes or pass the '-f' flag. As we cant prompt for confirmation we assume OK
2925
            /* if(!oldDoc.getPermissions().validate(getCurrentSubject(), Permission.WRITE)) {
2926
                throw new PermissionDeniedException("Resource with same name exists in target collection and write is denied");
2927
            }
2928
            */
2929

2930
            // remove the existing document
2931
            removeResource(transaction, oldDoc);
1✔
2932
        }
2933

2934
        final boolean renameOnly = sourceCollection.getId() == targetCollection.getId();
1!
2935

2936
        trigger.beforeMoveDocument(this, transaction, sourceDocument, destinationDocumentUri);
1✔
2937

2938
        if(sourceDocument.getResourceType() == DocumentImpl.XML_FILE) {
1!
2939
            if (!renameOnly) {
1!
2940
                dropIndex(transaction, sourceDocument);
1✔
2941
            }
2942
        }
2943

2944
        sourceCollection.unlinkDocument(this, sourceDocument);
1✔
2945
        if(!renameOnly) {
1!
2946
            saveCollection(transaction, sourceCollection);
1✔
2947
        }
2948

2949
        removeResourceMetadata(transaction, sourceDocument);
1✔
2950

2951
        sourceDocument.setFileURI(newName);
1✔
2952
        sourceDocument.setCollection(targetCollection);
1✔
2953
        targetCollection.addDocument(transaction, this, sourceDocument);
1✔
2954

2955
        if(sourceDocument.getResourceType() == DocumentImpl.XML_FILE) {
1!
2956
            if(!renameOnly) {
1!
2957
                // reindexing
2958
                reindexXMLResource(transaction, sourceDocument, IndexMode.REPAIR);
1✔
2959
            }
2960
        }
2961

2962
            // NOTE: nothing needs to be done for binary resources as the reference to the Blob does not change
2963

2964
        storeXMLResource(transaction, sourceDocument);
1✔
2965
        saveCollection(transaction, targetCollection);
1✔
2966

2967
        trigger.afterMoveDocument(this, transaction, sourceDocument, sourceDocumentUri);
1✔
2968
    }
1✔
2969

2970
    @Override
2971
    public void removeXMLResource(final Txn transaction, final DocumentImpl document, final boolean freeDocId) throws PermissionDeniedException, IOException {
2972
        if(isReadOnly()) {
1!
2973
            throw new IOException(DATABASE_IS_READ_ONLY);
×
2974
        }
2975
        try {
2976
            if(LOG.isDebugEnabled()) {
1!
2977
                LOG.debug("Removing document {} ({}) ...", document.getFileURI(), document.getDocId());
×
2978
            }
2979

2980
            final DocumentTrigger trigger = new DocumentTriggers(this, transaction);
1✔
2981

2982
            if(freeDocId) {
1✔
2983
                trigger.beforeDeleteDocument(this, transaction, document);
1✔
2984
            }
2985

2986
            dropIndex(transaction, document);
1✔
2987
            if(LOG.isDebugEnabled()) {
1!
2988
                LOG.debug("removeDocument() - removing dom");
×
2989
            }
2990
            dropDomNodes(transaction, document);
1✔
2991
            removeResourceMetadata(transaction, document);
1✔
2992

2993
            if(freeDocId) {
1✔
2994
                collectionsDb.freeResourceId(document.getDocId());
1✔
2995
                trigger.afterDeleteDocument(this, transaction, document.getURI());
1✔
2996
            }
2997
        } catch(final TriggerException e) {
1✔
2998
            LOG.error(e);
×
2999
        }
3000
    }
1✔
3001

3002
    private void dropIndex(final Txn transaction, @EnsureLocked(mode=LockMode.WRITE_LOCK) final DocumentImpl document) {
3003
        final StreamListener listener = getIndexController().getStreamListener(document, ReindexMode.REMOVE_ALL_NODES);
1✔
3004
        listener.startIndexDocument(transaction);
1✔
3005
        final NodeList nodes = document.getChildNodes();
1✔
3006
        for(int i = 0; i < nodes.getLength(); i++) {
1✔
3007
            final IStoredNode<?> node = (IStoredNode<?>) nodes.item(i);
1✔
3008
            try(final INodeIterator iterator = getNodeIterator(node)) {
1✔
3009
                iterator.next();
1✔
3010
                scanNodes(transaction, iterator, node, new NodePath2(), IndexMode.REMOVE, listener);
1✔
3011
            } catch(final IOException ioe) {
×
3012
                LOG.error("Unable to close node iterator", ioe);
×
3013
            }
3014
        }
3015
        listener.endIndexDocument(transaction);
1✔
3016
        notifyDropIndex(document);
1✔
3017
        getIndexController().flush();
1✔
3018
    }
1✔
3019

3020
    private void dropDomNodes(final Txn transaction, final DocumentImpl document) {
3021
        try {
3022
            if(!document.isReferenced()) {
1!
3023
                new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
1✔
3024
                    @Override
3025
                    public Object start() {
3026
                        final NodeHandle node = (NodeHandle) document.getFirstChild();
1✔
3027
                        domDb.removeAll(transaction, node.getInternalAddress());
1✔
3028
                        return null;
1✔
3029
                    }
3030
                }.run();
1✔
3031
            }
3032
        } catch(final NullPointerException npe0) {
1✔
3033
            LOG.error("Caught NPE in DOMTransaction to actually be able to remove the document.");
×
3034
        }
3035

3036
        final NodeRef ref = new NodeRef(document.getDocId());
1✔
3037
        final IndexQuery idx = new IndexQuery(IndexQuery.TRUNC_RIGHT, ref);
1✔
3038
        new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
1✔
3039
            @Override
3040
            public Object start() {
3041
                try {
3042
                    domDb.remove(transaction, idx, null);
1✔
3043
                } catch(final BTreeException | IOException e) {
1✔
3044
                    LOG.error("start() - " + "error while removing doc", e);
×
3045
                } catch(final TerminatedException e) {
×
3046
                    LOG.error("method terminated", e);
×
3047
                }
3048
                return null;
1✔
3049
            }
3050
        }.run();
1✔
3051
    }
1✔
3052

3053
    @Override
3054
    public void removeBinaryResource(final Txn transaction, final BinaryDocument blob) throws PermissionDeniedException, IOException {
3055
        if(isReadOnly()) {
1!
3056
            throw new IOException(DATABASE_IS_READ_ONLY);
×
3057
        }
3058

3059
        if(LOG.isDebugEnabled()) {
1!
3060
            LOG.debug("removing binary resource {}...", blob.getDocId());
×
3061
        }
3062

3063
        if (blob.getBlobId() == null) {
1!
3064
            LOG.warn("Trying to delete binary document: {}, but blobId was null", blob.getURI());
×
3065
            return;
×
3066
        }
3067

3068
        final BlobStore blobStore = pool.getBlobStore();
1✔
3069
        blobStore.remove(transaction, blob.getBlobId());
1✔
3070

3071
        // remove the file from the database metadata and indexes
3072
        removeResourceMetadata(transaction, blob);
1✔
3073
        getIndexController().setDocument(blob, ReindexMode.REMOVE_BINARY);
1✔
3074
        getIndexController().flush();
1✔
3075
    }
1✔
3076

3077
    @Override
3078
    public void removeResourceMetadata(final Txn transaction,
3079
            @EnsureLocked(mode=LockMode.WRITE_LOCK) final DocumentImpl document) {
3080
        // remove document metadata
3081
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
3082
            if(LOG.isDebugEnabled()) {
1!
3083
                LOG.debug("Removing resource metadata for {}", document.getDocId());
×
3084
            }
3085
            final Value key = new CollectionStore.DocumentKey(document.getCollection().getId(), document.getResourceType(), document.getDocId());
1✔
3086
            collectionsDb.remove(transaction, key);
1✔
3087
        } catch(final LockException e) {
×
3088
            LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()));
×
3089
        }
3090
    }
1✔
3091

3092
    @Override
3093
    public void removeResource(final Txn tx, final DocumentImpl doc) throws IOException, PermissionDeniedException {
3094
        if (doc instanceof BinaryDocument) {
1✔
3095
            removeBinaryResource(tx, (BinaryDocument) doc);
1✔
3096
        } else {
1✔
3097
            removeXMLResource(tx, doc);
1✔
3098
        }
3099
    }
1✔
3100

3101
    @Override
3102
    public void removeXMLResource(final Txn transaction, @EnsureLocked(mode=LockMode.WRITE_LOCK) final DocumentImpl document)
3103
            throws PermissionDeniedException, IOException {
3104
        removeXMLResource(transaction, document, true);
1✔
3105
    }
1✔
3106

3107
    /**
3108
     * get next Free Doc Id
3109
     *
3110
     * @throws EXistException If there's no free document id
3111
     */
3112
    @Override
3113
    public int getNextResourceId(final Txn transaction) throws EXistException, LockException {
3114
        int nextDocId = collectionsDb.getFreeResourceId();
1✔
3115
        if(nextDocId != DocumentImpl.UNKNOWN_DOCUMENT_ID) {
1✔
3116
            return nextDocId;
1✔
3117
        }
3118
        nextDocId = 1;
1✔
3119
        try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
3120
            final Value key = new CollectionStore.CollectionKey(CollectionStore.NEXT_DOC_ID_KEY);
1✔
3121
            final Value data = collectionsDb.get(key);
1✔
3122
            if(data != null) {
1✔
3123
                nextDocId = ByteConversion.byteToInt(data.getData(), 0);
1✔
3124
                ++nextDocId;
1✔
3125
                if(nextDocId == 0x7FFFFFFF) {
1!
3126
                    pool.setReadOnly();
×
3127
                    throw new EXistException("Max. number of document ids reached. Database is set to " +
×
3128
                        "read-only state. Please do a complete backup/restore to compact the db and " +
3129
                        "free document ids.");
3130
                }
3131
            }
3132
            final byte[] d = new byte[4];
1✔
3133
            ByteConversion.intToByte(nextDocId, d, 0);
1✔
3134
            collectionsDb.put(transaction, key, d, true);
1✔
3135
            //} catch (ReadOnlyException e) {
3136
            //LOG.warn("Database is read-only");
3137
            //return DocumentImpl.UNKNOWN_DOCUMENT_ID;
3138
            //TODO : rethrow ? -pb
3139
        }
3140
        return nextDocId;
1✔
3141
    }
3142

3143
    @Override
3144
    public void reindexXMLResource(final Txn txn, final DocumentImpl doc) {
3145
        reindexXMLResource(txn, doc, IndexMode.REPAIR);
×
3146
    }
×
3147

3148
    /**
3149
     * Reindex the nodes in the document. This method will either reindex all
3150
     * descendant nodes of the passed node, or all nodes below some level of
3151
     * the document if node is null.
3152
     */
3153
    @Override
3154
    public void reindexXMLResource(final Txn transaction, final DocumentImpl doc, final IndexMode mode) {
3155
        final StreamListener listener = getIndexController().getStreamListener(doc, ReindexMode.STORE);
1✔
3156
        getIndexController().startIndexDocument(transaction, listener);
1✔
3157
        try {
3158
            final NodeList nodes = doc.getChildNodes();
1✔
3159
            for (int i = 0; i < nodes.getLength(); i++) {
1✔
3160
                final IStoredNode<?> node = (IStoredNode<?>) nodes.item(i);
1✔
3161
                try (final INodeIterator iterator = getNodeIterator(node)) {
1✔
3162
                    iterator.next();
1✔
3163
                    scanNodes(transaction, iterator, node, new NodePath2(), mode, listener);
1✔
3164
                } catch (final IOException ioe) {
×
3165
                    LOG.error("Unable to close node iterator", ioe);
×
3166
                }
3167
            }
3168
        } finally {
1✔
3169
            getIndexController().endIndexDocument(transaction, listener);
1✔
3170
        }
3171
        flush();
1✔
3172
    }
1✔
3173

3174
    /**
     * Defragments an XML resource by rewriting all of its nodes contiguously.
     *
     * The sequence is order-critical: drop the structure index, drop the dom
     * btree keys, copy all nodes into a fresh temporary document (renumbering
     * node ids), remove the old node pages, then transfer the new children
     * back onto the original document and persist it.
     *
     * @param transaction the current transaction
     * @param doc the document to defragment
     */
    @Override
    public void defragXMLResource(final Txn transaction, final DocumentImpl doc) {
        //TODO : use dedicated function in XmldbURI
        if (LOG.isDebugEnabled())
            LOG.debug("============> Defragmenting document {}", doc.getURI());
        final long start = System.currentTimeMillis();
        try {
            // address of the first child page; needed later to remove the old nodes
            final long firstChild = doc.getFirstChildAddress();
            // dropping old structure index
            dropIndex(transaction, doc);
            // dropping dom index (right-truncated prefix query on the doc id)
            final NodeRef ref = new NodeRef(doc.getDocId());
            final IndexQuery idx = new IndexQuery(IndexQuery.TRUNC_RIGHT, ref);
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
                @Override
                public Object start() {
                    try {
                        domDb.remove(transaction, idx, null);
                        domDb.flush();
                    } catch(final IOException | DBException e) {
                        LOG.error("start() - " + "error while removing doc", e);
                    } catch(final TerminatedException e) {
                        LOG.error("method terminated", e);
                    }
                    return null;
                }
            }.run();
            // create a copy of the old doc to copy the nodes into it
            final DocumentImpl tempDoc = new DocumentImpl(null, doc.getDocId(), doc);
            tempDoc.copyOf(this, doc, doc);
            final StreamListener listener = getIndexController().getStreamListener(doc, ReindexMode.STORE);
            // copy the nodes; defragment=true makes copyNodes renumber node ids
            final NodeList nodes = doc.getChildNodes();
            for(int i = 0; i < nodes.getLength(); i++) {
                final IStoredNode<?> node = (IStoredNode<?>) nodes.item(i);
                try(final INodeIterator iterator = getNodeIterator(node)) {
                    iterator.next();
                    copyNodes(transaction, iterator, node, new NodePath2(), tempDoc, true, listener);
                }
            }
            flush();
            // remove the old nodes, starting at the saved first-child address
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
                @Override
                public Object start() {
                    domDb.removeAll(transaction, firstChild);
                    try {
                        domDb.flush();
                    } catch(final DBException e) {
                        LOG.error("start() - error while removing doc", e);
                    }
                    return null;
                }
            }.run();
            // transfer the freshly-written children back onto the original document
            doc.copyChildren(tempDoc);
            doc.setSplitCount(0);
            doc.setPageCount(tempDoc.getPageCount());
            storeXMLResource(transaction, doc);
            closeDocument();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Defragmentation took {} ms.", (System.currentTimeMillis() - start));
            }
        } catch(final PermissionDeniedException | IOException e) {
            LOG.error(e);
        }
    }
3240

3241
    /**
3242
     * consistency Check of the database; useful after XUpdates;
3243
     * called if xupdate.consistency-checks is true in configuration
3244
     */
3245
    @Override
3246
    public void checkXMLResourceConsistency(final DocumentImpl doc) throws EXistException {
3247
        boolean xupdateConsistencyChecks = false;
1✔
3248
        final Object property = pool.getConfiguration().getProperty(PROPERTY_XUPDATE_CONSISTENCY_CHECKS);
1✔
3249
        if(property != null) {
1!
3250
            xupdateConsistencyChecks = (Boolean) property;
1✔
3251
        }
3252
        if(xupdateConsistencyChecks) {
1!
3253
            LOG.debug("Checking document {}", doc.getFileURI());
×
3254
            checkXMLResourceTree(doc);
×
3255
        }
3256
    }
1✔
3257

3258
    /**
     * consistency Check of the database; useful after XUpdates;
     * called by {@link #checkXMLResourceConsistency(DocumentImpl)}
     *
     * Dumps page usage, walks each top-level node's subtree via
     * checkNodeTree, and finally scans the dom btree keys for the document.
     * Throws a RuntimeException when the tree structure is broken.
     */
    @Override
    public void checkXMLResourceTree(final DocumentImpl doc) {
        LOG.debug("Checking DOM tree for document {}", doc.getFileURI());
        boolean xupdateConsistencyChecks = false;
        final Object property = pool.getConfiguration().getProperty(PROPERTY_XUPDATE_CONSISTENCY_CHECKS);
        if(property != null) {
            xupdateConsistencyChecks = (Boolean) property;
        }
        if(xupdateConsistencyChecks) {
            // dump page usage under a read lock
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeReadLock(domDb.getLockName())) {
                @Override
                public Object start() throws ReadOnlyException {
                    LOG.debug("Pages used: {}", domDb.debugPages(doc, false));
                    return null;
                }
            }.run();
            final NodeList nodes = doc.getChildNodes();
            for(int i = 0; i < nodes.getLength(); i++) {
                final IStoredNode node = (IStoredNode) nodes.item(i);
                try(final INodeIterator iterator = getNodeIterator(node)) {
                    iterator.next();
                    final StringBuilder buf = new StringBuilder();
                    //Pass buf to the following method to get a dump of all node ids in the document
                    if(!checkNodeTree(iterator, node, buf)) {
                        LOG.debug("node tree: {}", buf.toString());
                        throw new RuntimeException("Error in document tree structure");
                    }
                } catch(final IOException e) {
                    LOG.error(e);
                }
            }
            // scan all btree keys for this document (prefix query on the doc id)
            final NodeRef ref = new NodeRef(doc.getDocId());
            final IndexQuery idx = new IndexQuery(IndexQuery.TRUNC_RIGHT, ref);
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeReadLock(domDb.getLockName())) {
                @Override
                public Object start() {
                    try {
                        domDb.findKeys(idx);
                    } catch(final BTreeException | IOException e) {
                        LOG.error("start() - " + "error while removing doc", e);
                    }
                    return null;
                }
            }.run();
        }
    }
3308

3309
    /**
     * Store a node into the database. This method is called by the parser to
     * write a node to the storage backend.
     *
     * Text, attribute and CDATA nodes — and any node deeper than
     * defaultIndexDepth — are appended without a btree key; shallower
     * structural nodes get a keyed entry so they can be looked up directly.
     *
     * @param node        the node to be stored
     * @param currentPath path expression which points to this node's
     *                    element-parent or to itself if it is an element.
     */
    @Override
    public <T extends IStoredNode> void storeNode(final Txn transaction, final IStoredNode<T> node, final NodePath currentPath, final IndexSpec indexSpec) {
        checkAvailableMemory();
        final DocumentImpl doc = node.getOwnerDocument();
        final short nodeType = node.getNodeType();
        final byte[] data = node.serialize();
        new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName()), doc) {
            @Override
            public Object start() throws ReadOnlyException {
                final long address;
                if(nodeType == Node.TEXT_NODE
                    || nodeType == Node.ATTRIBUTE_NODE
                    || nodeType == Node.CDATA_SECTION_NODE
                    || node.getNodeId().getTreeLevel() > defaultIndexDepth) {
                    // leaf-like or deep node: append without a btree key
                    address = domDb.add(transaction, data);
                } else {
                    // shallow structural node: keyed so it can be addressed directly
                    address = domDb.put(transaction, new NodeRef(doc.getDocId(), node.getNodeId()), data);
                }
                if(address == BFile.UNKNOWN_ADDRESS) {
                    LOG.error("address is missing");
                }
                //TODO : how can we continue here ? -pb
                node.setInternalAddress(address);
                return null;
            }
        }.run();
        ++nodesCount;
        // return the serialization buffer to the pool for reuse
        ByteArrayPool.releaseByteArray(data);
        nodeProcessor.reset(transaction, node, currentPath, indexSpec);
        nodeProcessor.doIndex();
    }
3348

3349
    @Override
3350
    public <T extends IStoredNode> void updateNode(final Txn transaction, final IStoredNode<T> node, final boolean reindex) {
3351
        try {
3352
            final DocumentImpl doc = node.getOwnerDocument();
1✔
3353
            final long internalAddress = node.getInternalAddress();
1✔
3354
            final byte[] data = node.serialize();
1✔
3355
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
1✔
3356
                @Override
3357
                public Object start() throws ReadOnlyException {
3358
                    if(StorageAddress.hasAddress(internalAddress)) {
1!
3359
                        domDb.update(transaction, internalAddress, data);
1✔
3360
                    } else {
1✔
3361
                        domDb.update(transaction, new NodeRef(doc.getDocId(), node.getNodeId()), data);
×
3362
                    }
3363
                    return null;
1✔
3364
                }
3365
            }.run();
1✔
3366
            ByteArrayPool.releaseByteArray(data);
1✔
3367
        } catch(final Exception e) {
1✔
3368
            final Value oldVal = new DOMTransaction<Value>(this, domDb, () -> lockManager.acquireBtreeReadLock(domDb.getLockName())) {
×
3369
                @Override
3370
                public Value start() {
3371
                    return domDb.get(node.getInternalAddress());
×
3372
                }
3373
            }.run();
×
3374

3375
            //TODO what can we do about abstracting this out?
3376
            final IStoredNode old = StoredNode.deserialize(oldVal.data(),
×
3377
                oldVal.start(), oldVal.getLength(),
×
3378
                node.getOwnerDocument(), false);
×
3379
            LOG.error(
×
3380
                "Exception while storing {}; gid = {}; old = {}",  node.getNodeName(), node.getNodeId(), old.getNodeName(), e);
×
3381
        }
3382
    }
1✔
3383

3384
    /**
3385
     * Physically insert a node into the DOM storage.
3386
     */
3387
    @Override
3388
    public void insertNodeAfter(final Txn transaction, final NodeHandle previous, final IStoredNode node) {
3389
        final byte[] data = node.serialize();
1✔
3390
        final DocumentImpl doc = previous.getOwnerDocument();
1✔
3391
        new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName()), doc) {
1✔
3392
            @Override
3393
            public Object start() {
3394
                long address = previous.getInternalAddress();
1✔
3395
                if(address != BFile.UNKNOWN_ADDRESS) {
1!
3396
                    address = domDb.insertAfter(transaction, doc, address, data);
1✔
3397
                } else {
1✔
3398
                    final NodeRef ref = new NodeRef(doc.getDocId(), previous.getNodeId());
×
3399
                    address = domDb.insertAfter(transaction, doc, ref, data);
×
3400
                }
3401
                node.setInternalAddress(address);
1✔
3402
                return null;
1✔
3403
            }
3404
        }.run();
1✔
3405
        ByteArrayPool.releaseByteArray(data);
1✔
3406
    }
1✔
3407

3408
    private <T extends IStoredNode> void copyNodes(final Txn transaction, final INodeIterator iterator, final IStoredNode<T> node,
3409
                           final NodePath currentPath, @EnsureLocked(mode=LockMode.WRITE_LOCK) final DocumentImpl newDoc, final boolean defragment,
3410
                           final StreamListener listener) {
3411
        copyNodes(transaction, iterator, node, currentPath, newDoc, defragment, listener, null);
1✔
3412
    }
1✔
3413

3414
    /**
     * Recursively copies {@code node} and its subtree into {@code newDoc},
     * storing each node afresh and streaming it through the index listener.
     *
     * When {@code defragment} is true, child node ids are renumbered densely
     * (newChild/nextSibling) and moves are broadcast via the notification
     * service so observers can track the old-to-new id mapping.
     *
     * NOTE(review): the node's owner document is temporarily switched to
     * newDoc for storage and restored afterwards — statement order here is
     * deliberate and must not be changed.
     */
    private <T extends IStoredNode> void copyNodes(final Txn transaction, final INodeIterator iterator, final IStoredNode<T> node,
                                                   final NodePath currentPath, @EnsureLocked(mode=LockMode.WRITE_LOCK) final DocumentImpl newDoc, final boolean defragment,
                                                   final StreamListener listener, NodeId oldNodeId) {
        if(node.getNodeType() == Node.ELEMENT_NODE) {
            currentPath.addComponent(node.getQName());
        }
        final DocumentImpl doc = node.getOwnerDocument();
        final long oldAddress = node.getInternalAddress();
        // temporarily re-home the node so storeNode writes it into newDoc
        node.setOwnerDocument(newDoc);
        node.setInternalAddress(BFile.UNKNOWN_ADDRESS);
        storeNode(transaction, node, currentPath, null);
        if(defragment && oldNodeId != null) {
            pool.getNotificationService().notifyMove(oldNodeId, node);
        }
        if(node.getNodeType() == Node.ELEMENT_NODE) {
            //save old value, whatever it is
            final long address = node.getInternalAddress();
            node.setInternalAddress(oldAddress);
            endElement(node, currentPath, null);
            //restore old value, whatever it was
            node.setInternalAddress(address);
            node.setDirty(false);
        }
        // tree level 1 means a direct child of the document node
        if(node.getNodeId().getTreeLevel() == 1) {
            newDoc.appendChild((NodeHandle)node);
        }
        node.setOwnerDocument(doc);
        if(listener != null) {
            switch(node.getNodeType()) {
                case Node.TEXT_NODE:
                    listener.characters(transaction, (TextImpl) node, currentPath);
                    break;
                case Node.ELEMENT_NODE:
                    listener.startElement(transaction, (ElementImpl) node, currentPath);
                    break;
                case Node.ATTRIBUTE_NODE:
                    listener.attribute(transaction, (AttrImpl) node, currentPath);
                    break;
                case Node.COMMENT_NODE:
                case Node.PROCESSING_INSTRUCTION_NODE:
                    break;
                default:
                    LOG.debug("Unhandled node type: {}", node.getNodeType());
            }
        }
        if(node.hasChildNodes() || node.hasAttributes()) {
            final int count = node.getChildCount();
            NodeId nodeId = node.getNodeId();
            for(int i = 0; i < count; i++) {
                final IStoredNode child = iterator.next();
                oldNodeId = child.getNodeId();
                if(defragment) {
                    // dense renumbering: first child starts a new level,
                    // subsequent children are siblings of the previous one
                    if(i == 0) {
                        nodeId = nodeId.newChild();
                    } else {
                        nodeId = nodeId.nextSibling();
                    }
                    child.setNodeId(nodeId);
                }
                copyNodes(transaction, iterator, child, currentPath, newDoc, defragment, listener, oldNodeId);
            }
        }
        if(node.getNodeType() == Node.ELEMENT_NODE) {
            if(listener != null) {
                listener.endElement(transaction, (ElementImpl) node, currentPath);
            }
            currentPath.removeLastComponent();
        }
    }
3483

3484
    /**
     * Removes the Node Reference from the database.
     * The index will be updated later, i.e. after all nodes have been physically
     * removed. See {@link #endRemove(org.exist.storage.txn.Txn)}.
     * removeNode() just adds the node ids to the list in elementIndex
     * for later removal.
     *
     * After the physical removal, element and attribute nodes are pushed
     * through the value index (with remove=false flags — presumably queueing
     * entries for deferred removal; TODO confirm against NativeValueIndex).
     */
    @Override
    public <T extends IStoredNode> void removeNode(final Txn transaction, final IStoredNode<T> node,
            final NodePath currentPath, final String content) {
        final DocumentImpl doc = node.getOwnerDocument();
        // physically remove the node from the dom store, by address if known
        new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName()), doc) {
            @Override
            public Object start() {
                final long address = node.getInternalAddress();
                if(StorageAddress.hasAddress(address)) {
                    domDb.remove(transaction, new NodeRef(doc.getDocId(), node.getNodeId()), address);
                } else {
                    domDb.remove(transaction, new NodeRef(doc.getDocId(), node.getNodeId()));
                }
                return null;
            }
        }.run();
        notifyRemoveNode(node, currentPath, content);
        final QName qname;
        switch(node.getNodeType()) {
            case Node.ELEMENT_NODE:
                // re-tag the QName as an element value before index handling
                qname = new QName(node.getQName(), ElementValue.ELEMENT);
                node.setQName(qname);
                final GeneralRangeIndexSpec spec1 = doc.getCollection().getIndexByPathConfiguration(this, currentPath);
                if(spec1 != null) {
                    valueIndex.setDocument(doc);
                    valueIndex.storeElement((ElementImpl) node, content, spec1.getType(), NativeValueIndex.IndexType.GENERIC, false);
                }
                final QNameRangeIndexSpec qnSpecElement = doc.getCollection().getIndexByQNameConfiguration(this, qname);
                if(qnSpecElement != null) {
                    valueIndex.setDocument(doc);
                    valueIndex.storeElement((ElementImpl) node, content, qnSpecElement.getType(),
                        NativeValueIndex.IndexType.QNAME, false);
                }
                break;

            case Node.ATTRIBUTE_NODE:
                qname = new QName(node.getQName(), ElementValue.ATTRIBUTE);
                node.setQName(qname);
                currentPath.addComponent(qname);
                //Strange : does it mean that the node is added 2 times under 2 different identities ?
                final AttrImpl attr;
                attr = (AttrImpl) node;
                // handle DTD-declared ID/IDREF/IDREFS attribute semantics
                switch(attr.getType()) {
                    case AttrImpl.ID:
                        valueIndex.setDocument(doc);
                        valueIndex.storeAttribute(attr, attr.getValue(), Type.ID, NativeValueIndex.IndexType.GENERIC, false);
                        break;
                    case AttrImpl.IDREF:
                        valueIndex.setDocument(doc);
                        valueIndex.storeAttribute(attr, attr.getValue(), Type.IDREF, NativeValueIndex.IndexType.GENERIC, false);
                        break;
                    case AttrImpl.IDREFS:
                        valueIndex.setDocument(doc);
                        // IDREFS is a space-separated list of IDREF values
                        final StringTokenizer tokenizer = new StringTokenizer(attr.getValue(), " ");
                        while(tokenizer.hasMoreTokens()) {
                            valueIndex.storeAttribute(attr, tokenizer.nextToken(),Type.IDREF, NativeValueIndex.IndexType.GENERIC, false);
                        }
                        break;
                    default:
                        // do nothing special
                }
                final RangeIndexSpec spec2 = doc.getCollection().getIndexByPathConfiguration(this, currentPath);
                if(spec2 != null) {
                    valueIndex.setDocument(doc);
                    valueIndex.storeAttribute(attr, null, spec2, false);
                }
                final QNameRangeIndexSpec qnSpecAttribute = doc.getCollection().getIndexByQNameConfiguration(this, qname);
                if(qnSpecAttribute != null) {
                    valueIndex.setDocument(doc);
                    valueIndex.storeAttribute(attr, null, qnSpecAttribute, false);
                }
                currentPath.removeLastComponent();
                break;

            case Node.TEXT_NODE:
                break;
        }
    }
3569

3570
    @Override
3571
    public void removeAllNodes(final Txn transaction, final IStoredNode node, final NodePath currentPath,
3572
            final StreamListener listener) {
3573

3574
        try(final INodeIterator iterator = getNodeIterator(node)) {
1✔
3575
            iterator.next();
1✔
3576

3577
            final Deque<RemovedNode> stack = new ArrayDeque<>();
1✔
3578
            collectNodesForRemoval(transaction, stack, iterator, listener, node, currentPath);
1✔
3579
            while(!stack.isEmpty()) {
1✔
3580
                final RemovedNode next = stack.pop();
1✔
3581
                removeNode(transaction, next.node, next.path, next.content);
1✔
3582
            }
3583
        } catch(final IOException ioe) {
×
3584
            LOG.error("Unable to close node iterator", ioe);
×
3585
        }
3586
    }
1✔
3587

3588
    /**
     * First pass of {@link #removeAllNodes(Txn, IStoredNode, NodePath, StreamListener)}:
     * walks the subtree rooted at {@code node} in document order (driven by the shared
     * {@code iterator}) and pushes each node onto {@code stack}. Because elements are
     * pushed before their children, popping the stack later removes descendants before
     * their ancestors.
     *
     * For elements covered by a path- or qname-based range index, the element's string
     * value is captured now (while the node tree is still intact) so the value index
     * entries can be dropped during removal.
     *
     * NOTE(review): {@code iterator} is assumed to be positioned so that the next call
     * returns the first child of {@code node}; {@code currentPath} is mutated in place
     * while descending and restored before returning.
     */
    private <T extends IStoredNode> void collectNodesForRemoval(final Txn transaction, final Deque<RemovedNode> stack,
            final INodeIterator iterator, final StreamListener listener, final IStoredNode<T> node, final NodePath currentPath) {
        RemovedNode removed;
        switch(node.getNodeType()) {
            case Node.ELEMENT_NODE:
                final DocumentImpl doc = node.getOwnerDocument();
                String content = null;
                // capture the element value if either a path-based or a qname-based
                // range index is configured for it
                final GeneralRangeIndexSpec spec = doc.getCollection().getIndexByPathConfiguration(this, currentPath);
                if(spec != null) {
                    content = getNodeValue(node, false);
                } else {
                    final QNameRangeIndexSpec qnIdx = doc.getCollection().getIndexByQNameConfiguration(this, node.getQName());
                    if(qnIdx != null) {
                        content = getNodeValue(node, false);
                    }
                }
                // push the element before its children: children end up above it on the stack
                removed = new RemovedNode(node, new NodePath(currentPath), content);
                stack.push(removed);
                if(listener != null) {
                    listener.startElement(transaction, (ElementImpl) node, currentPath);
                }
                if(node.hasChildNodes() || node.hasAttributes()) {
                    final int childCount = node.getChildCount();
                    for(int i = 0; i < childCount; i++) {
                        // the shared iterator yields the children in document order
                        final IStoredNode child = iterator.next();
                        if(child.getNodeType() == Node.ELEMENT_NODE) {
                            currentPath.addComponent(child.getQName());
                        }
                        collectNodesForRemoval(transaction, stack, iterator, listener, child, currentPath);
                        if(child.getNodeType() == Node.ELEMENT_NODE) {
                            currentPath.removeLastComponent();
                        }
                    }
                }
                if(listener != null) {
                    listener.endElement(transaction, (ElementImpl) node, currentPath);
                }
                break;
            case Node.TEXT_NODE:
                if(listener != null) {
                    listener.characters(transaction, (TextImpl) node, currentPath);
                }
                break;
            case Node.ATTRIBUTE_NODE:
                if(listener != null) {
                    listener.attribute(transaction, (AttrImpl) node, currentPath);
                }
                break;
        }
        // non-element nodes are leaves: push them after notifying the listener
        if(node.getNodeType() != Node.ELEMENT_NODE) {
            removed = new RemovedNode(node, new NodePath(currentPath), null);
            stack.push(removed);
        }
    }
3642

3643
    /**
     * Index a single node, which has been added through an XUpdate
     * operation. This method is only called if inserting the node is possible
     * without changing the node identifiers of sibling or parent nodes. In other
     * cases, reindex will be called.
     */
    @Override
    public void indexNode(final Txn transaction, final IStoredNode node, final NodePath currentPath) {
        // delegate to the IndexMode-aware overload using the normal STORE mode
        indexNode(transaction, node, currentPath, IndexMode.STORE);
    }
3653

3654
    @Override
    public void indexNode(final Txn transaction, final IStoredNode node) {
        // no path is known here; downstream code guards against a null NodePath
        indexNode(transaction, node, null);
    }
3658

3659
    /**
     * Indexes a single node via the shared {@link NodeProcessor} delegate.
     *
     * @param transaction the current transaction
     * @param node the node to index
     * @param currentPath the node's path within the document, may be null
     * @param repairMode the index mode to apply (e.g. STORE, REPAIR, REMOVE)
     */
    public void indexNode(final Txn transaction, final IStoredNode node, final NodePath currentPath, final IndexMode repairMode) {
        nodeProcessor.reset(transaction, node, currentPath, null);
        nodeProcessor.setIndexMode(repairMode);
        nodeProcessor.index();
    }
3664

3665
    private boolean checkNodeTree(final INodeIterator iterator, final IStoredNode node, final StringBuilder buf) {
3666
        if(buf != null) {
×
3667
            if(buf.length() > 0) {
×
3668
                buf.append(", ");
×
3669
            }
3670
            buf.append(node.getNodeId());
×
3671
        }
3672
        boolean docIsValid = true;
×
3673
        if(node.hasChildNodes() || node.hasAttributes()) {
×
3674
            final int count = node.getChildCount();
×
3675
            if(buf != null) {
×
3676
                buf.append('[').append(count).append(']');
×
3677
            }
3678
            IStoredNode previous = null;
×
3679
            for(int i = 0; i < count; i++) {
×
3680
                final IStoredNode child = iterator.next();
×
3681
                if(i > 0 && !(child.getNodeId().isSiblingOf(previous.getNodeId()) &&
×
3682
                    child.getNodeId().compareTo(previous.getNodeId()) > 0)) {
×
3683
                    LOG.fatal("node {} cannot be a sibling of {}; node read from {}", child.getNodeId(),
×
3684
                            previous.getNodeId(), StorageAddress.toString(child.getInternalAddress()));
×
3685
                    docIsValid = false;
×
3686
                }
3687
                previous = child;
×
3688
                if(child == null) {
×
3689
                    LOG.fatal("child {} not found for node: {}: {}; children = {}", i, node.getNodeName(),  node.getNodeId(), node.getChildCount());
×
3690
                    docIsValid = false;
×
3691
                    //TODO : emergency exit ?
3692
                }
3693
                final NodeId parentId = child.getNodeId().getParentId();
×
3694
                if(!parentId.equals(node.getNodeId())) {
×
3695
                    LOG.fatal("{} is not a child of {}", child.getNodeId(), node.getNodeId());
×
3696
                    docIsValid = false;
×
3697
                }
3698
                final boolean check = checkNodeTree(iterator, child, buf);
×
3699
                if(docIsValid) {
×
3700
                    docIsValid = check;
×
3701
                }
3702
            }
3703
        }
3704
        return docIsValid;
×
3705
    }
3706

3707
    /**
     * Called by reindex to walk through all nodes in the tree and reindex them
     * if necessary.
     *
     * @param transaction the current transaction
     * @param iterator iterator over the persistent DOM, positioned so that the next
     *                 call returns the first child of {@code node}
     * @param node the root of the subtree to scan
     * @param currentPath the path of {@code node}; mutated in place while descending
     * @param mode the index mode (e.g. STORE, REPAIR or REMOVE)
     * @param listener optional index StreamListener to notify per node, may be null
     */
    private void scanNodes(final Txn transaction, final INodeIterator iterator, final IStoredNode node,
                           final NodePath2 currentPath, final IndexMode mode, final StreamListener listener) {
        if(node.getNodeType() == Node.ELEMENT_NODE) {
            currentPath.addNode(node);
        }
        indexNode(transaction, node, currentPath, mode);
        if(listener != null) {
            switch(node.getNodeType()) {
                case Node.TEXT_NODE:
                case Node.CDATA_SECTION_NODE:
                    listener.characters(transaction, (AbstractCharacterData) node, currentPath);
                    break;
                case Node.ELEMENT_NODE:
                    listener.startElement(transaction, (ElementImpl) node, currentPath);
                    break;
                case Node.ATTRIBUTE_NODE:
                    listener.attribute(transaction, (AttrImpl) node, currentPath);
                    break;
                case Node.COMMENT_NODE:
                case Node.PROCESSING_INSTRUCTION_NODE:
                    // no listener callback for comments/PIs
                    break;
                default:
                    LOG.debug("Unhandled node type: {}", node.getNodeType());
            }
        }
        if(node.hasChildNodes() || node.hasAttributes()) {
            final int count = node.getChildCount();
            for(int i = 0; i < count; i++) {
                final IStoredNode child = iterator.next();
                if(child == null) {
                    LOG.fatal("child {} not found for node: {}; children = {}", i, node.getNodeName(), node.getChildCount());
                } else {
                    scanNodes(transaction, iterator, child, currentPath, mode, listener);
                }
            }
        }
        if(node.getNodeType() == Node.ELEMENT_NODE) {
            // finish element indexing; a REMOVE mode means index entries are dropped
            endElement(node, currentPath, null, mode == IndexMode.REMOVE);
            if(listener != null) {
                listener.endElement(transaction, (ElementImpl) node, currentPath);
            }
            currentPath.removeLastNode();
        }
    }
3759

3760
    @Override
    public String getNodeValue(final IStoredNode node, final boolean addWhitespace) {
        // read the node's string value from the persistent DOM under a btree read lock
        return new DOMTransaction<String>(this, domDb, () -> lockManager.acquireBtreeReadLock(domDb.getLockName())) {
            @Override
            public String start() {
                return domDb.getNodeValue(NativeBroker.this, node, addWhitespace);
            }
        }.run();
    }
3769

3770
    /**
     * Loads a stored node by document and node id from the persistent DOM,
     * under a btree read lock. Returns null if the node does not exist
     * (which callers may treat as a normal condition).
     */
    @Override
    public IStoredNode objectWith(final Document doc, final NodeId nodeId) {
        return new DOMTransaction<IStoredNode<?>>(this, domDb, () -> lockManager.acquireBtreeReadLock(domDb.getLockName())) {
            @Override
            public IStoredNode<?> start() {
                final Value val = domDb.get(NativeBroker.this, new NodeProxy(null, (DocumentImpl) doc, nodeId));
                if(val == null) {
                    if(LOG.isDebugEnabled()) {
                        LOG.debug("Node {} not found. This is usually not an error.", nodeId);
                    }
                    return null;
                }
                // materialise the node from its serialized form and attach document + address
                final IStoredNode node = StoredNode.deserialize(val.getData(), 0, val.getLength(), (DocumentImpl) doc);
                node.setOwnerDocument((DocumentImpl) doc);
                node.setInternalAddress(val.getAddress());
                return node;
            }
        }.run();
    }
3789

3790
    /**
     * Loads the stored node identified by the proxy. If the proxy carries a valid
     * storage address the node is fetched directly by address; if the address turns
     * out to be stale (different doc or node id found there), the lookup falls back
     * to a node-id based search and the proxy's address is refreshed.
     */
    @Override
    public IStoredNode objectWith(final NodeProxy p) {
        if(!StorageAddress.hasAddress(p.getInternalAddress())) {
            // no direct address available: resolve via document + node id
            return objectWith(p.getOwnerDocument(), p.getNodeId());
        }
        return new DOMTransaction<IStoredNode<?>>(this, domDb, () -> lockManager.acquireBtreeReadLock(domDb.getLockName())) {
            @Override
            public IStoredNode<?> start() {
                // DocumentImpl sets the nodeId to DOCUMENT_NODE when it's trying to find its top-level
                // children (for which it doesn't persist the actual node ids), so ignore that.  Nobody else
                // should be passing DOCUMENT_NODE into here.
                final boolean fakeNodeId = p.getNodeId().equals(NodeId.DOCUMENT_NODE);
                final Value val = domDb.get(p.getInternalAddress(), false);
                if(val == null) {
                    LOG.debug("Node {} not found in document {}; docId = {}: {}", p.getNodeId(), p.getOwnerDocument().getURI(),
                            p.getOwnerDocument().getDocId(), StorageAddress.toString(p.getInternalAddress()));
                    if(fakeNodeId) {
                        // cannot retry by node id for a fake DOCUMENT_NODE id
                        return null;
                    }
                } else {
                    final IStoredNode<? extends IStoredNode> node = StoredNode.deserialize(val.getData(), 0, val.getLength(), p.getOwnerDocument());
                    node.setOwnerDocument(p.getOwnerDocument());
                    node.setInternalAddress(p.getInternalAddress());
                    if(fakeNodeId) {
                        return node;
                    }
                    // verify the address really held the node the proxy asked for
                    if(p.getOwnerDocument().getDocId() == node.getOwnerDocument().getDocId() &&
                        p.getNodeId().equals(node.getNodeId())) {
                        return node;
                    }
                    LOG.debug(
                        "Node {} not found in document {}; docId = {}: {}; found node {} instead", p.getNodeId(), p.getOwnerDocument().getURI(),
                            p.getOwnerDocument().getDocId(), StorageAddress.toString(p.getInternalAddress()), node.getNodeId()
                    );
                }
                // retry based on node id
                final IStoredNode node = objectWith(p.getOwnerDocument(), p.getNodeId());
                if(node != null) {
                    p.setInternalAddress(node.getInternalAddress());
                }  // update proxy with correct address
                return node;
            }
        }.run();
    }
3834

3835
    /**
     * Repairs the secondary indexes: removes all index files, recreates them,
     * and then reindexes the whole database starting from the root collection.
     *
     * @throws IOException if the database is read-only
     */
    @Override
    public void repair() throws PermissionDeniedException, IOException, LockException {
        if(isReadOnly()) {
            throw new IOException(DATABASE_IS_READ_ONLY);
        }

        LOG.info("Removing index files ...");
        try {
            notifyCloseAndRemove();
            pool.getIndexManager().removeIndexes();
        } catch(final DBException e) {
            // best-effort: log and continue with recreation
            LOG.error("Failed to remove index files during repair: {}", e.getMessage(), e);
        }

        LOG.info("Recreating index files ...");
        try {
            // recreate the range-value index from scratch
            this.valueIndex = new NativeValueIndex(this, VALUES_DBX_ID, dataDir, config);
        } catch(final DBException e) {
            LOG.error("Exception during repair: {}", e.getMessage(), e);
        }

        try {
            pool.getIndexManager().reopenIndexes();
        } catch(final DatabaseConfigurationException e) {
            LOG.error("Failed to reopen index files after repair: {}", e.getMessage(), e);
        }

        loadIndexModules();
        LOG.info("Reindexing database files ...");
        //Reindex from root collection
        reindexCollection(null, getCollection(XmldbURI.ROOT_COLLECTION_URI), IndexMode.REPAIR);
    }
3867

3868
    @Override
    public void repairPrimary() {
        // rebuild the two core btree files: the persistent DOM and the collections store
        rebuildIndex(DOM_DBX_ID);
        rebuildIndex(COLLECTIONS_DBX_ID);
    }
3873

3874
    /**
     * Rebuilds a single core btree file, holding its write lock for the duration.
     * Failures are logged, not propagated.
     *
     * @param indexId identifier of the btree to rebuild (e.g. DOM_DBX_ID)
     */
    private void rebuildIndex(final byte indexId) {
        final BTree btree = getStorage(indexId);
        try(final ManagedLock<ReentrantLock> btreeLock = lockManager.acquireBtreeWriteLock(btree.getLockName())) {
            LOG.info("Rebuilding index {}", FileUtils.fileName(btree.getFile()));
            btree.rebuild();
            LOG.info("Index {} was rebuilt.", FileUtils.fileName(btree.getFile()));
        } catch(final LockException | IOException | TerminatedException | DBException e) {
            LOG.error("Caught error while rebuilding core index {}: {}", FileUtils.fileName(btree.getFile()), e.getMessage(), e);
        }
    }
3884

3885
    @Override
    public BrokerPool getBrokerPool() {
        // the pool that owns and recycles this broker instance
        return pool;
    }
3889

3890
    @Override
    public Database getDatabase() {
        // the BrokerPool doubles as the Database implementation
        return pool;
    }
3894

3895
    @Override
    public int getReferenceCount() {
        // number of active leases on this broker (see inc/decReferenceCount)
        return referenceCount;
    }
3899

3900
    @Override
    public void incReferenceCount() {
        // NOTE(review): plain increment — appears to rely on external synchronization
        // by the pool; confirm callers never race here
        ++referenceCount;
    }
3904

3905
    @Override
    public void decReferenceCount() {
        // NOTE(review): plain decrement — appears to rely on external synchronization
        // by the pool; confirm callers never race here
        --referenceCount;
    }
3909

3910
    /**
     * Flushes pending changes: notifies observers, persists the symbol table,
     * flushes the index controller and resets the node counter used by
     * {@link #checkAvailableMemory()}.
     */
    @Override
    public void flush() {
        notifyFlush();
        try {
            pool.getSymbols().flush();
        } catch(final EXistException e) {
            LOG.error(e);
        }
        getIndexController().flush();
        nodesCount = 0;
    }
3921

3922
    // wall-clock time (ms since epoch) after which sync() may log its next
    // statistics report; pushed 10 minutes into the future after each report
    long nextReportTS = System.currentTimeMillis();
3923

3924
    @Override
3925
    public void sync(final Sync syncEvent) {
3926
        if(isReadOnly()) {
1!
3927
            return;
×
3928
        }
3929
        try {
3930
            new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
1✔
3931
                @Override
3932
                public Object start() {
3933
                    try {
3934
                        domDb.flush();
1✔
3935
                    } catch(final DBException e) {
1✔
3936
                        LOG.error("error while flushing dom.dbx", e);
×
3937
                    }
3938
                    return null;
1✔
3939
                }
3940
            }.run();
1✔
3941
            if(syncEvent == Sync.MAJOR) {
1✔
3942
                try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
1✔
3943
                    collectionsDb.flush();
1✔
3944
                } catch(final LockException e) {
×
3945
                    LOG.error("Failed to acquire lock on {}", FileUtils.fileName(collectionsDb.getFile()), e);
×
3946
                }
3947
                notifySync();
1✔
3948
                pool.getIndexManager().sync();
1✔
3949

3950
                if (System.currentTimeMillis() > nextReportTS) {
1✔
3951
                        final NumberFormat nf = NumberFormat.getNumberInstance();
1✔
3952
                        LOG_STATS.info("Memory: {}K total; {}K max; {}K free", nf.format(run.totalMemory() / 1024),
1✔
3953
                            nf.format(run.maxMemory() / 1024), nf.format(run.freeMemory() / 1024));
1✔
3954
                               domDb.printStatistics();
1✔
3955
                        collectionsDb.printStatistics();
1✔
3956
                        notifyPrintStatistics();
1✔
3957

3958
                    nextReportTS = System.currentTimeMillis() + (10 * 60 * 1000); // occurs after 10 minutes from now
1✔
3959
                }
3960
            }
3961
        } catch(final DBException dbe) {
1✔
3962
            dbe.printStackTrace();
×
3963
            LOG.error(dbe);
×
3964
        }
3965
    }
1✔
3966

3967
    /**
     * Shuts down this broker: flushes and syncs all pending data, closes the
     * persistent DOM and collections store, notifies observers and finally
     * releases the serializer pool. Errors are logged, not propagated.
     */
    @Override
    public void shutdown() {
        try {
            flush();
            sync(Sync.MAJOR);

            // close dom.dbx under its btree write lock
            new DOMTransaction(NativeBroker.this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
                @Override
                public Object start() {
                    try {
                        domDb.close();
                    } catch(final DBException e) {
                        LOG.error(e.getMessage(), e);
                    }
                    return null;
                }
            }.run();

            try(final ManagedLock<ReentrantLock> collectionsDbLock = lockManager.acquireBtreeWriteLock(collectionsDb.getLockName())) {
                collectionsDb.close();
            }

            notifyClose();
        } catch(final Exception e) {
            LOG.error(e.getMessage(), e);
        } finally {
            // always release pooled serializers, even if closing the stores failed
            xmlSerializerPool.close();
        }
    }
3996

3997
    /**
     * check available memory
     *
     * Flushes pending data when memory runs low. If no explicit node-count
     * threshold is configured ({@code nodesCountThreshold <= 0}), a heap check is
     * performed every DEFAULT_NODES_BEFORE_MEMORY_CHECK nodes; otherwise a flush
     * happens whenever the configured node count is exceeded.
     */
    @Override
    public void checkAvailableMemory() {
        if(nodesCountThreshold <= 0) {
            if(nodesCount > DEFAULT_NODES_BEFORE_MEMORY_CHECK) {
                // NOTE(review): flush only when the heap is fully grown
                // (totalMemory >= maxMemory) AND free memory is below the reserve —
                // confirm this is the intended low-memory condition
                if(run.totalMemory() >= run.maxMemory() && run.freeMemory() < pool.getReservedMem()) {
                    flush();
                }
                nodesCount = 0;
            }
        } else if(nodesCount > nodesCountThreshold) {
            flush();
            nodesCount = 0;
        }
    }
4014

4015
    //TODO UNDERSTAND : why not use shutdown ? -pb
    @Override
    public void closeDocument() {
        // close the current document in dom.dbx under a btree write lock
        new DOMTransaction(this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
            @Override
            public Object start() {
                domDb.closeDocument();
                return null;
            }
        }.run();
    }
4026

4027
    @Override
4028
    public void addCurrentTransaction(final Txn transaction) {
4029
        synchronized (currentTransactions) {
1✔
4030
            if (!currentTransactions.add(transaction)) {
1!
4031
                throw new IllegalStateException("Transaction is already current: " + transaction.getId());
×
4032
            }
4033
        }
4034
    }
1✔
4035

4036
    @Override
4037
    public void removeCurrentTransaction(final Txn transaction) {
4038
        synchronized (currentTransactions) {
1✔
4039
            if (!currentTransactions.remove(transaction)) {
1!
4040
                throw new IllegalStateException("Unable to remove current transaction: " + transaction.getId());
×
4041
            }
4042
        }
4043
    }
1✔
4044

4045
    @Override
4046
    public @Nullable Txn getCurrentTransaction() {
4047
        synchronized (currentTransactions) {
1✔
4048
            if (currentTransactions.isEmpty()) {
1✔
4049
                return null;
1✔
4050
            }
4051
            return currentTransactions.last();
1✔
4052
        }
4053
    }
4054

4055
    /**
4056
     * Gets the current transaction, or if there is no current transaction
4057
     * for this thread (i.e. broker), then we begin a new transaction.
4058
     *
4059
     * The callee is *always* responsible for calling .close on the transaction
4060
     *
4061
     * Note - When there is an existing transaction, calling .close on the object
4062
     * returned (e.g. ResusableTxn) from this function will only cause a minor state
4063
     * change and not close the original transaction. That is intentional, as it will
4064
     * eventually be closed by the creator of the original transaction (i.e. the code
4065
     * site that began the first transaction)
4066
     *
4067
     * @deprecated This is a stepping-stone; Transactions should be explicitly passed
4068
     *   around. This will be removed in the near future.
4069
     * @return the transaction
4070
     */
4071
    @Override
4072
    @Deprecated
4073
    public Txn continueOrBeginTransaction() {
4074
        synchronized (currentTransactions) {
1✔
4075
            if (currentTransactions.isEmpty()) {
1✔
4076
                final TransactionManager tm = getBrokerPool().getTransactionManager();
1✔
4077
                return tm.beginTransaction(); //TransactionManager will call this#addCurrentTransaction
1✔
4078
            } else {
4079
                return new Txn.ReusableTxn(currentTransactions.last());
1✔
4080
            }
4081
        }
4082
    }
4083

4084
    //TODO the object passed to the function e.g. Txn should not implement .close
4085
    //if we are using a function passing approach like this, i.e. one point of
4086
    //responsibility and WE HERE should be responsible for closing the transaction.
4087
    //we could return a sub-class of Txn which is uncloseable like Txn.reuseable or similar
4088
    //also getCurrentTransaction should then be made private
4089
//    private <T> T transact(final Function<Txn, T> transactee) throws EXistException {
4090
//        final Txn existing = getCurrentTransaction();
4091
//        if(existing == null) {
4092
//            try(final Txn txn = pool.getTransactionManager().beginTransaction()) {
4093
//                return transactee.apply(txn);
4094
//            }
4095
//        } else {
4096
//            return transactee.apply(existing);
4097
//        }
4098
//    }
4099

4100
    @Override
    public boolean isTriggersEnabled() {
        // whether collection/document triggers fire for operations on this broker
        return triggersEnabled;
    }
4104

4105
    @Override
    public void setTriggersEnabled(final boolean triggersEnabled) {
        this.triggersEnabled = triggersEnabled;
    }
4109

4110
    @Override
    public void close() {
        // return this broker to the pool rather than destroying it
        pool.release(this);
    }
4114

4115
    /**
     * Btree key value combining a document id with an optional node id.
     * Layout: 4-byte document id (see DocumentImpl.LENGTH_DOCUMENT_ID) followed,
     * in the two-argument form, by the serialized node id.
     * The {@code len}, {@code data} and {@code pos} fields are inherited from Value.
     */
    public final static class NodeRef extends Value {

        public static final int OFFSET_DOCUMENT_ID = 0;
        public static final int OFFSET_NODE_ID = OFFSET_DOCUMENT_ID + DocumentImpl.LENGTH_DOCUMENT_ID;

        /** Key covering a whole document (document id only, no node id). */
        public NodeRef(final int docId) {
            len = DocumentImpl.LENGTH_DOCUMENT_ID;
            data = new byte[len];
            ByteConversion.intToByte(docId, data, OFFSET_DOCUMENT_ID);
            pos = OFFSET_DOCUMENT_ID;
        }

        /** Key identifying a single node: document id followed by the serialized node id. */
        public NodeRef(final int docId, final NodeId nodeId) {
            len = DocumentImpl.LENGTH_DOCUMENT_ID + nodeId.size();
            data = new byte[len];
            ByteConversion.intToByte(docId, data, OFFSET_DOCUMENT_ID);
            nodeId.serialize(data, OFFSET_NODE_ID);
            pos = OFFSET_DOCUMENT_ID;
        }

        // decodes the document id back out of the key bytes
        int getDocId() {
            return ByteConversion.byteToInt(data, OFFSET_DOCUMENT_ID);
        }
    }
4139

4140
    /**
     * Immutable holder queued during removal: the node to delete, a snapshot of its
     * path, and (for range-indexed elements) the pre-captured string value needed to
     * drop value-index entries.
     */
    private final static class RemovedNode {
        final IStoredNode node;
        final String content;
        final NodePath path;

        RemovedNode(final IStoredNode node, final NodePath path, final String content) {
            this.node = node;
            this.path = path;
            this.content = content;
        }
    }
4151

4152
    /**
4153
     * Delegate for Node Processing : indexing
4154
     */
4155
    private class NodeProcessor {
4156
        private Txn transaction;
4157
        private IStoredNode<? extends IStoredNode> node;
4158
        private NodePath currentPath;
4159

4160
        /**
4161
         * work variables
4162
         */
4163
        private DocumentImpl doc;
4164
        private long address;
4165

4166
        private IndexSpec idxSpec;
4167
        private int level;
4168
        private IndexMode indexMode = IndexMode.STORE;
1✔
4169

4170
        // state is supplied later via reset(...)
        NodeProcessor() {
            //ignore
        }
4173

4174
        /**
         * Re-initialises this (reused) processor for a new node. Resets the index
         * mode to STORE; callers needing another mode must call setIndexMode after.
         *
         * @param transaction the current transaction
         * @param node the node to be processed
         * @param currentPath the node's path, may be null
         * @param indexSpec index configuration; if null, the owning collection's
         *                  configuration is looked up
         */
        public <T extends IStoredNode> void reset(final Txn transaction, final IStoredNode<T> node, final NodePath currentPath, IndexSpec indexSpec) {
            if(node.getNodeId() == null) {
                LOG.error("illegal node: {}", node.getNodeName());
            }
            //TODO : why continue processing ? return ? -pb
            this.transaction = transaction;
            this.node = node;
            this.currentPath = currentPath;
            this.indexMode = IndexMode.STORE;
            doc = node.getOwnerDocument();
            address = node.getInternalAddress();
            if(indexSpec == null) {
                indexSpec = doc.getCollection().getIndexConfiguration(NativeBroker.this);
            }
            idxSpec = indexSpec;
            level = node.getNodeId().getTreeLevel();
        }
4191

4192
        // overrides the STORE mode set by reset(...), e.g. for REPAIR or REMOVE
        public void setIndexMode(final IndexMode indexMode) {
            this.indexMode = indexMode;
        }
4195

4196
        /**
         * Updates the various indices
         *
         * For elements: computes and records the combined range/qname index type.
         * For attributes: additionally writes value-index entries for configured
         * range/qname indexes and for ID/IDREF/IDREFS typed attributes; the
         * attribute's qname is temporarily pushed onto {@code currentPath}.
         * For text nodes: notifies text observers.
         * When {@code indexMode == IndexMode.REMOVE}, value-index entries are
         * dropped instead of added.
         */
        public void doIndex() {
            //TODO : resolve URI !
            //final boolean isTemp = XmldbURI.TEMP_COLLECTION_URI.equalsInternal(((DocumentImpl) node.getOwnerDocument()).getCollection().getURI());
            int indexType;
            switch(node.getNodeType()) {
                case Node.ELEMENT_NODE:
                    //Compute index type
                    //TODO : let indexers OR it themselves
                    //we'd need to notify the ElementIndexer at the very end then...
                    indexType = RangeIndexSpec.NO_INDEX;
                    if(idxSpec != null && idxSpec.getIndexByPath(currentPath) != null) {
                        indexType |= idxSpec.getIndexByPath(currentPath).getIndexType();
                    }
                    if(idxSpec != null) {
                        final QNameRangeIndexSpec qnIdx = idxSpec.getIndexByQName(node.getQName());
                        if(qnIdx != null) {
                            indexType |= RangeIndexSpec.QNAME_INDEX;
                            // a path-based range index takes precedence over the qname index type
                            if(!RangeIndexSpec.hasRangeIndex(indexType)) {
                                indexType |= qnIdx.getIndexType();
                            }
                        }
                    }
                    ((ElementImpl) node).setIndexType(indexType);
                    break;

                case Node.ATTRIBUTE_NODE:
                    final QName qname = new QName(node.getQName());
                    if(currentPath != null) {
                        currentPath.addComponent(qname);
                    }
                    //Compute index type
                    //TODO : let indexers OR it themselves
                    //we'd need to notify the ElementIndexer at the very end then...
                    indexType = RangeIndexSpec.NO_INDEX;
                    if(idxSpec != null) {
                        final RangeIndexSpec rangeSpec = idxSpec.getIndexByPath(currentPath);
                        if(rangeSpec != null) {
                            indexType |= rangeSpec.getIndexType();
                        }
                        // NOTE(review): duplicate rangeSpec != null check — could be merged
                        // with the branch above
                        if(rangeSpec != null) {
                            valueIndex.setDocument(node.getOwnerDocument());
                            //Oh dear : is it the right semantics then ?
                            valueIndex.storeAttribute((AttrImpl) node, currentPath,
                                rangeSpec, indexMode == IndexMode.REMOVE);
                        }
                        final QNameRangeIndexSpec qnIdx = idxSpec.getIndexByQName(node.getQName());
                        if(qnIdx != null) {
                            indexType |= RangeIndexSpec.QNAME_INDEX;
                            if(!RangeIndexSpec.hasRangeIndex(indexType)) {
                                indexType |= qnIdx.getIndexType();
                            }
                            valueIndex.setDocument(node.getOwnerDocument());
                            //Oh dear : is it the right semantics then ?
                            valueIndex.storeAttribute((AttrImpl) node, currentPath,
                                qnIdx, indexMode == IndexMode.REMOVE);
                        }
                    }
                    node.setQName(new QName(qname, ElementValue.ATTRIBUTE));
                    final AttrImpl attr = (AttrImpl) node;
                    attr.setIndexType(indexType);
                    // ID/IDREF(S) attributes additionally go into the generic value index
                    switch(attr.getType()) {
                        case AttrImpl.ID:
                            valueIndex.setDocument(doc);
                            valueIndex.storeAttribute(attr, attr.getValue(), Type.ID, NativeValueIndex.IndexType.GENERIC, indexMode == IndexMode.REMOVE);
                            break;

                        case AttrImpl.IDREF:
                            valueIndex.setDocument(doc);
                            valueIndex.storeAttribute(attr, attr.getValue(), Type.IDREF, NativeValueIndex.IndexType.GENERIC, indexMode == IndexMode.REMOVE);
                            break;

                        case AttrImpl.IDREFS:
                            // IDREFS is a whitespace-separated list: index each token separately
                            valueIndex.setDocument(doc);
                            final StringTokenizer tokenizer = new StringTokenizer(attr.getValue(), " ");
                            while(tokenizer.hasMoreTokens()) {
                                valueIndex.storeAttribute(attr, tokenizer.nextToken(), Type.IDREF, NativeValueIndex.IndexType.GENERIC, indexMode == IndexMode.REMOVE);
                            }
                            break;

                        default:
                            // do nothing special
                    }
                    if(currentPath != null) {
                        currentPath.removeLastComponent();
                    }
                    break;

                case Node.TEXT_NODE:
                    notifyStoreText((TextImpl) node, currentPath);
                    break;
            }
        }
4291

4292
        /**
         * Stores this node into the database, if it's an element.
         *
         * Persists the node's storage address into the DOM index (domDb),
         * keyed by (document id, node id). Only runs in {@code IndexMode.STORE},
         * and only for nodes at level 1 or element nodes whose tree level does
         * not exceed {@code defaultIndexDepth}.
         */
        public void store() {
            final DocumentImpl doc = node.getOwnerDocument();
            // we store all nodes at level 1 (see - https://github.com/eXist-db/exist/issues/1691), and only element nodes after!
            if(indexMode == IndexMode.STORE && (level == 1 || (node.getNodeType() == Node.ELEMENT_NODE && level <= defaultIndexDepth))) {
                //TODO : used to be this, but NativeBroker.this avoids an owner change
                // Anonymous DOMTransaction takes the btree write lock on domDb
                // for the duration of start().
                new DOMTransaction(NativeBroker.this, domDb, () -> lockManager.acquireBtreeWriteLock(domDb.getLockName())) {
                    @Override
                    public Object start() throws ReadOnlyException {
                        try {
                            // Map (docId, nodeId) -> the node's storage address in the DOM file.
                            domDb.addValue(transaction, new NodeRef(doc.getDocId(), node.getNodeId()), address);
                        } catch(final BTreeException | IOException e) {
                            // NOTE(review): failure is logged and swallowed here, so a
                            // failed addValue leaves the index entry missing — presumably
                            // acceptable for reindex; confirm against callers.
                            LOG.error(EXCEPTION_DURING_REINDEX, e);
                        }
                        return null;
                    }
                }.run();
            }
        }
4313

4314
        /**
4315
         * check available memory
4316
         */
4317
        private void checkAvailableMemory() {
4318
            if(indexMode != IndexMode.REMOVE && nodesCount > DEFAULT_NODES_BEFORE_MEMORY_CHECK) {
1✔
4319
                if(run.totalMemory() >= run.maxMemory() && run.freeMemory() < pool.getReservedMem()) {
1!
4320
                    flush();
×
4321
                }
4322
                nodesCount = 0;
1✔
4323
            }
4324
        }
1✔
4325

4326
        /**
4327
         * Updates the various indices and stores this node into the database
4328
         */
4329
        public void index() {
4330
            ++nodesCount;
1✔
4331
            checkAvailableMemory();
1✔
4332
            doIndex();
1✔
4333
            store();
1✔
4334
        }
1✔
4335
    }
4336

4337
    private final class DocumentCallback implements BTreeCallback {
4338

4339
        private final Collection.InternalAccess collectionInternalAccess;
4340

4341
        private DocumentCallback(final Collection.InternalAccess collectionInternalAccess) {
1✔
4342
            this.collectionInternalAccess = collectionInternalAccess;
1✔
4343
        }
1✔
4344

4345
        @Override
4346
        public boolean indexInfo(final Value key, final long pointer) throws TerminatedException {
4347

4348
            try {
4349
                final byte type = key.data()[key.start() + Collection.LENGTH_COLLECTION_ID + DocumentImpl.LENGTH_DOCUMENT_TYPE];
1✔
4350
                final VariableByteInput is = collectionsDb.getAsStream(pointer);
1✔
4351

4352
                final DocumentImpl doc;
4353
                if (type == DocumentImpl.BINARY_FILE) {
1✔
4354
                    doc = BinaryDocument.read(pool, is);
1✔
4355
                } else {
1✔
4356
                    doc = DocumentImpl.read(pool, is);
1✔
4357
                }
4358

4359
                collectionInternalAccess.addDocument(doc);
1✔
4360
            } catch(final EXistException | IOException e) {
1✔
4361
                LOG.error("Exception while reading document data", e);
×
4362
            }
4363

4364
            return true;
1✔
4365
        }
4366
    }
4367

4368
    /**
4369
     * Represents a {@link Subject} change
4370
     * made to a broker
4371
     *
4372
     * Used for tracing subject changes
4373
     */
4374
    private static class TraceableSubjectChange extends TraceableStateChange<Subject, TraceableSubjectChange.Change> {
4375
        private final String id;
4376

4377
        public enum Change {
×
4378
            PUSH,
×
4379
            POP
×
4380
        }
4381

4382
        private TraceableSubjectChange(final Change change, final Subject subject, final String id) {
4383
            super(change, subject);
×
4384
            this.id = id;
×
4385
        }
×
4386

4387
        @Override
4388
        public String getId() {
4389
            return id;
×
4390
        }
4391

4392
        @Override
4393
        public String describeState() {
4394
            return getState().getName();
×
4395
        }
4396

4397
        final static TraceableSubjectChange push(final Subject subject, final String id) {
4398
            return new TraceableSubjectChange(Change.PUSH, subject, id);
×
4399
        }
4400

4401
        final static TraceableSubjectChange pop(final Subject subject, final String id) {
4402
            return new TraceableSubjectChange(Change.POP, subject, id);
×
4403
        }
4404
    }
4405
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc