IQSS / dataverse / #22987

23 Aug 2024 06:44PM CUT coverage: 20.61% (-0.2%) from 20.791%

Pull Request #10781: Improved handling of Globus uploads
landreev: added an upfront locks check to the /addGlobusFiles api #10623

4 of 417 new or added lines in 15 files covered. (0.96%)

4194 existing lines in 35 files now uncovered.

17388 of 84365 relevant lines covered (20.61%)

0.21 hits per line
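These headline figures are internally consistent: 17388 of 84365 relevant lines is 17388 / 84365 ≈ 0.2061, i.e. the reported 20.61%. A minimal sketch of the arithmetic (the class and variable names below are illustrative, not part of the report):

public class CoverageArithmetic {
    public static void main(String[] args) {
        int coveredLines = 17_388;   // relevant lines executed at least once
        int relevantLines = 84_365;  // total instrumented (relevant) lines
        double coveragePct = 100.0 * coveredLines / relevantLines;
        System.out.printf("coverage = %.2f%%%n", coveragePct); // prints: coverage = 20.61%
        // "0.21 hits per line" is total hits divided by relevant lines; its
        // closeness to the coverage ratio suggests that, in this run, covered
        // lines were executed roughly once each on average.
    }
}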

Source File (0.0% of lines covered):
/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/FinalizeDatasetPublicationCommand.java
package edu.harvard.iq.dataverse.engine.command.impl;

import edu.harvard.iq.dataverse.ControlledVocabularyValue;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetField;
import edu.harvard.iq.dataverse.DatasetFieldConstant;
import edu.harvard.iq.dataverse.DatasetLock;
import static edu.harvard.iq.dataverse.DatasetVersion.VersionState.*;
import edu.harvard.iq.dataverse.DatasetVersionUser;
import edu.harvard.iq.dataverse.Dataverse;
import edu.harvard.iq.dataverse.DvObject;
import edu.harvard.iq.dataverse.Embargo;
import edu.harvard.iq.dataverse.UserNotification;
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.dataset.DatasetUtil;
import edu.harvard.iq.dataverse.engine.command.CommandContext;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.RequiredPermissions;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.export.ExportService;
import edu.harvard.iq.dataverse.pidproviders.PidProvider;
import edu.harvard.iq.dataverse.pidproviders.PidUtil;
import edu.harvard.iq.dataverse.privateurl.PrivateUrl;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.BundleUtil;
import edu.harvard.iq.dataverse.workflow.WorkflowContext.TriggerType;
import java.io.IOException;
import java.sql.Timestamp;
import java.util.Date;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

import edu.harvard.iq.dataverse.batch.util.LoggingUtil;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.util.FileUtil;
import java.util.ArrayList;
import java.util.concurrent.Future;
import org.apache.solr.client.solrj.SolrServerException;

/**
 * Takes the last internal steps in publishing a dataset.
 *
 * @author michael
 */
@RequiredPermissions(Permission.PublishDataset)
public class FinalizeDatasetPublicationCommand extends AbstractPublishDatasetCommand<Dataset> {

    private static final Logger logger = Logger.getLogger(FinalizeDatasetPublicationCommand.class.getName());

    /**
     * Mirror field from {@link PublishDatasetCommand} of the same name.
     */
    final boolean datasetExternallyReleased;

    List<Dataverse> dataversesToIndex = new ArrayList<>();

    public static final String FILE_VALIDATION_ERROR = "FILE VALIDATION ERROR";

    public FinalizeDatasetPublicationCommand(Dataset aDataset, DataverseRequest aRequest) {
        this(aDataset, aRequest, false);
    }

    public FinalizeDatasetPublicationCommand(Dataset aDataset, DataverseRequest aRequest, boolean isPidPrePublished) {
        super(aDataset, aRequest);
        datasetExternallyReleased = isPidPrePublished;
    }
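
    /**
     * Finalizes the publication: validates the physical files (checksums) and
     * the version metadata, registers and publicizes PIDs, stamps publication
     * and release times, publishes the individual files, deletes any Private
     * URL, kicks off the post-publish workflow, and removes the publication
     * locks.
     */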
    @Override
    public Dataset execute(CommandContext ctxt) throws CommandException {
        Dataset theDataset = getDataset();

        logger.info("Finalizing publication of the dataset " + theDataset.getGlobalId().asString());

        // Validate the physical files before we do anything else
        // (unless specifically disabled, or a minor version):
        if (theDataset.getLatestVersion().getVersionState() != RELEASED
                && theDataset.getLatestVersion().getMinorVersionNumber() != null
                && theDataset.getLatestVersion().getMinorVersionNumber().equals((long) 0)
                && ctxt.systemConfig().isDatafileValidationOnPublishEnabled()) {
            // some imported datasets may already be released.

            // validate the physical files (verify checksums):
            validateDataFiles(theDataset, ctxt);
            // (this will throw a CommandException if it fails)
        }

        validateOrDie(theDataset.getLatestVersion(), false);

        /*
         * Try to register the dataset identifier. For PID providers that have
         * registerWhenPublished == false (all except the FAKE provider at present),
         * the registerExternalIdentifier command will make one try to create the
         * identifier if needed (e.g. if reserving at dataset creation wasn't done/failed).
         * For registerWhenPublished == true providers, if a PID conflict is found,
         * the call will retry with new PIDs.
         */
        if (theDataset.getGlobalIdCreateTime() == null) {
            try {
                // This can potentially throw a CommandException, so let's make
                // sure we exit cleanly:
                registerExternalIdentifier(theDataset, ctxt, false);
            } catch (CommandException comEx) {
                logger.warning("Failed to reserve the identifier " + theDataset.getGlobalId().asString() + "; notifying the user(s), unlocking the dataset");
                // Send failure notification to the user:
                notifyUsersDatasetPublishStatus(ctxt, theDataset, UserNotification.Type.PUBLISHFAILED_PIDREG);
                // Remove the dataset lock:
                ctxt.datasets().removeDatasetLocks(theDataset, DatasetLock.Reason.finalizePublication);
                // re-throw the exception:
                throw comEx;
            }
        }

        // Is this the first publication of the dataset?
        if (theDataset.getPublicationDate() == null) {
            theDataset.setReleaseUser((AuthenticatedUser) getUser());

            theDataset.setPublicationDate(new Timestamp(new Date().getTime()));

            // If there are any embargoed files in this version, we will save
            // the latest availability date as the "embargoCitationDate" for future
            // reference (if the files are not available yet, as of publishing of
            // the dataset, this date will be used as the "citation date" of the
            // dataset, instead of the publicationDate, in compliance with the
            // DataCite best practices).
            // The code below replicates the logic that used to be in the method
            // Dataset.getCitationDate() that calculated this adjusted date in real time.

            Timestamp latestEmbargoDate = null;
            for (DataFile dataFile : theDataset.getFiles()) {
                // This is the first version of the dataset that is being published;
                // therefore we can iterate through .getFiles() instead of obtaining
                // the DataFiles by going through the FileMetadatas in the current version.
                Embargo embargo = dataFile.getEmbargo();
                if (embargo != null) {
                    // "dataAvailable" is not nullable in the Embargo class, no need for a null check
                    Timestamp embargoDate = Timestamp.valueOf(embargo.getDateAvailable().atStartOfDay());
                    if (latestEmbargoDate == null || latestEmbargoDate.compareTo(embargoDate) < 0) {
                        latestEmbargoDate = embargoDate;
                    }
                }
            }
            // The above loop could be easily replaced with a database query,
            // but we iterate through .getFiles() elsewhere in the command, when
            // updating and/or registering the files, so it should not result in
            // an extra performance hit.
            theDataset.setEmbargoCitationDate(latestEmbargoDate);
        }

        // Clear any external status label
        theDataset.getLatestVersion().setExternalStatusLabel(null);

        // Update metadata
        if (theDataset.getLatestVersion().getReleaseTime() == null) {
            // Allow migrated versions to keep original release dates
            theDataset.getLatestVersion().setReleaseTime(getTimestamp());
        }
        theDataset.getLatestVersion().setLastUpdateTime(getTimestamp());
        theDataset.setModificationTime(getTimestamp());
        theDataset.setFileAccessRequest(theDataset.getLatestVersion().getTermsOfUseAndAccess().isFileAccessRequest());

        // Use the dataset publication date (which may not be the current date for migrated datasets)
        updateFiles(new Timestamp(theDataset.getLatestVersion().getReleaseTime().getTime()), ctxt);

        // TODO: Not sure if this .merge() is necessary here - ?
        // I'm moving a bunch of code from PublishDatasetCommand here; and this .merge()
        // comes from there. There's a chance that the final merge, at the end of this
        // command, would be sufficient. -- L.A. Sep. 6 2017
        theDataset = ctxt.em().merge(theDataset);
        setDataset(theDataset);
        updateDatasetUser(ctxt);

        // If the publisher hasn't contributed to this version:
        DatasetVersionUser ddu = ctxt.datasets().getDatasetVersionUser(theDataset.getLatestVersion(), getUser());

        if (ddu == null) {
            ddu = new DatasetVersionUser();
            ddu.setDatasetVersion(theDataset.getLatestVersion());
            String id = getUser().getIdentifier();
            id = id.startsWith("@") ? id.substring(1) : id;
            AuthenticatedUser au = ctxt.authentication().getAuthenticatedUser(id);
            ddu.setAuthenticatedUser(au);
        }
        ddu.setLastUpdateDate(getTimestamp());
        ctxt.em().merge(ddu);

        try {
            updateParentDataversesSubjectsField(theDataset, ctxt);
        } catch (IOException | SolrServerException e) {
            String failureLogText = "Post-publication indexing failed for Dataverse subject update. ";
            failureLogText += "\r\n" + e.getLocalizedMessage();
            LoggingUtil.writeOnSuccessFailureLog(this, failureLogText, theDataset);
        }

        List<Command> previouslyCalled = ctxt.getCommandsCalled();

        PrivateUrl privateUrl = ctxt.engine().submit(new GetPrivateUrlCommand(getRequest(), theDataset));
        List<Command> afterSub = ctxt.getCommandsCalled();
        previouslyCalled.forEach((c) -> {
            ctxt.getCommandsCalled().add(c);
        });
        // If a Private URL was created for the draft version, it is no longer
        // needed once the dataset is published, so delete it:
        if (privateUrl != null) {
            ctxt.engine().submit(new DeletePrivateUrlCommand(getRequest(), theDataset));
        }

        if (theDataset.getLatestVersion().getVersionState() != RELEASED) {
            // some imported datasets may already be released.

            if (!datasetExternallyReleased) {
                publicizeExternalIdentifier(theDataset, ctxt);
                // Will throw a CommandException, unless successful.
                // This will end the execution of the command, but the method
                // above takes proper care to "clean up after itself" in case of
                // a failure - it will remove any locks, and it will send a
                // proper notification to the user(s).
            }
            theDataset.getLatestVersion().setVersionState(RELEASED);
        }

        final Dataset ds = ctxt.em().merge(theDataset);
        // Remove any pre-pub workflow lock (not needed, as WorkflowServiceBean.workflowComplete()
        // should already have removed it after setting the finalizePublication lock?)
        ctxt.datasets().removeDatasetLocks(ds, DatasetLock.Reason.Workflow);

        // Should this be in onSuccess()?
        ctxt.workflows().getDefaultWorkflow(TriggerType.PostPublishDataset).ifPresent(wf -> {
            try {
                ctxt.workflows().start(wf, buildContext(ds, TriggerType.PostPublishDataset, datasetExternallyReleased), false);
            } catch (CommandException ex) {
                ctxt.datasets().removeDatasetLocks(ds, DatasetLock.Reason.Workflow);
                logger.log(Level.SEVERE, "Error invoking post-publish workflow: " + ex.getMessage(), ex);
            }
        });

        Dataset readyDataset = ctxt.em().merge(ds);

        // Finally, unlock the dataset (leaving any post-publish workflow lock in place)
        ctxt.datasets().removeDatasetLocks(readyDataset, DatasetLock.Reason.finalizePublication);
        if (readyDataset.isLockedFor(DatasetLock.Reason.InReview)) {
            ctxt.datasets().removeDatasetLocks(readyDataset, DatasetLock.Reason.InReview);
        }

        logger.info("Successfully published the dataset " + readyDataset.getGlobalId().asString());
        readyDataset = ctxt.em().merge(readyDataset);

        return readyDataset;
    }
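
    /**
     * Runs after the command succeeds: sends the "published" notifications,
     * re-indexes any dataverses that picked up new subjects, exports the
     * metadata in all formats, and kicks off an async re-index of the dataset.
     */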
    @Override
    public boolean onSuccess(CommandContext ctxt, Object r) {
        boolean retVal = true;
        Dataset dataset = null;
        // The result may be a Dataset, or a PublishDatasetResult wrapping one:
        try {
            dataset = (Dataset) r;
        } catch (ClassCastException e) {
            dataset = ((PublishDatasetResult) r).getDataset();
        }

        try {
            // Success! - send notification:
            notifyUsersDatasetPublishStatus(ctxt, dataset, UserNotification.Type.PUBLISHEDDS);
        } catch (Exception e) {
            logger.warning("Failure to send dataset published messages for : " + dataset.getId() + " : " + e.getMessage());
        }

        // Re-index the dataverses that have additional subjects:
        if (!dataversesToIndex.isEmpty()) {
            for (Dataverse dv : dataversesToIndex) {
                try {
                    Future<String> indexString = ctxt.index().indexDataverse(dv);
                } catch (IOException | SolrServerException e) {
                    String failureLogText = "Post-publication indexing failed. You can kick off a re-index of this dataverse with: \r\n curl http://localhost:8080/api/admin/index/dataverses/" + dv.getId().toString();
                    failureLogText += "\r\n" + e.getLocalizedMessage();
                    LoggingUtil.writeOnSuccessFailureLog(this, failureLogText, dataset);
                    retVal = false;
                }
            }
        }

        // Metadata export:
        try {
            ExportService instance = ExportService.getInstance();
            instance.exportAllFormats(dataset);
            dataset = ctxt.datasets().merge(dataset);
        } catch (Exception ex) {
            // Something went wrong!
            // Just like with indexing, a failure to export is not a fatal
            // condition. We'll just log the error as a warning and keep
            // going:
            logger.log(Level.WARNING, "Finalization: exception caught while exporting: " + ex.getMessage(), ex);
            // ... but it is important to only update the export time stamp if the
            // export was indeed successful.
        }
        ctxt.index().asyncIndexDataset(dataset, true);

        return retVal;
    }

    /**
     * Add the dataset subjects to all parent dataverses.
     */
    private void updateParentDataversesSubjectsField(Dataset savedDataset, CommandContext ctxt) throws SolrServerException, IOException {

        for (DatasetField dsf : savedDataset.getLatestVersion().getDatasetFields()) {
            if (dsf.getDatasetFieldType().getName().equals(DatasetFieldConstant.subject)) {
                Dataverse dv = savedDataset.getOwner();
                while (dv != null) {
                    boolean newSubjectsAdded = false;
                    for (ControlledVocabularyValue cvv : dsf.getControlledVocabularyValues()) {
                        if (!dv.getDataverseSubjects().contains(cvv)) {
                            logger.fine("dv " + dv.getAlias() + " does not have subject " + cvv.getStrValue());
                            newSubjectsAdded = true;
                            dv.getDataverseSubjects().add(cvv);
                        } else {
                            logger.fine("dv " + dv.getAlias() + " already has subject " + cvv.getStrValue());
                        }
                    }
                    if (newSubjectsAdded) {
                        logger.fine("new dataverse subjects added - saving and reindexing in onSuccess");
                        Dataverse dvWithSubjectJustAdded = ctxt.em().merge(dv);
                        ctxt.em().flush();
                        // adding dv to the list of those we need to re-index for new subjects
                        dataversesToIndex.add(dvWithSubjectJustAdded);
                    } else {
                        logger.fine("no new subjects added to the dataverse; skipping reindexing");
                    }
                    dv = dv.getOwner();
                }
                break; // we just update the field whose name is DatasetFieldConstant.subject
            }
        }
    }
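
    /**
     * Verifies the stored checksums of the physical files before publication,
     * subject to the configured per-file and per-dataset size limits. On any
     * failure, the dataset is left locked with Reason.FileValidationFailed and
     * a CommandException is thrown.
     */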
    private void validateDataFiles(Dataset dataset, CommandContext ctxt) throws CommandException {
        try {
            long maxDatasetSize = ctxt.systemConfig().getDatasetValidationSizeLimit();
            long maxFileSize = ctxt.systemConfig().getFileValidationSizeLimit();

            long datasetSize = DatasetUtil.getDownloadSizeNumeric(dataset.getLatestVersion(), false);
            if (maxDatasetSize == -1 || datasetSize < maxDatasetSize) {
                for (DataFile dataFile : dataset.getFiles()) {
                    // TODO: Should we validate all the files in the dataset, or only
                    // the files that haven't been published previously?
                    // (The decision was made to validate all the files on every
                    // major release; we can revisit the decision if there's any
                    // indication that this makes publishing take significantly longer.)
                    String driverId = FileUtil.getStorageDriver(dataFile);
                    if (StorageIO.isDataverseAccessible(driverId) && (maxFileSize == -1 || dataFile.getFilesize() < maxFileSize)) {
                        FileUtil.validateDataFileChecksum(dataFile);
                    } else {
                        String message = "Checksum validation skipped for datafile " + dataFile.getId() + ", because of the datafile size limit (set to " + maxFileSize + ")";
                        logger.info(message);
                    }
                }
            } else {
                String message = "Checksum validation skipped for dataset " + dataset.getId() + ", because of the dataset size limit (set to " + maxDatasetSize + ")";
                logger.info(message);
            }
        } catch (Throwable e) {
            if (dataset.isLockedFor(DatasetLock.Reason.finalizePublication)) {
                DatasetLock lock = dataset.getLockFor(DatasetLock.Reason.finalizePublication);
                lock.setReason(DatasetLock.Reason.FileValidationFailed);
                lock.setInfo(FILE_VALIDATION_ERROR);
                ctxt.datasets().updateDatasetLock(lock);
            } else {
                // Lock the dataset with a new FileValidationFailed lock:
                DatasetLock lock = new DatasetLock(DatasetLock.Reason.FileValidationFailed, getRequest().getAuthenticatedUser());
                lock.setDataset(dataset);
                lock.setInfo(FILE_VALIDATION_ERROR);
                ctxt.datasets().addDatasetLock(dataset, lock);
            }

            // Throw a new CommandException; if the command is being called
            // synchronously, it will be intercepted and the page will display
            // the error message for the user.
            throw new CommandException(BundleUtil.getStringFromBundle("dataset.publish.file.validation.error.details"), this);
        }
    }
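
    /**
     * Publicizes the dataset PID, and the file PIDs where enabled and supported
     * by the effective PID provider. On failure, notifies the user(s), removes
     * the finalizePublication lock, and rethrows as a CommandException.
     */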
    private void publicizeExternalIdentifier(Dataset dataset, CommandContext ctxt) throws CommandException {
        PidProvider pidProvider = ctxt.dvObjects().getEffectivePidGenerator(dataset);
        try {
            // We will skip trying to register the global identifiers for datafiles
            // if "dependent" file-level identifiers are requested, AND the naming
            // protocol, or the authority of the dataset global id is different from
            // what's currently configured for the Dataverse. In other words,
            // we can't get "dependent" DOIs assigned to files in a dataset
            // with a registered id that is a handle; or even a DOI, but in
            // an authority that's different from what's currently configured.
            // Additionally, in 4.9.3 we added a system variable to disable
            // registering file PIDs on the installation level.
            boolean registerGlobalIdsForFiles = ctxt.systemConfig().isFilePIDsEnabledForCollection(
                    getDataset().getOwner())
                    && pidProvider.canCreatePidsLike(dataset.getGlobalId());

            if (registerGlobalIdsForFiles
                    && dataset.getLatestVersion().getMinorVersionNumber() != null
                    && dataset.getLatestVersion().getMinorVersionNumber().equals((long) 0)) {
                for (DataFile df : dataset.getFiles()) {
                    logger.log(Level.FINE, "registering global id for file {0}", df.getId());
                    // A false return value indicates a failure in calling the service
                    if (!pidProvider.publicizeIdentifier(df)) {
                        throw new Exception();
                    }
                    df.setGlobalIdCreateTime(getTimestamp());
                    df.setIdentifierRegistered(true);
                }
            }
            if (!pidProvider.publicizeIdentifier(dataset)) {
                throw new Exception();
            }
            dataset.setGlobalIdCreateTime(new Date()); // TODO these two methods should be in the responsibility of the
                                                       // pidProvider.
            dataset.setIdentifierRegistered(true);
        } catch (Throwable e) {
            logger.warning("Failed to register the identifier " + dataset.getGlobalId().asString()
                    + ", or to register a file in the dataset; notifying the user(s), unlocking the dataset");

            // Send failure notification to the user:
            notifyUsersDatasetPublishStatus(ctxt, dataset, UserNotification.Type.PUBLISHFAILED_PIDREG);

            ctxt.datasets().removeDatasetLocks(dataset, DatasetLock.Reason.finalizePublication);
            throw new CommandException(
                    BundleUtil.getStringFromBundle("dataset.publish.error", pidProvider.getProviderInformation()),
                    this);
        }
    }
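
    /**
     * Stamps newly published files with the publication date, syncs each file's
     * restriction flag with the latest version, and clears the dataset thumbnail
     * if the designated thumbnail file has become restricted.
     */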
    private void updateFiles(Timestamp updateTime, CommandContext ctxt) throws CommandException {
        for (DataFile dataFile : getDataset().getFiles()) {
            if (dataFile.getPublicationDate() == null) {
                // this is a new, previously unpublished file, so publish it by setting the date
                dataFile.setPublicationDate(updateTime);

                // check if any preexisting role assignments have file download and send notifications
                notifyUsersFileDownload(ctxt, dataFile);
            }

            // set the file's restriction flag to the same as the latest version's
            if (dataFile.getFileMetadata() != null && dataFile.getFileMetadata().getDatasetVersion().equals(getDataset().getLatestVersion())) {
                dataFile.setRestricted(dataFile.getFileMetadata().isRestricted());
            }

            if (dataFile.isRestricted()) {
                // If the file has been restricted:
                //    If this (image) file has been assigned as the dedicated
                //    thumbnail for the dataset, we need to remove that assignment,
                //    now that the file is restricted.

                // Dataset thumbnail assignment:
                if (dataFile.equals(getDataset().getThumbnailFile())) {
                    getDataset().setThumbnailFile(null);
                }
            }
        }
    }

    // These notification methods are fairly similar, but it was cleaner to create a few copies.
    // If more notifications are needed in this command, they should probably be collapsed.
    private void notifyUsersFileDownload(CommandContext ctxt, DvObject subject) {
        ctxt.roles().directRoleAssignments(subject).stream()
            .filter(  ra -> ra.getRole().permissions().contains(Permission.DownloadFile) )
            .flatMap( ra -> ctxt.roleAssignees().getExplicitUsers(ctxt.roleAssignees().getRoleAssignee(ra.getAssigneeIdentifier())).stream() )
            .distinct() // prevent double-send
            .forEach( au -> ctxt.notifications().sendNotification(au, getTimestamp(), UserNotification.Type.GRANTFILEACCESS, getDataset().getId()) );
    }

    private void notifyUsersDatasetPublishStatus(CommandContext ctxt, DvObject subject, UserNotification.Type type) {

        ctxt.roles().rolesAssignments(subject).stream()
            .filter(  ra -> ra.getRole().permissions().contains(Permission.ViewUnpublishedDataset) || ra.getRole().permissions().contains(Permission.DownloadFile))
            .flatMap( ra -> ctxt.roleAssignees().getExplicitUsers(ctxt.roleAssignees().getRoleAssignee(ra.getAssigneeIdentifier())).stream() )
            .distinct() // prevent double-send
            //.forEach( au -> ctxt.notifications().sendNotification(au, timestamp, messageType, theDataset.getId()) ); //not sure why this line doesn't work instead
            .forEach( au -> ctxt.notifications().sendNotificationInNewTransaction(au, getTimestamp(), type, getDataset().getLatestVersion().getId()) );
    }

}
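
For context, a command like this is not called directly; it is submitted through the Dataverse command engine, the same ctxt.engine().submit(...) pattern the class itself uses for GetPrivateUrlCommand. A minimal caller sketch, assuming an injected engine plus an existing dataset and request (the 'engine' bean name here is an assumption, not from this file):

// Hypothetical caller: finalize publication of a dataset whose PID was
// pre-published externally (hence 'true' for isPidPrePublished).
// 'engine' stands in for an injected command engine; the submit(...)
// pattern mirrors this class's own usage.
Dataset published = engine.submit(
        new FinalizeDatasetPublicationCommand(dataset, request, true));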