Skip to content

Commit 366396e

Browse files
committed
add updating index, permindex, export times
1 parent 5274098 commit 366396e

3 files changed

Lines changed: 34 additions & 14 deletions

File tree

src/main/java/edu/harvard/iq/dataverse/DatasetServiceBean.java

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@
3030
import edu.harvard.iq.dataverse.workflows.WorkflowComment;
3131

3232
import java.io.*;
33+
import java.sql.Timestamp;
3334
import java.text.SimpleDateFormat;
3435
import java.util.*;
3536
import java.util.logging.FileHandler;
@@ -1171,4 +1172,30 @@ public void setLastExportTimeInNewTransaction(Long datasetId, Date lastExportTim
11711172
logger.log(Level.SEVERE, "Failed to retry export after OptimisticLockException for dataset id=" + datasetId, e);
11721173
}
11731174
}
1175+
1176+
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
1177+
public void updateIndexingAndExportTimes(Dataset dataset) {
1178+
Query timestampQuery = em.createNativeQuery(
1179+
"SELECT dvo.indextime, dvo.permissionindextime, d.lastexporttime " +
1180+
"FROM dvobject dvo, dataset d WHERE dvo.id = d.id AND dvo.id = ?");
1181+
timestampQuery.setParameter(1, dataset.getId());
1182+
1183+
Object[] timestamps = (Object[]) timestampQuery.getSingleResult();
1184+
1185+
// Cast and apply the fresh timestamps to the current dataset
1186+
Timestamp freshIndexTime = (Timestamp) timestamps[0];
1187+
Timestamp freshPermissionIndexTime = (Timestamp) timestamps[1];
1188+
Timestamp freshLastExportTime = (Timestamp) timestamps[2];
1189+
1190+
1191+
logger.fine("Updating index time from " + dataset.getIndexTime() + " to " + freshIndexTime);
1192+
dataset.setIndexTime(freshIndexTime);
1193+
1194+
logger.fine("Updating permission index time from " + dataset.getPermissionIndexTime() + " to " + freshPermissionIndexTime);
1195+
dataset.setPermissionIndexTime(freshPermissionIndexTime);
1196+
1197+
logger.fine("Updating last export time from " + dataset.getLastExportTime() + " to " + freshLastExportTime);
1198+
dataset.setLastExportTime(freshLastExportTime);
1199+
1200+
}
11741201
}

src/main/java/edu/harvard/iq/dataverse/workflow/WorkflowServiceBean.java

Lines changed: 6 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242
import jakarta.inject.Inject;
4343
import jakarta.persistence.EntityManager;
4444
import jakarta.persistence.PersistenceContext;
45+
import jakarta.persistence.Query;
4546
import jakarta.persistence.TypedQuery;
4647

4748
/**
@@ -149,13 +150,12 @@ public void start(Workflow wf, WorkflowContext ctxt, boolean findDataset) throws
149150
logger.warning("Failed to sleep for a second.");
150151
}
151152
}
152-
//Refresh will only em.find the dataset if findDataset is true. (otherwise the dataset is em.merged)
153+
153154
ctxt = refresh(ctxt, retrieveRequestedSettings( wf.getRequiredSettings()), getCurrentApiToken(ctxt.getRequest().getAuthenticatedUser()), findDataset);
154155
lockDataset(ctxt, new DatasetLock(DatasetLock.Reason.Workflow, ctxt.getRequest().getAuthenticatedUser()));
155156
forward(wf, ctxt);
156157
}
157158

158-
159159
private ApiToken getCurrentApiToken(AuthenticatedUser au) {
160160
if (au != null) {
161161
CommandContext ctxt = engine.getContext();
@@ -210,7 +210,6 @@ public void resume(PendingWorkflowInvocation pending, String body) {
210210
}
211211

212212

213-
@Asynchronous
214213
private void forward(Workflow wf, WorkflowContext ctxt) {
215214
executeSteps(wf, ctxt, 0);
216215
}
@@ -244,7 +243,6 @@ private void doResume(PendingWorkflowInvocation pending, String body) {
244243
}
245244
}
246245

247-
@Asynchronous
248246
private void rollback(Workflow wf, WorkflowContext ctxt, Failure failure, int lastCompletedStepIdx) {
249247
ctxt = refresh(ctxt);
250248
final List<WorkflowStepData> steps = wf.getSteps();
@@ -308,7 +306,6 @@ private void executeSteps(Workflow wf, WorkflowContext ctxt, int initialStepIdx
308306
return;
309307
}
310308
}
311-
312309
workflowCompleted(wf, ctxt);
313310

314311
}
@@ -317,22 +314,18 @@ private void executeSteps(Workflow wf, WorkflowContext ctxt, int initialStepIdx
317314
// Internal methods to run each step in its own transaction.
318315
//
319316

320-
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
321317
WorkflowStepResult runStep( WorkflowStep step, WorkflowContext ctxt ) {
322318
return step.run(ctxt);
323319
}
324320

325-
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
326321
WorkflowStepResult resumeStep( WorkflowStep step, WorkflowContext ctxt, Map<String,String> localData, String externalData ) {
327322
return step.resume(ctxt, localData, externalData);
328323
}
329324

330-
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
331325
void rollbackStep( WorkflowStep step, WorkflowContext ctxt, Failure reason ) {
332326
step.rollback(ctxt, reason);
333327
}
334328

335-
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
336329
void lockDataset(WorkflowContext ctxt, DatasetLock datasetLock) throws CommandException {
337330
/*
338331
* Note that this method directly adds a lock to the database rather than adding
@@ -350,7 +343,6 @@ void lockDataset(WorkflowContext ctxt, DatasetLock datasetLock) throws CommandEx
350343
ctxt.setLockId(datasetLock.getId());
351344
}
352345

353-
@TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
354346
void unlockDataset(WorkflowContext ctxt) throws CommandException {
355347
/*
356348
* Since the lockDataset command above directly persists a lock to the database,
@@ -387,14 +379,14 @@ private void workflowCompleted(Workflow wf, WorkflowContext ctxt) {
387379
// Read fresh timestamps from DB - parallel index/exports may have occurred while the workflow ran
388380
// (Nominally the workflow lock should have stopped other changes).
389381
Dataset dataset = ctxt.getDataset();
390-
Dataset dbDataset = em.find(Dataset.class, ctxt.getDataset().getId());
391-
dataset.setIndexTime(dbDataset.getIndexTime());
392-
dataset.setPermissionIndexTime(dbDataset.getPermissionIndexTime());
393-
dataset.setLastExportTime(dbDataset.getLastExportTime());
394382

383+
datasets.updateIndexingAndExportTimes(dataset);
384+
385+
395386
try {
396387
if (ctxt.getType() == TriggerType.PrePublishDataset) {
397388
ctxt = refresh(ctxt);
389+
dataset = ctxt.getDataset();
398390
// Now lock for FinalizePublication - this block mirrors that in PublishDatasetCommand
399391
AuthenticatedUser user = ctxt.getRequest().getAuthenticatedUser();
400392
DatasetLock lock = new DatasetLock(DatasetLock.Reason.finalizePublication, user);

src/main/java/edu/harvard/iq/dataverse/workflow/internalspi/ArchivalSubmissionWorkflowStep.java

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -89,6 +89,7 @@ public WorkflowStepResult run(WorkflowContext context) {
8989
* pending as is done when running archiving from the UI/API. Instead, there is a generic workflow
9090
* lock on the dataset.
9191
*/
92+
9293
return archiveCommand.performArchiveSubmission(
9394
version,
9495
dataCiteXml,

0 commit comments

Comments (0)