1
0

M8 komplett umgesetzt

This commit is contained in:
2026-04-08 16:30:13 +02:00
parent a3f47ba560
commit d61316c699
21 changed files with 2377 additions and 89 deletions

View File

@@ -85,30 +85,6 @@ public class AiRequestComposer {
Objects.requireNonNull(promptContent, "promptContent must not be null");
Objects.requireNonNull(documentText, "documentText must not be null");
// The complete request text is composed in a fixed, deterministic order:
// 1. Prompt content (instruction)
// 2. Newline separator
// 3. Prompt identifier marker (for traceability)
// 4. Newline separator
// 5. Document text section marker
// 6. Newline separator
// 7. Document text content
// 8. Newline separator
// 9. Response format specification (JSON-only with required fields)
//
// This order is fixed so that another implementation knows exactly where
// each part is positioned and what to expect.
StringBuilder requestBuilder = new StringBuilder();
requestBuilder.append(promptContent);
requestBuilder.append("\n");
requestBuilder.append("--- Prompt-ID: ").append(promptIdentifier.identifier()).append(" ---");
requestBuilder.append("\n");
requestBuilder.append("--- Document Text ---");
requestBuilder.append("\n");
requestBuilder.append(documentText);
requestBuilder.append("\n");
appendJsonResponseFormat(requestBuilder);
// Record the exact character count of the document text that was included.
// This is the length of the document text (not the complete request).
int sentCharacterCount = documentText.length();

View File

@@ -393,10 +393,10 @@ public class DefaultBatchRunProcessingUseCase implements BatchRunProcessingUseCa
private void logProcessingOutcome(SourceDocumentCandidate candidate, DocumentProcessingOutcome outcome) {
switch (outcome) {
case de.gecheckt.pdf.umbenenner.domain.model.PreCheckFailed failed ->
logger.info("Pre-checks FAILED for '{}': {} (Deterministic content error).",
logger.info("Pre-checks failed for '{}': {} (deterministic content error).",
candidate.uniqueIdentifier(), failed.failureReasonDescription());
case de.gecheckt.pdf.umbenenner.domain.model.TechnicalDocumentError technicalError ->
logger.warn("Processing FAILED for '{}': {} (Technical error retryable).",
logger.warn("Processing failed for '{}': {} (transient technical error retryable).",
candidate.uniqueIdentifier(), technicalError.errorMessage());
case de.gecheckt.pdf.umbenenner.domain.model.NamingProposalReady ready ->
logger.info("AI naming proposal ready for '{}': title='{}', date={}.",
@@ -404,10 +404,10 @@ public class DefaultBatchRunProcessingUseCase implements BatchRunProcessingUseCa
ready.proposal().validatedTitle(),
ready.proposal().resolvedDate());
case de.gecheckt.pdf.umbenenner.domain.model.AiTechnicalFailure aiTechnical ->
logger.warn("AI technical failure for '{}': {} (Transient retryable).",
logger.warn("AI invocation failed for '{}': {} (transient technical error retryable).",
candidate.uniqueIdentifier(), aiTechnical.errorMessage());
case de.gecheckt.pdf.umbenenner.domain.model.AiFunctionalFailure aiFunctional ->
logger.info("AI functional failure for '{}': {} (Deterministic content error).",
logger.info("AI naming failed for '{}': {} (deterministic content error).",
candidate.uniqueIdentifier(), aiFunctional.errorMessage());
default -> { /* other outcomes are handled elsewhere */ }
}

View File

@@ -314,4 +314,13 @@ class AiNamingServiceTest {
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("maxTextCharacters must be >= 1");
}
@Test
void constructor_maxTextCharactersOne_doesNotThrow() {
    // Boundary test: 1 is the smallest legal value for maxTextCharacters.
    // A changed-conditional-boundary mutation turning '< 1' into '<= 1' would
    // make this constructor call throw, so that mutant is detected here.
    new AiNamingService(aiInvocationPort, promptPort, validator, MODEL_NAME, 1);
    // Reaching this point without an exception proves the boundary is exact.
}
}

View File

@@ -829,8 +829,9 @@ class DocumentProcessingCoordinatorTest {
// No PROPOSAL_READY attempt pre-populated
// persistTransientError returns true when the error record was persisted successfully
processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
boolean result = processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_RETRYABLE)
.findFirst()
@@ -851,8 +852,9 @@ class DocumentProcessingCoordinatorTest {
null, DateSource.AI_PROVIDED, "Rechnung", null);
attemptRepo.savedAttempts.add(badProposal);
processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
boolean result = processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_RETRYABLE)
.findFirst()
@@ -871,8 +873,10 @@ class DocumentProcessingCoordinatorTest {
new FailingTargetFolderPort(), new NoOpTargetFileCopyPort(), new NoOpProcessingLogger(),
DEFAULT_MAX_RETRIES_TRANSIENT);
coordinatorWithFailingFolder.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
boolean result = coordinatorWithFailingFolder.processDeferredOutcome(
candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_RETRYABLE)
.findFirst()
@@ -891,8 +895,10 @@ class DocumentProcessingCoordinatorTest {
new NoOpTargetFolderPort(), new FailingTargetFileCopyPort(), new NoOpProcessingLogger(),
DEFAULT_MAX_RETRIES_TRANSIENT);
coordinatorWithFailingCopy.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
boolean result = coordinatorWithFailingCopy.processDeferredOutcome(
candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_RETRYABLE)
.findFirst()
@@ -914,8 +920,9 @@ class DocumentProcessingCoordinatorTest {
"A".repeat(21), null);
attemptRepo.savedAttempts.add(badProposal);
processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
boolean result = processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_RETRYABLE)
.findFirst()
@@ -939,8 +946,9 @@ class DocumentProcessingCoordinatorTest {
"Rechnung-2026", null);
attemptRepo.savedAttempts.add(badProposal);
processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
boolean result = processor.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_RETRYABLE)
.findFirst()
@@ -1008,9 +1016,10 @@ class DocumentProcessingCoordinatorTest {
new NoOpTargetFolderPort(), countingCopyPort, new NoOpProcessingLogger(),
DEFAULT_MAX_RETRIES_TRANSIENT);
coordinatorWithCountingCopy.processDeferredOutcome(
boolean result = coordinatorWithCountingCopy.processDeferredOutcome(
candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_RETRYABLE)
.findFirst()
@@ -1037,9 +1046,10 @@ class DocumentProcessingCoordinatorTest {
recordRepo, attemptRepo, unitOfWorkPort,
new NoOpTargetFolderPort(), failingCopy, new NoOpProcessingLogger(), 1);
coordinatorWith1Retry.processDeferredOutcome(
boolean result = coordinatorWith1Retry.processDeferredOutcome(
candidate, fingerprint, context, attemptStart, c -> null);
assertTrue(result, "processDeferredOutcome must return true when the transient error is persisted successfully");
ProcessingAttempt errorAttempt = attemptRepo.savedAttempts.stream()
.filter(a -> a.status() == ProcessingStatus.FAILED_FINAL)
.findFirst()
@@ -1055,6 +1065,58 @@ class DocumentProcessingCoordinatorTest {
"Transient error counter must be 1 after the first cross-run transient error");
}
@Test
void processDeferredOutcome_proposalReady_copyFailure_retryDecisionLog_containsFailedRetryable() {
    // A failing copy routes through persistTransientError into the FAILED_RETRYABLE
    // branch. The retry-decision log line must carry the branch-specific markers
    // "FAILED_RETRYABLE" and "will retry in later run" so this branch is
    // distinguishable from FAILED_FINAL. That distinction kills the
    // negated-conditional mutation on the retryable flag check.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    MessageCapturingProcessingLogger logger = new MessageCapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), new FailingTargetFileCopyPort(), logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(logger.anyInfoContains("FAILED_RETRYABLE"),
            "Retry decision log for a retryable transient copy error must contain FAILED_RETRYABLE. "
                    + "Captured info messages: " + logger.infoMessages);
    assertTrue(logger.anyInfoContains("will retry in later run"),
            "Retry decision log for a retryable transient error must contain 'will retry in later run'. "
                    + "Captured info messages: " + logger.infoMessages);
}
@Test
void processDeferredOutcome_proposalReady_copyFailure_maxRetriesTransient1_retryDecisionLog_containsFailedFinal() {
    // With maxRetriesTransient=1 a copy failure finalises immediately inside
    // persistTransientError. The retry-decision log must then carry the
    // branch-specific markers "FAILED_FINAL" and "transient error limit reached",
    // which separates this path from the FAILED_RETRYABLE branch.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    MessageCapturingProcessingLogger logger = new MessageCapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), new FailingTargetFileCopyPort(), logger,
            1 /* a limit of one transient error finalises immediately */);
    coordinator.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(logger.anyInfoContains("FAILED_FINAL"),
            "Retry decision log for a finalising transient copy error must contain FAILED_FINAL. "
                    + "Captured info messages: " + logger.infoMessages);
    assertTrue(logger.anyInfoContains("transient error limit reached"),
            "Retry decision log for a finalising transient error must contain 'transient error limit reached'. "
                    + "Captured info messages: " + logger.infoMessages);
}
@Test
void processDeferredOutcome_proposalReady_immediateRetryDoesNotTriggerAiOrNewProposal() {
// Ensures that during the immediate retry path no pipeline (AI) execution happens
@@ -1375,6 +1437,26 @@ class DocumentProcessingCoordinatorTest {
}
}
/** Test double that records how often {@link #tryDeleteTargetFile(String)} is invoked. */
private static class CapturingTargetFolderPort implements TargetFolderPort {

    // Incremented on every tryDeleteTargetFile call; read directly by tests.
    int tryDeleteCallCount = 0;

    @Override
    public void tryDeleteTargetFile(String filename) {
        tryDeleteCallCount = tryDeleteCallCount + 1;
    }

    @Override
    public TargetFilenameResolutionResult resolveUniqueFilename(String name) {
        return new ResolvedTargetFilename(name);
    }

    @Override
    public String getTargetFolderLocator() {
        return "/tmp/target";
    }
}
private static class NoOpTargetFolderPort implements TargetFolderPort {
@Override
public String getTargetFolderLocator() {
@@ -1493,6 +1575,162 @@ class DocumentProcessingCoordinatorTest {
assertTrue(capturingLogger.anyInfoContains("FAILED_FINAL"),
"Finalising retry decision log must contain the FAILED_FINAL classification. "
+ "Captured info messages: " + capturingLogger.infoMessages);
assertTrue(capturingLogger.anyInfoContains("permanently failed"),
"Finalising retry decision log must contain 'permanently failed' to distinguish "
+ "the FAILED_FINAL branch from the generic status log. "
+ "Captured info messages: " + capturingLogger.infoMessages);
}
// -------------------------------------------------------------------------
// Finalization path logging: error, warn, and info calls in key paths
// -------------------------------------------------------------------------
@Test
void processDeferredOutcome_proposalReady_missingProposalAttempt_logsError() {
    // The record claims PROPOSAL_READY, but the attempt history holds no matching
    // PROPOSAL_READY entry. finalizeProposalReady therefore hits the null-attempt
    // branch and must report the inconsistency at ERROR level.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    // Deliberately no attempt pre-loaded.
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), new NoOpTargetFileCopyPort(), logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(logger.errorCallCount > 0,
            "An error must be logged when the PROPOSAL_READY attempt is missing from history");
}
@Test
void processDeferredOutcome_proposalReady_inconsistentProposalState_logsError() {
    // A PROPOSAL_READY attempt with a null resolved date is an inconsistent
    // proposal state; finalizeProposalReady must surface this at ERROR level.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    ProcessingAttempt inconsistentAttempt = new ProcessingAttempt(
            fingerprint, context.runId(), 1, Instant.now(), Instant.now(),
            ProcessingStatus.PROPOSAL_READY, null, null, false,
            "model", "prompt", 1, 100, "{}", "reason",
            null, DateSource.AI_PROVIDED, "Rechnung", null);
    attemptRepo.savedAttempts.add(inconsistentAttempt);
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), new NoOpTargetFileCopyPort(), logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(logger.errorCallCount > 0,
            "An error must be logged when the proposal state is inconsistent");
}
@Test
void processDeferredOutcome_proposalReady_duplicateResolutionFailure_logsError() {
    // When the target-folder port fails to resolve a unique filename,
    // finalizeProposalReady must report the failure at ERROR level.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new FailingTargetFolderPort(), new NoOpTargetFileCopyPort(), logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(logger.errorCallCount > 0,
            "An error must be logged when duplicate resolution fails");
}
@Test
void processDeferredOutcome_proposalReady_resolvedFilename_logsInfo() {
    // On successful duplicate resolution the resolved filename must be announced
    // at INFO level; the pipeline callback must never run for PROPOSAL_READY.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), new NoOpTargetFileCopyPort(), logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(
            candidate, fingerprint, context, attemptStart,
            c -> { throw new AssertionError("Pipeline must not run for PROPOSAL_READY"); });
    assertTrue(logger.infoCallCount > 0,
            "Resolved target filename must be logged at INFO level");
}
@Test
void processDeferredOutcome_proposalReady_firstCopyFails_logsWarn() {
    // Only the first copy attempt fails, triggering the immediate in-run retry;
    // that first failure must be reported at WARN level.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    CountingTargetFileCopyPort firstAttemptFails = new CountingTargetFileCopyPort(1);
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), firstAttemptFails, logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(
            candidate, fingerprint, context, attemptStart,
            c -> { throw new AssertionError("Pipeline must not run for PROPOSAL_READY"); });
    assertTrue(logger.warnCallCount > 0,
            "A WARN must be logged when the first copy attempt fails and an immediate retry is triggered");
}
@Test
void processDeferredOutcome_proposalReady_bothCopyAttemptsFail_logsError() {
    // The initial copy and the immediate retry both fail; finalizeProposalReady
    // must record the exhausted in-run retry at ERROR level.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    CountingTargetFileCopyPort bothAttemptsFail = new CountingTargetFileCopyPort(2);
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), bothAttemptsFail, logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(
            candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(logger.errorCallCount > 0,
            "An error must be logged when both copy attempts fail");
}
@Test
void processDeferredOutcome_proposalReady_immediateRetrySucceeds_logsInfo() {
    // The first copy fails but the immediate in-run retry succeeds; the success
    // must be reported at INFO level and no pipeline execution may occur.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    CountingTargetFileCopyPort firstAttemptFails = new CountingTargetFileCopyPort(1);
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), firstAttemptFails, logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(
            candidate, fingerprint, context, attemptStart,
            c -> { throw new AssertionError("Pipeline must not run for PROPOSAL_READY"); });
    assertTrue(logger.infoCallCount > 0,
            "An INFO must be logged when the immediate within-run retry succeeds");
}
/** Counts logger calls per level so that VoidMethodCallMutator mutations are detectable. */
@@ -1581,5 +1819,91 @@ class DocumentProcessingCoordinatorTest {
/** Returns true if any captured INFO message contains the given substring. */
boolean anyInfoContains(String text) {
    for (String message : infoMessages) {
        if (message.contains(text)) {
            return true;
        }
    }
    return false;
}
/** Returns true if any captured ERROR message contains the given substring. */
boolean anyErrorContains(String text) {
    for (String message : errorMessages) {
        if (message.contains(text)) {
            return true;
        }
    }
    return false;
}
}
// -------------------------------------------------------------------------
// AI sensitive content logging in finalization path
// -------------------------------------------------------------------------
@Test
void processDeferredOutcome_proposalReady_aiContentNotNull_callsDebugSensitiveAiContent() {
    // buildValidProposalAttempt() carries non-null aiRawResponse and aiReasoning.
    // The null guards in finalizeProposalReady must therefore fire the
    // debugSensitiveAiContent call for both values; a negated guard would
    // suppress those calls exactly when the values are present, which this
    // test detects via the call counter.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt()); // aiRawResponse="{}", aiReasoning="reason"
    CapturingProcessingLogger logger = new CapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), new NoOpTargetFileCopyPort(), logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(
            candidate, fingerprint, context, attemptStart,
            c -> { throw new AssertionError("Pipeline must not run for PROPOSAL_READY"); });
    assertTrue(logger.debugSensitiveAiContentCallCount >= 2,
            "debugSensitiveAiContent must be called for aiRawResponse and aiReasoning "
                    + "when both are non-null. Actual call count: "
                    + logger.debugSensitiveAiContentCallCount);
}
// -------------------------------------------------------------------------
// Best-effort rollback path: tryDeleteTargetFile and secondary persistence
// -------------------------------------------------------------------------
@Test
void processDeferredOutcome_proposalReady_persistenceFailureAfterCopy_callsTryDeleteTargetFile() {
    // If persistence fails after the copy already succeeded, the best-effort
    // rollback must invoke tryDeleteTargetFile so no orphaned target file
    // remains. Kills the 'removed call to tryDeleteTargetFile' mutation.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    unitOfWorkPort.failOnExecute = true;
    CapturingTargetFolderPort folderPort = new CapturingTargetFolderPort();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            folderPort, new NoOpTargetFileCopyPort(), new NoOpProcessingLogger(),
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(folderPort.tryDeleteCallCount > 0,
            "tryDeleteTargetFile must be called at least once for best-effort rollback "
                    + "when persistence fails after a successful copy");
}
@Test
void processDeferredOutcome_proposalReady_persistenceFailureAfterCopy_logsSecondaryFailure() {
    // Both the primary persistence and the follow-up attempt in
    // persistTransientErrorAfterPersistenceFailure fail. The secondary failure
    // must be reported at ERROR level, which kills the
    // 'removed call to persistTransientErrorAfterPersistenceFailure' mutation.
    DocumentRecord record = buildRecord(ProcessingStatus.PROPOSAL_READY, FailureCounters.zero());
    recordRepo.setLookupResult(new DocumentKnownProcessable(record));
    attemptRepo.savedAttempts.add(buildValidProposalAttempt());
    unitOfWorkPort.failOnExecute = true; // primary AND secondary persistence fail
    MessageCapturingProcessingLogger logger = new MessageCapturingProcessingLogger();
    DocumentProcessingCoordinator coordinator = new DocumentProcessingCoordinator(
            recordRepo, attemptRepo, unitOfWorkPort,
            new NoOpTargetFolderPort(), new NoOpTargetFileCopyPort(), logger,
            DEFAULT_MAX_RETRIES_TRANSIENT);
    coordinator.processDeferredOutcome(candidate, fingerprint, context, attemptStart, c -> null);
    assertTrue(logger.anyErrorContains("Secondary persistence failure")
                    || logger.anyErrorContains("secondary"),
            "An error must be logged for the secondary persistence failure. "
                    + "Captured error messages: " + logger.errorMessages);
}
}

View File

@@ -7,8 +7,11 @@ import de.gecheckt.pdf.umbenenner.domain.model.AiTechnicalFailure;
import de.gecheckt.pdf.umbenenner.domain.model.DateSource;
import de.gecheckt.pdf.umbenenner.domain.model.NamingProposal;
import de.gecheckt.pdf.umbenenner.domain.model.NamingProposalReady;
import de.gecheckt.pdf.umbenenner.domain.model.PdfExtractionSuccess;
import de.gecheckt.pdf.umbenenner.domain.model.PdfPageCount;
import de.gecheckt.pdf.umbenenner.domain.model.PreCheckFailed;
import de.gecheckt.pdf.umbenenner.domain.model.PreCheckFailureReason;
import de.gecheckt.pdf.umbenenner.domain.model.PreCheckPassed;
import de.gecheckt.pdf.umbenenner.domain.model.ProcessingStatus;
import de.gecheckt.pdf.umbenenner.domain.model.SourceDocumentCandidate;
import de.gecheckt.pdf.umbenenner.domain.model.SourceDocumentLocator;
@@ -314,4 +317,58 @@ class ProcessingOutcomeTransitionTest {
assertEquals(ProcessingStatus.FAILED_FINAL, result.overallStatus());
assertEquals(2, result.counters().transientErrorCount());
}
// -------------------------------------------------------------------------
// PreCheckPassed routed through transition (edge case: no AI step taken)
// -------------------------------------------------------------------------
@Test
void forNewDocument_preCheckPassed_limitOne_immediatelyFinal() {
    // The transition treats a PreCheckPassed that never reached an AI step as a
    // transient error. With a limit of 1 that single error already exhausts the
    // budget, so the document must land on FAILED_FINAL right away.
    var outcome = new PreCheckPassed(
            candidate(), new PdfExtractionSuccess("text", new PdfPageCount(1)));
    var result = ProcessingOutcomeTransition.forNewDocument(outcome, LIMIT_1);
    assertEquals(ProcessingStatus.FAILED_FINAL, result.overallStatus(),
            "With limit=1 a PreCheckPassed-routed transient error must immediately finalise");
    assertFalse(result.retryable());
    assertEquals(1, result.counters().transientErrorCount());
    assertEquals(0, result.counters().contentErrorCount());
}
@Test
void forNewDocument_preCheckPassed_limitTwo_firstErrorRetryable() {
    // With a limit of 2 the first PreCheckPassed-routed transient error stays
    // below the budget and must therefore remain retryable.
    var outcome = new PreCheckPassed(
            candidate(), new PdfExtractionSuccess("text", new PdfPageCount(1)));
    var result = ProcessingOutcomeTransition.forNewDocument(outcome, LIMIT_2);
    assertEquals(ProcessingStatus.FAILED_RETRYABLE, result.overallStatus(),
            "With limit=2 the first PreCheckPassed-routed transient error must be retryable");
    assertTrue(result.retryable());
    assertEquals(1, result.counters().transientErrorCount());
    assertEquals(0, result.counters().contentErrorCount());
}
@Test
void forKnownDocument_preCheckPassed_limitTwo_secondErrorFinal() {
    // An existing transient error count of 1 plus one more PreCheckPassed-routed
    // error reaches the limit of 2, so the transition must finalise the document.
    var outcome = new PreCheckPassed(
            candidate(), new PdfExtractionSuccess("text", new PdfPageCount(1)));
    var existing = new FailureCounters(0, 1);
    var result = ProcessingOutcomeTransition.forKnownDocument(outcome, existing, LIMIT_2);
    assertEquals(ProcessingStatus.FAILED_FINAL, result.overallStatus(),
            "PreCheckPassed-routed error at transient limit must finalise to FAILED_FINAL");
    assertFalse(result.retryable());
    assertEquals(2, result.counters().transientErrorCount());
}
}