Skip to content

Commit ced9292

Browse files
igorlslealvona
authored and committed
chore(corpus-origin): address Copilot review on MemPalace#1223
- cli.py: stringify each evidence entry exactly once before the startswith check (was calling str(e) twice per element). - tests: replace brittle `confidence != 0.90` assertion with an equality check against detect_origin_heuristic on the same samples. The original would have spuriously fired if the heuristic ever legitimately produced 0.90 for these samples; the new form pins the contract directly.
1 parent ac3679f commit ced9292

2 files changed

Lines changed: 15 additions & 10 deletions

File tree

mempalace/cli.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -156,12 +156,12 @@ def _run_pass_zero(project_dir, palace_dir, llm_provider) -> dict:
156156
tier1_prefix = "Tier-1 heuristic: "
157157
tier2_prefix = "Tier-2 LLM: "
158158
heuristic_evidence = [
159-
str(e) if str(e).startswith(tier1_prefix) else f"{tier1_prefix}{e}"
160-
for e in result.evidence
159+
s if s.startswith(tier1_prefix) else f"{tier1_prefix}{s}"
160+
for s in (str(e) for e in result.evidence)
161161
]
162162
llm_evidence = [
163-
str(e) if str(e).startswith(tier2_prefix) else f"{tier2_prefix}{e}"
164-
for e in llm_result.evidence
163+
s if s.startswith(tier2_prefix) else f"{tier2_prefix}{s}"
164+
for s in (str(e) for e in llm_result.evidence)
165165
]
166166
result.evidence = heuristic_evidence + llm_evidence
167167
except Exception as exc: # noqa: BLE001 — never block init on LLM failure

tests/test_corpus_origin_integration.py

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1484,12 +1484,17 @@ def test_merge_tier_fields_heuristic_yes_llm_no_keeps_heuristic_bool():
14841484
f"Got: {res}"
14851485
)
14861486
# The bool and the confidence are paired — both must come from the
1487-
# heuristic. The mocked LLM returned 0.90; if the merge accidentally
1488-
# took LLM's confidence, this would equal 0.90.
1489-
assert res["confidence"] != 0.90, (
1490-
f"Merged confidence equals the mocked LLM's 0.90 — looks like "
1491-
f"LLM's confidence leaked through the merge. Heuristic's confidence "
1492-
f"must be preserved alongside its bool. Got: {res}"
1487+
# heuristic. Compare to detect_origin_heuristic on the same samples
1488+
# so this stays correct regardless of what the heuristic computes
1489+
# for these samples (avoids brittleness vs. a hardcoded sentinel).
1490+
from mempalace.corpus_origin import detect_origin_heuristic
1491+
1492+
expected_confidence = detect_origin_heuristic(_ai_dialogue_samples()).confidence
1493+
assert res["confidence"] == expected_confidence, (
1494+
f"Merged confidence {res['confidence']} did not match the heuristic's "
1495+
f"{expected_confidence} for these samples. The mocked LLM returned "
1496+
f"0.90; if the merge accidentally took the LLM's confidence, the "
1497+
f"merged value would not equal the heuristic's. Got: {res}"
14931498
)
14941499
# Persona/user/platform from LLM should still be merged in.
14951500
assert res["agent_persona_names"] == [

0 commit comments

Comments (0)