"""Test runInChildContext with large data exceeding individual step limits."""

from typing import Any

from aws_durable_execution_sdk_python.context import (
    DurableContext,
    durable_with_child_context,
)
from aws_durable_execution_sdk_python.execution import durable_execution


def generate_large_string(size_in_kb: int) -> str:
    """Generate a string of exactly the specified size in KB (1 KB = 1024 characters)."""
    target_size = size_in_kb * 1024  # convert KB to characters (one byte each)
    base_string = "A" * 1000  # 1000-character building block
    repetitions = target_size // 1000
    remainder = target_size % 1000

    return base_string * repetitions + "A" * remainder
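
# Worked example: generate_large_string(50) targets 50 * 1024 = 51_200
# characters, built from 51 full 1000-character blocks plus a 200-character
# remainder (51_200 = 51 * 1000 + 200).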


@durable_with_child_context
def large_data_processor(child_context: DurableContext) -> dict[str, Any]:
    """Process large data in a child context."""
    # Generate data in a loop; each step returns ~50 KB, safely under the
    # per-step payload limit
    step_results: list[str] = []
    step_sizes: list[int] = []

    for i in range(1, 6):  # steps 1 through 5
        step_result: str = child_context.step(
            lambda _: generate_large_string(50),  # ~50 KB per step
            name=f"generate-data-{i}",
        )

        step_results.append(step_result)
        step_sizes.append(len(step_result))

    # Concatenate all results; the total is ~250 KB
    concatenated_result = "".join(step_results)

    return {
        "totalSize": len(concatenated_result),
        "sizeInKB": round(len(concatenated_result) / 1024),
        "data": concatenated_result,
        "stepSizes": step_sizes,
    }
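
# Note on the pattern above: each step checkpoints only its own ~50 KB
# result, so no individual step payload approaches the limit; the child
# context then returns the ~250 KB aggregate to the parent in one piece.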


@durable_execution
def handler(_event: Any, context: DurableContext) -> dict[str, Any]:
    """Handler demonstrating runInChildContext with large data."""
    # Use run_in_child_context to handle large data that would exceed the
    # 256 KB step limit if produced by a single step
    large_data_result: dict[str, Any] = context.run_in_child_context(
        large_data_processor(), name="large-data-processor"
    )

    # Wait after run_in_child_context to test persistence across invocations
    context.wait(seconds=1, name="post-processing-wait")

    # Verify the data is still intact after the wait
    data_integrity_check = (
        len(large_data_result["data"]) == large_data_result["totalSize"]
        and len(large_data_result["data"]) > 0
    )

    return {
        "success": True,
        "message": "Successfully processed large data exceeding individual step limits using runInChildContext",
        "dataIntegrityCheck": data_integrity_check,
        "summary": {
            "totalDataSize": large_data_result["sizeInKB"],
            "stepsExecuted": 5,
            "childContextUsed": True,
            "waitExecuted": True,
            "dataPreservedAcrossWait": data_integrity_check,
        },
    }
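

# Illustrative local check: a minimal sketch, not part of the durable test.
# It exercises only the pure helper above, since running the handler itself
# requires the durable execution runtime to supply a real DurableContext.
if __name__ == "__main__":
    sample = generate_large_string(50)
    assert len(sample) == 50 * 1024  # exactly 51_200 characters
    print(f"sample size: {len(sample) / 1024:.0f} KB")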