@@ -195,6 +195,105 @@ async function measureOperation({ name, operation, iterations, skipWarmup = fals
195195 }
196196}
197197
/**
 * Measure GC pressure for an async operation over multiple iterations.
 * Tracks garbage collection duration per operation using PerformanceObserver.
 * Larger transient allocations (e.g., from unbounded cursor batch sizes) cause
 * more frequent and longer GC pauses, which this metric directly captures.
 * @param {Object} options Measurement options.
 * @param {string} options.name Name of the operation being measured.
 * @param {Function} options.operation Async function to measure.
 * @param {number} options.iterations Number of iterations to run.
 * @param {boolean} [options.skipWarmup=false] Skip warmup phase.
 * @returns {Promise<Object>} Result with the median max-GC-pause as `value`
 *   (ms), a min-max `range`, and p95/p99 plus sample counts in `extra`.
 * @throws {Error} If the effective iteration count is not a positive integer.
 */
async function measureMemoryOperation({ name, operation, iterations, skipWarmup = false }) {
  const { PerformanceObserver } = require('node:perf_hooks');

  // Override iterations if global ITERATIONS is set
  iterations = ITERATIONS || iterations;

  // Guard: the percentile math below indexes into the sample array and would
  // otherwise crash with "Cannot read properties of undefined (toFixed)" when
  // no iterations run (e.g., `iterations` omitted and ITERATIONS unset).
  if (!Number.isInteger(iterations) || iterations <= 0) {
    throw new Error(`Invalid iteration count for "${name}": ${iterations}`);
  }

  // Determine warmup count (20% of iterations)
  const warmupCount = skipWarmup ? 0 : Math.floor(iterations * 0.2);
  const gcDurations = [];

  if (warmupCount > 0) {
    logInfo(`Starting warmup phase of ${warmupCount} iterations...`);
    for (let i = 0; i < warmupCount; i++) {
      await operation();
    }
    logInfo('Warmup complete.');
  }

  // Measurement phase
  logInfo(`Starting measurement phase of ${iterations} iterations...`);
  const progressInterval = Math.ceil(iterations / 10);

  for (let i = 0; i < iterations; i++) {
    // Force GC before each iteration to start from a clean state
    if (typeof global.gc === 'function') {
      global.gc();
    }

    // Track GC events during this iteration; measure the longest single GC
    // pause, which reflects the production impact of large transient
    // allocations. The folding helper is shared between the async observer
    // callback and the synchronous takeRecords() flush below.
    let maxGcPause = 0;
    const foldMaxPause = entries => {
      for (const entry of entries) {
        if (entry.duration > maxGcPause) {
          maxGcPause = entry.duration;
        }
      }
    };
    const obs = new PerformanceObserver(list => foldMaxPause(list.getEntries()));
    obs.observe({ type: 'gc', buffered: false });

    await operation();

    // Flush any buffered entries before disconnecting to avoid data loss
    foldMaxPause(obs.takeRecords());
    obs.disconnect();
    gcDurations.push(maxGcPause);

    if (LOG_ITERATIONS) {
      logInfo(`Iteration ${i + 1}: ${maxGcPause.toFixed(2)} ms GC`);
    } else if ((i + 1) % progressInterval === 0 || i + 1 === iterations) {
      const progress = Math.round(((i + 1) / iterations) * 100);
      logInfo(`Progress: ${progress}%`);
    }
  }

  // Sort for percentile calculations
  gcDurations.sort((a, b) => a - b);

  // Filter outliers using IQR method
  const q1 = gcDurations[Math.floor(gcDurations.length * 0.25)];
  const q3 = gcDurations[Math.floor(gcDurations.length * 0.75)];
  const iqr = q3 - q1;
  const lowerBound = q1 - 1.5 * iqr;
  const upperBound = q3 + 1.5 * iqr;

  // The IQR band always retains the q1..q3 samples, so `filtered` is
  // non-empty and the index expressions below are safe.
  const filtered = gcDurations.filter(d => d >= lowerBound && d <= upperBound);

  const median = filtered[Math.floor(filtered.length * 0.5)];
  const p95 = filtered[Math.floor(filtered.length * 0.95)];
  const p99 = filtered[Math.floor(filtered.length * 0.99)];
  const min = filtered[0];
  const max = filtered[filtered.length - 1];

  return {
    name,
    value: median,
    unit: 'ms',
    range: `${min.toFixed(2)} - ${max.toFixed(2)}`,
    extra: `p95: ${p95.toFixed(2)} ms, p99: ${p99.toFixed(2)} ms, n=${filtered.length}/${gcDurations.length}`,
  };
}
296+
198297/**
199298 * Benchmark: Object Create
200299 */
@@ -525,6 +624,84 @@ async function benchmarkQueryWithIncludeNested(name) {
525624 } ) ;
526625}
527626
/**
 * Benchmark: Large Result Set GC Pressure
 * Measures max GC pause when querying many large documents, which is affected
 * by MongoDB cursor batch size configuration. Without a batch size limit,
 * the driver processes larger data chunks between yield points, creating more
 * garbage that triggers longer GC pauses.
 * @param {string} name Name under which the benchmark result is reported.
 * @returns {Promise<Object>} Measurement result from measureMemoryOperation.
 */
async function benchmarkLargeResultMemory(name) {
  const TestObject = Parse.Object.extend('BenchmarkLargeResult');
  const TOTAL_OBJECTS = 3_000;
  const SAVE_BATCH_SIZE = 200;
  const PADDING = 'x'.repeat(8000);

  // Seed data in batches; ~8 KB per document so 3,000 docs ≈ 24 MB total,
  // exceeding MongoDB's 16 MiB default batch limit to test cursor batching
  for (let start = 0; start < TOTAL_OBJECTS; start += SAVE_BATCH_SIZE) {
    const end = Math.min(start + SAVE_BATCH_SIZE, TOTAL_OBJECTS);
    const batch = [];
    for (let index = start; index < end; index++) {
      const obj = new TestObject();
      obj.set('category', index % 10);
      obj.set('value', index);
      obj.set('data', `padding-${index}-${PADDING}`);
      batch.push(obj);
    }
    await Parse.Object.saveAll(batch);
  }

  return measureMemoryOperation({
    name,
    iterations: 100,
    operation: async () => {
      const query = new Parse.Query('BenchmarkLargeResult');
      query.limit(TOTAL_OBJECTS);
      await query.find({ useMasterKey: true });
    },
  });
}
663+
/**
 * Benchmark: Concurrent Query GC Pressure
 * Measures max GC pause under concurrent load with large result sets.
 * Simulates production conditions where multiple clients query simultaneously,
 * compounding GC pressure from cursor batch sizes.
 * @param {string} name Name under which the benchmark result is reported.
 * @returns {Promise<Object>} Measurement result from measureMemoryOperation.
 */
async function benchmarkConcurrentQueryMemory(name) {
  const TestObject = Parse.Object.extend('BenchmarkConcurrentResult');
  const TOTAL_OBJECTS = 3_000;
  const SAVE_BATCH_SIZE = 200;
  const CONCURRENT_QUERIES = 10;
  const PADDING = 'x'.repeat(8000);

  // Seed data in batches; ~8 KB per document so 3,000 docs ≈ 24 MB total,
  // exceeding MongoDB's 16 MiB default batch limit to test cursor batching
  for (let start = 0; start < TOTAL_OBJECTS; start += SAVE_BATCH_SIZE) {
    const end = Math.min(start + SAVE_BATCH_SIZE, TOTAL_OBJECTS);
    const batch = [];
    for (let index = start; index < end; index++) {
      const obj = new TestObject();
      obj.set('category', index % 10);
      obj.set('value', index);
      obj.set('data', `padding-${index}-${PADDING}`);
      batch.push(obj);
    }
    await Parse.Object.saveAll(batch);
  }

  return measureMemoryOperation({
    name,
    iterations: 50,
    operation: async () => {
      // Launch all queries before awaiting so they run concurrently
      const pending = Array.from({ length: CONCURRENT_QUERIES }, () => {
        const query = new Parse.Query('BenchmarkConcurrentResult');
        query.limit(TOTAL_OBJECTS);
        return query.find({ useMasterKey: true });
      });
      await Promise.all(pending);
    },
  });
}
704+
528705/**
529706 * Run all benchmarks
530707 */
@@ -555,6 +732,8 @@ async function runBenchmarks() {
555732 { name : 'User.login' , fn : benchmarkUserLogin } ,
556733 { name : 'Query.include (parallel pointers)' , fn : benchmarkQueryWithIncludeParallel } ,
557734 { name : 'Query.include (nested pointers)' , fn : benchmarkQueryWithIncludeNested } ,
735+ { name : 'Query.find (large result, GC pressure)' , fn : benchmarkLargeResultMemory } ,
736+ { name : 'Query.find (concurrent, GC pressure)' , fn : benchmarkConcurrentQueryMemory } ,
558737 ] ;
559738
560739 // Run each benchmark with database cleanup
0 commit comments