@@ -295,6 +295,48 @@ zio_fini(void)
295
295
* ==========================================================================
296
296
*/
297
297
298
#ifdef ZFS_DEBUG
/*
 * Pattern written into the unused tail of a zio buffer so that overflows
 * past the requested size can be detected on free.  Cast uses ulong_t to
 * match the declared type (was inconsistently written as u_long).
 */
static const ulong_t zio_buf_canary = (ulong_t)0xdeadc0dedead210b;
#endif
301
+
302
+ /*
303
+ * If we have spare memory at the end, put canary to detect buffer overflows.
304
+ */
305
+ static void
306
+ zio_buf_put_canary (ulong_t * p , size_t size , kmem_cache_t * * cache , size_t c )
307
+ {
308
+ #ifdef ZFS_DEBUG
309
+ size_t off = P2ROUNDUP (size , sizeof (ulong_t ));
310
+ ulong_t * canary = p + off / sizeof (ulong_t );
311
+ size_t asize = (c + 1 ) << SPA_MINBLOCKSHIFT ;
312
+ if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
313
+ cache [c ] == cache [c + 1 ])
314
+ asize = (c + 2 ) << SPA_MINBLOCKSHIFT ;
315
+ for (; off < asize ; canary ++ , off += sizeof (ulong_t ))
316
+ * canary = zio_buf_canary ;
317
+ #endif
318
+ }
319
+
320
+ static void
321
+ zio_buf_check_canary (ulong_t * p , size_t size , kmem_cache_t * * cache , size_t c )
322
+ {
323
+ #ifdef ZFS_DEBUG
324
+ size_t off = P2ROUNDUP (size , sizeof (ulong_t ));
325
+ ulong_t * canary = p + off / sizeof (ulong_t );
326
+ size_t asize = (c + 1 ) << SPA_MINBLOCKSHIFT ;
327
+ if (c + 1 < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT &&
328
+ cache [c ] == cache [c + 1 ])
329
+ asize = (c + 2 ) << SPA_MINBLOCKSHIFT ;
330
+ for (; off < asize ; canary ++ , off += sizeof (ulong_t )) {
331
+ if (unlikely (* canary != zio_buf_canary )) {
332
+ PANIC ("ZIO buffer overflow %p (%zu) + %zu %#lx != %#lx" ,
333
+ p , size , (canary - p ) * sizeof (ulong_t ),
334
+ * canary , zio_buf_canary );
335
+ }
336
+ }
337
+ #endif
338
+ }
339
+
298
340
/*
299
341
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
300
342
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
@@ -311,7 +353,9 @@ zio_buf_alloc(size_t size)
311
353
atomic_add_64 (& zio_buf_cache_allocs [c ], 1 );
312
354
#endif
313
355
314
- return (kmem_cache_alloc (zio_buf_cache [c ], KM_PUSHPAGE ));
356
+ void * p = kmem_cache_alloc (zio_buf_cache [c ], KM_PUSHPAGE );
357
+ zio_buf_put_canary (p , size , zio_buf_cache , c );
358
+ return (p );
315
359
}
316
360
317
361
/*
@@ -327,7 +371,9 @@ zio_data_buf_alloc(size_t size)
327
371
328
372
VERIFY3U (c , < , SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT );
329
373
330
- return (kmem_cache_alloc (zio_data_buf_cache [c ], KM_PUSHPAGE ));
374
+ void * p = kmem_cache_alloc (zio_data_buf_cache [c ], KM_PUSHPAGE );
375
+ zio_buf_put_canary (p , size , zio_data_buf_cache , c );
376
+ return (p );
331
377
}
332
378
333
379
void
@@ -340,6 +386,7 @@ zio_buf_free(void *buf, size_t size)
340
386
atomic_add_64 (& zio_buf_cache_frees [c ], 1 );
341
387
#endif
342
388
389
+ zio_buf_check_canary (buf , size , zio_buf_cache , c );
343
390
kmem_cache_free (zio_buf_cache [c ], buf );
344
391
}
345
392
@@ -350,6 +397,7 @@ zio_data_buf_free(void *buf, size_t size)
350
397
351
398
VERIFY3U (c , < , SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT );
352
399
400
+ zio_buf_check_canary (buf , size , zio_data_buf_cache , c );
353
401
kmem_cache_free (zio_data_buf_cache [c ], buf );
354
402
}
355
403
0 commit comments