|
20 | 20 | */
|
21 | 21 | /*
|
22 | 22 | * Copyright (c) 2014 by Chunwei Chen. All rights reserved.
|
23 |
| - * Copyright (c) 2016 by Delphix. All rights reserved. |
| 23 | + * Copyright (c) 2019 by Delphix. All rights reserved. |
24 | 24 | */
|
25 | 25 |
|
26 | 26 | /*
|
@@ -209,6 +209,30 @@ static abd_stats_t abd_stats = {
|
209 | 209 | int zfs_abd_scatter_enabled = B_TRUE;
|
210 | 210 | unsigned zfs_abd_scatter_max_order = MAX_ORDER - 1;
|
211 | 211 |
|
| 212 | +/* |
| 213 | + * zfs_abd_scatter_min_size is the minimum allocation size to use scatter |
| 214 | + * ABD's. Smaller allocations will use linear ABD's, which use |
| 215 | + * zio_[data_]buf_alloc(). |
| 216 | + * |
| 217 | + * Scatter ABD's use at least one page each, so sub-page allocations waste |
| 218 | + * some space when allocated as scatter (e.g. 2KB scatter allocation wastes |
| 219 | + * half of each page). Using linear ABD's for small allocations means that |
| 220 | + * they will be put on slabs which contain many allocations. This can |
| 221 | + * improve memory efficiency, but it also makes it much harder for ARC |
| 222 | + * evictions to actually free pages, because all the buffers on one slab need |
| 223 | + * to be freed in order for the slab (and underlying pages) to be freed. |
| 224 | + * Typically, 512B and 1KB kmem caches have 16 buffers per slab, so it's |
| 225 | + * possible for them to actually waste more memory than scatter (one page per |
| 226 | + * buf = wasting 3/4 or 7/8th; one buf per slab = wasting 15/16th). |
| 227 | + * |
| 228 | + * Spill blocks are typically 512B and are heavily used on systems running |
| 229 | + * selinux with the default dnode size and the `xattr=sa` property set. |
| 230 | + * |
| 231 | + * By default we use linear allocations for 512B and 1KB, and scatter |
| 232 | + * allocations for larger (1.5KB and up). |
| 233 | + */ |
| 234 | +int zfs_abd_scatter_min_size = 512 * 3; |
| 235 | + |
212 | 236 | static kmem_cache_t *abd_cache = NULL;
|
213 | 237 | static kstat_t *abd_ksp;
|
214 | 238 |
|
@@ -581,7 +605,8 @@ abd_free_struct(abd_t *abd)
|
581 | 605 | abd_t *
|
582 | 606 | abd_alloc(size_t size, boolean_t is_metadata)
|
583 | 607 | {
|
584 |
| - if (!zfs_abd_scatter_enabled || size <= PAGESIZE) |
| 608 | + /* see the comment above zfs_abd_scatter_min_size */ |
| 609 | + if (!zfs_abd_scatter_enabled || size < zfs_abd_scatter_min_size) |
585 | 610 | return (abd_alloc_linear(size, is_metadata));
|
586 | 611 |
|
587 | 612 | VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
|
@@ -1532,6 +1557,9 @@ abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
|
1532 | 1557 | module_param(zfs_abd_scatter_enabled, int, 0644);
|
1533 | 1558 | MODULE_PARM_DESC(zfs_abd_scatter_enabled,
|
1534 | 1559 | "Toggle whether ABD allocations must be linear.");
|
| 1560 | +module_param(zfs_abd_scatter_min_size, int, 0644); |
| 1561 | +MODULE_PARM_DESC(zfs_abd_scatter_min_size, |
| 1562 | + "Minimum size of scatter allocations."); |
1535 | 1563 | /* CSTYLED */
|
1536 | 1564 | module_param(zfs_abd_scatter_max_order, uint, 0644);
|
1537 | 1565 | MODULE_PARM_DESC(zfs_abd_scatter_max_order,
|
|
0 commit comments