Skip to content

Commit 4fbdb10

Browse files
authored
remove kmem_cache module parameter KMC_EXPIRE_AGE
By default, `spl_kmem_cache_expire` is `KMC_EXPIRE_MEM`, meaning that objects will be removed from kmem cache magazines by `spl_kmem_cache_reap_now()`. There is also a module parameter to change this to `KMC_EXPIRE_AGE`, which establishes a maximum lifetime for objects to stay in the magazine. This setting has rarely, if ever, been used, and is not regularly tested. This commit removes the code for `KMC_EXPIRE_AGE` and its associated module parameters. Additionally, the unused module parameter `spl_kmem_cache_obj_per_slab_min` is removed.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Matthew Ahrens <[email protected]>
Closes #10608
1 parent 02fced3 commit 4fbdb10

File tree

2 files changed

+9
-128
lines changed

2 files changed

+9
-128
lines changed

include/os/linux/spl/sys/kmem_cache.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -85,12 +85,8 @@ typedef enum kmem_cbrc {
8585
#define KMC_REAP_CHUNK INT_MAX
8686
#define KMC_DEFAULT_SEEKS 1
8787

88-
#define KMC_EXPIRE_AGE 0x1 /* Due to age */
89-
#define KMC_EXPIRE_MEM 0x2 /* Due to low memory */
90-
9188
#define KMC_RECLAIM_ONCE 0x1 /* Force a single shrinker pass */
9289

93-
extern unsigned int spl_kmem_cache_expire;
9490
extern struct list_head spl_kmem_cache_list;
9591
extern struct rw_semaphore spl_kmem_cache_sem;
9692

@@ -99,9 +95,7 @@ extern struct rw_semaphore spl_kmem_cache_sem;
9995
#define SKS_MAGIC 0x22222222
10096
#define SKC_MAGIC 0x2c2c2c2c
10197

102-
#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */
10398
#define SPL_KMEM_CACHE_OBJ_PER_SLAB 8 /* Target objects per slab */
104-
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 1 /* Minimum objects per slab */
10599
#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */
106100
#ifdef _LP64
107101
#define SPL_KMEM_CACHE_MAX_SIZE 32 /* Max slab size in MB */
@@ -131,7 +125,6 @@ typedef struct spl_kmem_magazine {
131125
uint32_t skm_size; /* Magazine size */
132126
uint32_t skm_refill; /* Batch refill size */
133127
struct spl_kmem_cache *skm_cache; /* Owned by cache */
134-
unsigned long skm_age; /* Last cache access */
135128
unsigned int skm_cpu; /* Owned by cpu */
136129
void *skm_objs[0]; /* Object pointers */
137130
} spl_kmem_magazine_t;
@@ -181,7 +174,6 @@ typedef struct spl_kmem_cache {
181174
uint32_t skc_obj_align; /* Object alignment */
182175
uint32_t skc_slab_objs; /* Objects per slab */
183176
uint32_t skc_slab_size; /* Slab size */
184-
uint32_t skc_delay; /* Slab reclaim interval */
185177
atomic_t skc_ref; /* Ref count callers */
186178
taskqid_t skc_taskqid; /* Slab reclaim task */
187179
struct list_head skc_list; /* List of caches linkage */

module/os/linux/spl/spl-kmem-cache.c

Lines changed: 9 additions & 120 deletions
Original file line numberDiff line numberDiff line change
@@ -57,20 +57,7 @@
5757
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
5858
#endif
5959

60-
/*
61-
* Cache expiration was implemented because it was part of the default Solaris
62-
* kmem_cache behavior. The idea is that per-cpu objects which haven't been
63-
* accessed in several seconds should be returned to the cache. On the other
64-
* hand Linux slabs never move objects back to the slabs unless there is
65-
* memory pressure on the system. By default the Linux method is enabled
66-
* because it has been shown to improve responsiveness on low memory systems.
67-
* This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
68-
*/
6960
/* BEGIN CSTYLED */
70-
unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
71-
EXPORT_SYMBOL(spl_kmem_cache_expire);
72-
module_param(spl_kmem_cache_expire, uint, 0644);
73-
MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
7461

7562
/*
7663
* Cache magazines are an optimization designed to minimize the cost of
@@ -106,11 +93,6 @@ unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
10693
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
10794
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
10895

109-
unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
110-
module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
111-
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
112-
"Minimal number of objects per slab");
113-
11496
unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
11597
module_param(spl_kmem_cache_max_size, uint, 0644);
11698
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
@@ -590,102 +572,22 @@ spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
590572
* argument contains the max number of entries to remove from the magazine.
591573
*/
592574
static void
593-
__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
575+
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
594576
{
595-
int i, count = MIN(flush, skm->skm_avail);
577+
spin_lock(&skc->skc_lock);
596578

597579
ASSERT(skc->skc_magic == SKC_MAGIC);
598580
ASSERT(skm->skm_magic == SKM_MAGIC);
599581

600-
for (i = 0; i < count; i++)
582+
int count = MIN(flush, skm->skm_avail);
583+
for (int i = 0; i < count; i++)
601584
spl_cache_shrink(skc, skm->skm_objs[i]);
602585

603586
skm->skm_avail -= count;
604587
memmove(skm->skm_objs, &(skm->skm_objs[count]),
605588
sizeof (void *) * skm->skm_avail);
606-
}
607-
608-
static void
609-
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
610-
{
611-
spin_lock(&skc->skc_lock);
612-
__spl_cache_flush(skc, skm, flush);
613-
spin_unlock(&skc->skc_lock);
614-
}
615-
616-
static void
617-
spl_magazine_age(void *data)
618-
{
619-
spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
620-
spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
621-
622-
ASSERT(skm->skm_magic == SKM_MAGIC);
623-
ASSERT(skm->skm_cpu == smp_processor_id());
624-
ASSERT(irqs_disabled());
625-
626-
/* There are no available objects or they are too young to age out */
627-
if ((skm->skm_avail == 0) ||
628-
time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
629-
return;
630-
631-
/*
632-
* Because we're executing in interrupt context we may have
633-
* interrupted the holder of this lock. To avoid a potential
634-
* deadlock return if the lock is contended.
635-
*/
636-
if (!spin_trylock(&skc->skc_lock))
637-
return;
638-
639-
__spl_cache_flush(skc, skm, skm->skm_refill);
640-
spin_unlock(&skc->skc_lock);
641-
}
642589

643-
/*
644-
* Called regularly to keep a downward pressure on the cache.
645-
*
646-
* Objects older than skc->skc_delay seconds in the per-cpu magazines will
647-
* be returned to the caches. This is done to prevent idle magazines from
648-
* holding memory which could be better used elsewhere. The delay is
649-
* present to prevent thrashing the magazine.
650-
*
651-
* The newly released objects may result in empty partial slabs. Those
652-
* slabs should be released to the system. Otherwise moving the objects
653-
* out of the magazines is just wasted work.
654-
*/
655-
static void
656-
spl_cache_age(void *data)
657-
{
658-
spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
659-
taskqid_t id = 0;
660-
661-
ASSERT(skc->skc_magic == SKC_MAGIC);
662-
663-
/* Dynamically disabled at run time */
664-
if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
665-
return;
666-
667-
atomic_inc(&skc->skc_ref);
668-
669-
if (!(skc->skc_flags & KMC_NOMAGAZINE))
670-
on_each_cpu(spl_magazine_age, skc, 1);
671-
672-
spl_slab_reclaim(skc);
673-
674-
while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
675-
id = taskq_dispatch_delay(
676-
spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
677-
ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
678-
679-
/* Destroy issued after dispatch immediately cancel it */
680-
if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
681-
taskq_cancel_id(spl_kmem_cache_taskq, id);
682-
}
683-
684-
spin_lock(&skc->skc_lock);
685-
skc->skc_taskqid = id;
686590
spin_unlock(&skc->skc_lock);
687-
688-
atomic_dec(&skc->skc_ref);
689591
}
690592

691593
/*
@@ -789,7 +691,6 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
789691
skm->skm_size = skc->skc_mag_size;
790692
skm->skm_refill = skc->skc_mag_refill;
791693
skm->skm_cache = skc;
792-
skm->skm_age = jiffies;
793694
skm->skm_cpu = cpu;
794695
}
795696

@@ -921,7 +822,6 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
921822
skc->skc_flags = flags;
922823
skc->skc_obj_size = size;
923824
skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
924-
skc->skc_delay = SPL_KMEM_CACHE_DELAY;
925825
atomic_set(&skc->skc_ref, 0);
926826

927827
INIT_LIST_HEAD(&skc->skc_list);
@@ -1036,12 +936,6 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
1036936
skc->skc_flags |= KMC_NOMAGAZINE;
1037937
}
1038938

1039-
if (spl_kmem_cache_expire & KMC_EXPIRE_AGE) {
1040-
skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
1041-
spl_cache_age, skc, TQ_SLEEP,
1042-
ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
1043-
}
1044-
1045939
down_write(&spl_kmem_cache_sem);
1046940
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
1047941
up_write(&spl_kmem_cache_sem);
@@ -1499,7 +1393,6 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
14991393
if (likely(skm->skm_avail)) {
15001394
/* Object available in CPU cache, use it */
15011395
obj = skm->skm_objs[--skm->skm_avail];
1502-
skm->skm_age = jiffies;
15031396
} else {
15041397
obj = spl_cache_refill(skc, skm, flags);
15051398
if ((obj == NULL) && !(flags & KM_NOSLEEP))
@@ -1629,15 +1522,11 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
16291522
goto out;
16301523

16311524
/* Reclaim from the magazine and free all now empty slabs. */
1632-
if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
1633-
spl_kmem_magazine_t *skm;
1634-
unsigned long irq_flags;
1635-
1636-
local_irq_save(irq_flags);
1637-
skm = skc->skc_mag[smp_processor_id()];
1638-
spl_cache_flush(skc, skm, skm->skm_avail);
1639-
local_irq_restore(irq_flags);
1640-
}
1525+
unsigned long irq_flags;
1526+
local_irq_save(irq_flags);
1527+
spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
1528+
spl_cache_flush(skc, skm, skm->skm_avail);
1529+
local_irq_restore(irq_flags);
16411530

16421531
spl_slab_reclaim(skc);
16431532
clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);

0 commit comments

Comments
 (0)