
Commit 9805730

Guarantee that spa_load_guid is unique

The zpool reguid feature introduced spa_load_guid, a transient value used for runtime identification purposes in the ARC. This value is not the same as the spa's persistent pool guid. However, the value is seeded from spa_generate_guid(), which does not check for uniqueness against the spa_load_guid of other pools. Although extremely rare, you can end up with two different pools sharing the same spa_load_guid value!

This change guarantees that the value is always unique and, additionally, not still in use by an async ARC flush task.

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Signed-off-by: Don Brady <[email protected]>
1 parent 5dc29a6 commit 9805730
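
In short, the fix stops seeding spa_load_guid from a raw random draw and instead keeps generating candidates until one is demonstrably unused. A condensed view of the approach (the full helper is in the module/zfs/spa_misc.c hunk below; this sketch only abbreviates it with added comments):

    uint64_t
    spa_generate_load_guid(void)
    {
        uint64_t guid;

        do {
            /* draw a random 64-bit candidate */
            (void) random_get_pseudo_bytes((void *)&guid, sizeof (guid));
            /*
             * Retry while the candidate is zero, matches the
             * spa_load_guid of an imported pool, or is still referenced
             * by a pending async ARC flush task.
             */
        } while (guid == 0 || spa_load_guid_exists(guid));

        return (guid);
    }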

5 files changed, 134 insertions(+), 17 deletions(-)


include/sys/arc.h

Lines changed: 1 addition & 0 deletions
@@ -332,6 +332,7 @@ void arc_flush(spa_t *spa, boolean_t retry);
 void arc_flush_async(spa_t *spa);
 void arc_tempreserve_clear(uint64_t reserve);
 int arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg);
+boolean_t arc_async_flush_guid_inuse(uint64_t load_guid);
 
 uint64_t arc_all_memory(void);
 uint64_t arc_default_max(uint64_t min, uint64_t allmem);

include/sys/spa.h

Lines changed: 1 addition & 0 deletions
@@ -1102,6 +1102,7 @@ extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
 extern char *spa_strdup(const char *);
 extern void spa_strfree(char *);
 extern uint64_t spa_generate_guid(spa_t *spa);
+extern uint64_t spa_generate_load_guid(void);
 extern void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp);
 extern void spa_freeze(spa_t *spa);
 extern int spa_change_guid(spa_t *spa, const uint64_t *guidp);

module/zfs/arc.c

Lines changed: 103 additions & 16 deletions
@@ -776,6 +776,23 @@ static buf_hash_table_t buf_hash_table;
 
 uint64_t zfs_crc64_table[256];
 
+/*
+ * Asynchronous ARC flush
+ *
+ * We track these in a list for arc_async_flush_guid_inuse().
+ * Used for both L1 and L2 async teardown.
+ */
+static list_t arc_async_flush_list;
+static kmutex_t arc_async_flush_lock;
+
+typedef struct arc_async_flush {
+    uint64_t af_spa_guid;
+    taskq_ent_t af_tqent;
+    uint_t af_cache_level;    /* 1 or 2 to differentiate node */
+    list_node_t af_node;
+} arc_async_flush_t;
+
+
 /*
  * Level 2 ARC
  */
@@ -4423,19 +4440,52 @@ arc_flush(spa_t *spa, boolean_t retry)
     arc_flush_impl(spa != NULL ? spa_load_guid(spa) : 0, retry);
 }
 
+static arc_async_flush_t *
+arc_async_flush_add(uint64_t spa_guid, uint_t level)
+{
+    arc_async_flush_t *af = kmem_alloc(sizeof (*af), KM_SLEEP);
+    af->af_spa_guid = spa_guid;
+    af->af_cache_level = level;
+    taskq_init_ent(&af->af_tqent);
+    list_link_init(&af->af_node);
+
+    mutex_enter(&arc_async_flush_lock);
+    list_insert_tail(&arc_async_flush_list, af);
+    mutex_exit(&arc_async_flush_lock);
+
+    return (af);
+}
+
+static void
+arc_async_flush_remove(uint64_t spa_guid, uint_t level)
+{
+    mutex_enter(&arc_async_flush_lock);
+    for (arc_async_flush_t *af = list_head(&arc_async_flush_list);
+        af != NULL; af = list_next(&arc_async_flush_list, af)) {
+        if (af->af_spa_guid == spa_guid &&
+            af->af_cache_level == level) {
+            list_remove(&arc_async_flush_list, af);
+            kmem_free(af, sizeof (*af));
+            break;
+        }
+    }
+    mutex_exit(&arc_async_flush_lock);
+}
+
 static void
 arc_flush_task(void *arg)
 {
-    uint64_t guid = *((uint64_t *)arg);
+    arc_async_flush_t *af = arg;
     hrtime_t start_time = gethrtime();
+    uint64_t spa_guid = af->af_spa_guid;
 
-    arc_flush_impl(guid, B_FALSE);
-    kmem_free(arg, sizeof (uint64_t *));
+    arc_flush_impl(spa_guid, B_FALSE);
+    arc_async_flush_remove(spa_guid, af->af_cache_level);
 
     uint64_t elaspsed = NSEC2MSEC(gethrtime() - start_time);
     if (elaspsed > 0) {
         zfs_dbgmsg("spa %llu arc flushed in %llu ms",
-            (u_longlong_t)guid, (u_longlong_t)elaspsed);
+            (u_longlong_t)spa_guid, (u_longlong_t)elaspsed);
     }
 }
 
@@ -4452,15 +4502,36 @@ arc_flush_task(void *arg)
 void
 arc_flush_async(spa_t *spa)
 {
-    uint64_t *guidp = kmem_alloc(sizeof (uint64_t *), KM_SLEEP);
+    uint64_t spa_guid = spa_load_guid(spa);
+    arc_async_flush_t *af = arc_async_flush_add(spa_guid, 1);
 
-    *guidp = spa_load_guid(spa);
+    /*
+     * Note that arc_flush_task() needs arc_async_flush_lock to remove af
+     * list node. So by holding the lock we avoid a race for af removal
+     * with our use here.
+     */
+    mutex_enter(&arc_async_flush_lock);
+    taskq_dispatch_ent(arc_flush_taskq, arc_flush_task,
+        af, TQ_SLEEP, &af->af_tqent);
+    mutex_exit(&arc_async_flush_lock);
+}
 
-    if (taskq_dispatch(arc_flush_taskq, arc_flush_task, guidp,
-        TQ_SLEEP) == TASKQID_INVALID) {
-        arc_flush_impl(*guidp, B_FALSE);
-        kmem_free(guidp, sizeof (uint64_t *));
+/*
+ * Check if a guid is still in-use as part of an async teardown task
+ */
+boolean_t
+arc_async_flush_guid_inuse(uint64_t spa_guid)
+{
+    mutex_enter(&arc_async_flush_lock);
+    for (arc_async_flush_t *af = list_head(&arc_async_flush_list);
+        af != NULL; af = list_next(&arc_async_flush_list, af)) {
+        if (af->af_spa_guid == spa_guid) {
+            mutex_exit(&arc_async_flush_lock);
+            return (B_TRUE);
+        }
     }
+    mutex_exit(&arc_async_flush_lock);
+    return (B_FALSE);
 }
 
 uint64_t
@@ -7801,6 +7872,9 @@ arc_init(void)
     arc_prune_taskq = taskq_create("arc_prune", zfs_arc_prune_task_threads,
         defclsyspri, 100, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
 
+    list_create(&arc_async_flush_list, sizeof (arc_async_flush_t),
+        offsetof(arc_async_flush_t, af_node));
+    mutex_init(&arc_async_flush_lock, NULL, MUTEX_DEFAULT, NULL);
     arc_flush_taskq = taskq_create("arc_flush", 75, defclsyspri,
         1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
 
@@ -7884,6 +7958,9 @@ arc_fini(void)
     taskq_wait(arc_prune_taskq);
     taskq_destroy(arc_prune_taskq);
 
+    list_destroy(&arc_async_flush_list);
+    mutex_destroy(&arc_async_flush_lock);
+
     mutex_enter(&arc_prune_mtx);
     while ((p = list_remove_head(&arc_prune_list)) != NULL) {
         (void) zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
@@ -9795,6 +9872,8 @@ typedef struct {
     l2arc_dev_t *rva_l2arc_dev;
     uint64_t rva_spa_gid;
     uint64_t rva_vdev_gid;
+    boolean_t rva_async;
+
 } remove_vdev_args_t;
 
 static void
@@ -9825,6 +9904,9 @@ l2arc_device_teardown(void *arg)
         (u_longlong_t)rva->rva_vdev_gid,
         (u_longlong_t)elaspsed);
     }
+
+    if (rva->rva_async)
+        arc_async_flush_remove(rva->rva_spa_gid, 2);
     kmem_free(rva, sizeof (remove_vdev_args_t));
 }
 
@@ -9850,7 +9932,7 @@ l2arc_remove_vdev(vdev_t *vd)
     remove_vdev_args_t *rva = kmem_alloc(sizeof (remove_vdev_args_t),
         KM_SLEEP);
     rva->rva_l2arc_dev = remdev;
-    rva->rva_spa_gid = spa_guid(remdev->l2ad_spa);
+    rva->rva_spa_gid = spa_load_guid(spa);
     rva->rva_vdev_gid = remdev->l2ad_vdev->vdev_guid;
 
     /*
@@ -9866,6 +9948,7 @@ l2arc_remove_vdev(vdev_t *vd)
         asynchronous = B_FALSE;
     }
     mutex_exit(&l2arc_rebuild_thr_lock);
+    rva->rva_async = asynchronous;
 
     /*
      * Remove device from global list
@@ -9883,13 +9966,17 @@ l2arc_remove_vdev(vdev_t *vd)
     }
     mutex_exit(&l2arc_dev_mtx);
 
-    /*
-     * If possible, the teardown is completed asynchronously
-     */
-    if (!asynchronous || taskq_dispatch(arc_flush_taskq,
-        l2arc_device_teardown, rva, TQ_SLEEP) == TASKQID_INVALID) {
+    if (!asynchronous) {
         l2arc_device_teardown(rva);
+        return;
     }
+
+    arc_async_flush_t *af = arc_async_flush_add(rva->rva_spa_gid, 2);
+
+    mutex_enter(&arc_async_flush_lock);
+    taskq_dispatch_ent(arc_flush_taskq, l2arc_device_teardown, rva,
+        TQ_SLEEP, &af->af_tqent);
+    mutex_exit(&arc_async_flush_lock);
 }
 
 void
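
Design note on the arc.c changes above: each async flush now carries an arc_async_flush_t tracking node so that arc_async_flush_guid_inuse() can report a load guid as busy until the flush task has finished. Dispatch also switches from taskq_dispatch() to taskq_dispatch_ent(). The sketch below is not part of the patch and contrasts the two calls; "tq", "func", and "arg" stand in for the real arguments:

    /*
     * taskq_dispatch() allocates a task entry internally and can fail,
     * which is why the old code needed a synchronous fallback:
     */
    if (taskq_dispatch(tq, func, arg, TQ_SLEEP) == TASKQID_INVALID)
        func(arg);    /* fall back to a synchronous flush */

    /*
     * taskq_dispatch_ent() uses the taskq_ent_t embedded in the
     * arc_async_flush_t node, so the dispatch cannot fail and the
     * fallback path (and its kmem_free() bookkeeping) goes away.
     */
    taskq_dispatch_ent(tq, func, arg, TQ_SLEEP, &af->af_tqent);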

module/zfs/spa_misc.c

Lines changed: 28 additions & 0 deletions
@@ -1588,6 +1588,34 @@ spa_generate_guid(spa_t *spa)
     return (guid);
 }
 
+static boolean_t
+spa_load_guid_exists(uint64_t guid)
+{
+    avl_tree_t *t = &spa_namespace_avl;
+
+    ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
+    for (spa_t *spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
+        if (spa_load_guid(spa) == guid)
+            return (B_TRUE);
+    }
+
+    return (arc_async_flush_guid_inuse(guid));
+}
+
+uint64_t
+spa_generate_load_guid(void)
+{
+    uint64_t guid;
+
+    do {
+        (void) random_get_pseudo_bytes((void *)&guid,
+            sizeof (guid));
+    } while (guid == 0 || spa_load_guid_exists(guid));
+
+    return (guid);
+}
+
 void
 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
 {

module/zfs/vdev.c

Lines changed: 1 addition & 1 deletion
@@ -647,7 +647,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
     if (spa->spa_root_vdev == NULL) {
         ASSERT(ops == &vdev_root_ops);
         spa->spa_root_vdev = vd;
-        spa->spa_load_guid = spa_generate_guid(NULL);
+        spa->spa_load_guid = spa_generate_load_guid();
     }
 
     if (guid == 0 && ops != &vdev_hole_ops) {
