#!/bin/ksh

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2019 by Tomohiro Kusumi. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_mount/zfs_mount.cfg

#
# DESCRIPTION:
# Verify parallel mount ordering is consistent.
#
# There was a bug in the initial thread dispatching algorithm which put
# threads in a race condition that resulted in an undefined mount order.
# The purpose of this test is to verify that `zfs unmount -a` succeeds
# (not that `zfs mount -a` succeeds; it always does) after `zfs mount -a`,
# which could fail if the threads raced. See the sketch below, and
# github.com/zfsonlinux/zfs/issues/{8450,8833,8878} for details.
#
# STRATEGY:
# 1. Create pools and filesystems.
# 2. Set the same mountpoint for more than one dataset.
# 3. Unmount all datasets.
# 4. Mount all datasets.
# 5. Unmount all datasets (verify this succeeds).
#

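# A minimal sketch of the failure mode this test targets (hypothetical
# session, assuming the dispatch bug is present; dataset names refer to
# the layout created below):
#
#   zfs mount -a     # TESTFS2 wins the race and mounts at $MNTPT/$TESTFS2,
#                    # then TESTFS1 mounts at $MNTPT on top of it
#   zfs unmount -a   # fails: TESTFS2's mount is shadowed by TESTFS1's
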
verify_runnable "both"

TMPDIR=${TMPDIR:-$TEST_BASE_DIR}
MNTPT=$TMPDIR/zfs_mount_test_race_mntpt
DISK1="$TMPDIR/zfs_mount_test_race_disk1"
DISK2="$TMPDIR/zfs_mount_test_race_disk2"

TESTPOOL1=zfs_mount_test_race_tp1
TESTPOOL2=zfs_mount_test_race_tp2

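# __ZFS_POOL_RESTRICT limits `zfs mount -a`/`zfs unmount -a` to the listed
# pools, so the rest of the test suite's datasets are left alone. As a
# sketch, assuming zfs_mount.cfg (sourced above) defines
# unmountall="unmount -a", the next command expands to:
#
#   zfs unmount -a   # restricted to $TESTPOOL1 and $TESTPOOL2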
export __ZFS_POOL_RESTRICT="$TESTPOOL1 $TESTPOOL2"
log_must zfs $unmountall
unset __ZFS_POOL_RESTRICT

function cleanup
{
	zpool destroy $TESTPOOL1
	zpool destroy $TESTPOOL2
	rm -rf $MNTPT
	rm -rf /$TESTPOOL1
	rm -rf /$TESTPOOL2
	rm -f $DISK1
	rm -f $DISK2
	export __ZFS_POOL_RESTRICT="$TESTPOOL1 $TESTPOOL2"
	log_must zfs $mountall
	unset __ZFS_POOL_RESTRICT
}
log_onexit cleanup

log_note "Verify parallel mount ordering is consistent"

log_must truncate -s $MINVDEVSIZE $DISK1
log_must truncate -s $MINVDEVSIZE $DISK2

log_must zpool create -f $TESTPOOL1 $DISK1
log_must zpool create -f $TESTPOOL2 $DISK2

log_must zfs create $TESTPOOL1/$TESTFS1
log_must zfs create $TESTPOOL2/$TESTFS2

log_must zfs set mountpoint=none $TESTPOOL1
log_must zfs set mountpoint=$MNTPT $TESTPOOL1/$TESTFS1

# Note that the unmount can fail (due to the race condition on
# `zfs mount -a`) with or without `canmount=off`. The race has nothing to
# do with the canmount property; we only turn it off for the convenience
# of the mount layout used in this test case.
log_must zfs set canmount=off $TESTPOOL2
log_must zfs set mountpoint=$MNTPT $TESTPOOL2

# At this point, the layout of the datasets in the two pools looks like the
# table below. Previously, on the next `zfs mount -a`, the pthreads assigned
# to TESTFS1 and TESTFS2 could race, and TESTFS2 usually (in fact always)
# won in ZoL. Note that the problem is how two or more threads could
# initially be assigned the same top level directory, not this specific
# layout; this layout is just one example that can reproduce the race, and
# is also the layout reported in #8833.
#
# NAME                   MOUNTED  MOUNTPOINT
# ----------------------------------------------
# /$TESTPOOL1            no       none
# /$TESTPOOL1/$TESTFS1   yes      $MNTPT
# /$TESTPOOL2            no       $MNTPT
# /$TESTPOOL2/$TESTFS2   yes      $MNTPT/$TESTFS2

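# To inspect this layout manually (a debugging aid, not part of the test;
# `mounted` and `mountpoint` are standard zfs properties):
#
#   zfs list -o name,mounted,mountpoint -r $TESTPOOL1 $TESTPOOL2
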
# Both datasets must be mounted at this point.
log_must ismounted $TESTPOOL1/$TESTFS1
log_must ismounted $TESTPOOL2/$TESTFS2
# This unmount always succeeds, because the potential race hasn't happened yet.
log_must zfs unmount -a
# This mount always succeeds, whether or not the threads raced.
log_must zfs mount -a

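# If the race occurred above, the mount stack is now wrong (hypothetical
# abbreviated `mount` output):
#
#   $TESTPOOL2/$TESTFS2 on $MNTPT/$TESTFS2   <- mounted first (wrong order)
#   $TESTPOOL1/$TESTFS1 on $MNTPT            <- shadows the mount above
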
# Verify the datasets are mounted (the TESTFS2 check fails if the race broke
# the mount order).
log_must ismounted $TESTPOOL1/$TESTFS1
log_must ismounted $TESTPOOL2/$TESTFS2
# Verify the unmount succeeds (it fails if the race broke the mount order).
log_must zfs unmount -a

log_pass "Verified that parallel mount ordering is consistent"