DOSE-386 Zfs_copies (openzfs#453)
DOSE-387 Zfs_create
DOSE-388 Zfs_destroy
DOSE-389 Zfs_diff
DOSE-390 Zfs_get
DOSE-391 Zfs_ids_to_path
DOSE-392 Zfs_inherit
DOSE-393 Zfs_jail
DOSE-394 Zfs_load-key
DOSE-395 Zfs_mount
nupur-agrawal-delphix authored Sep 20, 2021
1 parent c8b90e1 commit 8e14e4b
Showing 19 changed files with 212 additions and 50 deletions.
7 changes: 5 additions & 2 deletions scripts/zfs-tests.sh
@@ -49,6 +49,7 @@ ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
UNAME=$(uname -s)
ZOA_LOG="/var/zoa.log"
ZOA_OUTPUT="/var/zoa.stdout"

# Override some defaults if on FreeBSD
if [ "$UNAME" = "FreeBSD" ] ; then
@@ -613,10 +614,12 @@ if [ -n "$ZTS_OBJECT_STORE" ]; then
if [ -n "$ZETTA_CACHE_DEV" ]; then
dev=$(basename "$ZETTA_CACHE_DEV")
sudo -E /sbin/zfs_object_agent -vv -c "/dev/${dev}" \
--output-file=$ZOA_LOG >/dev/null 2>&1 &
--output-file=$ZOA_LOG 2>&1 | \
sudo tee $ZOA_OUTPUT > /dev/null &
else
sudo -E /sbin/zfs_object_agent -vv \
--output-file=$ZOA_LOG >/dev/null 2>&1 &
--output-file=$ZOA_LOG 2>&1 | \
sudo tee $ZOA_OUTPUT > /dev/null &
fi

# Verify connectivity before proceeding
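The hunk above stops discarding the object agent's console output and instead duplicates it into $ZOA_OUTPUT. A minimal standalone sketch of the same shell pattern, with a hypothetical daemon name standing in for /sbin/zfs_object_agent:

    # Merge stderr into stdout, let a root-owned tee persist the stream to a
    # file while discarding the terminal copy, and background the pipeline.
    some_daemon -vv 2>&1 | sudo tee /var/zoa.stdout > /dev/null &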
71 changes: 71 additions & 0 deletions tests/runfiles/object_store.run
@@ -43,6 +43,7 @@ tags = ['functional', 'acl', 'posix-sa']
# -------------------------------------------------------------
# TODO: Pool checkpoint functionality hasn't been implemented yet. Uncomment the
# below section once it's done.
# DOSE-596 - make 'zpool checkpoint' work with object store pools
# -------------------------------------------------------------
# [tests/functional/pool_checkpoint]
# tests = ['checkpoint_after_rewind', 'checkpoint_big_rewind',
@@ -211,3 +212,73 @@ tests = ['zfs_unshare_001_pos.ksh', 'zfs_unshare_002_pos.ksh',
'zfs_unshare_005_neg.ksh', 'zfs_unshare_006_pos.ksh',
'zfs_unshare_007_pos.ksh']
tags = ['functional', 'cli_root', 'zfs_unshare']

[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']

[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_crypt_combos', 'zfs_create_dryrun', 'zfs_create_nomount',
'zfs_create_verbose']
tags = ['functional', 'cli_root', 'zfs_create']

# -------------------------------------------------------------
# TODO: Add the following tests back after relevant bug fixes.
# zfs_clone_livelist_condense_and_disable - DOSE-650
# zfs_destroy_clone_livelist - DOSE-652
# -------------------------------------------------------------
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_clone_livelist_condense_races', 'zfs_clone_livelist_dedup',
'zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos',
'zfs_destroy_dev_removal', 'zfs_destroy_dev_removal_condense']
tags = ['functional', 'cli_root', 'zfs_destroy']

[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
'zfs_diff_types', 'zfs_diff_encrypted']
tags = ['functional', 'cli_root', 'zfs_diff']

[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']

[tests/functional/cli_root/zfs_ids_to_path]
tests = ['zfs_ids_to_path_001_pos']
tags = ['functional', 'cli_root', 'zfs_ids_to_path']

[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos',
'zfs_inherit_mountpoint']
tags = ['functional', 'cli_root', 'zfs_inherit']

[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_https', 'zfs_load-key_location', 'zfs_load-key_noop',
'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']

# -------------------------------------------------------------
# TODO: Add the following tests back after relevant bug fixes.
# zfs_mount_remount - DOSE-654
# -------------------------------------------------------------
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_009_neg', 'zfs_mount_010_neg', 'zfs_mount_011_neg',
'zfs_mount_012_pos', 'zfs_mount_all_001_pos', 'zfs_mount_encrypted',
'zfs_mount_all_fail', 'zfs_mount_all_mountpoints',
'zfs_mount_test_race']
tags = ['functional', 'cli_root', 'zfs_mount']
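Each group added to this runfile uses the same three-key layout; a sketch with a hypothetical test group that is not part of this commit:

    [tests/functional/cli_root/zfs_example]
    tests = ['zfs_example_001_pos', 'zfs_example_002_neg']
    tags = ['functional', 'cli_root', 'zfs_example']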
52 changes: 52 additions & 0 deletions tests/zfs-tests/include/libtest.shlib
@@ -1824,6 +1824,58 @@ function create_pool
return $?
}

# Return 0 if import completes successfully; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# -p <pool name> - REQUIRED
# -e "<extra options that need to be passed while importing pool>" - OPTIONAL

function import_pool
{
typeset pool=""
typeset extra_options=""

typeset opt=""
while getopts "p:e:" opt; do
case $opt in
p) pool=$OPTARG ;;
e) extra_options=$OPTARG ;;
esac
done

if [[ -z $pool ]]; then
log_note "Missing pool name."
return 1
fi

if poolexists $pool ; then
log_note "Pool named $pool already exists."
return 1
fi

if ! is_global_zone ; then
return 0
fi

typeset zpool_import_cmd=""
if use_object_store; then
zpool_import_cmd="zpool import \
-d $ZTS_BUCKET_NAME \
-o object-endpoint=$ZTS_OBJECT_ENDPOINT \
-o object-region=$ZTS_REGION \
-o object-credentials-profile=$ZTS_CREDS_PROFILE \
$extra_options \
$pool"
else
zpool_import_cmd="zpool import $extra_options $pool"
fi

log_note $zpool_import_cmd
$zpool_import_cmd

return $?
}
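A usage sketch for the import_pool helper above; the bare form matches how the zfs_create and zfs_destroy tests later in this commit call it, while the -e value here is illustrative only:

    # Import by pool name; the helper adds the object-store endpoint options
    # automatically when the suite is configured for an object store.
    log_must import_pool -p $TESTPOOL
    # Forward extra 'zpool import' options as a single -e string.
    log_must import_pool -p $TESTPOOL -e "-o readonly=on"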

# Return 0 if destroy successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
@@ -36,9 +36,11 @@
# DESCRIPTION:
# Verify "copies" property can be correctly set as 1,2 and 3 and different
# filesystem can have different value of "copies" property within the same pool.
# For object storage, verify copies can be set to 1.
#
# STRATEGY:
# 1. Create different filesystems with copies set as 1,2,3;
# 1. Create different filesystems with copies set as 1,2,3.
# For object storage, create filesystems with copies set to 1.
# 2. Verify that the "copies" property has been set correctly
#
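For context, the property under test is driven with the standard zfs CLI; a minimal sketch (dataset names are the suite's usual placeholders):

    # Create a filesystem with two copies of each block, then read the
    # property back; the second command is expected to print 2.
    zfs create -o copies=2 $TESTPOOL/$TESTFS1
    zfs get -H -o value copies $TESTPOOL/$TESTFS1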

@@ -60,19 +62,13 @@ log_onexit cleanup

fs=$TESTPOOL/$TESTFS
fs1=$TESTPOOL/$TESTFS1
fs2=$TESTPOOL/$TESTFS2
vol=$TESTPOOL/$TESTVOL
vol1=$TESTPOOL/$TESTVOL1
vol2=$TESTPOOL/$TESTVOL2

#
# Check the default value for copies property
#
for ds in $fs $vol; do
cmp_prop $ds 1
done
function test_zfs_create_with_copies
{
typeset val="$1"

for val in 1 2 3; do
log_must zfs create -o copies=$val $fs1
if is_global_zone; then
log_must zfs create -V $VOLSIZE -o copies=$val $vol1
@@ -84,33 +80,40 @@ for val in 1 2 3; do
cmp_prop $ds $val
done

for val2 in 3 2 1; do
log_must zfs create -o copies=$val2 $fs2
if is_global_zone; then
log_must zfs create -V $VOLSIZE -o copies=$val2 $vol2
block_device_wait
else
log_must zfs create -o copies=$val2 $vol2
fi
for ds in $fs2 $vol2; do
cmp_prop $ds $val2
log_must zfs destroy $ds
block_device_wait
done
done

for ds in $fs1 $vol1; do
log_must zfs destroy $ds
block_device_wait
done
}

done
function test_zfs_set_with_copies
{
typeset val="$1"

for val in 3 2 1; do
for ds in $fs $vol; do
log_must zfs set copies=$val $ds
cmp_prop $ds $val
done
}

#
# Check the default value for copies property
#
for ds in $fs $vol; do
cmp_prop $ds 1
done

if use_object_store; then
test_zfs_create_with_copies 1
test_zfs_set_with_copies 1
else
for val in 1 2 3; do
test_zfs_create_with_copies $val
done

for val in 3 2 1; do
test_zfs_set_with_copies $val
done
fi

log_pass "'copies' property with correct arguments works as expected. "
@@ -45,6 +45,11 @@

verify_runnable "both"

if use_object_store; then
log_unsupported "Multiple zfs copies inapplicable for object storage \
run."
fi

function cleanup
{
typeset val
@@ -45,6 +45,11 @@

verify_runnable "global"

if use_object_store; then
log_unsupported "Multiple zfs copies inapplicable for object storage \
run."
fi

function cleanup
{
if poolexists $TESTPOOL1; then
@@ -34,10 +34,14 @@

#
# DESCRIPTION:
# Verify that copies cannot be set to other value except for 1, 2 or 3
# Verify that copies cannot be set to any value other than 1, 2 or 3.
# For object store, verify that it can't be set to any other value
# except 1.
#
# STRATEGY:
# 1. Create filesystems with copies set as any value other than 1, 2 or 3
# 1. Create filesystems with copies set as any value other than 1, 2 or 3.
# For object store, create filesystems with copies set as any value
# other than 1.
# 2. Verify that the create operations fail
#

@@ -46,6 +50,9 @@ verify_runnable "both"
log_assert "Verify that copies property cannot be set to any value other than 1,2 or 3"

set -A badval 0 01 02 03 0 -1 -2 -3 10 20 30 4 5 6 blah
if use_object_store; then
badval+=(2 3)
fi

for val in ${badval[@]}; do
log_mustnot zfs create -o copies=$val $TESTPOOL/$TESTFS1
@@ -43,6 +43,11 @@

verify_runnable "global"

if use_object_store; then
log_unsupported "Multiple zfs copies inapplicable for object storage \
run."
fi

function cleanup
{
if poolexists $ZPOOL_VERSION_1_NAME; then
@@ -45,6 +45,11 @@

verify_runnable "global"

if use_object_store; then
log_unsupported "Multiple zfs copies inapplicable for object storage \
run."
fi

function cleanup
{
if ismounted $mntp $NEWFS_DEFAULT_FS ; then
@@ -54,6 +54,6 @@ log_assert "'zfs create <filesystem>' can create a ZFS filesystem with name leng
log_must zfs create $TESTPOOL/$TESTFS1
log_mustnot zfs create $TESTPOOL/$TESTFS2
log_must zpool export $TESTPOOL
log_must zpool import $TESTPOOL
log_must import_pool -p $TESTPOOL

log_pass "'zfs create <filesystem>' works as expected."
@@ -67,7 +67,7 @@ function export_race
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/out
done
log_must zpool export $TESTPOOL
log_must zpool import $TESTPOOL
log_must import_pool -p $TESTPOOL
[[ "1" == "$(get_tunable "$1")" ]] || \
log_fail "export/condense race test failed"
log_must zfs destroy $TESTPOOL/$TESTCLONE
@@ -28,6 +28,10 @@
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/removal/removal.kshlib

if use_object_store; then
log_unsupported "Device removal inapplicable for object storage run."
fi

function cleanup
{
poolexists $TESTPOOL2 && zpool destroy $TESTPOOL2
@@ -34,6 +34,10 @@
. $STF_SUITE/tests/functional/removal/removal.kshlib
. $STF_SUITE/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg

if use_object_store; then
log_unsupported "Device removal inapplicable for object storage run."
fi

function cleanup
{
poolexists $TESTPOOL2 && zpool destroy $TESTPOOL2
@@ -71,8 +71,9 @@ log_must key_available $TESTPOOL/$TESTFS1
log_mustnot eval "echo $PASSPHRASE | zfs load-key $TESTPOOL/$TESTFS1"

typeset DISK2="$(echo $DISKS | awk '{ print $2 }')"
log_must eval "echo $PASSPHRASE | zpool create -O encryption=on" \
"-O keyformat=passphrase -O keylocation=prompt $TESTPOOL1 $DISK2"
log_must eval "echo $PASSPHRASE | create_pool -e \"-O encryption=on " \
"-O keyformat=passphrase -O keylocation=prompt\" -p $TESTPOOL1 " \
" -d \"$DISK2\""

log_must zfs unmount $TESTPOOL1
log_must zfs unload-key $TESTPOOL1
@@ -59,8 +59,8 @@ log_must zfs create -V 64M -o encryption=on -o keyformat=passphrase \
-o keylocation=file:///$TESTPOOL/pkey $TESTPOOL/zvol

typeset DISK2="$(echo $DISKS | awk '{ print $2}')"
log_must zpool create -O encryption=on -O keyformat=passphrase \
-O keylocation=file:///$TESTPOOL/pkey $TESTPOOL1 $DISK2
log_must create_pool -e "-O encryption=on -O keyformat=passphrase \
-O keylocation=file:///$TESTPOOL/pkey" -p $TESTPOOL1 -d "$DISK2"

log_must zfs unmount $TESTPOOL/$TESTFS1
log_must zfs unload-key $TESTPOOL/$TESTFS1
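The last two hunks route encrypted-pool creation through the create_pool wrapper so that object-store runs pick up the right vdev and endpoint arguments. A generic sketch of the wrapper's option style as used in these tests (the -e value here is illustrative only):

    # -p names the pool, -d supplies the disk(s), and -e forwards extra
    # 'zpool create' options as one quoted string.
    log_must create_pool -p $TESTPOOL1 -d "$DISK2" -e "-O compression=on"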