adding rename scenario in tier-1
Signed-off-by: julpark <[email protected]>
julpark-rh committed Feb 21, 2025
1 parent 461047a commit 0eeefe8
Showing 8 changed files with 67 additions and 64 deletions.
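The change repeated across these files is a rename-and-restore pattern: each test renames the default volume at setup, runs against the new name, and renames it back to cephfs_new in a finally block so later suites still find the default volume. A minimal sketch of the pattern, assuming rename_volume wraps the ceph fs rename CLI (an assumption; the real FsUtils helper may build the command differently):

def rename_volume(client, old_name, new_name):
    # Assumed wrapper around the volume rename command; the actual
    # FsUtils helper in this repo may differ.
    client.exec_command(
        sudo=True,
        cmd=f"ceph fs rename {old_name} {new_name} --yes-i-really-mean-it",
    )

def run(ceph_cluster, **kw):
    client = ceph_cluster.get_ceph_objects("client")[0]
    rename_volume(client, "cephfs_new", "renamed_fs")
    try:
        # ... exercise subvolumes, snapshots, and mounts on "renamed_fs" ...
        return 0
    finally:
        # Restore the default name even when the test body fails.
        rename_volume(client, "renamed_fs", "cephfs_new")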
11 changes: 5 additions & 6 deletions tests/cephfs/cephfs_snapshot_management.py
@@ -1,6 +1,5 @@
import traceback

from ceph.ceph import CommandFailed
from tests.cephfs.cephfs_utilsV1 import FsUtils
from utility.log import Log

@@ -62,19 +61,18 @@ def run(ceph_cluster, **kw):
f"cephfs.{fs_name}.data" if not erasure else f"cephfs.{fs_name}.data-ec"
)
fs_details = fs_util.get_fs_info(client1, fs_name)

if not fs_details:
fs_util.create_fs(client1, fs_name)
new_name = "renamed_fs2"
fs_util.rename_volume(client1, fs_name, new_name)
fs_name = new_name
commands = [
f"ceph fs subvolumegroup create {fs_name} snap_group {pool_name}",
f"ceph fs subvolume create {fs_name} snap_vol --size 5368706371 --group_name snap_group",
]
for command in commands:
client1.exec_command(sudo=True, cmd=command)
results.append(f"{command} successfully executed")

if not fs_util.wait_for_mds_process(client1, f"{fs_name}"):
raise CommandFailed("Failed to start MDS daemons")
log.info("Get the path of sub volume")
subvol_path, rc = client1.exec_command(
sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} snap_vol snap_group"
@@ -136,7 +134,6 @@ def run(ceph_cluster, **kw):
f"ceph fs subvolume rm {fs_name} snap_vol --group_name snap_group",
f"ceph fs subvolumegroup rm {fs_name} snap_group",
"ceph config set mon mon_allow_pool_delete true",
f"ceph fs volume rm {fs_name} --yes-i-really-mean-it",
"rm -rf /mnt/mycephfs1",
]
for command in commands:
@@ -153,3 +150,5 @@ def run(ceph_cluster, **kw):
log.info(e)
log.info(traceback.format_exc())
return 1
finally:
fs_util.rename_volume(client1, new_name, "cephfs_new")
19 changes: 13 additions & 6 deletions tests/cephfs/cephfs_tier1_ops.py
@@ -70,7 +70,10 @@ def run(ceph_cluster, **kw):
return 1
fs_util.prepare_clients(clients, build)
fs_util.auth_list(clients)
default_fs = "cephfs"
default_fs = "cephfs_new"
new_name = "renamed_fs3"
fs_util.rename_volume(clients[0], default_fs, new_name)
default_fs = new_name
if build.startswith("4"):
# create EC pool
list_cmds = [
@@ -149,6 +152,7 @@ def run(ceph_cluster, **kw):
kernel_mounting_dir_1,
",".join(mon_node_ips),
sub_dir=f"{subvol_path.strip()}",
extra_params=f", fs = {default_fs}",
)

subvol_path, rc = clients[0].exec_command(
@@ -158,7 +162,7 @@ def run(ceph_cluster, **kw):
fs_util.fuse_mount(
[clients[0]],
fuse_mounting_dir_1,
extra_params=f" -r {subvol_path.strip()}",
extra_params=f" -r {subvol_path.strip()} --client_fs {default_fs}",
)

log.info(
@@ -176,6 +180,7 @@ def run(ceph_cluster, **kw):
kernel_mounting_dir_2,
",".join(mon_node_ips),
sub_dir=f"{subvol_path.strip()}",
extra_params=f",fs={default_fs}",
)

subvol_path, rc = clients[1].exec_command(
@@ -186,7 +191,7 @@ def run(ceph_cluster, **kw):
fs_util.fuse_mount(
[clients[1]],
fuse_mounting_dir_2,
extra_params=f" -r {subvol_path.strip()}",
extra_params=f" -r {subvol_path.strip()} --client_fs {default_fs}",
)

log.info(
@@ -360,7 +365,7 @@ def run(ceph_cluster, **kw):
fs_util.fuse_mount(
[clients[1]],
fuse_mounting_dir_5,
extra_params=f" -r {subvol_path.strip()}",
extra_params=f" -r {subvol_path.strip()} --client_fs {default_fs}",
)
clients[1].exec_command(
sudo=True,
@@ -507,9 +512,11 @@ def run(ceph_cluster, **kw):
fs_util.remove_subvolumegroup(clients[0], **subvolumegroup)
return 0
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
fs_util.rename_volume(clients[0], new_name, "cephfs_new")


def run_ios(client, mounting_dir):
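Once the volume is renamed, neither mount path can rely on the default filesystem, so both now pin it explicitly: kernel mounts through the fs=<name> option appended to extra_params, and ceph-fuse through --client_fs. A sketch of the two calls in the style this test uses (mount directories and subvolume paths are illustrative placeholders, not taken from the diff):

# Illustrative paths; the real test derives them from "ceph fs subvolume getpath".
fs_util.kernel_mount(
    [clients[0]],
    "/mnt/cephfs_kernel",
    ",".join(mon_node_ips),
    sub_dir="/volumes/subvolgroup_1/subvol_1",
    extra_params=f",fs={default_fs}",  # kernel mount option pinning the filesystem
)
fs_util.fuse_mount(
    [clients[0]],
    "/mnt/cephfs_fuse",
    extra_params=f" -r /volumes/subvolgroup_1/subvol_2 --client_fs {default_fs}",
)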
14 changes: 10 additions & 4 deletions tests/cephfs/cephfs_volume_management.py
@@ -4,6 +4,7 @@

from ceph.ceph import CommandFailed
from tests.cephfs.cephfs_utils import FsUtils
from tests.cephfs.cephfs_utilsV1 import FsUtils as FsUtils_v1
from utility.log import Log

log = Log(__name__)
@@ -60,16 +61,19 @@ def run(ceph_cluster, **kw):
tc1 = "83573446"
log.info(f"Execution of testcase {tc1} started")
log.info("Create and list a volume")
fs_name_old = "cephfs_new"
commands = [
"ceph fs volume create cephfs_new",
"ceph fs ls | grep cephfs_new",
"ceph osd lspools | grep cephfs.cephfs_new",
"ceph fs volume ls | grep cephfs_new",
]

for command in commands:
client1.exec_command(sudo=True, cmd=command)
results.append(f"{command} successfully executed")
wait_for_process(client1, "cephfs_new")
new_name = "renamed_fs1"
fs_util_v1 = FsUtils_v1(ceph_cluster)
fs_util_v1.rename_volume(client1, fs_name_old, new_name)
commands = [
"ceph config set mon mon_allow_pool_delete true",
"ceph fs volume rm cephfs_new --yes-i-really-mean-it",
@@ -79,7 +83,7 @@ def run(ceph_cluster, **kw):
results.append(f"{command} successfully executed")

verifyremove_command = [
"ceph fs ls | grep cephfs_new",
f"ceph fs ls | grep {new_name}",
"ceph osd lspools | grep cephfs.cephfs_new",
]
for command in verifyremove_command:
@@ -94,6 +98,8 @@ def run(ceph_cluster, **kw):
return 0

except Exception as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
fs_util_v1.rename_volume(client1, new_name, "cephfs_new")
19 changes: 9 additions & 10 deletions tests/cephfs/client_authorize.py
@@ -84,16 +84,13 @@ def run(ceph_cluster, **kw):

test_data = kw.get("test_data")
fs_util = FsUtils(ceph_cluster, test_data=test_data)
erasure = (
FsUtils.get_custom_config_value(test_data, "erasure")
if test_data
else False
)
client = ceph_cluster.get_ceph_objects("client")
mon_node_ip = fs_util.get_mon_node_ips()
mon_node_ip = ",".join(mon_node_ip)
fs_count = 1
fs_name = "cephfs" if not erasure else "cephfs-ec"
fs_name_old = "cephfs_new"
fs_name = "renamed_fs4"
fs_util.rename_volume(client[0], fs_name_old, fs_name)
fs_details = fs_util.get_fs_info(client[0], fs_name)

if not fs_details:
@@ -442,10 +439,12 @@ def run(ceph_cluster, **kw):
return 0

except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
fs_util.rename_volume(client[0], fs_name, "cephfs_new")
19 changes: 9 additions & 10 deletions tests/cephfs/dir_pinning.py
@@ -34,11 +34,6 @@ def run(ceph_cluster, **kw):
fs_util = FsUtils(ceph_cluster)
test_data = kw.get("test_data")
fs_util_v1 = FsUtilsV1(ceph_cluster, test_data=test_data)
erasure = (
FsUtilsV1.get_custom_config_value(test_data, "erasure")
if test_data
else False
)
clients = ceph_cluster.get_ceph_objects("client")
build = config.get("build", config.get("rhbuild"))
client_info, rc = fs_util.get_clients(build)
@@ -55,7 +50,9 @@ def run(ceph_cluster, **kw):
tc = "11227"
dir_name = "dir"
log.info(f"Running cephfs {tc} test case")
fs_name = "cephfs" if not erasure else "cephfs-ec"
fs_name_old = "cephfs_new"
fs_name = "renamed_fs5"
fs_util_v1.rename_volume(client1[0], fs_name_old, fs_name)
fs_details = fs_util_v1.get_fs_info(clients[0], fs_name)

if not fs_details:
@@ -372,8 +369,8 @@ def run(ceph_cluster, **kw):
return 0

except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
log.info("Cleaning up!-----")
if client3[0].pkg_type != "deb" and client4[0].pkg_type != "deb":
rc_client = fs_util_v1.client_clean_up(
@@ -393,6 +390,8 @@ def run(ceph_cluster, **kw):
log.info("Cleaning up successfull")
return 1
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
fs_util_v1.rename_volume(client1[0], fs_name, "cephfs_new")
11 changes: 4 additions & 7 deletions tests/cephfs/fs_kernel_mount_options.py
@@ -34,12 +34,6 @@ def run(ceph_cluster, **kw):
log.info(f"MetaData Information {log.metadata} in {__name__}")
test_data = kw.get("test_data")
fs_util = FsUtils(ceph_cluster, test_data=test_data)
erasure = (
FsUtils.get_custom_config_value(test_data, "erasure")
if test_data
else False
)

config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
clients = ceph_cluster.get_ceph_objects("client")
@@ -204,7 +198,9 @@ def run(ceph_cluster, **kw):
umount_fs(clients[0], kernel_mounting_dir)

log.info("mount with recovery_session Options")
default_fs = "cephfs" if not erasure else "cephfs-ec"
fs_name_old = "cephfs_new"
default_fs = "renamed_fs8"
fs_util.rename_volume(clients[0], fs_name_old, default_fs)
fs_details = fs_util.get_fs_info(clients[0], default_fs)

if not fs_details:
@@ -295,3 +291,4 @@ def run(ceph_cluster, **kw):
finally:
log.info("Clean Up in progess")
fs_util.remove_subvolume(clients[0], **subvolume)
fs_util.rename_volume(clients[0], default_fs, "cephfs_new")
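The recovery_session step in this file exercises the kernel client's recover_session mount option: with recover_session=clean, a client that has been blocklisted discards its session state and reconnects automatically instead of staying unusable. A sketch of such a mount against the renamed volume, in the same helper style (mount directory illustrative; the option itself is standard kernel CephFS):

fs_util.kernel_mount(
    [clients[0]],
    "/mnt/cephfs_recover",
    ",".join(mon_node_ips),
    # recover_session=clean: auto-reclaim the session after a blocklist event
    extra_params=f",fs={default_fs},recover_session=clean",
)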
18 changes: 8 additions & 10 deletions tests/cephfs/no_recover_session_mount.py
@@ -34,11 +34,6 @@ def run(ceph_cluster, **kw):
log.info(f"Running cephfs {tc} test case")
test_data = kw.get("test_data")
fs_util = FsUtils(ceph_cluster, test_data=test_data)
erasure = (
FsUtils.get_custom_config_value(test_data, "erasure")
if test_data
else False
)
config = kw["config"]
clients = ceph_cluster.get_ceph_objects("client")
build = config.get("build", config.get("rhbuild"))
@@ -56,7 +51,9 @@ def run(ceph_cluster, **kw):
if "4." in rhbuild:
fs_name = "cephfs_new"
else:
fs_name = "cephfs" if not erasure else "cephfs-ec"
fs_name_old = "cephfs_new"
fs_name = "renamed_fs7"
fs_util.rename_volume(client1, fs_name_old, fs_name)
fs_details = fs_util.get_fs_info(client1, fs_name)

if not fs_details:
@@ -126,15 +123,16 @@ def run(ceph_cluster, **kw):
return 1

except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
log.info("Cleaning up")
fs_util.rename_volume(client1, fs_name, "cephfs_new")
mount_dir_2 = "/mnt/" + "".join(
secrets.choice(string.ascii_uppercase + string.digits) for i in range(5)
)
20 changes: 9 additions & 11 deletions tests/cephfs/subvolume_authorize.py
@@ -180,16 +180,12 @@ def run(ceph_cluster, **kw):

test_data = kw.get("test_data")
fs_util = FsUtils(ceph_cluster, test_data=test_data)
erasure = (
FsUtils.get_custom_config_value(test_data, "erasure")
if test_data
else False
)
client = ceph_cluster.get_ceph_objects("client")
mon_node_ip = fs_util.get_mon_node_ips()
mon_node_ip = ",".join(mon_node_ip)

fs_name = "cephfs" if not erasure else "cephfs-ec"
fs_name_old = "cephfs_new"
fs_name = "renamed_fs7"
fs_util.rename_volume(client[0], fs_name_old, fs_name)
fs_details = fs_util.get_fs_info(client[0], fs_name)

if not fs_details:
@@ -435,10 +431,12 @@ def run(ceph_cluster, **kw):
return 0

except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
log.error(e)
log.error(traceback.format_exc())
return 1
finally:
fs_util.rename_volume(client[0], fs_name, "cephfs_new")
