diff --git a/qemu/tests/cpu_device_hotplug_maximum.py b/qemu/tests/cpu_device_hotplug_maximum.py index d74e291fbbe44ed064f0ed00e337bab105158922..c2c40d4a6798ca724662d28595f3e23887d8a23c 100644 --- a/qemu/tests/cpu_device_hotplug_maximum.py +++ b/qemu/tests/cpu_device_hotplug_maximum.py @@ -95,12 +95,14 @@ def run(test, params, env): if not cpu_utils.check_guest_cpu_topology(session, os_type, cpuinfo): test.fail("CPU topology of guest is not as expected after reboot.") - error_context.context("Hotunplug all vCPU devices", logging.info) - for vcpu_device in reversed(vcpu_devices): - vm.hotunplug_vcpu_device(vcpu_device) - if not utils_misc.wait_for(lambda: vm.get_cpu_count() == smp, - verify_wait_timeout, first=5, step=10): - logging.error(not_equal_text, vm.get_cpu_count(), smp) - test.fail(mismatch_text) - logging.info("CPU quantity is as expected after hotunplug: %s", smp) + # aarch64 does not support vcpu hot-unplug yet. + if platform.machine() != 'aarch64': + error_context.context("Hotunplug all vCPU devices", logging.info) + for vcpu_device in reversed(vcpu_devices): + vm.hotunplug_vcpu_device(vcpu_device) + if not utils_misc.wait_for(lambda: vm.get_cpu_count() == smp, + verify_wait_timeout, first=5, step=10): + logging.error(not_equal_text, vm.get_cpu_count(), smp) + test.fail(mismatch_text) + logging.info("CPU quantity is as expected after hotunplug: %s", smp) session.close() diff --git a/qemu/tests/cpu_device_hotpluggable.py b/qemu/tests/cpu_device_hotpluggable.py index 02fadefe975e56966564a3d265b5fe446b42381a..eb5ca763e54e1530d8363d2bb106f8d8790af8a4 100644 --- a/qemu/tests/cpu_device_hotpluggable.py +++ b/qemu/tests/cpu_device_hotpluggable.py @@ -1,5 +1,5 @@ import logging - +import platform from provider import cpu_utils from aexpect import ShellCmdError @@ -103,8 +103,10 @@ def run(test, params, env): error_context.context("Pause guest to hotunplug all vcpu devices", logging.info) vm.pause() - sub_hotunplug() - error_context.context("Resume guest after 
hotunplug") +            # aarch64 does not support vcpu hot-unplug yet. +            if platform.machine() != 'aarch64': +                sub_hotunplug() +            error_context.context("Resume guest after hotunplug") vm.resume() login_timeout = params.get_numeric("login_timeout", 360) @@ -127,8 +129,9 @@ def run(test, params, env): pluggable_vcpu_dev = vcpu_devices pluggable_vcpu = vcpus_count * len(pluggable_vcpu_dev) else: -            pluggable_vcpu_dev = vcpu_devices[::-1] -            pluggable_vcpu = -(vcpus_count * len(pluggable_vcpu_dev)) +            if platform.machine() != 'aarch64': +                pluggable_vcpu_dev = vcpu_devices[::-1] +                pluggable_vcpu = -(vcpus_count * len(pluggable_vcpu_dev)) expected_vcpus = vm.get_cpu_count() + pluggable_vcpu if params.get("pause_vm_before_hotplug", "no") == "yes": @@ -154,7 +157,8 @@ def run(test, params, env): session.close() if ("hotunplug" in params.objects("sub_test_after_migrate") or sub_test_type == "pause_resume"): -            expected_vcpus -= pluggable_vcpu +            if platform.machine() != 'aarch64': +                expected_vcpus -= pluggable_vcpu if vm.is_alive(): session = vm.wait_for_login(timeout=login_timeout) diff --git a/qemu/tests/cpu_device_hotpluggable_with_numa.py b/qemu/tests/cpu_device_hotpluggable_with_numa.py index c84c345a7a5fab8d084d0efdef4a38258682a329..40a9b1bf49e125fe17f452643c307bc4f94e0d00 100644 --- a/qemu/tests/cpu_device_hotpluggable_with_numa.py +++ b/qemu/tests/cpu_device_hotpluggable_with_numa.py @@ -1,5 +1,6 @@ import re import logging +import platform from virttest import error_context from virttest import utils_package @@ -96,14 +97,15 @@ def run(test, params, env): test.error("Could not find node %s in guest." 
% node_id) logging.info("Number of each CPU in guest matches what we assign.") - for vcpu_dev in vcpu_devices[::-1]: - error_context.context("hotunplug vcpu device: %s" % vcpu_dev, - logging.info) - vm.hotunplug_vcpu_device(vcpu_dev) - if vm.get_cpu_count() != vm.cpuinfo.smp: - test.fail("Actual number of guest CPUs is not equal to the " - "expected.") - if get_guest_numa_cpus_info() != numa_before_plug: - logging.debug("Current guest numa info:\n%s", - session.cmd_output("numactl -H")) - test.fail("Numa info of guest is incorrect after vcpu hotunplug.") + if platform.machine() != 'aarch64': + for vcpu_dev in vcpu_devices[::-1]: + error_context.context("hotunplug vcpu device: %s" % vcpu_dev, + logging.info) + vm.hotunplug_vcpu_device(vcpu_dev) + if vm.get_cpu_count() != vm.cpuinfo.smp: + test.fail("Actual number of guest CPUs is not equal to the " + "expected.") + if get_guest_numa_cpus_info() != numa_before_plug: + logging.debug("Current guest numa info:\n%s", + session.cmd_output("numactl -H")) + test.fail("Numa info of guest is incorrect after vcpu hotunplug.") diff --git a/qemu/tests/cpu_device_hotpluggable_with_stress.py b/qemu/tests/cpu_device_hotpluggable_with_stress.py index 03b0bcc8ec342c9b69173ad64baf2d0214906de0..02435965c48a54918cd9e99f3c1853bfa19b645f 100644 --- a/qemu/tests/cpu_device_hotpluggable_with_stress.py +++ b/qemu/tests/cpu_device_hotpluggable_with_stress.py @@ -2,6 +2,7 @@ import re import time import random import logging +import platform from provider import cpu_utils @@ -88,14 +89,16 @@ def run(test, params, env): "%.2f%%" % (cpu_id, cpu_usage_rate)) logging.info("Usage rate of vCPU(%s) is: %.2f%%", cpu_id, cpu_usage_rate) - for vcpu_dev in vcpu_devices: - error_context.context("Hotunplug vcpu device: %s" % vcpu_dev, - logging.info) - vm.hotunplug_vcpu_device(vcpu_dev) - # Drift the running stress task to other vCPUs - time.sleep(random.randint(5, 10)) - if vm.get_cpu_count() != smp: - test.fail("Actual number of guest CPUs is not equal 
to expected") +        # aarch64 does not support vcpu hot-unplug yet. +        if platform.machine() != 'aarch64': +            for vcpu_dev in vcpu_devices: +                error_context.context("Hotunplug vcpu device: %s" % vcpu_dev, +                                      logging.info) +                vm.hotunplug_vcpu_device(vcpu_dev) +                # Drift the running stress task to other vCPUs +                time.sleep(random.randint(5, 10)) +            if vm.get_cpu_count() != smp: +                test.fail("Actual number of guest CPUs is not equal to expected") stress_tool.unload_stress() stress_tool.clean() else: