Skip to content

Commit 4622838

Browse files
committed
tmp: enable huge pages by default
This change allows performance testing that compares huge pages with small pages. Signed-off-by: Patrick Roy <[email protected]>
1 parent 2795a8c commit 4622838

File tree

5 files changed

+13
-17
lines changed

5 files changed

+13
-17
lines changed

.buildkite/common.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,6 @@
1919
]
2020

2121
DEFAULT_PLATFORMS = [
22-
("al2", "linux_4.14"),
2322
("al2", "linux_5.10"),
2423
("al2023", "linux_6.1"),
2524
]

.buildkite/pipeline_perf.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -34,11 +34,6 @@
3434
"test_path": "integration_tests/performance/test_network_ab.py::test_network_tcp_throughput",
3535
"devtool_opts": "-c 1-10 -m 0",
3636
},
37-
"snapshot-latency": {
38-
"label": "📸 Snapshot Latency",
39-
"test_path": "integration_tests/performance/test_snapshot_ab.py",
40-
"devtool_opts": "-c 1-12 -m 0",
41-
},
4237
"vsock-throughput": {
4338
"label": "🧦 Vsock Throughput",
4439
"test_path": "integration_tests/performance/test_vsock_ab.py",

src/vmm/src/vmm_config/machine_config.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,11 @@ impl From<kernel_version::Error> for VmConfigError {
5353
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
5454
pub enum HugePageConfig {
5555
/// Do not use hugepages, e.g. back guest memory by 4K
56-
#[default]
5756
#[serde(alias = "4K")]
5857
None,
5958
/// Back guest memory by 2MB hugetlbfs pages
6059
#[serde(rename = "2M")]
60+
#[default]
6161
Hugetlbfs2M,
6262
/// Back guest memory by 1GB hugetlbfs
6363
#[serde(rename = "1G")]

tests/integration_tests/performance/test_memory_overhead.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,7 @@ def test_memory_overhead(
6565
ps = psutil.Process(microvm.firecracker_pid)
6666

6767
for pmmap in ps.memory_maps(grouped=False):
68+
print(pmmap)
6869
# We publish 'size' and 'rss' (resident). size would be the worst case,
6970
# whereas rss is the current paged-in memory.
7071

tools/devtool

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -691,18 +691,19 @@ cmd_test() {
691691

692692
apply_performance_tweaks
693693
fi
694+
fi
694695

695-
# It seems that even if the tests using huge pages run sequentially on ag=1 agents, right-sizing the huge pages
696-
# pool to the total number of huge pages used across all tests results in spurious failures with pool depletion
697-
# anyway (something else on the host seems to be stealing our huge pages, and we cannot "earmark" them for
698-
# Firecracker processes). Thus, just allocate 4GB of them and call it a day.
699-
say "Setting up huge pages pool"
700-
num_hugetlbfs_pages=2048
696+
# It seems that even if the tests using huge pages run sequentially on ag=1 agents, right-sizing the huge pages
697+
# pool to the total number of huge pages used across all tests results in spurious failures with pool depletion
698+
# anyway (something else on the host seems to be stealing our huge pages, and we cannot "earmark" them for
699+
# Firecracker processes). Thus, just allocate 4GB of them and call it a day.
700+
say "Setting up huge pages pool"
701+
num_hugetlbfs_pages=51200 # 100GiB huge pages pool
702+
703+
# Allocate enough 2M hugetlbfs pages to boot a single 128MB microvm
704+
huge_pages_old=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
705+
huge_pages_new=$(echo $num_hugetlbfs_pages |sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
701706

702-
# Allocate enough 2M hugetlbfs pages to boot a single 128MB microvm
703-
huge_pages_old=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
704-
huge_pages_new=$(echo $num_hugetlbfs_pages |sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)
705-
fi
706707

707708
if [[ "$huge_pages_new" -ne "$num_hugetlbfs_pages" ]]; then
708709
die "Failed to allocate $num_hugetlbfs_pages hugetlbfs pages, only got $huge_pages_new"

0 commit comments

Comments
 (0)