diff --git a/openhcl/openhcl_boot/src/host_params/dt.rs b/openhcl/openhcl_boot/src/host_params/dt.rs
index 80acc5f225..987e27c4d5 100644
--- a/openhcl/openhcl_boot/src/host_params/dt.rs
+++ b/openhcl/openhcl_boot/src/host_params/dt.rs
@@ -461,7 +461,17 @@ impl PartitionInfo {
                 crate::cmdline::parse_boot_command_line(storage.cmdline.as_str())
                     .enable_vtl2_gpa_pool;
-            max(dt_page_count.unwrap_or(0), cmdline_page_count.unwrap_or(0))
+            let isolation_requirements = match params.isolation_type {
+                #[cfg(target_arch = "x86_64")]
+                // Supporting TLB flush hypercalls on TDX requires 1 page per VP
+                IsolationType::Tdx => parsed.cpus.len() as u64,
+                _ => 0,
+            };
+
+            max(
+                dt_page_count.unwrap_or(0) + isolation_requirements,
+                cmdline_page_count.unwrap_or(0),
+            )
         };
 
         if vtl2_gpa_pool_size != 0 {
             // Reserve the specified number of pages for the pool. Use the used
diff --git a/openhcl/underhill_core/src/worker.rs b/openhcl/underhill_core/src/worker.rs
index c65d78a58c..0ef14791d6 100644
--- a/openhcl/underhill_core/src/worker.rs
+++ b/openhcl/underhill_core/src/worker.rs
@@ -1533,8 +1533,7 @@ async fn new_underhill_vm(
     };
 
     // Enable the private pool which supports persisting ranges across servicing
-    // for DMA devices that support save restore. Today, this is only used for
-    // NVMe.
+    // for DMA devices that support save restore.
     let mut private_pool = if !runtime_params.private_pool_ranges().is_empty() {
         use vmcore::save_restore::SaveRestore;
 
@@ -1773,7 +1772,11 @@ async fn new_underhill_vm(
         vmtime: &vmtime_source,
        isolated_memory_protector: gm.isolated_memory_protector()?,
         shared_vis_pages_pool: shared_vis_pages_pool.as_ref().map(|p| {
-            p.allocator("partition".into())
+            p.allocator("partition-shared".into())
+                .expect("partition name should be unique")
+        }),
+        private_vis_pages_pool: private_pool.as_ref().map(|p| {
+            p.allocator("partition-private".into())
                 .expect("partition name should be unique")
         }),
     };
diff --git a/openhcl/virt_mshv_vtl/src/lib.rs b/openhcl/virt_mshv_vtl/src/lib.rs
index 34fa6e0e52..8f32507794 100644
--- a/openhcl/virt_mshv_vtl/src/lib.rs
+++ b/openhcl/virt_mshv_vtl/src/lib.rs
@@ -219,6 +219,9 @@ struct UhPartitionInner {
     #[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
     #[inspect(skip)]
     shared_vis_pages_pool: Option,
+    #[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
+    #[inspect(skip)]
+    private_vis_pages_pool: Option,
     #[inspect(with = "inspect::AtomicMut")]
     no_sidecar_hotplug: AtomicBool,
     use_mmio_hypercalls: bool,
@@ -1170,6 +1173,8 @@ pub struct UhLateParams<'a> {
     pub isolated_memory_protector: Option>,
     /// Allocator for shared visibility pages.
     pub shared_vis_pages_pool: Option,
+    /// Allocator for private visibility pages.
+    pub private_vis_pages_pool: Option,
 }
 
 /// Trait for CVM-related protections on guest memory.
@@ -1606,6 +1611,7 @@ impl<'a> UhProtoPartition<'a> {
             guest_vsm: RwLock::new(vsm_state),
             isolated_memory_protector: late_params.isolated_memory_protector.clone(),
             shared_vis_pages_pool: late_params.shared_vis_pages_pool,
+            private_vis_pages_pool: late_params.private_vis_pages_pool,
             no_sidecar_hotplug: params.no_sidecar_hotplug.into(),
             use_mmio_hypercalls: params.use_mmio_hypercalls,
             backing_shared: BackingShared::new(isolation, BackingSharedParams { cvm_state })?,
diff --git a/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs b/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
index d03bd890a0..54190c3864 100644
--- a/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
@@ -689,13 +689,12 @@ impl BackingPrivate for TdxBacked {
         let pfns = pfns_handle.base_pfn()..pfns_handle.base_pfn() + pfns_handle.size_pages();
         let overlays: Vec<_> = pfns.collect();
 
-        // TODO TDX: This needs to come from a private pool
         let flush_page = params
             .partition
-            .shared_vis_pages_pool
+            .private_vis_pages_pool
             .as_ref()
-            .expect("shared pool exists for cvm")
-            .alloc(1.try_into().unwrap(), "tdx_tlb_flush".into())
+            .expect("private pool exists for cvm")
+            .alloc_with_mapping(1.try_into().unwrap(), "tdx_tlb_flush".into())
             .expect("not out of memory");
 
         let untrusted_synic = params
@@ -3412,7 +3411,7 @@ impl hv1_hypercall::FlushVirtualAddressListEx
         let mut flush_state = self.vp.shared.flush_state[vtl].write();
 
         // If there are too many provided gvas then promote this request to a flush entire.
-        // TODO do we need the extended check? I don't think so
+        // TODO TDX GUEST VSM do we need the extended check? I don't think so
         if gva_ranges.len() > FLUSH_GVA_LIST_SIZE {
             if flags.non_global_mappings_only() {
                 flush_state.s.flush_entire_non_global_counter += 1;
@@ -3422,9 +3421,9 @@ impl hv1_hypercall::FlushVirtualAddressListEx
         } else {
             for range in gva_ranges {
                 if flush_state.gva_list.len() == FLUSH_GVA_LIST_SIZE {
-                    flush_state.gva_list.pop_back();
+                    flush_state.gva_list.pop_front();
                 }
-                flush_state.gva_list.push_front(*range);
+                flush_state.gva_list.push_back(*range);
                 flush_state.s.gva_list_count += 1;
             }
         }
diff --git a/openhcl/virt_mshv_vtl/src/processor/tdx/tlb_flush.rs b/openhcl/virt_mshv_vtl/src/processor/tdx/tlb_flush.rs
index 652fd4c882..6c5603316e 100644
--- a/openhcl/virt_mshv_vtl/src/processor/tdx/tlb_flush.rs
+++ b/openhcl/virt_mshv_vtl/src/processor/tdx/tlb_flush.rs
@@ -14,6 +14,7 @@ use std::collections::VecDeque;
 use std::num::Wrapping;
 use x86defs::tdx::TdGlaVmAndFlags;
 use x86defs::tdx::TdxGlaListInfo;
+use zerocopy::AsBytes;
 
 pub(super) const FLUSH_GVA_LIST_SIZE: usize = 32;
 
@@ -174,14 +175,13 @@ impl UhProcessor<'_, TdxBacked> {
         } else {
             gla_flags.set_list(true);
 
-            // TODO: Actually copy addresses in.
-            // let page_mapping = flush_page.sparse_mapping().expect("allocated");
+            let page_mapping = flush_page.mapping().unwrap();
 
-            // for (i, gva_range) in flush_addrs.iter().enumerate() {
-            //     page_mapping
-            //         .write_at(i * size_of::(), gva_range.as_bytes())
-            //         .expect("just allocated, should never fail");
-            // }
+            for (i, gva_range) in flush_addrs.iter().enumerate() {
+                page_mapping
+                    .write_at(i * size_of::(), gva_range.as_bytes())
+                    .unwrap();
+            }
 
             let gla_list = TdxGlaListInfo::new()
                 .with_list_gpa(flush_page.base_pfn())
diff --git a/vm/hv1/hv1_emulator/src/cpuid.rs b/vm/hv1/hv1_emulator/src/cpuid.rs
index 29d1dfb7b9..b744fe3ffd 100644
--- a/vm/hv1/hv1_emulator/src/cpuid.rs
+++ b/vm/hv1/hv1_emulator/src/cpuid.rs
@@ -129,11 +129,9 @@ pub fn hv_cpuid_leaves(
             .with_use_apic_msrs(use_apic_msrs);
 
         if hardware_isolated {
-            // TODO TDX too when it's ready
-            if isolation == IsolationType::Snp {
-                enlightenments = enlightenments
-                    .with_use_hypercall_for_remote_flush_and_local_flush_entire(true);
-            }
+            enlightenments =
+                enlightenments.with_use_hypercall_for_remote_flush_and_local_flush_entire(true);
+
             // TODO HCVM:
             // .with_use_synthetic_cluster_ipi(true);