Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

TDX: Enable TLB flush hypercall support #511

Merged
Merged 5 commits on Jan 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion openhcl/openhcl_boot/src/host_params/dt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -461,7 +461,17 @@ impl PartitionInfo {
crate::cmdline::parse_boot_command_line(storage.cmdline.as_str())
.enable_vtl2_gpa_pool;

max(dt_page_count.unwrap_or(0), cmdline_page_count.unwrap_or(0))
let isolation_requirements = match params.isolation_type {
#[cfg(target_arch = "x86_64")]
// Supporting TLB flush hypercalls on TDX requires 1 page per VP
IsolationType::Tdx => parsed.cpus.len() as u64,
_ => 0,
};

max(
smalis-msft marked this conversation as resolved.
dt_page_count.unwrap_or(0) + isolation_requirements,
cmdline_page_count.unwrap_or(0),
)
};
if vtl2_gpa_pool_size != 0 {
// Reserve the specified number of pages for the pool. Use the used
Expand Down
9 changes: 6 additions & 3 deletions openhcl/underhill_core/src/worker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1533,8 +1533,7 @@ async fn new_underhill_vm(
};

// Enable the private pool which supports persisting ranges across servicing
// for DMA devices that support save restore. Today, this is only used for
// NVMe.
// for DMA devices that support save restore.
let mut private_pool = if !runtime_params.private_pool_ranges().is_empty() {
use vmcore::save_restore::SaveRestore;

Expand Down Expand Up @@ -1773,7 +1772,11 @@ async fn new_underhill_vm(
vmtime: &vmtime_source,
isolated_memory_protector: gm.isolated_memory_protector()?,
shared_vis_pages_pool: shared_vis_pages_pool.as_ref().map(|p| {
p.allocator("partition".into())
p.allocator("partition-shared".into())
.expect("partition name should be unique")
}),
private_vis_pages_pool: private_pool.as_ref().map(|p| {
p.allocator("partition-private".into())
.expect("partition name should be unique")
}),
};
Expand Down
6 changes: 6 additions & 0 deletions openhcl/virt_mshv_vtl/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -219,6 +219,9 @@ struct UhPartitionInner {
#[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
#[inspect(skip)]
shared_vis_pages_pool: Option<page_pool_alloc::PagePoolAllocator>,
#[cfg_attr(guest_arch = "aarch64", allow(dead_code))]
#[inspect(skip)]
private_vis_pages_pool: Option<page_pool_alloc::PagePoolAllocator>,
#[inspect(with = "inspect::AtomicMut")]
no_sidecar_hotplug: AtomicBool,
use_mmio_hypercalls: bool,
Expand Down Expand Up @@ -1170,6 +1173,8 @@ pub struct UhLateParams<'a> {
pub isolated_memory_protector: Option<Arc<dyn ProtectIsolatedMemory>>,
/// Allocator for shared visibility pages.
pub shared_vis_pages_pool: Option<page_pool_alloc::PagePoolAllocator>,
/// Allocator for private visibility pages.
pub private_vis_pages_pool: Option<page_pool_alloc::PagePoolAllocator>,
}

/// Trait for CVM-related protections on guest memory.
Expand Down Expand Up @@ -1606,6 +1611,7 @@ impl<'a> UhProtoPartition<'a> {
guest_vsm: RwLock::new(vsm_state),
isolated_memory_protector: late_params.isolated_memory_protector.clone(),
shared_vis_pages_pool: late_params.shared_vis_pages_pool,
private_vis_pages_pool: late_params.private_vis_pages_pool,
no_sidecar_hotplug: params.no_sidecar_hotplug.into(),
use_mmio_hypercalls: params.use_mmio_hypercalls,
backing_shared: BackingShared::new(isolation, BackingSharedParams { cvm_state })?,
Expand Down
13 changes: 6 additions & 7 deletions openhcl/virt_mshv_vtl/src/processor/tdx/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -689,13 +689,12 @@ impl BackingPrivate for TdxBacked {
let pfns = pfns_handle.base_pfn()..pfns_handle.base_pfn() + pfns_handle.size_pages();
let overlays: Vec<_> = pfns.collect();

// TODO TDX: This needs to come from a private pool
let flush_page = params
.partition
.shared_vis_pages_pool
.private_vis_pages_pool
.as_ref()
.expect("shared pool exists for cvm")
.alloc(1.try_into().unwrap(), "tdx_tlb_flush".into())
.expect("private pool exists for cvm")
.alloc_with_mapping(1.try_into().unwrap(), "tdx_tlb_flush".into())
.expect("not out of memory");

let untrusted_synic = params
Expand Down Expand Up @@ -3412,7 +3411,7 @@ impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
let mut flush_state = self.vp.shared.flush_state[vtl].write();

// If there are too many provided gvas then promote this request to a flush entire.
// TODO do we need the extended check? I don't think so
// TODO TDX GUEST VSM do we need the extended check? I don't think so
if gva_ranges.len() > FLUSH_GVA_LIST_SIZE {
if flags.non_global_mappings_only() {
flush_state.s.flush_entire_non_global_counter += 1;
Expand All @@ -3422,9 +3421,9 @@ impl<T: CpuIo> hv1_hypercall::FlushVirtualAddressListEx
} else {
for range in gva_ranges {
if flush_state.gva_list.len() == FLUSH_GVA_LIST_SIZE {
flush_state.gva_list.pop_back();
flush_state.gva_list.pop_front();
}
flush_state.gva_list.push_front(*range);
flush_state.gva_list.push_back(*range);
flush_state.s.gva_list_count += 1;
}
}
Expand Down
14 changes: 7 additions & 7 deletions openhcl/virt_mshv_vtl/src/processor/tdx/tlb_flush.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ use std::collections::VecDeque;
use std::num::Wrapping;
use x86defs::tdx::TdGlaVmAndFlags;
use x86defs::tdx::TdxGlaListInfo;
use zerocopy::AsBytes;

pub(super) const FLUSH_GVA_LIST_SIZE: usize = 32;

Expand Down Expand Up @@ -174,14 +175,13 @@ impl UhProcessor<'_, TdxBacked> {
} else {
gla_flags.set_list(true);

// TODO: Actually copy addresses in.
// let page_mapping = flush_page.sparse_mapping().expect("allocated");
let page_mapping = flush_page.mapping().unwrap();

// for (i, gva_range) in flush_addrs.iter().enumerate() {
// page_mapping
// .write_at(i * size_of::<HvGvaRange>(), gva_range.as_bytes())
// .expect("just allocated, should never fail");
// }
for (i, gva_range) in flush_addrs.iter().enumerate() {
page_mapping
.write_at(i * size_of::<HvGvaRange>(), gva_range.as_bytes())
.unwrap();
}

let gla_list = TdxGlaListInfo::new()
.with_list_gpa(flush_page.base_pfn())
Expand Down
8 changes: 3 additions & 5 deletions vm/hv1/hv1_emulator/src/cpuid.rs
Original file line number Diff line number Diff line change
Expand Up @@ -129,11 +129,9 @@ pub fn hv_cpuid_leaves(
.with_use_apic_msrs(use_apic_msrs);

if hardware_isolated {
// TODO TDX too when it's ready
if isolation == IsolationType::Snp {
enlightenments = enlightenments
.with_use_hypercall_for_remote_flush_and_local_flush_entire(true);
}
enlightenments =
enlightenments.with_use_hypercall_for_remote_flush_and_local_flush_entire(true);

// TODO HCVM:
// .with_use_synthetic_cluster_ipi(true);

Expand Down
Loading