#![cfg(target_os = "linux")]
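//! Underhill (VTL2) guest memory management for isolated VMs: page
//! acceptance, host-visibility changes, and VTL protection handling.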

mod init;
mod mapping;
mod registrar;

pub use init::BootInit;
pub use init::Init;
pub use init::MemoryMappings;
pub use init::init;

use cvm_tracing::CVM_ALLOWED;
use guestmem::GuestMemoryBackingError;
use guestmem::PAGE_SIZE;
use guestmem::ranges::PagedRange;
use hcl::GuestVtl;
use hcl::ioctl::AcceptPagesError;
use hcl::ioctl::ApplyVtlProtectionsError;
use hcl::ioctl::Mshv;
use hcl::ioctl::MshvHvcall;
use hcl::ioctl::MshvVtl;
use hcl::ioctl::snp::SnpPageError;
use hv1_structs::VtlArray;
use hvdef::HV_MAP_GPA_PERMISSIONS_ALL;
use hvdef::HV_MAP_GPA_PERMISSIONS_NONE;
use hvdef::HV_PAGE_SHIFT;
use hvdef::HV_PAGE_SIZE;
use hvdef::HvError;
use hvdef::HvMapGpaFlags;
use hvdef::HypercallCode;
use hvdef::hypercall::AcceptMemoryType;
use hvdef::hypercall::HostVisibilityType;
use hvdef::hypercall::HvInputVtl;
use mapping::GuestMemoryMapping;
use mapping::GuestValidMemory;
use memory_range::MemoryRange;
use parking_lot::Mutex;
use parking_lot::MutexGuard;
use registrar::RegisterMemory;
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use thiserror::Error;
use virt::IsolationType;
use virt_mshv_vtl::GpnSource;
use virt_mshv_vtl::ProtectIsolatedMemory;
use virt_mshv_vtl::TlbFlushLockAccess;
use vm_topology::memory::MemoryLayout;
use x86defs::snp::SevRmpAdjust;
use x86defs::tdx::GpaVmAttributes;
use x86defs::tdx::GpaVmAttributesMask;
use x86defs::tdx::TdgMemPageAttrWriteR8;
use x86defs::tdx::TdgMemPageGpaAttr;

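/// Error querying the VTL permissions of a page.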
#[derive(Debug, Error)]
pub enum QueryVtlPermissionsError {
    #[error("failed to query rmp permissions")]
    Snp(#[source] SnpPageError),
}

#[derive(Debug)]
struct MshvVtlWithPolicy {
    mshv_vtl: MshvVtl,
    ignore_registration_failure: bool,
    shared: bool,
}

impl RegisterMemory for MshvVtlWithPolicy {
    fn register_range(&self, range: MemoryRange) -> Result<(), impl 'static + std::error::Error> {
        match self.mshv_vtl.add_vtl0_memory(range, self.shared) {
            Ok(()) => Ok(()),
            Err(err) if self.ignore_registration_failure => {
                tracing::warn!(
                    CVM_ALLOWED,
                    error = &err as &dyn std::error::Error,
                    "registration failure, could be expected"
                );
                Ok(())
            }
            Err(err) => Err(err),
        }
    }
}

#[derive(Debug, Error)]
#[error("failed to register memory with kernel")]
struct RegistrationError;

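/// The default VTL permissions applied to newly private pages. `vtl1` stays
/// `None` until a VTL 1 default has been set.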
struct DefaultVtlPermissions {
    vtl0: HvMapGpaFlags,
    vtl1: Option<HvMapGpaFlags>,
}

impl DefaultVtlPermissions {
    fn set(&mut self, vtl: GuestVtl, permissions: HvMapGpaFlags) {
        match vtl {
            GuestVtl::Vtl0 => self.vtl0 = permissions,
            GuestVtl::Vtl1 => self.vtl1 = Some(permissions),
        }
    }
}

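/// Isolation-specific representation of VTL page protections.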
#[derive(Copy, Clone)]
enum GpaVtlPermissions {
    Vbs(HvMapGpaFlags),
    Snp(SevRmpAdjust),
    Tdx(TdgMemPageGpaAttr, TdgMemPageAttrWriteR8),
}

impl GpaVtlPermissions {
    fn new(isolation: IsolationType, vtl: GuestVtl, protections: HvMapGpaFlags) -> Self {
        match isolation {
            IsolationType::None => unreachable!(),
            IsolationType::Vbs => GpaVtlPermissions::Vbs(protections),
            IsolationType::Snp => {
                let mut vtl_permissions = GpaVtlPermissions::Snp(SevRmpAdjust::new());
                vtl_permissions.set(vtl, protections);
                vtl_permissions
            }
            IsolationType::Tdx => {
                let mut vtl_permissions =
                    GpaVtlPermissions::Tdx(TdgMemPageGpaAttr::new(), TdgMemPageAttrWriteR8::new());
                vtl_permissions.set(vtl, protections);
                vtl_permissions
            }
        }
    }

    fn set(&mut self, vtl: GuestVtl, protections: HvMapGpaFlags) {
        match self {
            GpaVtlPermissions::Vbs(flags) => *flags = protections,
            GpaVtlPermissions::Snp(rmpadjust) => {
                *rmpadjust = SevRmpAdjust::new()
                    .with_enable_read(protections.readable())
                    .with_enable_write(protections.writable())
                    .with_enable_user_execute(protections.user_executable())
                    .with_enable_kernel_execute(protections.kernel_executable())
                    .with_target_vmpl(match vtl {
                        GuestVtl::Vtl0 => x86defs::snp::Vmpl::Vmpl2.into(),
                        GuestVtl::Vtl1 => x86defs::snp::Vmpl::Vmpl1.into(),
                    });
            }
            GpaVtlPermissions::Tdx(attributes, mask) => {
                let vm_attributes = GpaVmAttributes::new()
                    .with_valid(true)
                    .with_read(protections.readable())
                    .with_write(protections.writable())
                    .with_kernel_execute(protections.kernel_executable())
                    .with_user_execute(protections.user_executable());

                let (new_attributes, new_mask) = match vtl {
                    GuestVtl::Vtl0 => {
                        let attributes = TdgMemPageGpaAttr::new().with_l2_vm1(vm_attributes);
                        let mask = TdgMemPageAttrWriteR8::new()
                            .with_l2_vm1(GpaVmAttributesMask::ALL_CHANGED);
                        (attributes, mask)
                    }
                    GuestVtl::Vtl1 => {
                        let attributes = TdgMemPageGpaAttr::new().with_l2_vm2(vm_attributes);
                        let mask = TdgMemPageAttrWriteR8::new()
                            .with_l2_vm2(GpaVmAttributesMask::ALL_CHANGED);
                        (attributes, mask)
                    }
                };

                *attributes = new_attributes;
                *mask = new_mask;
            }
        }
    }
}

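/// Error returned by [`MemoryAcceptor::modify_gpa_visibility`].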
#[derive(Debug, Error)]
#[error("failed to modify gpa visibility, elements successfully processed {processed}")]
pub struct ModifyGpaVisibilityError {
    source: HvError,
    processed: usize,
}

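/// Wrapper around the hypercall and kernel interfaces used to accept pages
/// and to change their host visibility and VTL protections.
///
/// A minimal usage sketch (hypothetical call site, error handling elided):
///
/// ```ignore
/// let acceptor = MemoryAcceptor::new(IsolationType::Snp)?;
/// // Accept a newly private range on behalf of the lower VTLs and give
/// // VTL 0 full access to it.
/// acceptor.accept_lower_vtl_pages(range)?;
/// acceptor.apply_initial_lower_vtl_protections(range)?;
/// ```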
pub struct MemoryAcceptor {
    mshv_hvcall: MshvHvcall,
    mshv_vtl: MshvVtl,
    isolation: IsolationType,
}

impl MemoryAcceptor {
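    /// Creates a new [`MemoryAcceptor`] for the given isolation type,
    /// limiting the allowed hypercalls to those needed for page acceptance
    /// and visibility changes.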
    pub fn new(isolation: IsolationType) -> Result<Self, hcl::ioctl::Error> {
        let mshv = Mshv::new()?;
        let mshv_vtl = mshv.create_vtl()?;
        let mshv_hvcall = MshvHvcall::new()?;
        mshv_hvcall.set_allowed_hypercalls(&[
            HypercallCode::HvCallAcceptGpaPages,
            HypercallCode::HvCallModifySparseGpaPageHostVisibility,
            HypercallCode::HvCallModifyVtlProtectionMask,
        ]);

        Ok(Self {
            mshv_hvcall,
            mshv_vtl,
            isolation,
        })
    }

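    /// Accepts a range of pages on behalf of the lower VTLs, using the
    /// mechanism appropriate for the isolation type.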
    pub fn accept_lower_vtl_pages(&self, range: MemoryRange) -> Result<(), AcceptPagesError> {
        match self.isolation {
            IsolationType::None => unreachable!(),
            IsolationType::Vbs => self
                .mshv_hvcall
                .accept_gpa_pages(range, AcceptMemoryType::RAM),
            IsolationType::Snp => {
                self.mshv_vtl
                    .pvalidate_pages(range, true, false)
                    .map_err(|err| AcceptPagesError::Snp {
                        failed_operation: err,
                        range,
                    })
            }
            IsolationType::Tdx => {
                let attributes = TdgMemPageGpaAttr::new().with_l2_vm1(GpaVmAttributes::FULL_ACCESS);
                let mask =
                    TdgMemPageAttrWriteR8::new().with_l2_vm1(GpaVmAttributesMask::ALL_CHANGED);

                self.mshv_vtl
                    .tdx_accept_pages(range, Some((attributes, mask)))
                    .map_err(|err| AcceptPagesError::Tdx { error: err, range })
            }
        }
    }

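    /// Undoes the acceptance of a range of pages (used before making them
    /// host-visible).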
    fn unaccept_lower_vtl_pages(&self, range: MemoryRange) {
        match self.isolation {
            IsolationType::None => unreachable!(),
            IsolationType::Vbs => {
                // Nothing to do; there is no unaccept operation for VBS.
            }
            IsolationType::Snp => {
                // Revoke the VTL permissions on the pages before invalidating
                // them, so no lower VTL can access them afterwards.
                for lower_vtl in [GuestVtl::Vtl0, GuestVtl::Vtl1] {
                    self.apply_protections(range, lower_vtl, HV_MAP_GPA_PERMISSIONS_NONE)
                        .unwrap();
                }
                self.mshv_vtl.pvalidate_pages(range, false, false).unwrap()
            }

            IsolationType::Tdx => {
                // Nothing to do for TDX.
            }
        }
    }

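    /// Asks the hypervisor to change the host visibility of the given pages.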
    pub fn modify_gpa_visibility(
        &self,
        host_visibility: HostVisibilityType,
        gpns: &[u64],
    ) -> Result<(), ModifyGpaVisibilityError> {
        self.mshv_hvcall
            .modify_gpa_visibility(host_visibility, gpns)
            .map_err(|(e, processed)| ModifyGpaVisibilityError {
                source: e,
                processed,
            })
    }

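    /// Applies the initial protections for lower-VTL pages: VTL 0 gets full
    /// access.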
    pub fn apply_initial_lower_vtl_protections(
        &self,
        range: MemoryRange,
    ) -> Result<(), ApplyVtlProtectionsError> {
        self.apply_protections(range, GuestVtl::Vtl0, HV_MAP_GPA_PERMISSIONS_ALL)
    }

    fn apply_protections(
        &self,
        range: MemoryRange,
        vtl: GuestVtl,
        flags: HvMapGpaFlags,
    ) -> Result<(), ApplyVtlProtectionsError> {
        let permissions = GpaVtlPermissions::new(self.isolation, vtl, flags);

        match permissions {
            GpaVtlPermissions::Vbs(flags) => {
                // VBS protections are applied via the hypervisor and cannot
                // target VTL 0 itself.
                assert_ne!(vtl, GuestVtl::Vtl0);

                self.mshv_hvcall
                    .modify_vtl_protection_mask(range, flags, HvInputVtl::from(vtl))
            }
            GpaVtlPermissions::Snp(rmpadjust) => {
                self.mshv_vtl
                    .rmpadjust_pages(range, rmpadjust, false)
                    .map_err(|err| ApplyVtlProtectionsError::Snp {
                        failed_operation: err,
                        range,
                        permissions: rmpadjust,
                        vtl: vtl.into(),
                    })
            }
            GpaVtlPermissions::Tdx(attributes, mask) => {
                self.mshv_vtl
                    .tdx_set_page_attributes(range, attributes, mask)
                    .map_err(|err| ApplyVtlProtectionsError::Tdx {
                        error: err,
                        range,
                        permissions: attributes,
                        vtl: vtl.into(),
                    })
            }
        }
    }
}

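/// Implementation of [`ProtectIsolatedMemory`] for hardware-isolated
/// (SNP and TDX) VMs.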
pub struct HardwareIsolatedMemoryProtector {
    inner: Mutex<HardwareIsolatedMemoryProtectorInner>,
    layout: MemoryLayout,
    acceptor: Arc<MemoryAcceptor>,
    vtl0: Arc<GuestMemoryMapping>,
    vtl1_protections_enabled: AtomicBool,
}

struct HardwareIsolatedMemoryProtectorInner {
    valid_encrypted: Arc<GuestValidMemory>,
    valid_shared: Arc<GuestValidMemory>,
    encrypted: Arc<GuestMemoryMapping>,
    default_vtl_permissions: DefaultVtlPermissions,
    overlay_pages: VtlArray<Vec<OverlayPage>, 2>,
    locked_pages: VtlArray<Vec<Box<[u64]>>, 2>,
}

struct OverlayPage {
    gpn: u64,
    previous_permissions: HvMapGpaFlags,
    overlay_permissions: HvMapGpaFlags,
    ref_count: u16,
    gpn_source: GpnSource,
}

impl HardwareIsolatedMemoryProtector {
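    /// Creates a new [`HardwareIsolatedMemoryProtector`].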
    pub fn new(
        valid_encrypted: Arc<GuestValidMemory>,
        valid_shared: Arc<GuestValidMemory>,
        encrypted: Arc<GuestMemoryMapping>,
        vtl0: Arc<GuestMemoryMapping>,
        layout: MemoryLayout,
        acceptor: Arc<MemoryAcceptor>,
    ) -> Self {
        Self {
            inner: Mutex::new(HardwareIsolatedMemoryProtectorInner {
                valid_encrypted,
                valid_shared,
                encrypted,
                // VTL 0 starts with full permissions; VTL 1 permissions are
                // not set until VTL 1 provides a default.
                default_vtl_permissions: DefaultVtlPermissions {
                    vtl0: HV_MAP_GPA_PERMISSIONS_ALL,
                    vtl1: None,
                },
                overlay_pages: VtlArray::from_fn(|_| Vec::new()),
                locked_pages: VtlArray::from_fn(|_| Vec::new()),
            }),
            layout,
            acceptor,
            vtl0,
            vtl1_protections_enabled: AtomicBool::new(false),
        }
    }

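    /// Applies `protections` to `range`, skipping any registered overlay
    /// pages; their stored previous permissions are updated instead so the
    /// new protections take effect once the overlay is unregistered.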
    fn apply_protections_with_overlay_handling(
        &self,
        range: MemoryRange,
        target_vtl: GuestVtl,
        protections: HvMapGpaFlags,
        inner: &mut MutexGuard<'_, HardwareIsolatedMemoryProtectorInner>,
    ) -> Result<(), ApplyVtlProtectionsError> {
        let mut range_queue = VecDeque::new();
        range_queue.push_back(range);

        'outer: while let Some(range) = range_queue.pop_front() {
            for overlay_page in inner.overlay_pages[target_vtl].iter_mut() {
                let overlay_addr = overlay_page.gpn * HV_PAGE_SIZE;
                if range.contains_addr(overlay_addr) {
                    // Remember the requested permissions so they can be
                    // restored when the overlay page is unregistered, then
                    // requeue the pieces of the range around the overlay page.
                    overlay_page.previous_permissions = protections;
                    let (left, right_with_overlay) =
                        range.split_at_offset(range.offset_of(overlay_addr).unwrap());
                    let (overlay, right) = right_with_overlay.split_at_offset(HV_PAGE_SIZE);
                    debug_assert_eq!(overlay.start_4k_gpn(), overlay_page.gpn);
                    debug_assert_eq!(overlay.len(), HV_PAGE_SIZE);
                    if !left.is_empty() {
                        range_queue.push_back(left);
                    }
                    if !right.is_empty() {
                        range_queue.push_back(right);
                    }
                    continue 'outer;
                }
            }
            self.apply_protections(range, target_vtl, protections, GpnSource::GuestMemory)?
        }

        Ok(())
    }

    fn apply_protections(
        &self,
        range: MemoryRange,
        target_vtl: GuestVtl,
        protections: HvMapGpaFlags,
        gpn_source: GpnSource,
    ) -> Result<(), ApplyVtlProtectionsError> {
        if gpn_source == GpnSource::GuestMemory && target_vtl == GuestVtl::Vtl0 {
            // Track VTL 0 guest-memory permissions so they can be queried later.
            self.vtl0.update_permission_bitmaps(range, protections);
        }
        self.acceptor
            .apply_protections(range, target_vtl, protections)
    }

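    /// Queries the current VTL permissions for `gpn`. Fails if the gpn is
    /// not guest RAM.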
    fn query_lower_vtl_permissions(
        &self,
        vtl: GuestVtl,
        gpn: u64,
    ) -> Result<HvMapGpaFlags, HvError> {
        if !self.is_in_guest_memory(gpn) {
            return Err(HvError::OperationDenied);
        }

        let res = match vtl {
            GuestVtl::Vtl0 => self
                .vtl0
                .query_access_permission(gpn)
                .unwrap_or(HV_MAP_GPA_PERMISSIONS_ALL),
            GuestVtl::Vtl1 => HV_MAP_GPA_PERMISSIONS_ALL,
        };

        Ok(res)
    }

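    /// Fails with [`HvError::OperationDenied`] if `gpn` is locked and not
    /// registered as an overlay page.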
    fn check_gpn_not_locked(
        &self,
        inner: &MutexGuard<'_, HardwareIsolatedMemoryProtectorInner>,
        vtl: GuestVtl,
        gpn: u64,
    ) -> Result<(), HvError> {
        // A locked page can only be modified if it is also registered as an
        // overlay page.
        if inner.locked_pages[vtl].iter().flatten().any(|x| *x == gpn)
            && !inner.overlay_pages[vtl].iter().any(|p| p.gpn == gpn)
        {
            return Err(HvError::OperationDenied);
        }
        Ok(())
    }

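    /// Returns whether `gpn` falls within the guest RAM described by the
    /// memory layout.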
    fn is_in_guest_memory(&self, gpn: u64) -> bool {
        let gpa = gpn << HV_PAGE_SHIFT;
        self.layout.ram().iter().any(|r| r.range.contains_addr(gpa))
    }
}

impl ProtectIsolatedMemory for HardwareIsolatedMemoryProtector {
    fn change_host_visibility(
        &self,
        vtl: GuestVtl,
        shared: bool,
        gpns: &[u64],
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), (HvError, usize)> {
        let inner = self.inner.lock();

        // Validate the gpns up front: they must be guest RAM, must not be
        // locked, and an overlay page cannot be made shared.
        for &gpn in gpns {
            if !self.is_in_guest_memory(gpn) {
                return Err((HvError::OperationDenied, 0));
            }

            self.check_gpn_not_locked(&inner, vtl, gpn)
                .map_err(|x| (x, 0))?;

            if shared && inner.overlay_pages[vtl].iter().any(|p| p.gpn == gpn) {
                return Err((HvError::OperationDenied, 0));
            }
        }

        // Filter out the gpns that already have the requested visibility. If
        // VTL 1 protections are enabled, stop at the first VTL 0 page that
        // VTL 1 has not left readable and writable.
        let orig_gpns = gpns;
        let mut failed_vtl_permission_index = None;
        let gpns = gpns
            .iter()
            .copied()
            .enumerate()
            .take_while(|&(index, gpn)| {
                if vtl == GuestVtl::Vtl0 && shared && self.vtl1_protections_enabled() {
                    let permissions = self
                        .vtl0
                        .query_access_permission(gpn)
                        .expect("vtl 1 protections enabled, vtl permissions should be tracked");
                    if !permissions.readable() || !permissions.writable() {
                        failed_vtl_permission_index = Some(index);
                        false
                    } else {
                        true
                    }
                } else {
                    true
                }
            })
            .filter_map(|(_, gpn)| {
                if inner.valid_shared.check_valid(gpn) != shared {
                    Some(gpn)
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        tracing::debug!(
            orig = orig_gpns.len(),
            len = gpns.len(),
            first = gpns.first(),
            shared,
            "change vis"
        );

        let ranges = PagedRange::new(0, gpns.len() * PagedRange::PAGE_SIZE, &gpns)
            .unwrap()
            .ranges()
            .map(|r| r.map(|r| MemoryRange::new(r.start..r.end)))
            .collect::<Result<Vec<_>, _>>()
            .unwrap();

        // Clear the valid bitmap for the state being left, so that accesses
        // through the old mapping fail while the transition is in progress.
        let clear_bitmap = if shared {
            &inner.valid_encrypted
        } else {
            &inner.valid_shared
        };

        for &range in &ranges {
            if shared && vtl == GuestVtl::Vtl0 {
                // Shared pages no longer track VTL protections; clear VTL 0's
                // permission bitmap for the range.
                self.vtl0
                    .update_permission_bitmaps(range, HV_MAP_GPA_PERMISSIONS_NONE);
            }

            clear_bitmap.update_valid(range, false);
        }

        // Wait for any in-flight accessors of the old mapping to finish
        // before changing the page state.
        guestmem::rcu().synchronize_blocking();

        if let IsolationType::Snp = self.acceptor.isolation {
            // For SNP, flush the entire TLB before changing the page state.
            tlb_access.flush_entire();
        }

        if shared {
            // Unaccept the pages before asking the host to make them shared.
            for &range in &ranges {
                self.acceptor.unaccept_lower_vtl_pages(range);
            }
        }

        let host_visibility = if shared {
            HostVisibilityType::SHARED
        } else {
            HostVisibilityType::PRIVATE
        };

        let (result, ranges) = match self.acceptor.modify_gpa_visibility(host_visibility, &gpns) {
            Ok(()) => {
                // Every range was processed; report a VTL permission failure,
                // if any, at the index where the gpn list was truncated.
                (
                    match failed_vtl_permission_index {
                        Some(index) => Err((HvError::AccessDenied, index)),
                        None => Ok(()),
                    },
                    ranges,
                )
            }
            Err(err) => {
                if shared {
                    panic!(
                        "the hypervisor refused to transition pages to shared, we cannot safely roll back: {:?}",
                        err
                    );
                }

                // Only the gpns before `processed` were transitioned. Keep
                // those and roll the rest back to their previous state.
                let (successful_gpns, failed_gpns) = gpns.split_at(err.processed);
                let ranges = PagedRange::new(
                    0,
                    successful_gpns.len() * PagedRange::PAGE_SIZE,
                    successful_gpns,
                )
                .unwrap()
                .ranges()
                .map(|r| r.map(|r| MemoryRange::new(r.start..r.end)))
                .collect::<Result<Vec<_>, _>>()
                .expect("previous gpns was already checked");

                let rollback_ranges =
                    PagedRange::new(0, failed_gpns.len() * PagedRange::PAGE_SIZE, failed_gpns)
                        .unwrap()
                        .ranges()
                        .map(|r| r.map(|r| MemoryRange::new(r.start..r.end)))
                        .collect::<Result<Vec<_>, _>>()
                        .expect("previous gpns was already checked");

                for &range in &rollback_ranges {
                    clear_bitmap.update_valid(range, true);
                }

                let failed_index = orig_gpns
                    .iter()
                    .position(|gpn| *gpn == failed_gpns[0])
                    .expect("failed gpn should be present in the list");

                (Err((err.source, failed_index)), ranges)
            }
        };

        if !shared {
            // Accept the newly private pages so the guest can use them.
            for &range in &ranges {
                self.acceptor
                    .accept_lower_vtl_pages(range)
                    .expect("everything should be in a state where we can accept VTL0 pages");

                if self.acceptor.isolation == IsolationType::Snp {
                    inner.encrypted.zero_range(range).expect("VTL 2 should have access to lower VTL memory, the page should be accepted, there should be no vtl protections yet.")
                }
            }
        }

        // Mark the pages valid in their new state.
        let set_bitmap = if shared {
            &inner.valid_shared
        } else {
            &inner.valid_encrypted
        };
        for &range in &ranges {
            set_bitmap.update_valid(range, true);
        }

        if !shared {
            // Newly private pages get the default VTL protections.
            for &range in &ranges {
                self.apply_protections(
                    range,
                    GuestVtl::Vtl0,
                    inner.default_vtl_permissions.vtl0,
                    GpnSource::GuestMemory,
                )
                .expect("should be able to apply default protections");

                if let Some(vtl1_protections) = inner.default_vtl_permissions.vtl1 {
                    self.apply_protections(
                        range,
                        GuestVtl::Vtl1,
                        vtl1_protections,
                        GpnSource::GuestMemory,
                    )
                    .expect("everything should be in a state where we can apply VTL protections");
                }
            }
        }

        result
    }

    fn query_host_visibility(
        &self,
        gpns: &[u64],
        host_visibility: &mut [HostVisibilityType],
    ) -> Result<(), (HvError, usize)> {
        for (i, &gpn) in gpns.iter().enumerate() {
            if !self.is_in_guest_memory(gpn) {
                return Err((HvError::OperationDenied, i));
            }
        }

        let inner = self.inner.lock();

        for (gpn, host_vis) in gpns.iter().zip(host_visibility.iter_mut()) {
            *host_vis = if inner.valid_shared.check_valid(*gpn) {
                HostVisibilityType::SHARED
            } else {
                HostVisibilityType::PRIVATE
            };
        }
        Ok(())
    }

    fn default_vtl0_protections(&self) -> HvMapGpaFlags {
        self.inner.lock().default_vtl_permissions.vtl0
    }

    fn change_default_vtl_protections(
        &self,
        target_vtl: GuestVtl,
        vtl_protections: HvMapGpaFlags,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError> {
        let mut inner = self.inner.lock();

        inner
            .default_vtl_permissions
            .set(target_vtl, vtl_protections);

        // Build the list of currently private (encrypted) ranges so the new
        // default can be applied to them.
        let mut ranges = Vec::new();
        for ram_range in self.layout.ram().iter() {
            let mut protect_start = ram_range.range.start();
            let mut page_count = 0;

            for gpn in
                ram_range.range.start() / PAGE_SIZE as u64..ram_range.range.end() / PAGE_SIZE as u64
            {
                if inner.valid_encrypted.check_valid(gpn) {
                    self.check_gpn_not_locked(&inner, target_vtl, gpn)?;
                    page_count += 1;
                } else {
                    if page_count > 0 {
                        let end_address = protect_start + (page_count * PAGE_SIZE as u64);
                        ranges.push(MemoryRange::new(protect_start..end_address));
                    }
                    protect_start = (gpn + 1) * PAGE_SIZE as u64;
                    page_count = 0;
                }
            }

            if page_count > 0 {
                let end_address = protect_start + (page_count * PAGE_SIZE as u64);
                ranges.push(MemoryRange::new(protect_start..end_address));
            }
        }

        for range in ranges {
            self.apply_protections_with_overlay_handling(
                range,
                target_vtl,
                vtl_protections,
                &mut inner,
            )
            .unwrap();
        }

        // Wait for accessors that may have observed the old protections.
        guestmem::rcu().synchronize_blocking();

        tlb_access.flush(GuestVtl::Vtl0);
        tlb_access.set_wait_for_tlb_locks(target_vtl);

        Ok(())
    }

    fn change_vtl_protections(
        &self,
        target_vtl: GuestVtl,
        gpns: &[u64],
        protections: HvMapGpaFlags,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), (HvError, usize)> {
        let inner = self.inner.lock();

        // Validate the gpns: they must be guest RAM, must not be locked, and
        // must not be registered overlay pages.
        for &gpn in gpns {
            if !self.is_in_guest_memory(gpn) {
                return Err((HvError::OperationDenied, 0));
            }

            self.check_gpn_not_locked(&inner, target_vtl, gpn)
                .map_err(|x| (x, 0))?;

            if inner.overlay_pages[target_vtl].iter().any(|p| p.gpn == gpn) {
                return Err((HvError::OperationDenied, 0));
            }
        }

        // Protections cannot be applied to shared pages.
        if gpns.iter().any(|&gpn| inner.valid_shared.check_valid(gpn)) {
            return Err((HvError::OperationDenied, 0));
        }

        let ranges = PagedRange::new(0, gpns.len() * PagedRange::PAGE_SIZE, gpns)
            .unwrap()
            .ranges()
            .map(|r| r.map(|r| MemoryRange::new(r.start..r.end)))
            .collect::<Result<Vec<_>, _>>()
            .unwrap();

        for range in ranges {
            self.apply_protections(range, target_vtl, protections, GpnSource::GuestMemory)
                .unwrap();
        }

        guestmem::rcu().synchronize_blocking();

        tlb_access.flush(GuestVtl::Vtl0);
        tlb_access.set_wait_for_tlb_locks(target_vtl);

        Ok(())
    }

    fn register_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        gpn_source: GpnSource,
        check_perms: HvMapGpaFlags,
        new_perms: Option<HvMapGpaFlags>,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError> {
        let mut inner = self.inner.lock();

        // If the page is already registered as an overlay, the existing
        // overlay permissions must cover the permissions needed by the new
        // registration; then just bump the reference count.
        if let Some(registered) = inner.overlay_pages[vtl].iter_mut().find(|p| p.gpn == gpn) {
            let needed_perms = new_perms.unwrap_or(check_perms);
            if registered.overlay_permissions.into_bits() | needed_perms.into_bits()
                != registered.overlay_permissions.into_bits()
            {
                return Err(HvError::OperationDenied);
            }
            registered.ref_count += 1;
            return Ok(());
        }

        let current_perms = match gpn_source {
            GpnSource::GuestMemory => {
                // The current permissions must include the required ones, and
                // the page must not be shared.
                let current_perms = self.query_lower_vtl_permissions(vtl, gpn)?;
                if current_perms.into_bits() | check_perms.into_bits() != current_perms.into_bits()
                {
                    return Err(HvError::OperationDenied);
                }

                if inner.valid_shared.check_valid(gpn) {
                    return Err(HvError::OperationDenied);
                }

                current_perms
            }
            GpnSource::Dma => {
                // DMA-backed overlay pages must not overlap guest RAM.
                if self.is_in_guest_memory(gpn) {
                    return Err(HvError::OperationDenied);
                }

                HV_MAP_GPA_PERMISSIONS_NONE
            }
        };

        self.check_gpn_not_locked(&inner, vtl, gpn)?;

        if let Some(new_perms) = new_perms {
            self.apply_protections(
                MemoryRange::from_4k_gpn_range(gpn..gpn + 1),
                vtl,
                new_perms,
                gpn_source,
            )
            .map_err(|_| HvError::OperationDenied)?;
        }

        inner.overlay_pages[vtl].push(OverlayPage {
            gpn,
            previous_permissions: current_perms,
            overlay_permissions: new_perms.unwrap_or(current_perms),
            ref_count: 1,
            gpn_source,
        });

        guestmem::rcu().synchronize_blocking();

        tlb_access.flush(vtl);
        tlb_access.set_wait_for_tlb_locks(vtl);

        Ok(())
    }

    fn unregister_overlay_page(
        &self,
        vtl: GuestVtl,
        gpn: u64,
        tlb_access: &mut dyn TlbFlushLockAccess,
    ) -> Result<(), HvError> {
        let mut inner = self.inner.lock();
        let overlay_pages = &mut inner.overlay_pages[vtl];

        let index = overlay_pages
            .iter()
            .position(|p| p.gpn == gpn)
            .ok_or(HvError::OperationDenied)?;

        // Other users still hold the overlay; just drop one reference.
        if overlay_pages[index].ref_count > 1 {
            overlay_pages[index].ref_count -= 1;
            return Ok(());
        }

        // Restore the permissions the page had before it became an overlay.
        self.apply_protections(
            MemoryRange::from_4k_gpn_range(gpn..gpn + 1),
            vtl,
            overlay_pages[index].previous_permissions,
            overlay_pages[index].gpn_source,
        )
        .map_err(|_| HvError::OperationDenied)?;

        overlay_pages.remove(index);

        guestmem::rcu().synchronize_blocking();

        tlb_access.flush(vtl);
        tlb_access.set_wait_for_tlb_locks(vtl);
        Ok(())
    }

    fn is_overlay_page(&self, vtl: GuestVtl, gpn: u64) -> bool {
        self.inner.lock().overlay_pages[vtl]
            .iter()
            .any(|p| p.gpn == gpn)
    }

    fn lock_gpns(&self, vtl: GuestVtl, gpns: &[u64]) -> Result<(), GuestMemoryBackingError> {
        self.inner.lock().locked_pages[vtl].push(gpns.to_vec().into_boxed_slice());
        Ok(())
    }

    fn unlock_gpns(&self, vtl: GuestVtl, gpns: &[u64]) {
        let mut inner = self.inner.lock();
        let locked_pages = &mut inner.locked_pages[vtl];
        for (i, w) in locked_pages.iter().enumerate() {
            if **w == *gpns {
                locked_pages.swap_remove(i);
                return;
            }
        }

        panic!("Tried to unlock pages that were not locked");
    }

    fn set_vtl1_protections_enabled(&self) {
        self.vtl1_protections_enabled
            .store(true, std::sync::atomic::Ordering::Relaxed);
    }

    fn vtl1_protections_enabled(&self) -> bool {
        self.vtl1_protections_enabled
            .load(std::sync::atomic::Ordering::Relaxed)
    }
}