Commit 112e1e0

MatiasVara authored and priyasiddharth committed

Add proof for conformance to 2.7.7.2 section

Add the verify_spec_2_7_7_2() proof to verify that the queue implementation satisfies the requirement in section 2.7.7.2 of the VirtIO specification. The proof branches on whether the EVENT_IDX feature has been negotiated. Unlike the `test_needs_notification()` unit test, this proof checks the property for all possible values of the queue structure.

Signed-off-by: Matias Ezequiel Vara Larsen <[email protected]>
Signed-off-by: Siddharth Priya <[email protected]>
1 parent a3bdfea

2 files changed: +260 −0
virtio-queue/Cargo.toml

Lines changed: 13 additions & 0 deletions

@@ -23,6 +23,19 @@ criterion = "0.6.0"
 vm-memory = { workspace = true, features = ["backend-mmap", "backend-atomic"] }
 memoffset = "0.9.0"
 
+# The following deps are needed only under kani
+[target.'cfg(kani)'.dependencies]
+libc = "0.2.161"
+vm-memory = { workspace = true, features = ["backend-mmap"] }
+
 [[bench]]
 name = "main"
 harness = false
+
+# From https://model-checking.github.io/kani/usage.html#configuration-in-cargotoml
+#
+# Starting with Rust 1.80 (or nightly-2024-05-05), every reachable #[cfg] will be automatically
+# checked that they match the expected config names and values. To avoid warnings on
+# cfg(kani), we recommend adding the check-cfg lint config in your crate's Cargo.toml
+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }
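
With the kani-only dependencies and the check-cfg lint in place, the proof added below can be run through Kani's cargo integration. A typical invocation looks like the following (illustrative, assuming Kani is installed; the exact command is not part of this commit):

cargo kani -p virtio-queue --harness verify_spec_2_7_7_2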

virtio-queue/src/queue.rs

Lines changed: 247 additions & 0 deletions

@@ -269,6 +269,253 @@ impl Queue {
         }
     }
 
+#[cfg(kani)]
+#[allow(dead_code)]
+mod verification {
+    use std::mem::ManuallyDrop;
+    use vm_memory::MmapRegion;
+
+    use std::num::Wrapping;
+    use vm_memory::FileOffset;
+
+    use vm_memory::{GuestMemoryRegion, MemoryRegionAddress};
+
+    use super::*;
+
+    /// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
+    /// `GuestMemoryMmap`, which manages a list of regions and then does a binary
+    /// search to determine which region a specific read or write request goes to,
+    /// this only uses a single region. Eliminating this binary search significantly
+    /// speeds up all queue proofs, because it eliminates the only loop contained herein,
+    /// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
+    /// it works identically to `GuestMemoryMmap` with only a single contained region.
+    pub struct ProofGuestMemory {
+        the_region: vm_memory::GuestRegionMmap,
+    }
+
+    impl GuestMemory for ProofGuestMemory {
+        type R = vm_memory::GuestRegionMmap;
+
+        fn num_regions(&self) -> usize {
+            1
+        }
+
+        fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
+            self.the_region
+                .to_region_addr(addr)
+                .map(|_| &self.the_region)
+        }
+
+        fn iter(&self) -> impl Iterator<Item = &Self::R> {
+            std::iter::once(&self.the_region)
+        }
+
+        fn try_access<F>(
+            &self,
+            count: usize,
+            addr: GuestAddress,
+            mut f: F,
+        ) -> vm_memory::guest_memory::Result<usize>
+        where
+            F: FnMut(
+                usize,
+                usize,
+                MemoryRegionAddress,
+                &Self::R,
+            ) -> vm_memory::guest_memory::Result<usize>,
+        {
+            // We only have a single region, meaning a lot of the complications of the default
+            // try_access implementation for dealing with reads/writes across multiple
+            // regions do not apply.
+            let region_addr = self
+                .the_region
+                .to_region_addr(addr)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            self.the_region
+                .checked_offset(region_addr, count)
+                .ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
+            f(0, count, region_addr, &self.the_region)
+        }
+    }
+
+    pub struct ProofContext(pub Queue, pub ProofGuestMemory);
+
+    pub struct MmapRegionStub {
+        addr: *mut u8,
+        size: usize,
+        bitmap: (),
+        file_offset: Option<FileOffset>,
+        prot: i32,
+        flags: i32,
+        owned: bool,
+        hugetlbfs: Option<bool>,
+    }
+
+    /// We start the first guest memory region at an offset so that harnesses using
+    /// Queue::any() will be exposed to queue segments both before and after valid guest memory.
+    /// This conforms to MockSplitQueue::new(), which uses `0` as the starting address of the
+    /// virtqueue. Also, QUEUE_END is the size only if GUEST_MEMORY_BASE is `0`.
+    const GUEST_MEMORY_BASE: u64 = 0;
+
+    // We size our guest memory to fit a properly aligned queue, plus some wiggle bytes
+    // to make sure we not only test queues where all segments are consecutively aligned.
+    // We need to give at least 16 bytes of buffer space for the descriptor table to be
+    // able to change its address, as it is 16-byte aligned.
+    const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;
+
+    fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
+        // Ideally, we'd want to do
+        // let region = unsafe { MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
+        //     .with_raw_mmap_pointer(bytes.as_mut_ptr())
+        //     .build()
+        //     .unwrap() };
+        // However, .build() calls .build_raw(), which contains a call to libc::sysconf.
+        // Since kani 0.34.0, stubbing out foreign functions is supported, but due to the Rust
+        // standard library using a special version of the libc crate, it runs into some
+        // problems [1]. Even if we work around those problems, we run into performance
+        // problems [2]. Therefore, for now we stick to this ugly transmute hack (which only
+        // works because the kani compiler will never re-order fields, so we can treat
+        // repr(Rust) as repr(C)).
+        //
+        // [1]: https://github.com/model-checking/kani/issues/2673
+        // [2]: https://github.com/model-checking/kani/issues/2538
+        let region_stub = MmapRegionStub {
+            addr: memory,
+            size: GUEST_MEMORY_SIZE,
+            bitmap: Default::default(),
+            file_offset: None,
+            prot: 0,
+            flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
+            owned: false,
+            hugetlbfs: None,
+        };
+
+        let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };
+
+        let guest_region =
+            vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();
+
+        // Use a single memory region, just as firecracker does for guests of size < 2GB.
+        // For larger guests, firecracker uses two regions (due to the MMIO gap being
+        // at the top of the 32-bit address space).
+        ProofGuestMemory {
+            the_region: guest_region,
+        }
+    }
+
+    // can't implement kani::Arbitrary for the relevant types due to orphan rules
+    fn setup_kani_guest_memory() -> ProofGuestMemory {
+        // Non-deterministic Vec that will be used as the guest memory. We use `exact_vec`
+        // for now as `any_vec` will likely result in worse performance. We do not lose much
+        // from `exact_vec`, as our proofs do not make any assumptions about "filling" guest
+        // memory: since everything is placed at non-deterministic addresses with
+        // non-deterministic lengths, we still closely cover all scenarios that would be
+        // covered by smaller guest memory. We leak the memory allocated here, so that it
+        // doesn't get deallocated at the end of this function. We do not explicitly
+        // de-allocate, but since this is a kani proof, that does not matter.
+        guest_memory(
+            ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
+        )
+    }
+
+    const MAX_QUEUE_SIZE: u16 = 256;
+
+    // Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
+    // at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
+    // specification.
+    const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;
+
+    /// The descriptor table has 16 bytes per entry; the avail ring starts right after it.
+    const AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;
+
+    /// The used ring starts after the avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE)
+    /// and needs 2 bytes of padding.
+    const USED_RING_BASE_ADDRESS: u64 =
+        AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;
+
+    /// The address of the first byte after the queue. Since our queue starts at guest
+    /// physical address 0, this is also the size of the memory area occupied by the queue.
+    /// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE.
+    const QUEUE_END: u64 = USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
+
+    impl kani::Arbitrary for ProofContext {
+        fn any() -> Self {
+            let mem = setup_kani_guest_memory();
+
+            let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();
+
+            queue.ready = true;
+
+            queue.set_desc_table_address(
+                Some(QUEUE_BASE_ADDRESS as u32),
+                Some((QUEUE_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_avail_ring_address(
+                Some(AVAIL_RING_BASE_ADDRESS as u32),
+                Some((AVAIL_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_used_ring_address(
+                Some(USED_RING_BASE_ADDRESS as u32),
+                Some((USED_RING_BASE_ADDRESS >> 32) as u32),
+            );
+
+            queue.set_next_avail(kani::any());
+            queue.set_next_used(kani::any());
+            queue.set_event_idx(kani::any());
+            queue.num_added = Wrapping(kani::any());
+
+            kani::assume(queue.is_valid(&mem));
+
+            ProofContext(queue, mem)
+        }
+    }
+
+    #[kani::proof]
+    // There are no loops anywhere, but kani really enjoys getting stuck in
+    // std::ptr::drop_in_place. This is a compiler intrinsic that has a "dummy"
+    // implementation in stdlib that just recursively calls itself. Kani will
+    // generally unwind this recursion infinitely.
+    #[kani::unwind(0)]
+    fn verify_spec_2_7_7_2() {
+        // Section 2.7.7.2 deals with device-to-driver notification suppression.
+        // It describes a mechanism by which the driver can tell the device that it does not
+        // want notifications (IRQs) about the device finishing processing individual buffers
+        // (descriptor chain heads) from the avail ring until a specific number of
+        // descriptors has been processed. This is done by the driver defining a
+        // "used_event" index, which tells the device "please do not notify me until
+        // used.ring[used_event] has been written to by you".
+        let ProofContext(mut queue, mem) = kani::any();
+
+        let num_added_old = queue.num_added.0;
+        let needs_notification = queue.needs_notification(&mem);
+
+        // event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated
+        if !queue.event_idx_enabled {
+            // The specification here says:
+            // After the device writes a descriptor index into the used ring:
+            // - If flags is 1, the device SHOULD NOT send a notification.
+            // - If flags is 0, the device MUST send a notification.
+            // flags is the first field in the avail_ring_address, which we completely
+            // ignore. We always send a notification, and as there only is a SHOULD NOT,
+            // that is okay.
+            assert!(needs_notification.unwrap());
+        } else {
+            // next_used - 1 is where the previous descriptor was placed
+            if queue.used_event(&mem, Ordering::Relaxed).unwrap()
+                == queue.next_used - Wrapping(1)
+                && num_added_old > 0
+            {
+                // If the idx field in the used ring (which determined where that
+                // descriptor index was placed) was equal to used_event, the device MUST
+                // send a notification.
+                assert!(needs_notification.unwrap());
+
+                kani::cover!();
+            }
+
+            // The other case is handled by a "SHOULD NOT send a notification" in the spec.
+            // So we do not care.
+        }
+    }
+}
+
 impl<'a> QueueGuard<'a> for Queue {
     type G = &'a mut Self;
 }
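
As a sanity check on the layout constants above, the Section 2.7 arithmetic can be evaluated for MAX_QUEUE_SIZE = 256. The following standalone sketch (illustrative only, not part of the commit; the names are local to the example) reproduces the derivation:

// Worked example of the queue layout arithmetic behind the proof's constants,
// assuming GUEST_MEMORY_BASE == 0 and MAX_QUEUE_SIZE == 256.
fn main() {
    let desc_table_size = 256u64 * 16; // 16 bytes per descriptor -> 4096
    let avail_ring_size = 6 + 2 * 256u64; // flags + idx + ring + used_event -> 518
    let used_ring_size = 6 + 8 * 256u64; // flags + idx + ring + avail_event -> 2054

    let avail_base = desc_table_size; // 4096
    let used_base = avail_base + avail_ring_size + 2; // 2 bytes of padding -> 4616
    let queue_end = used_base + used_ring_size; // 6670

    assert_eq!((avail_base, used_base, queue_end), (4096, 4616, 6670));
    // GUEST_MEMORY_SIZE = QUEUE_END + 30 = 6700 bytes of guest memory.
}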

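For context, the EVENT_IDX rule from Section 2.7.7.2 is commonly expressed through the specification's C helper vring_need_event(). A direct Rust transcription (shown here for reference, not part of the commit) makes the window condition the proof exercises explicit:

// The device must notify iff used_event falls within the window of used-ring
// entries written since the last notification. All arithmetic is modulo 2^16,
// matching the C helper given in the VirtIO specification.
fn vring_need_event(event_idx: u16, new_idx: u16, old_idx: u16) -> bool {
    new_idx.wrapping_sub(event_idx).wrapping_sub(1) < new_idx.wrapping_sub(old_idx)
}

In the proof's MUST-notify branch, used_event equals next_used - 1 and num_added_old > 0, so the left-hand side evaluates to 0 while the right-hand side is at least 1; the predicate holds and a notification is required.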