Merge commit '8fe82aca24375960f4eca04c7a2a508a46d19ee8' into task

Ben Pig Chu 2020-07-02 19:25:33 +08:00
commit f3b9dfa002
49 changed files with 467 additions and 316 deletions

View File

@ -55,12 +55,28 @@ jobs:
with:
command: doc
build-aarch64:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2020-06-04
override: true
target: aarch64-unknown-linux-gnu
- uses: actions-rs/cargo@v1
with:
command: build
use-cross: true
args: -p zircon-loader --all-features --target aarch64-unknown-linux-gnu
test:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- name: Pull prebuilt images
run: git lfs pull -X prebuilt/zircon/bringup.zbi prebuilt/zircon/core-tests.zbi
run: git lfs pull -X prebuilt/zircon/x64/bringup.zbi prebuilt/zircon/x64/core-tests.zbi prebuilt/zircon/arm64
- name: Prepare rootfs
run: make rootfs
- name: Test
@ -99,7 +115,7 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Pull prebuilt images
run: git lfs pull -X prebuilt/zircon/bringup.zbi
run: git lfs pull -X prebuilt/zircon/x64/bringup.zbi prebuilt/zircon/arm64
- name: Checkout submodules
shell: bash
run: |

View File

@ -7,7 +7,6 @@ members = [
"linux-syscall",
"linux-loader",
"kernel-hal-unix",
"kernel-hal-bare",
"kernel-hal",
]

View File

@ -1,5 +1,5 @@
ROOTFS_TAR := alpine-minirootfs-3.11.3-x86_64.tar.gz
ROOTFS_URL := http://dl-cdn.alpinelinux.org/alpine/v3.11/releases/x86_64/$(ROOTFS_TAR)
ROOTFS_TAR := alpine-minirootfs-3.12.0-x86_64.tar.gz
ROOTFS_URL := http://dl-cdn.alpinelinux.org/alpine/v3.12/releases/x86_64/$(ROOTFS_TAR)
.PHONY: rootfs
@ -7,6 +7,6 @@ prebuilt/linux/$(ROOTFS_TAR):
wget $(ROOTFS_URL) -O $@
rootfs: prebuilt/linux/$(ROOTFS_TAR)
mkdir -p rootfs
rm -rf rootfs && mkdir -p rootfs
tar xf $< -C rootfs
cp prebuilt/linux/libc-libos.so rootfs/lib/ld-musl-x86_64.so.1

View File

@ -43,7 +43,7 @@ cargo run --release -p linux-loader /bin/busybox [args]
Run native Zircon program (shell):
```sh
cargo run --release -p zircon-loader prebuilt/zircon
cargo run --release -p zircon-loader prebuilt/zircon/x64
```
Run Zircon on bare-metal (zCore):

View File

@ -11,10 +11,10 @@ description = "Kernel HAL implementation for bare metal environment."
log = "0.4"
spin = "0.5"
git-version = "0.3"
executor = { git = "https://github.com/rcore-os/executor.git", rev = "e8ffcfb" }
trapframe = "0.3.0"
executor = { git = "https://github.com/rcore-os/executor.git", rev = "a2d02ee9" }
trapframe = "0.4.1"
kernel-hal = { path = "../kernel-hal" }
naive-timer = { git = "https://github.com/rcore-os/naive-timer.git", rev="d0cfe04" }
naive-timer = "0.1.0"
lazy_static = { version = "1.4", features = ["spin_no_std" ] }
[target.'cfg(target_arch = "x86_64")'.dependencies]

View File

@ -3,6 +3,7 @@ use {
acpi::{parse_rsdp, Acpi, AcpiHandler, PhysicalMapping},
alloc::{collections::VecDeque, vec::Vec},
apic::{LocalApic, XApic},
core::arch::x86_64::{__cpuid, _mm_clflush, _mm_mfence},
core::convert::TryFrom,
core::fmt::{Arguments, Write},
core::ptr::NonNull,
@ -71,7 +72,13 @@ impl PageTableImpl {
.unwrap()
.flush();
};
trace!("map: {:x?} -> {:x?}, flags={:?}", vaddr, paddr, flags);
trace!(
"map: {:x?} -> {:x?}, flags={:?} in {:#x?}",
vaddr,
paddr,
flags,
self.root_paddr
);
Ok(())
}
@ -80,10 +87,25 @@ impl PageTableImpl {
pub fn unmap(&mut self, vaddr: x86_64::VirtAddr) -> Result<(), ()> {
let mut pt = self.get();
let page = Page::<Size4KiB>::from_start_address(vaddr).unwrap();
if let Ok((_, flush)) = pt.unmap(page) {
flush.flush();
// This is a workaround for an issue in the x86_64 crate:
// a page without the PRESENT bit can be neither unmapped nor mapped again,
// so we set the PRESENT bit here before unmapping.
unsafe {
pt.update_flags(page, PTF::PRESENT | PTF::NO_EXECUTE).ok();
}
match pt.unmap(page) {
Ok((_, flush)) => {
flush.flush();
trace!("unmap: {:x?} in {:#x?}", vaddr, self.root_paddr);
}
Err(err) => {
debug!(
"unmap failed: {:x?} err={:x?} in {:#x?}",
vaddr, err, self.root_paddr
);
return Err(());
}
}
trace!("unmap: {:x?}", vaddr);
Ok(())
}
@ -423,3 +445,20 @@ pub fn outpd(port: u16, value: u32) {
pub fn inpd(port: u16) -> u32 {
unsafe { Port::new(port).read() }
}
/// Flush the physical frame.
#[export_name = "hal_frame_flush"]
pub fn frame_flush(target: PhysAddr) {
unsafe {
for paddr in (target..target + PAGE_SIZE).step_by(cacheline_size()) {
_mm_clflush(phys_to_virt(paddr) as *const u8);
}
_mm_mfence();
}
}
/// Get cache line size in bytes.
fn cacheline_size() -> usize {
let leaf = unsafe { __cpuid(1).ebx };
(((leaf >> 8) & 0xff) << 3) as usize
}
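For reference, a minimal hosted sketch (assuming an x86_64 machine with `std`; not part of the HAL itself) of the same CPUID-based cache-line-size derivation that `frame_flush` relies on:

```rust
// Standalone check: CPUID leaf 1 reports the CLFLUSH line size in EBX bits
// 15:8, in units of 8 bytes, so shifting that byte left by 3 gives bytes.
#[cfg(target_arch = "x86_64")]
fn main() {
    let ebx = unsafe { std::arch::x86_64::__cpuid(1).ebx };
    let line_size = (((ebx >> 8) & 0xff) << 3) as usize;
    println!("cache line size: {} bytes", line_size); // typically 64
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```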

View File

@ -29,7 +29,6 @@ extern crate lazy_static;
use alloc::boxed::Box;
use core::time::Duration;
use core::{
arch::x86_64::{__cpuid, _mm_clflush, _mm_mfence},
future::Future,
pin::Pin,
task::{Context, Poll},
@ -177,23 +176,6 @@ pub fn frame_zero_in_range(target: PhysAddr, start: usize, end: usize) {
}
}
/// Flush the physical frame.
#[export_name = "hal_frame_flush"]
pub fn frame_flush(target: PhysAddr) {
unsafe {
for paddr in (target..target + PAGE_SIZE).step_by(cacheline_size()) {
_mm_clflush(phys_to_virt(paddr) as *const u8);
}
_mm_mfence();
}
}
/// Get cache line size in bytes.
fn cacheline_size() -> usize {
let leaf = unsafe { __cpuid(1).ebx };
(((leaf >> 8) & 0xff) << 3) as usize
}
lazy_static! {
pub static ref NAIVE_TIMER: Mutex<Timer> = Mutex::new(Timer::default());
}

View File

@ -16,3 +16,4 @@ lazy_static = "1.4"
kernel-hal = { path = "../kernel-hal" }
async-std = "1.5"
git-version = "0.3"
trapframe = "0.4.1"

View File

@ -22,17 +22,15 @@ use {
tempfile::tempdir,
};
pub use self::trap::syscall_entry;
pub use kernel_hal::defs::*;
use kernel_hal::vdso::*;
pub use kernel_hal::*;
use std::io::Read;
pub use trapframe::syscall_fn_entry as syscall_entry;
#[cfg(target_os = "macos")]
include!("macos.rs");
mod trap;
#[repr(C)]
pub struct Thread {
thread: usize,
@ -67,9 +65,7 @@ task_local! {
#[export_name = "hal_context_run"]
unsafe fn context_run(context: &mut UserContext) {
trap::run_user(&mut context.general);
// cause: syscall
context.trap_num = 0x100;
context.run_fncall();
}
/// Page Table

View File

@ -1,195 +0,0 @@
use super::GeneralRegs;
// User: (musl)
// - fs:0 (pthread.self) = user fsbase
// - fs:48 (pthread.canary2) = kernel fsbase
//
// Kernel: (glibc)
// - fs:0 (pthread.self) = kernel fsbase
// - fs:64 (pthread.???) = kernel stack
// - fs:72 (pthread.???) = init user fsbase
//
#[cfg(target_os = "linux")]
global_asm!(
r#"
.macro SWITCH_TO_KERNEL_STACK
mov rsp, fs:48 # rsp = kernel fsbase
mov rsp, [rsp + 64] # rsp = kernel stack
.endm
.macro SAVE_KERNEL_STACK
mov fs:64, rsp
.endm
.macro PUSH_USER_FSBASE
push fs:0
.endm
.macro SWITCH_TO_KERNEL_FSBASE
mov eax, 158 # SYS_arch_prctl
mov edi, 0x1002 # SET_FS
mov rsi, fs:48 # rsi = kernel fsbase
syscall
.endm
.macro POP_USER_FSBASE
mov rsi, [rsp + 18 * 8] # rsi = user fsbase
mov rdx, fs:0 # rdx = kernel fsbase
test rsi, rsi
jnz 1f # if not 0, goto set
0: lea rsi, [rdx + 72] # rsi = init user fsbase
mov [rsi], rsi # user_fs:0 = user fsbase
1: mov eax, 158 # SYS_arch_prctl
mov edi, 0x1002 # SET_FS
syscall # set fsbase
mov fs:48, rdx # user_fs:48 = kernel fsbase
.endm
.global unix_syscall_entry
.global run_user
"#
);
// User: (musl)
// - gs:0 (pthread.self) = user gsbase
// - gs:48 (pthread.canary2) = kernel gsbase
//
// Kernel: (darwin)
// - gs:0 (pthread.tsd[self]) = kernel gsbase - 224
// - gs:48 (pthread.tsd[6]) = kernel stack
// - gs:240 (pthread.tsd[30]) = init user fsbase
//
// Ref:
// - Set gsbase:
// - https://gist.github.com/aras-p/5389747
// - Get gsbase:
// - https://github.com/DynamoRIO/dynamorio/issues/1568#issuecomment-239819506
// - https://github.com/apple/darwin-libpthread/blob/03c4628c8940cca6fd6a82957f683af804f62e7f/src/internal.h#L241
#[cfg(target_os = "macos")]
global_asm!(
r#"
.macro SWITCH_TO_KERNEL_STACK
mov rsp, gs:48 # rsp = kernel gsbase
mov rsp, [rsp + 48] # rsp = kernel stack
.endm
.macro SAVE_KERNEL_STACK
mov gs:48, rsp
.endm
.macro PUSH_USER_FSBASE
push gs:0
.endm
.macro SWITCH_TO_KERNEL_FSBASE
mov rdi, gs:48 # rdi = kernel gsbase
mov eax, 0x3000003
syscall # set gsbase
.endm
.macro POP_USER_FSBASE
mov rdi, [rsp + 18 * 8] # rdi = user gsbase
mov rsi, gs:0
add rsi, 224 # rsi = kernel gsbase
test rdi, rdi
jnz 1f # if not 0, goto set
0: lea rdi, [rsi + 30*8] # rdi = init user gsbase
# = pthread.tsd[30] (kernel gsbase + 30 * 8)
mov [rdi], rdi # user_gs:0 = user gsbase
1: mov eax, 0x3000003
syscall # set gsbase
mov gs:48, rsi # user_gs:48 = kernel gsbase
.endm
.global _unix_syscall_entry
.global _run_user
.set _unix_syscall_entry, unix_syscall_entry
.set _run_user, run_user
"#
);
global_asm!(
r#"
.intel_syntax noprefix
unix_syscall_entry:
# save rsp
lea r11, [rsp + 8] # save rsp to r11 (clobber)
SWITCH_TO_KERNEL_STACK
pop rsp
lea rsp, [rsp + 20*8] # rsp = top of trap frame
# push trap frame (struct GeneralRegs)
push 0 # ignore gs_base
PUSH_USER_FSBASE
pushfq # push rflags
push [r11 - 8] # push rip
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push r11 # push rsp
push rbp
push rdi
push rsi
push rdx
push rcx
push rbx
push rax
# restore callee-saved registers
SWITCH_TO_KERNEL_STACK
pop rbx
pop rbx
pop rbp
pop r12
pop r13
pop r14
pop r15
SWITCH_TO_KERNEL_FSBASE
# go back to Rust
ret
# extern "C" fn run_user(&mut GeneralRegs)
run_user:
# save callee-saved registers
push r15
push r14
push r13
push r12
push rbp
push rbx
push rdi
SAVE_KERNEL_STACK
mov rsp, rdi
POP_USER_FSBASE
# pop trap frame (struct GeneralRegs)
pop rax
pop rbx
pop rcx
pop rdx
pop rsi
pop rdi
pop rbp
pop r8 # skip rsp
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
pop r11 # r11 = rip. FIXME: don't overwrite r11!
popfq # pop rflags
mov rsp, [rsp - 8*11] # restore rsp
jmp r11 # restore rip
"#
);
extern "C" {
#[link_name = "unix_syscall_entry"]
pub fn syscall_entry();
pub fn run_user(regs: &mut GeneralRegs);
}

View File

@ -9,6 +9,6 @@ description = "Kernel HAL interface definitions."
[dependencies]
bitflags = "1.2"
trapframe = "0.3.0"
trapframe = "0.4.1"
numeric-enum-macro = "0.2"
acpi = "1.0.0"

View File

@ -392,3 +392,21 @@ pub fn inpd(_port: u16) -> u32 {
pub fn apic_local_id() -> u8 {
unimplemented!()
}
/// Fill the buffer with random bytes.
#[cfg(target_arch = "x86_64")]
pub fn fill_random(buf: &mut [u8]) {
// TODO: optimize
for x in buf.iter_mut() {
let mut r = 0;
unsafe {
core::arch::x86_64::_rdrand16_step(&mut r);
}
*x = r as _;
}
}
#[cfg(target_arch = "aarch64")]
pub fn fill_random(_buf: &mut [u8]) {
// TODO
}
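A hosted sketch of the RDRAND path above (illustrative only; unlike the HAL code it also checks the `_rdrand16_step` success flag and retries on failure):

```rust
// Hosted sketch: _rdrand16_step returns 1 when a random value was written,
// 0 if the hardware RNG is temporarily exhausted, so retry on 0.
#[cfg(target_arch = "x86_64")]
fn main() {
    let mut buf = [0u8; 8];
    for x in buf.iter_mut() {
        let mut r: u16 = 0;
        unsafe {
            while std::arch::x86_64::_rdrand16_step(&mut r) == 0 {}
        }
        *x = r as u8;
    }
    println!("random bytes: {:?}", buf);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```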

View File

@ -84,6 +84,9 @@ impl<T, P: Policy> UserPtr<T, P> {
if self.ptr.is_null() {
return Err(Error::InvalidPointer);
}
if (self.ptr as usize) % core::mem::align_of::<T>() != 0 {
return Err(Error::InvalidPointer);
}
Ok(())
}
}
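The added check rejects user pointers that are not aligned for `T` before any access. A standalone sketch of the same test (the helper name is illustrative, not the crate's API):

```rust
// Illustrative helper mirroring the check added above: a raw pointer may only
// be read as T if its address is a multiple of T's alignment.
fn is_aligned_for<T>(ptr: *const T) -> bool {
    (ptr as usize) % core::mem::align_of::<T>() == 0
}

fn main() {
    let v: u64 = 0;
    assert!(is_aligned_for(&v as *const u64));
    // Address 1 can never hold a u64, whose alignment is at least 4.
    assert!(!is_aligned_for(1usize as *const u64));
}
```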

View File

@ -60,17 +60,20 @@ fn spawn(thread: Arc<Thread>) {
}
}
0xe => {
let vaddr = kernel_hal::fetch_fault_vaddr();
let flags = if cx.error_code & 0x2 == 0 {
MMUFlags::READ
} else {
MMUFlags::WRITE
};
panic!(
"Page Fault from user mode {:#x} {:#x?}\n{:#x?}",
kernel_hal::fetch_fault_vaddr(),
flags,
cx
);
error!("page fualt from user mode {:#x} {:#x?}", vaddr, flags);
let vmar = thread.proc().vmar();
match vmar.handle_page_fault(vaddr, flags) {
Ok(()) => {}
Err(_) => {
panic!("Page Fault from user mode {:#x?}", cx);
}
}
}
_ => panic!("not supported interrupt from user mode. {:#x?}", cx),
}

View File

@ -14,7 +14,7 @@ xmas-elf = "0.7"
hashbrown = "0.7"
zircon-object = { path = "../zircon-object", features = ["elf"] }
kernel-hal = { path = "../kernel-hal" }
downcast-rs = { git = "https://github.com/rcore-os/downcast-rs" }
downcast-rs = { git = "https://github.com/rcore-os/downcast-rs", rev = "a632ce1", default-features = false }
rcore-fs = { git = "https://github.com/rcore-os/rcore-fs", rev = "e17b27b" }
rcore-fs-sfs = { git = "https://github.com/rcore-os/rcore-fs", rev = "e17b27b" }
rcore-fs-ramfs = { git = "https://github.com/rcore-os/rcore-fs", rev = "e17b27b" }

View File

@ -23,7 +23,7 @@ use zircon_object::{
pub trait ProcessExt {
fn create_linux(job: &Arc<Job>, rootfs: Arc<dyn FileSystem>) -> ZxResult<Arc<Self>>;
fn linux(&self) -> &LinuxProcess;
fn vfork_from(parent: &Arc<Self>) -> ZxResult<Arc<Self>>;
fn fork_from(parent: &Arc<Self>, vfork: bool) -> ZxResult<Arc<Self>>;
}
impl ProcessExt for Process {
@ -36,10 +36,10 @@ impl ProcessExt for Process {
self.ext().downcast_ref::<LinuxProcess>().unwrap()
}
/// [Vfork] the process.
/// [Fork] the process.
///
/// [Vfork]: http://man7.org/linux/man-pages/man2/vfork.2.html
fn vfork_from(parent: &Arc<Self>) -> ZxResult<Arc<Self>> {
/// [Fork]: http://man7.org/linux/man-pages/man2/fork.2.html
fn fork_from(parent: &Arc<Self>, vfork: bool) -> ZxResult<Arc<Self>> {
let linux_parent = parent.linux();
let mut linux_parent_inner = linux_parent.inner.lock();
let new_linux_proc = LinuxProcess {
@ -56,6 +56,9 @@ impl ProcessExt for Process {
linux_parent_inner
.children
.insert(new_proc.id(), new_proc.clone());
if !vfork {
new_proc.vmar().fork_from(&parent.vmar())?;
}
// notify parent on terminated
let parent = parent.clone();

View File

@ -218,7 +218,7 @@ impl Syscall<'_> {
// Sys::SELECT => self.sys_select(a0, a1.into(), a2.into(), a3.into(), a4.into()),
Sys::DUP2 => self.sys_dup2(a0.into(), a1.into()),
// Sys::ALARM => self.unimplemented("alarm", Ok(0)),
Sys::FORK => self.sys_fork().await,
Sys::FORK => self.sys_fork(),
Sys::VFORK => self.sys_vfork().await,
Sys::RENAME => self.sys_rename(a0.into(), a1.into()),
Sys::MKDIR => self.sys_mkdir(a0.into(), a1),

View File

@ -8,14 +8,19 @@ use linux_object::thread::ThreadExt;
impl Syscall<'_> {
/// Fork the current process. Return the child's PID.
pub async fn sys_fork(&self) -> SysResult {
warn!("fork: not supported! go to vfork");
self.sys_vfork().await
pub fn sys_fork(&self) -> SysResult {
info!("fork:");
let new_proc = Process::fork_from(self.zircon_process(), false)?;
let new_thread = Thread::create_linux(&new_proc)?;
new_thread.start_with_regs(GeneralRegs::new_fork(self.regs), self.spawn_fn)?;
info!("fork: {} -> {}", self.zircon_process().id(), new_proc.id());
Ok(new_proc.id() as usize)
}
pub async fn sys_vfork(&self) -> SysResult {
info!("vfork:");
let new_proc = Process::vfork_from(self.zircon_process())?;
let new_proc = Process::fork_from(self.zircon_process(), true)?;
let new_thread = Thread::create_linux(&new_proc)?;
new_thread.start_with_regs(GeneralRegs::new_fork(self.regs), self.spawn_fn)?;

BIN
prebuilt/zircon/arm64/bringup.zbi (Stored with Git LFS) Normal file

Binary file not shown.

BIN
prebuilt/zircon/arm64/core-tests.zbi (Stored with Git LFS) Normal file

Binary file not shown.

BIN
prebuilt/zircon/arm64/libzircon-libos.so (Stored with Git LFS) Normal file

Binary file not shown.

BIN
prebuilt/zircon/arm64/libzircon.so (Stored with Git LFS) Normal file

Binary file not shown.

BIN
prebuilt/zircon/arm64/userboot-libos.so (Stored with Git LFS) Normal file

Binary file not shown.

BIN
prebuilt/zircon/arm64/userboot.so (Stored with Git LFS) Normal file

Binary file not shown.

View File

@ -83,6 +83,10 @@ ChannelWriteEtcTest.HandleWithoutDuplicateRightsMoveOpSucceedsDuplicateOpFails
ClockTest.ClockMonotonic
ClockTest.DeadlineAfter
ProcessDebugUtilsTest.XorShiftIsOk
ProcessDebugTest.ReadMemoryAtOffsetIsOk
ProcessDebugTest.WriteMemoryAtOffsetIsOk
ProcessDebugTest.ReadMemoryAtInvalidOffsetReturnsErrorNoMemory
ProcessDebugTest.WriteAtInvalidOffsetReturnsErrorNoMemory
ExecutableTlsTest.BasicInitalizersInThread
ExecutableTlsTest.BasicInitalizersInMain
ExecutableTlsTest.ArrayInitializerInThread
@ -340,6 +344,13 @@ Threads.SuspendChannelCall
Threads.SuspendPortCall
Threads.WritingArmFlagsRegister
TicksTest.ElapsedTimeUsingTicks
Vmar.DestroyTest
Vmar.BasicAllocateTest
Vmar.MapInCompactTest
Vmar.AllocateOobTest
Vmar.ObjectInfoTest
Vmar.UnmapBaseNotMappedTest
Vmar.AllowFaultsTest
VmoCloneTestCase.SizeAlign
VmoCloneTestCase.NameProperty
VmoCloneTestCase.Decommit
@ -374,6 +385,7 @@ VmoClone2TestCase.ManyChildren
VmoClone2TestCase.ManyChildrenRevClose
VmoClone2TestCase.ManyCloneMapping
VmoClone2TestCase.ManyCloneOffset
VmoClone2TestCase.ForbidContiguousVmo
VmoClone2TestCase.PinBeforeCreateFailure
VmoClone2TestCase.NoPhysical
VmoClone2TestCase.Uncached

View File

@ -1,7 +1,6 @@
*
-Bti.NoDelayedUnpin
-Bti.DecommitRace
-ProcessDebugTest.*
-ProcessDebugVDSO.*
-HandleCloseTest.ManyDuplicateTest*
-JobTest.*
@ -22,7 +21,12 @@
-Threads.WriteReadDebugRegisterState
-Threads.DebugRegistersValidation
-Threads.NoncanonicalRipAddressIRETQ
-Vmar.*
-Vmar.ProtectOverDemandPagedTest
-Vmar.ProtectLargeUncomittedTest
-Vmar.UnmapLargeUncommittedTest
-Vmar.NestedRegionPermsTest
-Vmar.MapSpecificOverwriteTest
-Vmar.MapOverDestroyedTest
-VmoTestCase.ReadOnlyMap
-VmoTestCase.NoPermMap
-VmoTestCase.NoPermProtect
@ -30,6 +34,7 @@
-VmoTestCase.CacheOp
-VmoTestCase.ResizeHazard
-VmoTestCase.Cache*
-VmoClone2TestCase.PinClonePages
-VersionTest.*
-BadAccessTest.*
-DefaultExceptionHandlerTest.*

View File

@ -185,3 +185,29 @@ index 7baa69755e0..18cdb981f0c 100644
+zcore_syscall_entry:
+ .quad 0xdeadbeaf
+.popsection
diff --git a/zircon/system/ulib/zircon/zircon-syscall-arm64.S b/zircon/system/ulib/zircon/zircon-syscall-arm64.S
index 50598676cb2..fc69f203ed0 100644
--- a/zircon/system/ulib/zircon/zircon-syscall-arm64.S
+++ b/zircon/system/ulib/zircon/zircon-syscall-arm64.S
@@ -6,7 +6,11 @@
.macro zircon_syscall num, name, caller
mov x16, #\num
- svc #0x0
+ push_regpair x29, x30
+ adr x29, zcore_syscall_entry
+ ldr x29, [x29]
+ blr x29
+ pop_regpair x29, x30
// This symbol at the return address identifies this as an approved call site.
.hidden CODE_SYSRET_\name\()_VIA_\caller
CODE_SYSRET_\name\()_VIA_\caller\():
@@ -46,3 +50,8 @@ CODE_SYSRET_\name\()_VIA_\caller\():
.cfi_same_value \reg0
.cfi_same_value \reg1
.endm
+
+.pushsection .rodata
+zcore_syscall_entry:
+ .quad 0xdeadbeaf
+.popsection

View File

@ -23,8 +23,8 @@ rboot = { path = "../rboot", default-features = false }
kernel-hal-bare = { path = "../kernel-hal-bare" }
lazy_static = { version = "1.4", features = ["spin_no_std" ] }
bitmap-allocator = { git = "https://github.com/rcore-os/bitmap-allocator", rev = "03bd9909" }
trapframe = "0.3.0"
executor = { git = "https://github.com/rcore-os/executor.git", rev = "e8ffcfb" }
trapframe = "0.4.1"
executor = { git = "https://github.com/rcore-os/executor.git", rev = "a2d02ee9" }
zircon-object = { path = "../zircon-object" }
zircon-loader = { path = "../zircon-loader", default-features = false, optional = true }
linux-loader = { path = "../linux-loader", default-features = false, optional = true }

View File

@ -74,7 +74,7 @@ justrun: $(QEMU_DISK)
$(qemu) $(qemu_opts)
build-test: build
cp ../prebuilt/zircon/core-tests.zbi $(ESP)/EFI/zCore/fuchsia.zbi
cp ../prebuilt/zircon/x64/core-tests.zbi $(ESP)/EFI/zCore/fuchsia.zbi
echo 'cmdline=LOG=warn:userboot=test/core/standalone-test:userboot.shutdown:core-tests=$(test_filter)' >> $(ESP)/EFI/Boot/rboot.conf
build: $(kernel_img)
@ -86,7 +86,7 @@ $(kernel_img): kernel bootloader
ifeq ($(linux), 1)
cp x86_64.img $(ESP)/EFI/zCore/fuchsia.zbi
else
cp ../prebuilt/zircon/$(zbi_file).zbi $(ESP)/EFI/zCore/fuchsia.zbi
cp ../prebuilt/zircon/x64/$(zbi_file).zbi $(ESP)/EFI/zCore/fuchsia.zbi
endif
cp $(kernel) $(ESP)/EFI/zCore/zcore.elf

View File

@ -47,12 +47,12 @@ pub extern "C" fn _start(boot_info: &BootInfo) -> ! {
fn main(ramfs_data: &[u8], cmdline: &str) {
use zircon_loader::{run_userboot, Images};
let images = Images::<&[u8]> {
userboot: include_bytes!("../../prebuilt/zircon/userboot.so"),
vdso: include_bytes!("../../prebuilt/zircon/libzircon.so"),
userboot: include_bytes!("../../prebuilt/zircon/x64/userboot.so"),
vdso: include_bytes!("../../prebuilt/zircon/x64/libzircon.so"),
zbi: ramfs_data,
};
let _proc = run_userboot(&images, cmdline);
executor::run();
run();
}
#[cfg(feature = "linux")]
@ -67,7 +67,15 @@ fn main(ramfs_data: &'static mut [u8], _cmdline: &str) {
let device = Arc::new(MemBuf::new(ramfs_data));
let rootfs = rcore_fs_sfs::SimpleFileSystem::open(device).unwrap();
let _proc = linux_loader::run(args, envs, rootfs);
executor::run();
run();
}
fn run() -> ! {
loop {
executor::run_until_idle();
x86_64::instructions::interrupts::enable_interrupts_and_hlt();
x86_64::instructions::interrupts::disable();
}
}
fn get_log_level(cmdline: &str) -> &str {

View File

@ -108,8 +108,11 @@ pub fn run_userboot(images: &Images<impl AsRef<[u8]>>, cmdline: &str) -> Arc<Pro
let stack_bottom = vmar
.map(None, stack_vmo.clone(), 0, stack_vmo.len(), flags)
.unwrap();
#[cfg(target_arch = "x86_64")]
// WARN: align stack to 16B, then emulate a 'call' (push rip)
let sp = stack_bottom + stack_vmo.len() - 8;
#[cfg(target_arch = "aarch64")]
let sp = stack_bottom + stack_vmo.len();
// channel
let (user_channel, kernel_channel) = Channel::create();
@ -187,7 +190,16 @@ fn spawn(thread: Arc<Thread>) {
thread.time_add(time);
trace!("back from user: {:#x?}", cx);
EXCEPTIONS_USER.add(1);
#[cfg(target_arch = "aarch64")]
let exit;
#[cfg(target_arch = "aarch64")]
match cx.trap_num {
0 => exit = handle_syscall(&thread, &mut cx.general).await,
_ => unimplemented!(),
}
#[cfg(target_arch = "x86_64")]
let mut exit = false;
#[cfg(target_arch = "x86_64")]
match cx.trap_num {
0x100 => exit = handle_syscall(&thread, &mut cx.general).await,
0x20..=0x3f => {
@ -199,11 +211,15 @@ fn spawn(thread: Arc<Thread>) {
}
0xe => {
EXCEPTIONS_PGFAULT.add(1);
#[cfg(target_arch = "x86_64")]
let flags = if cx.error_code & 0x2 == 0 {
MMUFlags::READ
} else {
MMUFlags::WRITE
};
// FIXME:
#[cfg(target_arch = "aarch64")]
let flags = MMUFlags::WRITE;
error!(
"page fualt from user mode {:#x} {:#x?}",
kernel_hal::fetch_fault_vaddr(),
@ -222,11 +238,21 @@ fn spawn(thread: Arc<Thread>) {
thread.name(),
e
);
panic!("Page Fault from user mode {:#x?}", cx);
error!("Page Fault from user mode {:#x?}", cx);
//TODO: implement exception channel
if !thread.handle_exception().await {
exit = true;
}
}
}
}
_ => panic!("not supported interrupt from user mode. {:#x?}", cx),
_ => {
error!("not supported interrupt from user mode. {:#x?}", cx);
//TODO: implement exception channel
if !thread.handle_exception().await {
exit = true;
}
}
}
thread.end_running(cx);
if exit {
@ -238,9 +264,13 @@ fn spawn(thread: Arc<Thread>) {
}
async fn handle_syscall(thread: &Arc<Thread>, regs: &mut GeneralRegs) -> bool {
#[cfg(target_arch = "x86_64")]
let num = regs.rax as u32;
#[cfg(target_arch = "aarch64")]
let num = regs.x16 as u32;
// LibOS: Function call ABI
#[cfg(feature = "std")]
#[cfg(target_arch = "x86_64")]
let args = unsafe {
let a6 = (regs.rsp as *const usize).read();
let a7 = (regs.rsp as *const usize).add(1).read();
@ -250,15 +280,29 @@ async fn handle_syscall(thread: &Arc<Thread>, regs: &mut GeneralRegs) -> bool {
};
// RealOS: Zircon syscall ABI
#[cfg(not(feature = "std"))]
#[cfg(target_arch = "x86_64")]
let args = [
regs.rdi, regs.rsi, regs.rdx, regs.r10, regs.r8, regs.r9, regs.r12, regs.r13,
];
// ARM64
#[cfg(target_arch = "aarch64")]
let args = [
regs.x0, regs.x1, regs.x2, regs.x3, regs.x4, regs.x5, regs.x6, regs.x7,
];
let mut syscall = Syscall {
regs,
thread: thread.clone(),
spawn_fn: spawn,
exit: false,
};
syscall.regs.rax = syscall.syscall(num, args).await as usize;
let ret = syscall.syscall(num, args).await as usize;
#[cfg(target_arch = "x86_64")]
{
syscall.regs.rax = ret;
}
#[cfg(target_arch = "aarch64")]
{
syscall.regs.x0 = ret;
}
syscall.exit
}

View File

@ -70,7 +70,10 @@ mod tests {
kernel_hal_unix::init();
let opt = Opt {
prebuilt_path: PathBuf::from("../prebuilt/zircon"),
#[cfg(target_arch = "x86_64")]
prebuilt_path: PathBuf::from("../prebuilt/zircon/x64"),
#[cfg(target_arch = "aarch64")]
prebuilt_path: PathBuf::from("../prebuilt/zircon/arm64"),
cmdline: String::from(""),
};
let images = open_images(&opt.prebuilt_path).expect("failed to read file");

View File

@ -15,14 +15,13 @@ bitflags = "1.2"
spin = "0.5"
log = "0.4"
hashbrown = "0.7"
downcast-rs = { git = "https://github.com/rcore-os/downcast-rs" }
downcast-rs = { git = "https://github.com/rcore-os/downcast-rs", rev = "a632ce1", default-features = false }
kernel-hal = { path = "../kernel-hal" }
numeric-enum-macro = "0.2"
futures = { version = "0.3", default-features = false, features = ["alloc", "async-await"] }
xmas-elf = { version = "0.7", optional = true }
region-alloc = { git = "https://github.com/rzswh/region-allocator", rev = "122c7a71" }
lazy_static = { version = "1.4", features = ["spin_no_std" ] }
apic = { git = "https://github.com/rcore-os/apic-rs", rev = "fb86bd7" }
acpi = "1.0.0"
[dev-dependencies]

View File

@ -182,11 +182,21 @@ impl Thread {
{
let mut inner = self.inner.lock();
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
context.general.rip = entry;
context.general.rsp = stack;
context.general.rdi = arg1;
context.general.rsi = arg2;
context.general.rflags |= 0x3202;
#[cfg(target_arch = "x86_64")]
{
context.general.rip = entry;
context.general.rsp = stack;
context.general.rdi = arg1;
context.general.rsi = arg2;
context.general.rflags |= 0x3202;
}
#[cfg(target_arch = "aarch64")]
{
context.elr = entry;
context.sp = stack;
context.general.x0 = arg1;
context.general.x1 = arg2;
}
inner.state = ThreadState::Running;
self.base.signal_set(Signal::THREAD_RUNNING);
}
@ -204,7 +214,10 @@ impl Thread {
let mut inner = self.inner.lock();
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
context.general = regs;
context.general.rflags |= 0x3202;
#[cfg(target_arch = "x86_64")]
{
context.general.rflags |= 0x3202;
}
inner.state = ThreadState::Running;
self.base.signal_set(Signal::THREAD_RUNNING);
}
@ -341,6 +354,19 @@ impl Thread {
pub fn get_time(&self) -> u64 {
self.inner.lock().time as u64
}
pub fn handle_exception(&self) -> impl Future<Output = bool> {
//TODO: implement exception channel
self.exit();
struct ExceptionFuture;
impl Future for ExceptionFuture {
type Output = bool;
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(false)
}
}
ExceptionFuture
}
}
impl Task for Thread {

View File

@ -11,9 +11,7 @@ numeric_enum! {
Vector = 2,
Debug = 4,
SingleStep = 5,
#[cfg(target_arch = "x86_64")]
FS = 6,
#[cfg(target_arch = "x86_64")]
GS = 7,
}
}

View File

@ -190,7 +190,7 @@ impl VmAddressRegion {
// align = 1K? 2K? 4K? 8K? ...
if !self.test_map(inner, offset, len, PAGE_SIZE) {
if overwrite {
self.unmap(addr, len)?;
self.unmap_inner(addr, len, inner)?;
} else {
return Err(ZxError::NO_MEMORY);
}
@ -216,6 +216,14 @@ impl VmAddressRegion {
}
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
self.unmap_inner(addr, len, inner)
}
/// Must hold self.inner.lock() before calling.
fn unmap_inner(&self, addr: VirtAddr, len: usize, inner: &mut VmarInner) -> ZxResult {
if !page_aligned(addr) || !page_aligned(len) || len == 0 {
return Err(ZxError::INVALID_ARGS);
}
let begin = addr;
let end = addr + len;
@ -292,9 +300,12 @@ impl VmAddressRegion {
fn destroy_internal(&self) -> ZxResult {
let mut guard = self.inner.lock();
let inner = guard.as_mut().ok_or(ZxError::BAD_STATE)?;
for vmar in inner.children.iter() {
for vmar in inner.children.drain(..) {
vmar.destroy_internal()?;
}
for mapping in inner.mappings.drain(..) {
drop(mapping);
}
*guard = None;
Ok(())
}
@ -475,12 +486,56 @@ impl VmAddressRegion {
}
}
/// Clone the entire address space and VMOs from source VMAR. (For Linux fork)
pub fn fork_from(&self, src: &Arc<Self>) -> ZxResult {
let mut guard = self.inner.lock();
let inner = guard.as_mut().unwrap();
inner.fork_from(src, &self.page_table)
}
pub fn get_task_stats(&self) -> TaskStatsInfo {
let mut task_stats = TaskStatsInfo::default();
self.for_each_mapping(&mut |map| map.fill_in_task_status(&mut task_stats));
task_stats
}
/// Read from address space.
///
/// Return the actual number of bytes read.
pub fn read_memory(&self, vaddr: usize, buf: &mut [u8]) -> ZxResult<usize> {
// TODO: support multiple VMOs
let map = self.find_mapping(vaddr).ok_or(ZxError::NO_MEMORY)?;
let map_inner = map.inner.lock();
let vmo_offset = vaddr - map_inner.addr + map_inner.vmo_offset;
map.vmo.read(vmo_offset, buf)?;
Ok(buf.len())
}
/// Write to address space.
///
/// Return the actual number of bytes written.
pub fn write_memory(&self, vaddr: usize, buf: &[u8]) -> ZxResult<usize> {
// TODO: support multiple VMOs
let map = self.find_mapping(vaddr).ok_or(ZxError::NO_MEMORY)?;
let map_inner = map.inner.lock();
let vmo_offset = vaddr - map_inner.addr + map_inner.vmo_offset;
map.vmo.write(vmo_offset, buf)?;
Ok(buf.len())
}
/// Find the mapping that contains `vaddr`.
fn find_mapping(&self, vaddr: usize) -> Option<Arc<VmMapping>> {
let guard = self.inner.lock();
let inner = guard.as_ref().unwrap();
if let Some(mapping) = inner.mappings.iter().find(|map| map.contains(vaddr)) {
return Some(mapping.clone());
}
if let Some(child) = inner.children.iter().find(|ch| ch.contains(vaddr)) {
return child.find_mapping(vaddr);
}
None
}
#[cfg(test)]
fn count(&self) -> usize {
let mut guard = self.inner.lock();
@ -498,6 +553,27 @@ impl VmAddressRegion {
}
}
impl VmarInner {
/// Clone the entire address space and VMOs from source VMAR. (For Linux fork)
fn fork_from(
&mut self,
src: &Arc<VmAddressRegion>,
page_table: &Arc<Mutex<PageTable>>,
) -> ZxResult {
let src_guard = src.inner.lock();
let src_inner = src_guard.as_ref().unwrap();
for child in src_inner.children.iter() {
self.fork_from(child, page_table)?;
}
for map in src_inner.mappings.iter() {
let mapping = map.clone_map(page_table.clone())?;
mapping.map()?;
self.mappings.push(mapping);
}
Ok(())
}
}
#[repr(C)]
#[derive(Debug)]
pub struct VmarInfo {
@ -513,6 +589,7 @@ pub struct VmMapping {
inner: Mutex<VmMappingInner>,
}
#[derive(Debug, Clone)]
struct VmMappingInner {
addr: VirtAddr,
size: usize,
@ -585,8 +662,8 @@ impl VmMapping {
}
fn unmap(&self) {
let mut page_table = self.page_table.lock();
let inner = self.inner.lock();
let mut page_table = self.page_table.lock();
self.vmo
.unmap_from(&mut page_table, inner.addr, inner.vmo_offset, inner.size);
}
@ -687,8 +764,8 @@ impl VmMapping {
}
fn protect(&self, flags: MMUFlags) {
let mut pg_table = self.page_table.lock();
let inner = self.inner.lock();
let mut pg_table = self.page_table.lock();
for i in 0..inner.size {
pg_table.protect(inner.addr + i * PAGE_SIZE, flags).unwrap();
}
@ -740,6 +817,19 @@ impl VmMapping {
.map_err(|_| ZxError::ACCESS_DENIED)?;
Ok(())
}
/// Clone VMO and map it to a new page table. (For Linux)
fn clone_map(&self, page_table: Arc<Mutex<PageTable>>) -> ZxResult<Arc<Self>> {
let new_vmo = self.vmo.create_child(false, 0, self.vmo.len())?;
let mapping = Arc::new(VmMapping {
inner: Mutex::new(self.inner.lock().clone()),
flags: self.flags,
page_table,
vmo: new_vmo.clone(),
});
new_vmo.append_mapping(Arc::downgrade(&mapping));
Ok(mapping)
}
}
impl VmMappingInner {

View File

@ -727,6 +727,11 @@ impl VMObjectPagedInner {
user_id: KoID,
lock_ref: &Arc<Mutex<()>>,
) -> ZxResult<Arc<VMObjectPaged>> {
// Cloning a contiguous VMO is no longer permitted
// https://fuchsia.googlesource.com/fuchsia/+/e6b4c6751bbdc9ed2795e81b8211ea294f139a45
if self.is_contiguous() {
return Err(ZxError::INVALID_ARGS);
}
if self.cache_policy != CachePolicy::Cached || self.pin_count != 0 {
return Err(ZxError::BAD_STATE);
}

View File

@ -1,20 +1,11 @@
use {super::*, core::arch::x86_64::_rdrand32_step};
use super::*;
#[allow(unsafe_code)]
impl Syscall<'_> {
pub fn sys_cprng_draw_once(&self, buf: usize, len: usize) -> ZxResult {
info!("cprng_draw_once: buf=({:#x}; {:?})", buf, len);
if len % 4 == 0 {
let size = len / 4;
let mut res = vec![0u32; size];
res.iter_mut().for_each(|value| unsafe {
// TODO: move to HAL
_rdrand32_step(value);
});
UserOutPtr::<u32>::from(buf).write_array(&res)?;
Ok(())
} else {
unimplemented!()
}
pub fn sys_cprng_draw_once(&self, mut buf: UserOutPtr<u8>, len: usize) -> ZxResult {
info!("cprng_draw_once: buf=({:?}; {:?})", buf, len);
let mut res = vec![0u8; len];
kernel_hal::fill_random(&mut res);
buf.write_array(&res)?;
Ok(())
}
}

View File

@ -111,6 +111,12 @@ impl Syscall<'_> {
Sys::PROCESS_START => {
self.sys_process_start(a0 as _, a1 as _, a2 as _, a3 as _, a4 as _, a5 as _)
}
Sys::PROCESS_READ_MEMORY => {
self.sys_process_read_memory(a0 as _, a1 as _, a2.into(), a3 as _, a4.into())
}
Sys::PROCESS_WRITE_MEMORY => {
self.sys_process_write_memory(a0 as _, a1 as _, a2.into(), a3 as _, a4.into())
}
Sys::PROCESS_EXIT => self.sys_process_exit(a0 as _),
Sys::JOB_CREATE => self.sys_job_create(a0 as _, a1 as _, a2.into()),
Sys::JOB_SET_POLICY => self.sys_job_set_policy(a0 as _, a1 as _, a2 as _, a3, a4 as _),
@ -226,7 +232,7 @@ impl Syscall<'_> {
}
Sys::VMAR_PROTECT => self.sys_vmar_protect(a0 as _, a1 as _, a2 as _, a3 as _),
Sys::VMAR_DESTROY => self.sys_vmar_destroy(a0 as _),
Sys::CPRNG_DRAW_ONCE => self.sys_cprng_draw_once(a0 as _, a1 as _),
Sys::CPRNG_DRAW_ONCE => self.sys_cprng_draw_once(a0.into(), a1 as _),
Sys::NANOSLEEP => self.sys_nanosleep(a0.into()).await,
Sys::CLOCK_GET => self.sys_clock_get(a0 as _, a1.into()),
Sys::CLOCK_READ => self.sys_clock_read(a0 as _, a1.into()),
@ -293,6 +299,7 @@ impl Syscall<'_> {
Sys::PCI_ADD_SUBTRACT_IO_RANGE => {
self.sys_pci_add_subtract_io_range(a0 as _, a1 != 0, a2 as _, a3 as _, a4 != 0)
}
#[cfg(target_arch = "x86_64")]
Sys::PCI_CFG_PIO_RW => self.sys_pci_cfg_pio_rw(
a0 as _,
a1 as _,

View File

@ -105,6 +105,7 @@ impl Syscall<'_> {
.set_debug_addr(addr);
Ok(())
}
#[cfg(target_arch = "x86_64")]
Property::RegisterFs => {
let thread = proc.get_object::<Thread>(handle_value)?;
assert!(Arc::ptr_eq(&thread, &self.thread));

View File

@ -34,6 +34,7 @@ impl Syscall<'_> {
}
#[allow(clippy::too_many_arguments)]
#[cfg(target_arch = "x86_64")]
pub fn sys_pci_cfg_pio_rw(
&self,
handle: HandleValue,
@ -45,26 +46,21 @@ impl Syscall<'_> {
width: usize,
write: bool,
) -> ZxResult {
#[cfg(not(target_arch = "x86_64"))]
return Err(ZxError::NOT_SUPPORTED);
#[cfg(target_arch = "x86_64")]
{
info!(
info!(
"pci.cfg_pio_rw: handle={:#x}, addr={:x}:{:x}:{:x}, offset={:#x}, width={:#x}, write={:#}",
handle, bus, dev, func, offset, width, write
);
let proc = self.thread.proc();
proc.get_object::<Resource>(handle)?
.validate(ResourceKind::ROOT)?;
if write {
let value = value_ptr.read()?;
pio_config_write(bus, dev, func, offset, value, width)?;
} else {
let value = pio_config_read(bus, dev, func, offset, width)?;
value_ptr.write(value)?;
}
Ok(())
let proc = self.thread.proc();
proc.get_object::<Resource>(handle)?
.validate(ResourceKind::ROOT)?;
if write {
let value = value_ptr.read()?;
pio_config_write(bus, dev, func, offset, value, width)?;
} else {
let value = pio_config_read(bus, dev, func, offset, width)?;
value_ptr.write(value)?;
}
Ok(())
}
// TODO: review

View File

@ -284,6 +284,47 @@ impl Syscall<'_> {
_ => Err(ZxError::INVALID_ARGS),
}
}
pub fn sys_process_read_memory(
&self,
handle_value: HandleValue,
vaddr: usize,
mut buffer: UserOutPtr<u8>,
buffer_size: usize,
mut actual: UserOutPtr<u32>,
) -> ZxResult {
if buffer.is_null() || buffer_size == 0 || buffer_size > MAX_BLOCK {
return Err(ZxError::INVALID_ARGS);
}
let proc = self.thread.proc();
let process =
proc.get_object_with_rights::<Process>(handle_value, Rights::READ | Rights::WRITE)?;
let mut data = vec![0u8; buffer_size];
let len = process.vmar().read_memory(vaddr, &mut data)?;
buffer.write_array(&data[..len])?;
actual.write_if_not_null(len as u32)?;
Ok(())
}
pub fn sys_process_write_memory(
&self,
handle_value: HandleValue,
vaddr: usize,
buffer: UserInPtr<u8>,
buffer_size: usize,
mut actual: UserOutPtr<u32>,
) -> ZxResult {
if buffer.is_null() || buffer_size == 0 || buffer_size > MAX_BLOCK {
return Err(ZxError::INVALID_ARGS);
}
let proc = self.thread.proc();
let process =
proc.get_object_with_rights::<Process>(handle_value, Rights::READ | Rights::WRITE)?;
let data = buffer.read_array(buffer_size)?;
let len = process.vmar().write_memory(vaddr, &data)?;
actual.write_if_not_null(len as u32)?;
Ok(())
}
}
const JOB_POL_BASE_V1: u32 = 0;
@ -292,3 +333,5 @@ const JOB_POL_TIMER_SLACK: u32 = 1;
const JOB_POL_RELATIVE: u32 = 0;
const JOB_POL_ABSOLUTE: u32 = 1;
const MAX_BLOCK: usize = 64 * 1024 * 1024; //64M

View File

@ -1,8 +1,11 @@
use {super::*, bitflags::bitflags, zircon_object::vm::*};
fn amount_of_alignments(options: u32) -> ZxResult<usize> {
let align_pow2 = (options >> 24) as usize;
if (align_pow2 < 10 && align_pow2 != 0) || (align_pow2 > 32) {
let mut align_pow2 = (options >> 24) as usize;
if align_pow2 == 0 {
align_pow2 = PAGE_SIZE_LOG2;
}
if (align_pow2 < PAGE_SIZE_LOG2) || (align_pow2 > 32) {
Err(ZxError::INVALID_ARGS)
} else {
Ok(1 << align_pow2)
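A hosted sketch of the new alignment decoding (assuming 4 KiB pages, i.e. `PAGE_SIZE_LOG2 = 12`, and a plain error string standing in for `ZxError::INVALID_ARGS`):

```rust
// Hosted sketch: the top 8 bits of the VMAR options word carry log2 of the
// requested alignment; 0 means "use the default page alignment".
const PAGE_SIZE_LOG2: usize = 12; // assumed 4 KiB pages

fn amount_of_alignments(options: u32) -> Result<usize, &'static str> {
    let mut align_pow2 = (options >> 24) as usize;
    if align_pow2 == 0 {
        align_pow2 = PAGE_SIZE_LOG2;
    }
    if align_pow2 < PAGE_SIZE_LOG2 || align_pow2 > 32 {
        Err("ZX_ERR_INVALID_ARGS")
    } else {
        Ok(1 << align_pow2)
    }
}

fn main() {
    assert_eq!(amount_of_alignments(0).unwrap(), 4096); // default: one page
    assert_eq!(amount_of_alignments(13 << 24).unwrap(), 8192); // 8 KiB
    assert!(amount_of_alignments(5 << 24).is_err()); // below page alignment
}
```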
@ -55,11 +58,12 @@ impl Syscall<'_> {
None
};
let size = roundup_pages(size as usize);
// check `size`
if size == 0u64 {
if size == 0usize {
return Err(ZxError::INVALID_ARGS);
}
let child = parent.allocate(offset, size as usize, vmar_flags, align)?;
let child = parent.allocate(offset, size, vmar_flags, align)?;
let child_addr = child.addr();
let child_handle = proc.add_handle(Handle::new(child, Rights::DEFAULT_VMAR | perm_rights));
info!("vmar.allocate: at {:#x?}", child_addr);
@ -181,7 +185,12 @@ impl Syscall<'_> {
mapping_flags.set(MMUFlags::READ, options.contains(VmOptions::PERM_READ));
mapping_flags.set(MMUFlags::WRITE, options.contains(VmOptions::PERM_WRITE));
mapping_flags.set(MMUFlags::EXECUTE, options.contains(VmOptions::PERM_EXECUTE));
vmar.protect(addr as usize, len as usize, mapping_flags)?;
let len = roundup_pages(len as usize);
if len == 0usize {
return Err(ZxError::INVALID_ARGS);
}
vmar.protect(addr as usize, len, mapping_flags)?;
Ok(())
}