Merge remote-tracking branch 'origin/master' into merging

dflasher 2020-05-04 23:01:35 +08:00
commit e15ce35e61
25 changed files with 304 additions and 255 deletions

View File

@ -1,6 +1,6 @@
# zCore
[![Actions Status](https://github.com/rcore-os/zCore/workflows/CI/badge.svg)](https://github.com/rcore-os/zCore/actions)
[![CI](https://github.com/rcore-os/zCore/workflows/CI/badge.svg?branch=master)](https://github.com/rcore-os/zCore/actions)
[![Docs](https://img.shields.io/badge/docs-alpha-blue)](https://rcore-os.github.io/zCore/zircon_object/)
[![Coverage Status](https://coveralls.io/repos/github/rcore-os/zCore/badge.svg?branch=master)](https://coveralls.io/github/rcore-os/zCore?branch=master)
@ -14,10 +14,18 @@ Reimplement [Zircon][zircon] microkernel in safe Rust as a userspace program!
## Getting started
Environments
* [Rust toolchain](http://rustup.rs)
* [QEMU](https://www.qemu.org)
* [Git LFS](https://git-lfs.github.com)
Clone repo and pull prebuilt fuchsia images:
```sh
git clone https://github.com/rcore-os/zCore
git lfs pull
git clone https://github.com/rcore-os/zCore --recursive
cd zCore
git lfs pull
```
Prepare Alpine Linux rootfs:
@ -32,7 +40,7 @@ Run native Linux program (Busybox):
cargo run --release -p linux-loader /bin/busybox [args]
```
Run native Zircon program (userboot):
Run native Zircon program (shell):
```sh
cargo run --release -p zircon-loader prebuilt/zircon

View File

@ -340,17 +340,36 @@ fn vdso_constants() -> VdsoConstants {
}
/// Initialize the HAL.
pub fn init() {
pub fn init(config: Config) {
timer_init();
interrupt::init();
COM1.lock().init();
unsafe {
// enable global page
Cr4::update(|f| f.insert(Cr4Flags::PAGE_GLOBAL));
// store config
CONFIG = config;
}
}
/// Configuration of HAL.
pub struct Config {
pub acpi_rsdp: u64,
pub smbios: u64,
}
#[export_name = "fetch_fault_vaddr"]
pub fn fetch_fault_vaddr() -> VirtAddr {
Cr2::read().as_u64() as _
}
/// Get physical address of `acpi_rsdp` and `smbios` on x86_64.
#[export_name = "hal_pc_firmware_tables"]
pub fn pc_firmware_tables() -> (u64, u64) {
unsafe { (CONFIG.acpi_rsdp, CONFIG.smbios) }
}
static mut CONFIG: Config = Config {
acpi_rsdp: 0,
smbios: 0,
};
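A minimal sketch (not part of the diff) of how the new `Config` plumbing fits together: the boot stage passes the firmware table addresses into `init`, the HAL stores them in the static `CONFIG`, and `pc_firmware_tables` hands them back out for the pc_firmware_tables syscall added later in this change. The `boot_*` names below are placeholders; the loader hunk further down shows the real call site.
```rust
// Sketch only: wiring the Config through the HAL and reading it back.
fn boot_flow_example(boot_acpi_rsdp: u64, boot_smbios: u64) {
    // Done once by the loader during early boot:
    kernel_hal_bare::init(kernel_hal_bare::Config {
        acpi_rsdp: boot_acpi_rsdp,
        smbios: boot_smbios,
    });
    // Later, e.g. on the pc_firmware_tables syscall path:
    let (acpi_rsdp, smbios) = kernel_hal::pc_firmware_tables();
    assert_eq!((acpi_rsdp, smbios), (boot_acpi_rsdp, boot_smbios));
}
```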

View File

@ -171,10 +171,11 @@ pub fn frame_copy(src: PhysAddr, target: PhysAddr) {
/// Zero `target` frame.
#[export_name = "hal_frame_zero"]
pub fn frame_zero(target: PhysAddr) {
pub fn frame_zero_in_range(target: PhysAddr, start: usize, end: usize) {
assert!(start < PAGE_SIZE && end <= PAGE_SIZE);
trace!("frame_zero: {:#x}", target);
unsafe {
core::ptr::write_bytes(phys_to_virt(target) as *mut u8, 0, PAGE_SIZE);
core::ptr::write_bytes(phys_to_virt(target + start) as *mut u8, 0, end - start);
}
}
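A hedged usage note on the renamed helper: the new `start`/`end` arguments select a byte range within the target frame (the assertion above keeps both inside `PAGE_SIZE`), so zeroing a whole frame now passes `0, PAGE_SIZE`, as the VMO code later in this diff does.
```rust
// Sketch, assuming `paddr` is the physical address of an allocated frame.
fn frame_zero_examples(paddr: PhysAddr) {
    kernel_hal::frame_zero_in_range(paddr, 0, PAGE_SIZE); // whole frame
    kernel_hal::frame_zero_in_range(paddr, 0x10, 0x90);   // only bytes 0x10..0x90
}
```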
@ -211,11 +212,11 @@ pub fn timer_tick() {
}
/// Initialize the HAL.
pub fn init() {
pub fn init(config: Config) {
unsafe {
trapframe::init();
}
arch::init();
arch::init(config);
}
#[cfg(test)]

View File

@ -209,7 +209,7 @@ pub fn frame_copy(_src: PhysAddr, _target: PhysAddr) {
/// Zero `target` frame.
#[linkage = "weak"]
#[export_name = "hal_frame_zero"]
pub fn frame_zero(_target: PhysAddr) {
pub fn frame_zero_in_range(_target: PhysAddr, _start: usize, _end: usize) {
unimplemented!()
}
@ -282,3 +282,10 @@ pub fn vdso_constants() -> VdsoConstants {
pub fn fetch_fault_vaddr() -> VirtAddr {
unimplemented!()
}
/// Get physical address of `acpi_rsdp` and `smbios` on x86_64.
#[linkage = "weak"]
#[export_name = "hal_pc_firmware_tables"]
pub fn pc_firmware_tables() -> (u64, u64) {
unimplemented!()
}

rboot

@ -1 +1 @@
Subproject commit 5ab7648d1401f56255d3d90fb160b6b7c13c879a
Subproject commit 228ef7902486b8c5e5a69272b1f0f1dcf962c098

View File

@ -29,7 +29,10 @@ pub extern "C" fn _start(boot_info: &BootInfo) -> ! {
#[cfg(feature = "graphic")]
init_framebuffer(boot_info);
info!("{:#x?}", boot_info);
kernel_hal_bare::init();
kernel_hal_bare::init(kernel_hal_bare::Config {
acpi_rsdp: boot_info.acpi2_rsdp_addr,
smbios: boot_info.smbios_addr,
});
let zbi_data = unsafe {
core::slice::from_raw_parts(

View File

@ -41,7 +41,7 @@ pub fn create_kcounter_vmo() -> (Arc<VmObject>, Arc<VmObject>) {
kcounters_arena_end as usize / PAGE_SIZE,
"all kcounters must in the same page"
);
unsafe { VmObject::new_physical(paddr, 1) }
VmObject::new_physical(paddr, 1)
};
kcounters_vmo.set_name("counters/arena");
(counter_name_vmo, kcounters_vmo)

View File

@ -12,14 +12,7 @@ use {
alloc::{boxed::Box, sync::Arc, vec::Vec},
kernel_hal::GeneralRegs,
xmas_elf::ElfFile,
zircon_object::{
ipc::*,
object::*,
resource::{Resource, ResourceFlags, ResourceKind},
task::*,
util::elf_loader::*,
vm::*,
},
zircon_object::{ipc::*, object::*, task::*, util::elf_loader::*, vm::*, resource::*},
zircon_syscall::Syscall,
};
@ -56,7 +49,13 @@ pub fn run_userboot(images: &Images<impl AsRef<[u8]>>, cmdline: &str) -> Arc<Pro
let job = Job::root();
let proc = Process::create(&job, "proc", 0).unwrap();
let thread = Thread::create(&proc, "thread", 0).unwrap();
let resource = Resource::create("root", ResourceKind::ROOT, 0, 0, ResourceFlags::empty());
let resource = Resource::create(
"root",
ResourceKind::ROOT,
0,
0x1_0000_0000,
ResourceFlags::empty(),
);
let vmar = proc.vmar();
// userboot

View File

@ -104,14 +104,19 @@ impl Fifo {
let count_size = count * elem_size;
assert_eq!(data.len(), count_size);
let peer = self.peer.upgrade().ok_or(ZxError::PEER_CLOSED)?;
let peer = self.peer.upgrade();
let mut recv_queue = self.recv_queue.lock();
if recv_queue.is_empty() {
if peer.is_none() {
return Err(ZxError::PEER_CLOSED);
}
return Err(ZxError::SHOULD_WAIT);
}
let read_size = count_size.min(recv_queue.len());
if recv_queue.len() == self.capacity() {
peer.base.signal_set(Signal::WRITABLE);
if let Some(peer) = peer {
peer.base.signal_set(Signal::WRITABLE);
}
}
for (i, x) in recv_queue.drain(..read_size).enumerate() {
data[i] = x;

View File

@ -11,6 +11,7 @@ bitflags! {
const SIGNALED = 1 << 3;
const HANDLE_CLOSED = 1 << 23;
const KERNEL_ALL = 0xff_ffff;
const USER_ALL = 0xff << 24;
const CLOCK_STARTED = 1 << 4;

View File

@ -45,11 +45,7 @@ impl Resource {
flags: ResourceFlags,
) -> Arc<Self> {
Arc::new(Resource {
base: {
let base = KObjectBase::new();
base.set_name(name);
base
},
base: KObjectBase::with_name(name),
kind,
addr,
len,
@ -72,9 +68,6 @@ impl Resource {
len: usize,
) -> ZxResult {
self.validate(kind)?;
if self.kind == ResourceKind::MMIO {
unimplemented!()
}
if addr >= self.addr && (addr + len) <= (self.addr + self.len) {
Ok(())
} else {

View File

@ -185,20 +185,14 @@ impl Job {
self.exceptionate.clone()
}
pub fn enumerate_process(&self, mut f: impl FnMut(KoID) -> bool) {
self.inner
.lock()
.processes
.iter()
.find(|child| !f(child.id()));
/// Get KoIDs of Processes.
pub fn process_ids(&self) -> Vec<KoID> {
self.inner.lock().processes.iter().map(|p| p.id()).collect()
}
pub fn enumerate_children(&self, mut f: impl FnMut(KoID) -> bool) {
self.inner
.lock()
.children
.iter()
.find(|child| !f(child.id()));
/// Get KoIDs of children Jobs.
pub fn children_ids(&self) -> Vec<KoID> {
self.inner.lock().children.iter().map(|j| j.id()).collect()
}
}

View File

@ -1,5 +1,5 @@
use {
super::{exception::*, job::Job, job_policy::*, resource::*, thread::Thread, *},
super::{exception::*, job::Job, job_policy::*, thread::Thread, *},
crate::{object::*, signal::Futex, vm::*},
alloc::{boxed::Box, collections::BTreeMap, sync::Arc, vec::Vec},
core::{any::Any, sync::atomic::AtomicI32},
@ -341,16 +341,6 @@ impl Process {
Ok(object)
}
/// Try to get Resource and validate it
pub fn validate_resource(&self, handle_value: HandleValue, kind: ResourceKind) -> ZxResult {
let handle = self.get_handle(handle_value)?;
let object = handle
.object
.downcast_arc::<Resource>()
.map_err(|_| ZxError::WRONG_TYPE)?;
object.validate(kind)
}
pub fn get_handle_info(&self, handle_value: HandleValue) -> ZxResult<HandleBasicInfo> {
let handle = self.get_handle(handle_value)?;
Ok(handle.get_info())
@ -424,12 +414,9 @@ impl Process {
self.exceptionate.clone()
}
pub fn enumerate_thread(&self, mut f: impl FnMut(KoID) -> bool) {
self.inner
.lock()
.threads
.iter()
.find(|child| !f(child.id()));
/// Get KoIDs of Threads.
pub fn thread_ids(&self) -> Vec<KoID> {
self.inner.lock().threads.iter().map(|t| t.id()).collect()
}
}

View File

@ -172,7 +172,7 @@ impl Thread {
context.general.rsp = stack;
context.general.rdi = arg1;
context.general.rsi = arg2;
context.general.rflags |= 0x202;
context.general.rflags |= 0x3202;
context.vector.fcw = 0x37f;
inner.state = ThreadState::Running;
self.base.signal_set(Signal::THREAD_RUNNING);
@ -191,7 +191,7 @@ impl Thread {
let mut inner = self.inner.lock();
let context = inner.context.as_mut().ok_or(ZxError::BAD_STATE)?;
context.general = regs;
context.general.rflags |= 0x202;
context.general.rflags |= 0x3202;
context.vector.fcw = 0x37f;
inner.state = ThreadState::Running;
self.base.signal_set(Signal::THREAD_RUNNING);
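A hedged aside on the constant change above: `0x202` set only the always-one bit and IF; `0x3202` additionally sets IOPL to 3, which permits I/O instructions from user-mode threads.
```rust
// Sketch: decomposition of the RFLAGS value written into new thread contexts.
const RFLAGS_RESERVED1: u64 = 0x0002; // bit 1, must always be 1
const RFLAGS_IF: u64 = 0x0200;        // bit 9, interrupts enabled
const RFLAGS_IOPL3: u64 = 0x3000;     // bits 12-13, I/O privilege level 3 (the new part)
// RFLAGS_RESERVED1 | RFLAGS_IF | RFLAGS_IOPL3 == 0x3202
```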

View File

@ -1,7 +1,7 @@
use core::sync::atomic::*;
use {
super::*, crate::object::*, alloc::collections::VecDeque, alloc::sync::Arc, alloc::vec::Vec,
bitflags::bitflags, kernel_hal::PageTable, spin::Mutex,
super::*, crate::object::*, alloc::sync::Arc, alloc::vec::Vec, bitflags::bitflags,
kernel_hal::PageTable, spin::Mutex,
};
bitflags! {
@ -449,28 +449,20 @@ impl VmAddressRegion {
Err(ZxError::NOT_FOUND)
}
pub fn get_task_stats(&self) -> ZxInfoTaskStats {
let mut task_stats = ZxInfoTaskStats::default();
let mut list = VecDeque::new();
self.inner
.lock()
.as_ref()
.unwrap()
.children
.iter()
.for_each(|child| {
list.push_back(child.clone());
});
while let Some(vmar) = list.pop_front() {
let vmar_inner = vmar.inner.lock();
let inner = vmar_inner.as_ref().unwrap();
inner.children.iter().for_each(|child| {
list.push_back(child.clone());
});
inner.mappings.iter().for_each(|map| {
map.fill_in_task_status(&mut task_stats);
});
fn for_each_mapping(&self, f: &mut impl FnMut(&Arc<VmMapping>)) {
let guard = self.inner.lock();
let inner = guard.as_ref().unwrap();
for map in inner.mappings.iter() {
f(map);
}
for child in inner.children.iter() {
child.for_each_mapping(f);
}
}
pub fn get_task_stats(&self) -> TaskStatsInfo {
let mut task_stats = TaskStatsInfo::default();
self.for_each_mapping(&mut |map| map.fill_in_task_status(&mut task_stats));
task_stats
}
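A hedged sketch of the new traversal: `for_each_mapping` recurses through child regions and visits every `VmMapping`, replacing the explicit `VecDeque` breadth-first walk, and `get_task_stats` is now just one closure over it. In the diff the helper is private, so the example below assumes it were callable.
```rust
// Sketch: another consumer of the closure-based walk, e.g. counting mappings.
fn count_mappings(vmar: &VmAddressRegion) -> usize {
    let mut count = 0;
    vmar.for_each_mapping(&mut |_map| count += 1);
    count
}
```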
@ -514,7 +506,7 @@ struct VmMappingInner {
#[repr(C)]
#[derive(Default)]
pub struct ZxInfoTaskStats {
pub struct TaskStatsInfo {
mapped_bytes: u64,
private_bytes: u64,
shared_bytes: u64,
@ -582,7 +574,7 @@ impl VmMapping {
.unmap_from(&mut page_table, inner.addr, inner.vmo_offset, inner.size);
}
fn fill_in_task_status(&self, task_stats: &mut ZxInfoTaskStats) {
fn fill_in_task_status(&self, task_stats: &mut TaskStatsInfo) {
let inner = self.inner.lock();
let start_idx = inner.vmo_offset / PAGE_SIZE;
let end_idx = start_idx + inner.size / PAGE_SIZE;

View File

@ -74,7 +74,7 @@ pub trait VMObjectTrait: Sync + Send {
fn remove_mapping(&self, mapping: Weak<VmMapping>);
fn complete_info(&self, info: &mut ZxInfoVmo);
fn complete_info(&self, info: &mut VmoInfo);
fn get_cache_policy(&self) -> CachePolicy;
@ -99,6 +99,7 @@ pub trait VMObjectTrait: Sync + Send {
fn is_paged(&self) -> bool {
false
}
fn zero(&self, offset: usize, len: usize) -> ZxResult;
}
pub struct VmObject {
@ -132,12 +133,8 @@ impl VmObject {
}
/// Create a new VMO representing a piece of contiguous physical memory.
///
/// # Safety
///
/// You must ensure nobody has the ownership of this piece of memory yet.
#[allow(unsafe_code)]
pub unsafe fn new_physical(paddr: PhysAddr, pages: usize) -> Arc<Self> {
pub fn new_physical(paddr: PhysAddr, pages: usize) -> Arc<Self> {
Arc::new(VmObject {
base: KObjectBase::with_signal(Signal::VMO_ZERO_CHILDREN),
parent: Mutex::new(Default::default()),
@ -247,8 +244,8 @@ impl VmObject {
}
/// Get information of this VMO.
pub fn get_info(&self) -> ZxInfoVmo {
let mut ret = ZxInfoVmo {
pub fn get_info(&self) -> VmoInfo {
let mut ret = VmoInfo {
koid: self.base.id,
name: {
let mut arr = [0u8; 32];
@ -313,6 +310,14 @@ impl Drop for VmObject {
let mut children = parent.children.lock();
children.append(&mut my_children);
children.retain(|c| c.strong_count() != 0);
children.iter().for_each(|child| {
let arc_child = child.upgrade().unwrap();
let mut locked_children = arc_child.children.lock();
locked_children.retain(|c| c.strong_count() != 0);
if locked_children.is_empty() {
arc_child.base.signal_set(Signal::VMO_ZERO_CHILDREN);
}
});
// Non-zero to zero?
if children.is_empty() {
parent.base.signal_set(Signal::VMO_ZERO_CHILDREN);
@ -324,7 +329,7 @@ impl Drop for VmObject {
/// Describes a VMO.
#[repr(C)]
#[derive(Default)]
pub struct ZxInfoVmo {
pub struct VmoInfo {
/// The koid of this VMO.
koid: KoID,
/// The name of this VMO.

View File

@ -276,6 +276,32 @@ impl VMObjectTrait for VMObjectPaged {
})
}
fn zero(&self, offset: usize, len: usize) -> ZxResult {
if offset + len > self.inner.lock().size {
return Err(ZxError::OUT_OF_RANGE);
}
let iter = BlockIter {
begin: offset,
end: offset + len,
block_size_log2: 12,
};
let mut unwanted = VecDeque::new();
for block in iter {
//let paddr = self.commit_page(block.block, MMUFlags::READ)?;
if block.len() == PAGE_SIZE {
let _ = self.commit_page(block.block, MMUFlags::WRITE)?;
unwanted.push_back(block.block);
self.inner.lock().frames.remove(&block.block);
} else if self.committed_pages_in_range(block.block, block.block + 1) != 0 {
// check whether this page is initialized, otherwise nothing should be done
let paddr = self.commit_page(block.block, MMUFlags::WRITE)?;
kernel_hal::frame_zero_in_range(paddr, block.begin, block.end);
}
}
self.inner.lock().release_unwanted_pages(unwanted);
Ok(())
}
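A hedged summary of the zeroing strategy above: page-sized blocks are committed, queued as `unwanted`, and released afterwards, so they read back as zeros lazily; partial blocks are zeroed in place with `frame_zero_in_range`, but only if the page is already committed. A minimal caller-side sketch, assuming `vmo` is a paged `VmObject` of at least two pages (userspace reaches this via vmo_op_range with the `Zero` opcode added later in this diff):
```rust
// Sketch: exercising the new zero() entry point on a paged VMO.
fn zero_examples(vmo: &VmObject) -> ZxResult {
    vmo.zero(PAGE_SIZE, PAGE_SIZE)?; // full page: frame dropped, reads back as zeros
    vmo.zero(0, 0x10)                // partial range: zeroed in place if committed
}
```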
fn len(&self) -> usize {
self.inner.lock().size
}
@ -317,8 +343,12 @@ impl VMObjectTrait for VMObjectPaged {
return res;
}
let mut inner = self.inner.lock();
// non-slice child VMOs do not support decommit.
if inner.parent.is_some() {
if inner.type_.is_slice() {
let parent_offset = offset + inner.parent_offset;
return inner.parent.as_ref().unwrap().decommit(parent_offset, len);
}
let check = inner.parent.is_none();
if !check {
return Err(ZxError::NOT_SUPPORTED);
}
let start_page = offset / PAGE_SIZE;
@ -398,7 +428,7 @@ impl VMObjectTrait for VMObjectPaged {
.drain_filter(|x| x.strong_count() == 0 || Weak::ptr_eq(x, &mapping));
}
fn complete_info(&self, info: &mut ZxInfoVmo) {
fn complete_info(&self, info: &mut VmoInfo) {
info.flags |= VmoInfoFlags::TYPE_PAGED;
self.inner.lock().complete_info(info);
}
@ -563,6 +593,15 @@ impl VMObjectPaged {
return res;
}
let mut inner = self.inner.lock();
if inner.type_.is_slice() {
assert!((inner.parent_limit - inner.parent_offset) / PAGE_SIZE > page_idx);
let parent_idx = page_idx + inner.parent_offset / PAGE_SIZE;
return inner.parent.as_ref().unwrap().commit_page_internal(
parent_idx,
flags,
&inner.self_ref,
);
}
// special case
let no_parent = inner.parent.is_none();
let no_frame = !inner.frames.contains_key(&page_idx);
@ -580,7 +619,7 @@ impl VMObjectPaged {
}
// lazy allocate zero frame
let target_frame = PhysFrame::alloc().ok_or(ZxError::NO_MEMORY)?;
kernel_hal::frame_zero(target_frame.addr());
kernel_hal::frame_zero_in_range(target_frame.addr(), 0, PAGE_SIZE);
if out_of_range {
// can never be a hidden vmo
assert!(!inner.type_.is_hidden());
@ -735,7 +774,7 @@ impl VMObjectPaged {
let mut inner = self.inner.lock();
inner.contiguous = true;
for (i, f) in frames.drain(0..).enumerate() {
kernel_hal::frame_zero(f.addr());
kernel_hal::frame_zero_in_range(f.addr(), 0, PAGE_SIZE);
let mut state = PageState::new(f);
state.pin_count += 1;
inner.frames.insert(i, state);
@ -774,7 +813,7 @@ impl VMObjectPagedInner {
/// Count committed pages of the VMO.
fn committed_pages_in_range(&self, start_idx: usize, end_idx: usize) -> usize {
assert!(
start_idx < self.size / PAGE_SIZE,
start_idx < self.size / PAGE_SIZE || start_idx == 0,
"start_idx {:#x}, self.size {:#x}",
start_idx,
self.size
@ -804,6 +843,9 @@ impl VMObjectPagedInner {
break;
}
}
if inner.user_id != self.user_id {
break;
}
current_idx += inner.parent_offset / PAGE_SIZE;
if current_idx >= inner.parent_limit / PAGE_SIZE {
break;
@ -926,7 +968,7 @@ impl VMObjectPagedInner {
Ok(child)
}
fn complete_info(&self, info: &mut ZxInfoVmo) {
fn complete_info(&self, info: &mut VmoInfo) {
if let VMOType::Snapshot = self.type_ {
info.flags |= VmoInfoFlags::IS_COW_CLONE;
}
@ -946,47 +988,50 @@ impl VMObjectPagedInner {
let mut child = self.self_ref.clone();
while let Some(parent) = option_parent {
let mut locked_parent = parent.inner.lock();
if locked_parent.user_id == self.user_id {
let (tag, other) = locked_parent.type_.get_tag_and_other(&child);
let arc_other = other.upgrade().unwrap();
let mut locked_other = arc_other.inner.lock();
let start = locked_other.parent_offset / PAGE_SIZE;
let end = locked_other.parent_limit / PAGE_SIZE;
for _ in 0..unwanted.len() {
let idx = unwanted.pop_front().unwrap();
// if the frame is in locked_other's range, check if it can be moved to locked_other
if start <= idx && idx < end {
if locked_parent.frames.contains_key(&idx) {
let mut to_insert = locked_parent.frames.remove(&idx).unwrap();
if to_insert.tag != tag.negate() {
to_insert.tag = PageStateTag::Owned;
locked_other.frames.insert(idx - start, to_insert);
}
let (tag, other) = locked_parent.type_.get_tag_and_other(&child);
let arc_other = other.upgrade().unwrap();
let mut locked_other = arc_other.inner.lock();
let start = locked_other.parent_offset / PAGE_SIZE;
let end = locked_other.parent_limit / PAGE_SIZE;
for _ in 0..unwanted.len() {
let idx = unwanted.pop_front().unwrap();
// if the frame is in locked_other's range, check if it can be moved to locked_other
if start <= idx && idx < end {
if locked_parent.frames.contains_key(&idx) {
let mut to_insert = locked_parent.frames.remove(&idx).unwrap();
if to_insert.tag != tag.negate() {
to_insert.tag = PageStateTag::Owned;
locked_other.frames.insert(idx - start, to_insert);
}
unwanted.push_back(idx + locked_parent.parent_offset / PAGE_SIZE);
}
} else {
// otherwise, if it exists in our frames, remove it; if not, push_back it again
if locked_parent.frames.contains_key(&idx) {
locked_parent.frames.remove(&idx);
} else {
// otherwise, if it exists in our frames, remove it; if not, push_back it again
if locked_parent.frames.contains_key(&idx) {
locked_parent.frames.remove(&idx);
} else {
unwanted.push_back(idx + locked_parent.parent_offset / PAGE_SIZE);
}
unwanted.push_back(idx + locked_parent.parent_offset / PAGE_SIZE);
}
}
child = locked_parent.self_ref.clone();
option_parent = locked_parent.parent.clone();
drop(locked_parent);
} else {
break;
}
child = locked_parent.self_ref.clone();
option_parent = locked_parent.parent.clone();
drop(locked_parent);
}
}
fn resize(&mut self, new_size: usize) {
if new_size < self.size {
if new_size == 0 && new_size < self.size {
self.frames.clear();
if let Some(parent) = self.parent.as_ref() {
parent.inner.lock().remove_child(&self.self_ref);
self.parent = None;
}
} else if new_size < self.size {
let mut unwanted = VecDeque::<usize>::new();
let parent_end = (self.parent_limit - self.parent_offset) / PAGE_SIZE;
for i in new_size / PAGE_SIZE..self.size / PAGE_SIZE {
if self.frames.remove(&i).is_none() && parent_end > i {
if parent_end > i {
unwanted.push_back(i);
}
}

View File

@ -29,12 +29,8 @@ impl VMObjectPhysicalInner {
impl VMObjectPhysical {
/// Create a new VMO representing a piece of contiguous physical memory.
///
/// # Safety
///
/// You must ensure nobody has the ownership of this piece of memory yet.
#[allow(unsafe_code)]
pub unsafe fn new(paddr: PhysAddr, pages: usize) -> Arc<Self> {
pub fn new(paddr: PhysAddr, pages: usize) -> Arc<Self> {
assert!(page_aligned(paddr));
Arc::new(VMObjectPhysical {
paddr,
@ -91,7 +87,6 @@ impl VMObjectTrait for VMObjectPhysical {
Err(ZxError::NOT_SUPPORTED)
}
#[allow(unsafe_code)]
fn create_slice(
self: Arc<Self>,
_id: KoID,
@ -99,7 +94,7 @@ impl VMObjectTrait for VMObjectPhysical {
len: usize,
) -> ZxResult<Arc<dyn VMObjectTrait>> {
assert!(page_aligned(offset) && page_aligned(len));
let obj = unsafe { VMObjectPhysical::new(self.paddr + offset, len / PAGE_SIZE) };
let obj = VMObjectPhysical::new(self.paddr + offset, len / PAGE_SIZE);
obj.inner.lock().cache_policy = self.inner.lock().cache_policy;
Ok(obj)
}
@ -116,8 +111,8 @@ impl VMObjectTrait for VMObjectPhysical {
inner.mapping_count -= 1;
}
fn complete_info(&self, _info: &mut ZxInfoVmo) {
unimplemented!()
fn complete_info(&self, _info: &mut VmoInfo) {
warn!("VmoInfo for physical is unimplemented");
}
fn get_cache_policy(&self) -> CachePolicy {
@ -140,10 +135,14 @@ impl VMObjectTrait for VMObjectPhysical {
}
fn share_count(&self) -> usize {
unimplemented!()
self.inner.lock().mapping_count as usize
}
fn committed_pages_in_range(&self, _start_idx: usize, _end_idx: usize) -> usize {
0
}
fn zero(&self, _offset: usize, _len: usize) -> ZxResult {
unimplemented!()
}
@ -160,7 +159,7 @@ mod tests {
#[test]
fn read_write() {
let vmo = unsafe { VmObject::new_physical(0x1000, 2) };
let vmo = VmObject::new_physical(0x1000, 2);
let vmphy = vmo.inner.clone();
assert_eq!(vmphy.get_cache_policy(), CachePolicy::Uncached);
super::super::tests::read_write(&vmo);

View File

@ -1,12 +1,9 @@
use {
super::*,
zircon_object::{
dev::*,
resource::*,
},
bitflags::bitflags,
kernel_hal::DevVAddr,
zircon_object::vm::{page_aligned, VmObject},
zircon_object::{dev::*, resource::*},
};
impl Syscall<'_> {
@ -23,7 +20,8 @@ impl Syscall<'_> {
resource, type_, desc, desc_size, out
);
let proc = self.thread.proc();
proc.validate_resource(resource, ResourceKind::ROOT)?;
proc.get_object::<Resource>(resource)?
.validate(ResourceKind::ROOT)?;
if desc_size > IOMMU_MAX_DESC_LEN {
return Err(ZxError::INVALID_ARGS);
}
@ -91,7 +89,6 @@ impl Syscall<'_> {
let mut iommu_perms = IommuPerms::empty();
let options = BtiOptions::from_bits_truncate(options);
if options.contains(BtiOptions::PERM_READ) {
if !rights.contains(Rights::READ) {
return Err(ZxError::ACCESS_DENIED);
@ -107,15 +104,14 @@ impl Syscall<'_> {
}
if options.contains(BtiOptions::PERM_EXECUTE) {
// NOTE: Check Rights::READ instead of Rights::EXECUTE,
// because Rights::EXECUTE applies to the execution permission of the host CPU,
// NOTE: Check Rights::READ instead of Rights::EXECUTE,
// because Rights::EXECUTE applies to the execution permission of the host CPU,
// but ZX_BTI_PERM_EXECUTE applies to transactions initiated by the bus device.
if !rights.contains(Rights::READ) {
return Err(ZxError::ACCESS_DENIED);
}
iommu_perms.insert(IommuPerms::PERM_EXECUTE);
}
if options.contains(BtiOptions::CONTIGUOUS) && options.contains(BtiOptions::COMPRESS) {
return Err(ZxError::INVALID_ARGS);
}
@ -127,8 +123,11 @@ impl Syscall<'_> {
let pmt = bti.pin(vmo, offset, size, iommu_perms)?;
let encoded_addrs = pmt.as_ref().encode_addrs(compress_results, contiguous)?;
if encoded_addrs.len() != addrs_count {
warn!("bti.pin addrs_count = {}, but encoded_addrs.len = {}",
addrs_count, encoded_addrs.len());
warn!(
"bti.pin addrs_count = {}, but encoded_addrs.len = {}",
addrs_count,
encoded_addrs.len()
);
return Err(ZxError::INVALID_ARGS);
}
addrs.write_array(&encoded_addrs)?;
@ -137,26 +136,35 @@ impl Syscall<'_> {
Ok(())
}
pub fn sys_pmt_unpin(
&self,
pmt: HandleValue,
) -> ZxResult {
pub fn sys_pmt_unpin(&self, pmt: HandleValue) -> ZxResult {
info!("pmt.unpin: pmt={:#x}", pmt);
let proc = self.thread.proc();
let pmt = proc.remove_object::<Pmt>(pmt)?;
pmt.as_ref().unpin_and_remove()
}
pub fn sys_bti_release_quarantine(
&self,
bti: HandleValue,
) -> ZxResult {
pub fn sys_bti_release_quarantine(&self, bti: HandleValue) -> ZxResult {
info!("bti.release_quarantine: bti = {:#x}", bti);
let proc = self.thread.proc();
let bti = proc.get_object_with_rights::<Bti>(bti, Rights::WRITE)?;
bti.release_quarantine();
Ok(())
}
pub fn sys_pc_firmware_tables(
&self,
resource: HandleValue,
mut acpi_rsdp_ptr: UserOutPtr<u64>,
mut smbios_ptr: UserOutPtr<u64>,
) -> ZxResult {
info!("pc_firmware_tables: handle={:?}", resource);
let proc = self.thread.proc();
proc.get_object::<Resource>(resource)?
.validate(ResourceKind::ROOT)?;
let (acpi_rsdp, smbios) = kernel_hal::pc_firmware_tables();
acpi_rsdp_ptr.write(acpi_rsdp)?;
smbios_ptr.write(smbios)?;
Ok(())
}
}
const IOMMU_MAX_DESC_LEN: usize = 4096;
@ -172,4 +180,3 @@ bitflags! {
const CONTIGUOUS = 1 << 4;
}
}

View File

@ -1,5 +1,5 @@
use super::*;
use zircon_object::resource::ResourceKind;
use zircon_object::resource::*;
impl Syscall<'_> {
pub fn sys_debug_write(&self, buf: UserInPtr<u8>, len: usize) -> ZxResult {
@ -21,7 +21,8 @@ impl Syscall<'_> {
handle, buf, buf_size
);
let proc = self.thread.proc();
proc.validate_resource(handle, ResourceKind::ROOT)?;
proc.get_object::<Resource>(handle)?
.validate(ResourceKind::ROOT)?;
// FIXME: To make 'console' work, now debug_read is a blocking call.
// But it should be non-blocking.
// let mut vec = vec![0u8; buf_size as usize];

View File

@ -1,6 +1,6 @@
use {
super::*,
zircon_object::{debuglog::DebugLog, resource::ResourceKind},
zircon_object::{debuglog::DebugLog, resource::*},
};
const FLAG_READABLE: u32 = 0x4000_0000u32;
@ -18,7 +18,8 @@ impl Syscall<'_> {
);
let proc = self.thread.proc();
if rsrc != 0 {
proc.validate_resource(rsrc, ResourceKind::ROOT)?;
proc.get_object::<Resource>(rsrc)?
.validate(ResourceKind::ROOT)?;
}
let dlog = DebugLog::create(options);
let dlog_right = if options & FLAG_READABLE == 0 {

View File

@ -248,6 +248,7 @@ impl Syscall<'_> {
Sys::OBJECT_GET_CHILD => {
self.sys_object_get_child(a0 as _, a1 as _, a2 as _, a3.into())
}
Sys::PC_FIRMWARE_TABLES => self.sys_pc_firmware_tables(a0 as _, a1.into(), a2.into()),
_ => {
error!("syscall unimplemented: {:?}", sys_type);
Err(ZxError::NOT_SUPPORTED)

View File

@ -134,17 +134,11 @@ impl Syscall<'_> {
deadline: Deadline,
mut observed: UserOutPtr<Signal>,
) -> ZxResult {
let signals = Signal::from_bits_truncate(signals);
info!(
"object.wait_one: handle={:#x?}, signals={:#x?}, deadline={:#x?}, observed={:#x?}",
handle, signals, deadline, observed
);
let signals = Signal::from_bits(signals).ok_or_else(|| {
if !deadline.is_positive() {
ZxError::TIMED_OUT
} else {
ZxError::INVALID_ARGS
}
})?;
let proc = self.thread.proc();
let object = proc.get_dyn_object_with_rights(handle, Rights::WAIT)?;
let cancel_token = proc.get_cancel_token(handle)?;
@ -168,7 +162,6 @@ impl Syscall<'_> {
Ok(())
}
#[allow(unsafe_code)]
pub fn sys_object_get_info(
&self,
handle: HandleValue,
@ -224,75 +217,39 @@ impl Syscall<'_> {
let mut info = vmo.get_info();
info.flags |= VmoInfoFlags::VIA_HANDLE;
info.rights |= rights;
UserOutPtr::<ZxInfoVmo>::from(buffer).write(info)?;
UserOutPtr::<VmoInfo>::from(buffer).write(info)?;
}
Topic::KmemStats => {
let mut kmem = ZxInfoKmem::default();
let mut kmem = KmemInfo::default();
kmem.vmo_bytes = vmo_page_bytes() as u64;
UserOutPtr::<ZxInfoKmem>::from(buffer).write(kmem)?;
}
Topic::JobProcess => {
let job = proc.get_object_with_rights::<Job>(handle, Rights::ENUMERATE)?;
let (mut count, mut avail_count) = (0usize, 0usize);
let ptr = UserOutPtr::<KoID>::from(buffer).as_ptr();
let item_size = core::mem::size_of::<KoID>();
job.enumerate_process(|id| {
if count < buffer_size / item_size {
unsafe {
ptr.add(count).write(id);
}
count += 1;
}
avail_count += 1;
true
});
actual.write(count)?;
avail.write(avail_count)?;
UserOutPtr::<KmemInfo>::from(buffer).write(kmem)?;
}
Topic::TaskStats => {
assert_eq!(core::mem::size_of::<ZxInfoTaskStats>(), buffer_size);
assert_eq!(core::mem::size_of::<TaskStatsInfo>(), buffer_size);
let vmar = proc
.get_object_with_rights::<Process>(handle, Rights::INSPECT)?
.vmar();
//let mut task_stats = ZxInfoTaskStats::default();
let task_stats = vmar.get_task_stats();
UserOutPtr::<ZxInfoTaskStats>::from(buffer).write(task_stats)?;
UserOutPtr::<TaskStatsInfo>::from(buffer).write(task_stats)?;
}
Topic::ProcessThreads => {
let (mut count, mut avail_count) = (0usize, 0usize);
let ptr = UserOutPtr::<KoID>::from(buffer).as_ptr();
let item_size = core::mem::size_of::<KoID>();
proc.get_object_with_rights::<Process>(handle, Rights::ENUMERATE)?
.enumerate_thread(|id| {
if count < buffer_size / item_size {
unsafe {
ptr.add(count).write(id);
}
count += 1;
}
avail_count += 1;
true
});
Topic::JobChildren | Topic::JobProcess | Topic::ProcessThreads => {
let ids = match topic {
Topic::JobChildren => proc
.get_object_with_rights::<Job>(handle, Rights::ENUMERATE)?
.children_ids(),
Topic::JobProcess => proc
.get_object_with_rights::<Job>(handle, Rights::ENUMERATE)?
.process_ids(),
Topic::ProcessThreads => proc
.get_object_with_rights::<Process>(handle, Rights::ENUMERATE)?
.thread_ids(),
_ => unreachable!(),
};
let count = (buffer_size / core::mem::size_of::<KoID>()).min(ids.len());
UserOutPtr::<KoID>::from(buffer).write_array(&ids[..count])?;
actual.write(count)?;
avail.write(avail_count)?;
}
Topic::JobChildren => {
let (mut count, mut avail_count) = (0usize, 0usize);
let ptr = UserOutPtr::<KoID>::from(buffer).as_ptr();
let item_size = core::mem::size_of::<KoID>();
proc.get_object_with_rights::<Job>(handle, Rights::ENUMERATE)?
.enumerate_children(|id| {
if count < buffer_size / item_size {
unsafe {
ptr.add(count).write(id);
}
count += 1;
}
avail_count += 1;
true
});
actual.write(count)?;
avail.write(avail_count)?;
avail.write(ids.len())?;
}
Topic::Bti => {
let bti = proc.get_object_with_rights::<Bti>(handle, Rights::INSPECT)?;
@ -333,7 +290,7 @@ impl Syscall<'_> {
signals: u32,
options: u32,
) -> ZxResult {
let signals = Signal::from_bits(signals).ok_or(ZxError::INVALID_ARGS)?;
let signals = Signal::from_bits_truncate(signals);
info!(
"object.wait_async: handle={:#x}, port={:#x}, key={:#x}, signal={:?}, options={:#X}",
handle_value, port_handle_value, key, signals, options
@ -484,7 +441,7 @@ pub struct UserWaitItem {
#[repr(C)]
#[derive(Default)]
struct ZxInfoKmem {
struct KmemInfo {
total_bytes: u64,
free_bytes: u64,
wired_bytes: u64,

View File

@ -32,14 +32,14 @@ impl Syscall<'_> {
}
}
pub fn sys_clock_adjust(&self, hrsrc: HandleValue, clock_id: u32, offset: u64) -> ZxResult {
pub fn sys_clock_adjust(&self, resource: HandleValue, clock_id: u32, offset: u64) -> ZxResult {
info!(
"clock.adjust: hrsrc={:#x?}, id={:#x}, offset={:#x}",
hrsrc, clock_id, offset
"clock.adjust: resource={:#x?}, id={:#x}, offset={:#x}",
resource, clock_id, offset
);
self.thread
.proc()
.validate_resource(hrsrc, ResourceKind::ROOT)?;
let proc = self.thread.proc();
proc.get_object::<Resource>(resource)?
.validate(ResourceKind::ROOT)?;
match clock_id {
ZX_CLOCK_MONOTONIC => Err(ZxError::ACCESS_DENIED),
ZX_CLOCK_UTC => {

View File

@ -2,6 +2,7 @@ use {
super::*,
bitflags::bitflags,
kernel_hal::CachePolicy,
numeric_enum_macro::numeric_enum,
zircon_object::{dev::*, resource::*, task::PolicyCondition, vm::*},
};
@ -83,7 +84,8 @@ impl Syscall<'_> {
);
let proc = self.thread.proc();
if vmex != INVALID_HANDLE {
proc.validate_resource(vmex, ResourceKind::VMEX)?;
proc.get_object::<Resource>(vmex)?
.validate(ResourceKind::VMEX)?;
} else {
proc.check_policy(PolicyCondition::AmbientMarkVMOExec)?;
}
@ -166,21 +168,21 @@ impl Syscall<'_> {
Ok(())
}
#[allow(unsafe_code)]
pub fn sys_vmo_create_physical(
&self,
rsrc: HandleValue,
resource: HandleValue,
paddr: PhysAddr,
size: usize,
mut out: UserOutPtr<HandleValue>,
) -> ZxResult {
info!(
"vmo.create_physical: handle={:#x?}, paddr={:#x?}, size={:#x}, out={:#x?}",
size, paddr, size, out
resource, paddr, size, out
);
let proc = self.thread.proc();
proc.check_policy(PolicyCondition::NewVMO)?;
proc.validate_resource(rsrc, ResourceKind::MMIO)?;
proc.get_object::<Resource>(resource)?
.validate_ranged_resource(ResourceKind::MMIO, paddr, size)?;
let size = roundup_pages(size);
if size == 0 || !page_aligned(paddr) {
return Err(ZxError::INVALID_ARGS);
@ -188,7 +190,7 @@ impl Syscall<'_> {
if paddr.overflowing_add(size).1 {
return Err(ZxError::INVALID_ARGS);
}
let vmo = unsafe { VmObject::new_physical(paddr, size / PAGE_SIZE) };
let vmo = VmObject::new_physical(paddr, size / PAGE_SIZE);
let handle_value = proc.add_handle(Handle::new(vmo, Rights::DEFAULT_VMO | Rights::EXECUTE));
out.write(handle_value)?;
Ok(())
@ -250,25 +252,35 @@ impl Syscall<'_> {
"vmo.op_range: handle={:#x}, op={:#X}, offset={:#x}, len={:#x}, buffer_size={:#x}",
handle_value, op, offset, len, _buffer_size,
);
let op = VmoOpType::try_from(op).or(Err(ZxError::INVALID_ARGS))?;
let proc = self.thread.proc();
let (vmo, rights) = proc.get_object_and_rights::<VmObject>(handle_value)?;
if !page_aligned(offset) || !page_aligned(len) {
return Err(ZxError::INVALID_ARGS);
}
match op {
VMO_OP_COMMIT => {
VmoOpType::Commit => {
if !rights.contains(Rights::WRITE) {
return Err(ZxError::ACCESS_DENIED);
}
if !page_aligned(offset) || !page_aligned(len) {
return Err(ZxError::INVALID_ARGS);
}
vmo.commit(offset, len)?;
Ok(())
}
VMO_OP_DECOMMIT => {
VmoOpType::Decommit => {
if !rights.contains(Rights::WRITE) {
return Err(ZxError::ACCESS_DENIED);
}
if !page_aligned(offset) || !page_aligned(len) {
return Err(ZxError::INVALID_ARGS);
}
vmo.decommit(offset, len)
}
VmoOpType::Zero => {
if !rights.contains(Rights::WRITE) {
return Err(ZxError::ACCESS_DENIED);
}
vmo.zero(offset, len)
}
_ => unimplemented!(),
}
}
@ -292,6 +304,18 @@ bitflags! {
}
}
/// VMO Opcodes (for vmo_op_range)
const VMO_OP_COMMIT: u32 = 1;
const VMO_OP_DECOMMIT: u32 = 2;
numeric_enum! {
#[repr(u32)]
/// VMO Opcodes (for vmo_op_range)
pub enum VmoOpType {
Commit = 1,
Decommit = 2,
Lock = 3,
Unlock = 4,
CacheSync = 6,
CacheInvalidate = 7,
CacheClean = 8,
CacheCleanInvalidate = 9,
Zero = 10,
}
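A hedged note on the `numeric_enum!` macro used here: it generates a `TryFrom<u32>` conversion for `VmoOpType`, which is what lets `sys_vmo_op_range` above turn the raw opcode into the typed enum and reject unknown values with `INVALID_ARGS`.
```rust
use core::convert::TryFrom;

// Sketch: converting raw opcodes the way the syscall entry does.
fn opcode_examples() {
    assert!(matches!(VmoOpType::try_from(10u32), Ok(VmoOpType::Zero)));
    assert!(VmoOpType::try_from(5u32).is_err()); // 5 is not a defined opcode
}
```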
}