diff --git a/drivers/src/builder/devicetree.rs b/drivers/src/builder/devicetree.rs index e2594a4a..c13c9431 100644 --- a/drivers/src/builder/devicetree.rs +++ b/drivers/src/builder/devicetree.rs @@ -89,7 +89,7 @@ impl DevicetreeDriverBuilder { match comp { #[cfg(feature = "virtio")] c if c.contains("virtio,mmio") => self.parse_virtio(node, props), - c if c.contains("allwinner,sunxi-gmac") => self.parse_ethernet(node, comp, props), + // c if c.contains("allwinner,sunxi-gmac") => self.parse_ethernet(node, comp, props), c if c.contains("ns16550a") => self.parse_uart(node, comp, props), c if c.contains("allwinner,sun20i-uart") => self.parse_uart(node, comp, props), _ => Err(DeviceError::NotSupported), diff --git a/drivers/src/input/mouse.rs b/drivers/src/input/mouse.rs index b5539d26..63db0831 100644 --- a/drivers/src/input/mouse.rs +++ b/drivers/src/input/mouse.rs @@ -1,7 +1,7 @@ use alloc::{boxed::Box, sync::Arc}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::prelude::{CapabilityType, InputEvent, InputEventType}; use crate::scheme::{impl_event_scheme, InputScheme}; diff --git a/drivers/src/irq/riscv_intc.rs b/drivers/src/irq/riscv_intc.rs index cf2e160c..bff11ef0 100644 --- a/drivers/src/irq/riscv_intc.rs +++ b/drivers/src/irq/riscv_intc.rs @@ -1,6 +1,6 @@ use riscv::register::sie; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::prelude::IrqHandler; use crate::scheme::{IrqScheme, Scheme}; diff --git a/drivers/src/irq/riscv_plic.rs b/drivers/src/irq/riscv_plic.rs index 9bf4b670..e575f3bb 100644 --- a/drivers/src/irq/riscv_plic.rs +++ b/drivers/src/irq/riscv_plic.rs @@ -1,7 +1,7 @@ use core::ops::Range; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::io::{Io, Mmio}; use crate::prelude::IrqHandler; diff --git a/drivers/src/irq/x86_apic/ioapic.rs b/drivers/src/irq/x86_apic/ioapic.rs index 5cb745e9..f22520bc 100644 --- a/drivers/src/irq/x86_apic/ioapic.rs +++ 
b/drivers/src/irq/x86_apic/ioapic.rs @@ -4,7 +4,7 @@ use core::{fmt, ptr::NonNull}; use acpi::platform::interrupt::InterruptModel; use acpi::{AcpiHandler, AcpiTables, PhysicalMapping}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use x2apic::ioapic::{IoApic as IoApicInner, IrqFlags, IrqMode}; use super::{IrqPolarity, IrqTriggerMode, Phys2VirtFn}; diff --git a/drivers/src/irq/x86_apic/mod.rs b/drivers/src/irq/x86_apic/mod.rs index 0ae4e414..dabd64d2 100644 --- a/drivers/src/irq/x86_apic/mod.rs +++ b/drivers/src/irq/x86_apic/mod.rs @@ -5,7 +5,7 @@ mod lapic; use core::ops::Range; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use self::consts::{X86_INT_BASE, X86_INT_LOCAL_APIC_BASE}; use self::ioapic::{IoApic, IoApicList}; diff --git a/drivers/src/mock/uart.rs b/drivers/src/mock/uart.rs index 151a926b..5fb29457 100644 --- a/drivers/src/mock/uart.rs +++ b/drivers/src/mock/uart.rs @@ -2,7 +2,7 @@ use std::collections::VecDeque; use async_std::{io, io::prelude::*, task}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::scheme::{impl_event_scheme, Scheme, UartScheme}; use crate::utils::EventListener; diff --git a/drivers/src/net/loopback.rs b/drivers/src/net/loopback.rs index cfaa075a..644f53f3 100644 --- a/drivers/src/net/loopback.rs +++ b/drivers/src/net/loopback.rs @@ -6,7 +6,7 @@ use alloc::sync::Arc; use alloc::string::String; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::scheme::{NetScheme, Scheme}; use crate::{DeviceError, DeviceResult}; diff --git a/drivers/src/net/mod.rs b/drivers/src/net/mod.rs index 74227fca..373e279e 100644 --- a/drivers/src/net/mod.rs +++ b/drivers/src/net/mod.rs @@ -64,7 +64,7 @@ pub use loopback::LoopbackInterface; use alloc::sync::Arc; use alloc::vec; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use smoltcp::socket::SocketSet; diff --git a/drivers/src/net/rtlx.rs 
b/drivers/src/net/rtlx.rs index bde6c175..ac13e639 100644 --- a/drivers/src/net/rtlx.rs +++ b/drivers/src/net/rtlx.rs @@ -4,7 +4,7 @@ use alloc::sync::Arc; use alloc::vec; use alloc::vec::Vec; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use smoltcp::iface::*; use smoltcp::phy::{self, Device, DeviceCapabilities, Medium}; diff --git a/drivers/src/uart/buffered.rs b/drivers/src/uart/buffered.rs index 821d22c8..0e04464b 100644 --- a/drivers/src/uart/buffered.rs +++ b/drivers/src/uart/buffered.rs @@ -1,7 +1,7 @@ use alloc::{boxed::Box, collections::VecDeque, string::String, sync::Arc}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::scheme::{impl_event_scheme, Scheme, UartScheme}; use crate::utils::EventListener; diff --git a/drivers/src/uart/uart_16550.rs b/drivers/src/uart/uart_16550.rs index aa445123..7ca224b9 100644 --- a/drivers/src/uart/uart_16550.rs +++ b/drivers/src/uart/uart_16550.rs @@ -3,7 +3,7 @@ use core::ops::{BitAnd, BitOr, Not}; use bitflags::bitflags; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::io::{Io, Mmio, ReadOnly}; use crate::scheme::{impl_event_scheme, Scheme, UartScheme}; diff --git a/drivers/src/utils/event_listener.rs b/drivers/src/utils/event_listener.rs index d39fcfd3..728a022d 100644 --- a/drivers/src/utils/event_listener.rs +++ b/drivers/src/utils/event_listener.rs @@ -1,7 +1,7 @@ use alloc::{boxed::Box, vec::Vec}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; pub type EventHandler = Box; diff --git a/drivers/src/virtio/blk.rs b/drivers/src/virtio/blk.rs index 5bca0414..ff7aeea0 100644 --- a/drivers/src/virtio/blk.rs +++ b/drivers/src/virtio/blk.rs @@ -1,5 +1,5 @@ // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use virtio_drivers::{VirtIOBlk as InnerDriver, VirtIOHeader}; use crate::scheme::{BlockScheme, Scheme}; diff --git a/drivers/src/virtio/console.rs b/drivers/src/virtio/console.rs 
index 45d9f5e8..7425bdb6 100644 --- a/drivers/src/virtio/console.rs +++ b/drivers/src/virtio/console.rs @@ -1,7 +1,7 @@ use core::fmt::{Result, Write}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use virtio_drivers::{VirtIOConsole as InnerDriver, VirtIOHeader}; use crate::prelude::DeviceResult; diff --git a/drivers/src/virtio/gpu.rs b/drivers/src/virtio/gpu.rs index ed5c6a38..e33c9901 100644 --- a/drivers/src/virtio/gpu.rs +++ b/drivers/src/virtio/gpu.rs @@ -1,5 +1,5 @@ // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use virtio_drivers::{VirtIOGpu as InnerDriver, VirtIOHeader}; use crate::prelude::{ColorFormat, DisplayInfo, FrameBuffer}; diff --git a/drivers/src/virtio/input.rs b/drivers/src/virtio/input.rs index b0097db9..39605700 100644 --- a/drivers/src/virtio/input.rs +++ b/drivers/src/virtio/input.rs @@ -1,7 +1,7 @@ use core::convert::TryFrom; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use virtio_drivers::{InputConfigSelect, VirtIOHeader, VirtIOInput as InnerDriver}; use crate::prelude::{CapabilityType, InputCapability, InputEvent, InputEventType}; diff --git a/kernel-hal/src/bare/arch/riscv/trap.rs b/kernel-hal/src/bare/arch/riscv/trap.rs index 211f60ec..8d417aca 100644 --- a/kernel-hal/src/bare/arch/riscv/trap.rs +++ b/kernel-hal/src/bare/arch/riscv/trap.rs @@ -26,6 +26,9 @@ pub(super) fn super_soft() { #[no_mangle] pub extern "C" fn trap_handler(tf: &mut TrapFrame) { let scause = scause::read(); + + info!("trap happened: {:?}", TrapReason::from(scause)); + match TrapReason::from(scause) { TrapReason::SoftwareBreakpoint => breakpoint(&mut tf.sepc), TrapReason::PageFault(vaddr, flags) => crate::KHANDLER.handle_page_fault(vaddr, flags), diff --git a/kernel-hal/src/bare/arch/riscv/vm.rs b/kernel-hal/src/bare/arch/riscv/vm.rs index bc514b39..cf23a0f7 100644 --- a/kernel-hal/src/bare/arch/riscv/vm.rs +++ b/kernel-hal/src/bare/arch/riscv/vm.rs @@ -5,7 +5,7 @@ use core::slice; use 
riscv::{asm, register::satp}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::utils::page_table::{GenericPTE, PageTableImpl, PageTableLevel3}; use crate::{mem::phys_to_virt, MMUFlags, PhysAddr, VirtAddr, KCONFIG}; diff --git a/kernel-hal/src/bare/net.rs b/kernel-hal/src/bare/net.rs index 9a546b16..7d105666 100644 --- a/kernel-hal/src/bare/net.rs +++ b/kernel-hal/src/bare/net.rs @@ -12,7 +12,7 @@ use alloc::sync::Arc; use alloc::string::String; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::drivers::add_device; use crate::drivers::all_net; diff --git a/kernel-hal/src/bare/timer.rs b/kernel-hal/src/bare/timer.rs index 16050724..567b11c3 100644 --- a/kernel-hal/src/bare/timer.rs +++ b/kernel-hal/src/bare/timer.rs @@ -5,7 +5,7 @@ use core::time::Duration; use naive_timer::Timer; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; #[allow(dead_code)] pub(super) const TICKS_PER_SEC: u64 = 100; diff --git a/kernel-hal/src/common/console.rs b/kernel-hal/src/common/console.rs index 2a2a5eef..83ed089b 100644 --- a/kernel-hal/src/common/console.rs +++ b/kernel-hal/src/common/console.rs @@ -3,7 +3,7 @@ use crate::drivers; use core::fmt::{Arguments, Result, Write}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; struct SerialWriter; diff --git a/kernel-hal/src/drivers.rs b/kernel-hal/src/drivers.rs index 1c918893..d5588420 100644 --- a/kernel-hal/src/drivers.rs +++ b/kernel-hal/src/drivers.rs @@ -3,7 +3,8 @@ use alloc::{sync::Arc, vec::Vec}; use core::convert::From; -use spin::{RwLock, RwLockReadGuard}; +// use spin::{RwLock, RwLockReadGuard}; +use lock::rwlock::{RwLock, RwLockReadGuard}; use zcore_drivers::scheme::{ BlockScheme, DisplayScheme, InputScheme, IrqScheme, NetScheme, Scheme, UartScheme, diff --git a/kernel-hal/src/libos/mem.rs b/kernel-hal/src/libos/mem.rs index 04fee61a..bd5a51b2 100644 --- a/kernel-hal/src/libos/mem.rs +++ 
b/kernel-hal/src/libos/mem.rs @@ -5,7 +5,7 @@ use core::ops::Range; use bitmap_allocator::BitAlloc; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use super::mock_mem::MockMemory; use crate::{PhysAddr, VirtAddr, PAGE_SIZE}; diff --git a/kernel-hal/src/libos/net.rs b/kernel-hal/src/libos/net.rs index 9a546b16..7d105666 100644 --- a/kernel-hal/src/libos/net.rs +++ b/kernel-hal/src/libos/net.rs @@ -12,7 +12,7 @@ use alloc::sync::Arc; use alloc::string::String; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use crate::drivers::add_device; use crate::drivers::all_net; diff --git a/kernel-hal/src/utils/page_table.rs b/kernel-hal/src/utils/page_table.rs index 75986872..77853dea 100644 --- a/kernel-hal/src/utils/page_table.rs +++ b/kernel-hal/src/utils/page_table.rs @@ -7,7 +7,7 @@ use crate::common::vm::*; use crate::{mem::PhysFrame, MMUFlags, PhysAddr, VirtAddr}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; pub trait PageTableLevel: Sync + Send { const LEVEL: usize; diff --git a/linux-object/src/fs/devfs/input/event.rs b/linux-object/src/fs/devfs/input/event.rs index 9f6ca763..749ab00a 100644 --- a/linux-object/src/fs/devfs/input/event.rs +++ b/linux-object/src/fs/devfs/input/event.rs @@ -3,7 +3,7 @@ use core::task::{Context, Poll}; use core::{any::Any, future::Future, mem::size_of, pin::Pin}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use kernel_hal::drivers::prelude::{InputEvent, InputEventType}; use kernel_hal::drivers::scheme::InputScheme; diff --git a/linux-object/src/fs/devfs/input/mice.rs b/linux-object/src/fs/devfs/input/mice.rs index 0ca5e75e..6868db98 100644 --- a/linux-object/src/fs/devfs/input/mice.rs +++ b/linux-object/src/fs/devfs/input/mice.rs @@ -3,7 +3,7 @@ use core::task::{Context, Poll}; use core::{any::Any, future::Future, pin::Pin}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use 
kernel_hal::drivers::prelude::input::{Mouse, MouseFlags, MouseState}; use kernel_hal::drivers::scheme::{EventScheme, InputScheme}; diff --git a/linux-object/src/fs/devfs/random.rs b/linux-object/src/fs/devfs/random.rs index f2648aeb..996252e3 100644 --- a/linux-object/src/fs/devfs/random.rs +++ b/linux-object/src/fs/devfs/random.rs @@ -6,7 +6,7 @@ use core::any::Any; use rcore_fs::vfs::*; use rcore_fs_devfs::DevFS; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; /// random INode data struct pub struct RandomINodeData { diff --git a/linux-object/src/fs/device.rs b/linux-object/src/fs/device.rs index b2fceaaf..e7f73e29 100644 --- a/linux-object/src/fs/device.rs +++ b/linux-object/src/fs/device.rs @@ -1,7 +1,8 @@ //! Implement Device use rcore_fs::dev::{Device, Result}; -use spin::RwLock; +// use spin::RwLock; +use lock::rwlock::RwLock; /// memory buffer for device pub struct MemBuf(RwLock<&'static mut [u8]>); diff --git a/linux-object/src/fs/file.rs b/linux-object/src/fs/file.rs index e1637e9a..03e2c1fa 100644 --- a/linux-object/src/fs/file.rs +++ b/linux-object/src/fs/file.rs @@ -3,7 +3,8 @@ use alloc::{boxed::Box, string::String, sync::Arc}; use async_trait::async_trait; -use spin::RwLock; +// use spin::RwLock; +use lock::rwlock::RwLock; use rcore_fs::vfs::{FileType, FsError, INode, Metadata, PollStatus}; use zircon_object::object::*; diff --git a/linux-object/src/fs/pipe.rs b/linux-object/src/fs/pipe.rs index bed731da..587dccf7 100644 --- a/linux-object/src/fs/pipe.rs +++ b/linux-object/src/fs/pipe.rs @@ -11,7 +11,7 @@ use core::{ }; use rcore_fs::vfs::*; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; #[derive(Clone, PartialEq)] #[allow(dead_code)] diff --git a/linux-object/src/fs/rcore_fs_wrapper.rs b/linux-object/src/fs/rcore_fs_wrapper.rs index edf1058e..7f446683 100644 --- a/linux-object/src/fs/rcore_fs_wrapper.rs +++ b/linux-object/src/fs/rcore_fs_wrapper.rs @@ -7,7 +7,8 @@ extern crate rcore_fs; use 
kernel_hal::drivers::scheme::BlockScheme; use rcore_fs::dev::{BlockDevice, DevError, Device, Result}; -use spin::RwLock; +// use spin::RwLock; +use lock::rwlock::RwLock; /// A naive LRU cache layer for `BlockDevice`, re-exported from `rcore-fs`. pub use rcore_fs::dev::block_cache::BlockCache; diff --git a/linux-object/src/fs/stdio.rs b/linux-object/src/fs/stdio.rs index 697763e2..3c07ed14 100644 --- a/linux-object/src/fs/stdio.rs +++ b/linux-object/src/fs/stdio.rs @@ -14,7 +14,7 @@ use kernel_hal::console::{self, ConsoleWinSize}; use lazy_static::lazy_static; use rcore_fs::vfs::*; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; lazy_static! { /// STDIN global reference diff --git a/linux-object/src/ipc/mod.rs b/linux-object/src/ipc/mod.rs index f6c85185..e966bd3a 100644 --- a/linux-object/src/ipc/mod.rs +++ b/linux-object/src/ipc/mod.rs @@ -8,6 +8,8 @@ pub use self::shared_mem::*; use alloc::collections::BTreeMap; use alloc::sync::Arc; use bitflags::*; +// use spin::Mutex; +use lock::mutex::Mutex; /// Semaphore table in a process #[derive(Default)] @@ -130,7 +132,7 @@ impl Drop for SemProc { impl ShmProc { /// Insert the `SharedGuard` and return its ID - pub fn add(&mut self, shared_guard: Arc>) -> ShmId { + pub fn add(&mut self, shared_guard: Arc>) -> ShmId { let id = self.get_free_id(); let shm_identifier = ShmIdentifier { addr: 0, diff --git a/linux-object/src/ipc/semary.rs b/linux-object/src/ipc/semary.rs index 899eadda..3d99e514 100644 --- a/linux-object/src/ipc/semary.rs +++ b/linux-object/src/ipc/semary.rs @@ -7,8 +7,9 @@ use alloc::{collections::BTreeMap, sync::Arc, sync::Weak, vec::Vec}; use core::ops::Index; use lazy_static::*; // use spin::Mutex; -use lock::spinlock::Mutex; -use spin::RwLock; +use lock::mutex::Mutex; +// use spin::RwLock; +use lock::rwlock::RwLock; /// semid data structure /// diff --git a/linux-object/src/ipc/shared_mem.rs b/linux-object/src/ipc/shared_mem.rs index 128db31f..81b2722a 100644 --- 
a/linux-object/src/ipc/shared_mem.rs +++ b/linux-object/src/ipc/shared_mem.rs @@ -5,13 +5,13 @@ use crate::time::TimeSpec; use alloc::{collections::BTreeMap, sync::Arc, sync::Weak}; use lazy_static::lazy_static; // use spin::Mutex; -use lock::spinlock::Mutex; -use spin::RwLock; +use lock::mutex::Mutex; +// use spin::RwLock; +use lock::rwlock::RwLock; use zircon_object::vm::*; lazy_static! { - static ref KEY2SHM: RwLock>>> = - RwLock::new(BTreeMap::new()); + static ref KEY2SHM: RwLock>>> = RwLock::new(BTreeMap::new()); } /// shmid data structure @@ -44,7 +44,7 @@ pub struct ShmIdentifier { /// Shared memory address pub addr: usize, /// Shared memory buffer and data - pub guard: Arc>, + pub guard: Arc>, } /// shared memory buffer and data @@ -67,7 +67,7 @@ impl ShmIdentifier { memsize: usize, flags: usize, cpid: u32, - ) -> Result>, LxError> { + ) -> Result>, LxError> { let mut key2shm = KEY2SHM.write(); let flag = IpcGetFlag::from_bits_truncate(flags); @@ -81,7 +81,7 @@ impl ShmIdentifier { return Ok(guard); } } - let shared_guard = Arc::new(spin::Mutex::new(ShmGuard { + let shared_guard = Arc::new(Mutex::new(ShmGuard { shared_guard: VmObject::new_paged(pages(memsize)), shmid_ds: Mutex::new(ShmidDs { perm: IpcPerm { diff --git a/linux-object/src/net/mod.rs b/linux-object/src/net/mod.rs index a8f07e13..5dc2d1f0 100644 --- a/linux-object/src/net/mod.rs +++ b/linux-object/src/net/mod.rs @@ -14,7 +14,7 @@ pub mod udp; pub use udp::*; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; /// missing documentation // pub mod raw; // pub use raw::*; diff --git a/linux-object/src/net/tcp.rs b/linux-object/src/net/tcp.rs index 1a8c71de..0613d51e 100644 --- a/linux-object/src/net/tcp.rs +++ b/linux-object/src/net/tcp.rs @@ -16,7 +16,7 @@ use crate::net::TCP_RECVBUF; use crate::net::TCP_SENDBUF; use alloc::sync::Arc; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; // alloc use alloc::boxed::Box; diff --git 
a/linux-object/src/net/udp.rs b/linux-object/src/net/udp.rs index 2d8491f0..b1b12e75 100644 --- a/linux-object/src/net/udp.rs +++ b/linux-object/src/net/udp.rs @@ -24,7 +24,7 @@ use crate::net::UDP_METADATA_BUF; use crate::net::UDP_RECVBUF; use crate::net::UDP_SENDBUF; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; // alloc use alloc::boxed::Box; diff --git a/linux-object/src/process.rs b/linux-object/src/process.rs index f9e42162..2779139b 100644 --- a/linux-object/src/process.rs +++ b/linux-object/src/process.rs @@ -13,7 +13,7 @@ use hashbrown::HashMap; use rcore_fs::vfs::{FileSystem, INode}; use smoltcp::socket::SocketHandle; // use spin::{Mutex, MutexGuard}; -use lock::spinlock::{Mutex, MutexGuard}; +use lock::mutex::{Mutex, MutexGuard}; use kernel_hal::VirtAddr; use zircon_object::{ @@ -485,7 +485,7 @@ impl LinuxProcess { } /// Insert the `SharedGuard` and return its ID - pub fn shm_add(&self, shared_guard: Arc>) -> usize { + pub fn shm_add(&self, shared_guard: Arc>) -> usize { self.inner.lock().shm_identifiers.add(shared_guard) } diff --git a/linux-object/src/sync/event_bus.rs b/linux-object/src/sync/event_bus.rs index 81ec0832..5c499185 100644 --- a/linux-object/src/sync/event_bus.rs +++ b/linux-object/src/sync/event_bus.rs @@ -10,7 +10,7 @@ use core::{ task::{Context, Poll}, }; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; bitflags! { #[derive(Default)] diff --git a/linux-object/src/sync/semaphore.rs b/linux-object/src/sync/semaphore.rs index f82ee022..305d4ec4 100644 --- a/linux-object/src/sync/semaphore.rs +++ b/linux-object/src/sync/semaphore.rs @@ -12,7 +12,7 @@ use core::ops::Deref; use core::pin::Pin; use core::task::{Context, Poll}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; /// A counting, blocking, semaphore. 
pub struct Semaphore { diff --git a/linux-object/src/thread.rs b/linux-object/src/thread.rs index ca772a4a..fa43c629 100644 --- a/linux-object/src/thread.rs +++ b/linux-object/src/thread.rs @@ -6,7 +6,7 @@ use alloc::sync::Arc; use kernel_hal::user::{Out, UserOutPtr, UserPtr}; use kernel_hal::VirtAddr; // use spin::{Mutex, MutexGuard}; -use lock::spinlock::{Mutex, MutexGuard}; +use lock::mutex::{Mutex, MutexGuard}; use zircon_object::task::{CurrentThread, Process, Thread}; use zircon_object::ZxResult; diff --git a/linux-syscall/src/net.rs b/linux-syscall/src/net.rs index 9641fa75..30f85f85 100644 --- a/linux-syscall/src/net.rs +++ b/linux-syscall/src/net.rs @@ -7,7 +7,7 @@ use linux_object::net::TcpSocketState; use linux_object::net::UdpSocketState; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; impl Syscall<'_> { /// net socket diff --git a/lock/src/interrupt.rs b/lock/src/interrupt.rs index 0e69e7fc..e89535fd 100644 --- a/lock/src/interrupt.rs +++ b/lock/src/interrupt.rs @@ -1,27 +1,33 @@ use core::cell::{RefCell, RefMut}; use lazy_static::*; -extern "C" interrupt_ffi { - pub fn intr_on(); - pub fn intr_off(); - pub fn intr_get() -> bool; - pub fn cpu_id() -> u8; +mod lock_ffi { + extern "C" { + pub fn intr_on(); + pub fn intr_off(); + pub fn intr_get() -> bool; + pub fn cpu_id() -> u8; + } } -pub fn intr_on_() { - unsafe { intr_on(); } +pub fn intr_on() { + unsafe { + lock_ffi::intr_on(); + } } -pub fn intr_off_() { - unsafe { intr_off(); } +pub fn intr_off() { + unsafe { + lock_ffi::intr_off(); + } } -pub fn intr_get_() -> bool { - unsafe { intr_get() } +pub fn intr_get() -> bool { + unsafe { lock_ffi::intr_get() } } -pub fn cpu_id_() -> u8 { - unsafe { cpu_id() } +pub fn cpu_id() -> u8 { + unsafe { lock_ffi::cpu_id() } } #[derive(Debug, Default, Clone, Copy)] @@ -58,15 +64,15 @@ lazy_static! 
{ } pub fn mycpu() -> RefMut<'static, Cpu> { - return CPUS[cpu_id_() as usize].0.borrow_mut(); + return CPUS[cpu_id() as usize].0.borrow_mut(); } -// push_off/pop_off are like intr_off_()/intr_on_() except that they are matched: +// push_off/pop_off are like intr_off()/intr_on() except that they are matched: // it takes two pop_off()s to undo two push_off()s. Also, if interrupts // are initially off, then push_off, pop_off leaves them off. pub(crate) fn push_off() { - let old = intr_get_(); - intr_off_(); + let old = intr_get(); + intr_off(); let mut cpu = mycpu(); if cpu.noff == 0 { cpu.interrupt_enable = old; @@ -76,11 +82,11 @@ pub(crate) fn push_off() { pub(crate) fn pop_off() { let mut cpu = mycpu(); - if intr_get_() || cpu.noff < 1 { + if intr_get() || cpu.noff < 1 { panic!("pop_off"); } cpu.noff -= 1; if cpu.noff == 0 && cpu.interrupt_enable { - intr_on_(); + intr_on(); } } diff --git a/lock/src/lib.rs b/lock/src/lib.rs index 492581f3..21a1cb3a 100644 --- a/lock/src/lib.rs +++ b/lock/src/lib.rs @@ -3,7 +3,7 @@ extern crate alloc; -pub mod interrupt; -// pub mod mutex; -pub mod spinlock; +mod interrupt; +pub mod mutex; +pub mod rwlock; diff --git a/lock/src/spinlock.rs b/lock/src/mutex.rs similarity index 83% rename from lock/src/spinlock.rs rename to lock/src/mutex.rs index 3b4628fd..fcb6301c 100644 --- a/lock/src/spinlock.rs +++ b/lock/src/mutex.rs @@ -1,16 +1,15 @@ use core::{ cell::UnsafeCell, + default::Default, fmt, ops::{Deref, DerefMut}, sync::atomic::{AtomicBool, Ordering}, - default::Default, }; -use crate::interrupt::{cpu_id_, pop_off, push_off}; +use crate::interrupt::{pop_off, push_off}; pub struct Mutex { pub(crate) locked: AtomicBool, - cpuid: u8, data: UnsafeCell, } @@ -32,7 +31,6 @@ impl Mutex { Mutex { locked: AtomicBool::new(false), data: UnsafeCell::new(data), - cpuid: 0, } } @@ -54,10 +52,6 @@ impl Mutex { #[inline(always)] pub fn lock(&self) -> MutexGuard { push_off(); - if self.holding() { - panic!("a spinlock can only be locked once by 
a CPU"); - } - while self .locked .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) @@ -68,7 +62,6 @@ impl Mutex { core::hint::spin_loop(); } } - MutexGuard { spinlock: self, data: unsafe { &mut *self.data.get() }, @@ -78,9 +71,6 @@ impl Mutex { #[inline(always)] pub fn try_lock(&self) -> Option> { push_off(); - if self.holding() { - panic!("a spinlock can only be locked once by a CPU"); - } if self .locked .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) @@ -107,13 +97,6 @@ impl Mutex { pub fn is_locked(&self) -> bool { self.locked.load(Ordering::Relaxed) } - - /// Check whether this cpu is holding the lock. - /// Interrupts must be off. - #[inline(always)] - pub fn holding(&self) -> bool { - return self.is_locked() && self.cpuid == cpu_id_(); - } } impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> { @@ -138,7 +121,7 @@ impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> { impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { /// The dropping of the MutexGuard will release the lock it was created from. 
fn drop(&mut self) { - if !self.spinlock.holding() { + if !self.spinlock.is_locked() { panic!("current cpu doesn't hold the lock{}", self.spinlock); } self.spinlock.locked.store(false, Ordering::Release); @@ -146,14 +129,17 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> { } } +// Does not make sense, just to use #[derive(Debug)] impl fmt::Display for Mutex { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Spinlock{{locked={}, cpuid={}}}", - self.locked.load(Ordering::Relaxed), - self.cpuid, - ) + write!(f, "Mutex{{locked={}}}", self.locked.load(Ordering::Relaxed),) + } +} + +// Does not make sense, just to use #[derive(Debug)] +impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Mutex{{locked={}}}", self.locked.load(Ordering::Relaxed)) + } } @@ -161,4 +147,4 @@ impl Default for Mutex { fn default() -> Self { Mutex::new(T::default()) } -} \ No newline at end of file +} diff --git a/lock/src/rwlock.rs b/lock/src/rwlock.rs index 3a45c279..7dfcca19 100644 --- a/lock/src/rwlock.rs +++ b/lock/src/rwlock.rs @@ -4,14 +4,15 @@ use core::{ cell::UnsafeCell, fmt, hint::spin_loop, - marker::PhantomData, + // marker::PhantomData, mem, ops::{Deref, DerefMut}, sync::atomic::{AtomicUsize, Ordering}, }; +use crate::interrupt::{pop_off, push_off}; + pub struct RwLock { - // phantom: PhantomData, lock: AtomicUsize, data: UnsafeCell, } @@ -52,8 +53,8 @@ pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized> { } // Same unsafe impls as `std::sync::RwLock` -unsafe impl Send for RwLock {} -unsafe impl Sync for RwLock {} +unsafe impl Send for RwLock {} +unsafe impl Sync for RwLock {} impl RwLock { /// Creates a new spinlock wrapping the supplied data. @@ -74,13 +75,13 @@ impl RwLock { #[inline] pub const fn new(data: T) -> Self { RwLock { - phantom: PhantomData, + // phantom: PhantomData, lock: AtomicUsize::new(0), data: UnsafeCell::new(data), } } - /// Consumes this `RwLock`, returning the underlying data.
+ /// Consumes this `RwLock`, returning the underlying data. #[inline] pub fn into_inner(self) -> T { // We know statically that there are no outstanding references to @@ -189,9 +190,7 @@ impl RwLock { } } } -} -impl RwLock { /// Attempt to acquire this lock with shared read access. /// /// This function will never block and will return immediately if `read` @@ -216,6 +215,7 @@ impl RwLock { /// ``` #[inline] pub fn try_read(&self) -> Option> { + push_off(); let value = self.lock.fetch_add(READER, Ordering::Acquire); // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held. @@ -223,6 +223,7 @@ impl RwLock { if value & (WRITER | UPGRADED) != 0 { // Lock is taken, undo. self.lock.fetch_sub(READER, Ordering::Release); + pop_off(); None } else { Some(RwLockReadGuard { @@ -285,6 +286,7 @@ impl RwLock { #[inline(always)] fn try_write_internal(&self, strong: bool) -> Option> { + push_off(); if compare_exchange( &self.lock, 0, @@ -296,11 +298,12 @@ impl RwLock { .is_ok() { Some(RwLockWriteGuard { - phantom: PhantomData, + // phantom: PhantomData, inner: self, data: unsafe { &mut *self.data.get() }, }) } else { + pop_off(); None } } @@ -332,15 +335,17 @@ impl RwLock { /// Tries to obtain an upgradeable lock guard. #[inline] pub fn try_upgradeable_read(&self) -> Option> { + push_off(); if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 { Some(RwLockUpgradableGuard { - phantom: PhantomData, + // phantom: PhantomData, inner: self, data: unsafe { &*self.data.get() }, }) } else { // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock. // When they unlock, they will clear the bit.
+ pop_off(); None } } @@ -364,7 +369,7 @@ impl RwLock { } } -impl fmt::Debug for RwLock { +impl fmt::Debug for RwLock { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.try_read() { Some(guard) => write!(f, "RwLock {{ data: ") @@ -375,7 +380,7 @@ impl fmt::Debug for RwLock { } } -impl Default for RwLock { +impl Default for RwLock { fn default() -> Self { Self::new(Default::default()) } @@ -418,7 +423,7 @@ impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock } } -impl<'rwlock, T: ?Sized> RwLockUpgradableGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> RwLockUpgradableGuard<'rwlock, T> { /// Upgrades an upgradeable lock guard to a writable lock guard. /// /// ``` @@ -428,21 +433,21 @@ impl<'rwlock, T: ?Sized> RwLockUpgradableGuard<'rwlock, T, R> { /// let writable = upgradeable.upgrade(); /// ``` #[inline] - pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> { + pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T> { loop { self = match self.try_upgrade_internal(false) { Ok(guard) => return guard, Err(e) => e, }; - R::relax(); + spin_loop(); } } } -impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> RwLockUpgradableGuard<'rwlock, T> { #[inline(always)] - fn try_upgrade_internal(self, strong: bool) -> Result, Self> { + fn try_upgrade_internal(self, strong: bool) -> Result, Self> { if compare_exchange( &self.inner.lock, UPGRADED, @@ -460,7 +465,7 @@ impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> { // Upgrade successful Ok(RwLockWriteGuard { - phantom: PhantomData, + // phantom: PhantomData, inner, data: unsafe { &mut *inner.data.get() }, }) @@ -481,7 +486,7 @@ impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> { /// }; /// ``` #[inline] - pub fn try_upgrade(self) -> Result, Self> { + pub fn try_upgrade(self) -> Result, Self> { self.try_upgrade_internal(true) } @@ -532,19 +537,19 @@ impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, 
T, R> { } } -impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockUpgradableGuard<'rwlock, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } -impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockUpgradableGuard<'rwlock, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&**self, f) } } -impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> { /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin. /// /// ``` @@ -585,7 +590,7 @@ impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> { /// assert_eq!(*readable, 1); /// ``` #[inline] - pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> { + pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T> { debug_assert_eq!( self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED), WRITER @@ -600,7 +605,7 @@ impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> { mem::forget(self); RwLockUpgradableGuard { - phantom: PhantomData, + // phantom: PhantomData, inner, data: unsafe { &*inner.data.get() }, } @@ -626,13 +631,13 @@ impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> { } } -impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'rwlock, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } -impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'rwlock, T> { fn fmt(&self, f: &mut fmt::Formatter) -> 
fmt::Result { fmt::Display::fmt(&**self, f) } @@ -646,7 +651,7 @@ impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> { } } -impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> Deref for RwLockUpgradableGuard<'rwlock, T> { type Target = T; fn deref(&self) -> &T { @@ -654,7 +659,7 @@ impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> { } } -impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> { type Target = T; fn deref(&self) -> &T { @@ -662,7 +667,7 @@ impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> { } } -impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> { fn deref_mut(&mut self) -> &mut T { self.data } @@ -672,20 +677,22 @@ impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> { fn drop(&mut self) { debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0); self.lock.fetch_sub(READER, Ordering::Release); + pop_off(); } } -impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> Drop for RwLockUpgradableGuard<'rwlock, T> { fn drop(&mut self) { debug_assert_eq!( self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED), UPGRADED ); self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel); + pop_off(); } } -impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> { +impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> { fn drop(&mut self) { debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER); @@ -694,6 +701,7 @@ impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> { self.inner .lock .fetch_and(!(WRITER | UPGRADED), Ordering::Release); + pop_off(); } } @@ -713,365 +721,230 @@ fn compare_exchange( } } -#[cfg(feature = "lock_api")] -unsafe impl 
lock_api_crate::RawRwLock for RwLock<(), R> { - type GuardMarker = lock_api_crate::GuardSend; +// #[cfg(test)] +// mod tests { +// use std::prelude::v1::*; - const INIT: Self = Self::new(()); +// use std::sync::atomic::{AtomicUsize, Ordering}; +// use std::sync::mpsc::channel; +// use std::sync::Arc; +// use std::thread; - #[inline(always)] - fn lock_exclusive(&self) { - // Prevent guard destructor running - core::mem::forget(self.write()); - } +// type RwLock = super::RwLock; - #[inline(always)] - fn try_lock_exclusive(&self) -> bool { - // Prevent guard destructor running - self.try_write().map(|g| core::mem::forget(g)).is_some() - } +// #[derive(Eq, PartialEq, Debug)] +// struct NonCopy(i32); - #[inline(always)] - unsafe fn unlock_exclusive(&self) { - drop(RwLockWriteGuard { - inner: self, - data: &mut (), - phantom: PhantomData, - }); - } +// #[test] +// fn smoke() { +// let l = RwLock::new(()); +// drop(l.read()); +// drop(l.write()); +// drop((l.read(), l.read())); +// drop(l.write()); +// } - #[inline(always)] - fn lock_shared(&self) { - // Prevent guard destructor running - core::mem::forget(self.read()); - } +// // TODO: needs RNG +// //#[test] +// //fn frob() { +// // static R: RwLock = RwLock::new(); +// // const N: usize = 10; +// // const M: usize = 1000; +// // +// // let (txx) = channel::<()>(); +// // for _ in 0..N { +// // let tx = tx.clone(); +// // thread::spawn(move|| { +// // let mut rng = rand::thread_rng(); +// // for _ in 0..M { +// // if rng.gen_weighted_bool(N) { +// // drop(R.write()); +// // } else { +// // drop(R.read()); +// // } +// // } +// // drop(tx); +// // }); +// // } +// // drop(tx); +// // let _ = rx.recv(); +// // unsafe { R.destroy(); } +// //} - #[inline(always)] - fn try_lock_shared(&self) -> bool { - // Prevent guard destructor running - self.try_read().map(|g| core::mem::forget(g)).is_some() - } +// #[test] +// fn test_rw_arc() { +// let arc = Arc::new(RwLock::new(0)); +// let arc2 = arc.clone(); +// let (txx) = 
channel(); - #[inline(always)] - unsafe fn unlock_shared(&self) { - drop(RwLockReadGuard { - lock: &self.lock, - data: &(), - }); - } +// thread::spawn(move || { +// let mut lock = arc2.write(); +// for _ in 0..10 { +// let tmp = *lock; +// *lock = -1; +// thread::yield_now(); +// *lock = tmp + 1; +// } +// tx.send(()).unwrap(); +// }); - #[inline(always)] - fn is_locked(&self) -> bool { - self.lock.load(Ordering::Relaxed) != 0 - } -} +// // Readers try to catch the writer in the act +// let mut children = Vec::new(); +// for _ in 0..5 { +// let arc3 = arc.clone(); +// children.push(thread::spawn(move || { +// let lock = arc3.read(); +// assert!(*lock >= 0); +// })); +// } -#[cfg(feature = "lock_api")] -unsafe impl lock_api_crate::RawRwLockUpgrade for RwLock<(), R> { - #[inline(always)] - fn lock_upgradable(&self) { - // Prevent guard destructor running - core::mem::forget(self.upgradeable_read()); - } +// // Wait for children to pass their asserts +// for r in children { +// assert!(r.join().is_ok()); +// } - #[inline(always)] - fn try_lock_upgradable(&self) -> bool { - // Prevent guard destructor running - self.try_upgradeable_read() - .map(|g| core::mem::forget(g)) - .is_some() - } +// // Wait for writer to finish +// rx.recv().unwrap(); +// let lock = arc.read(); +// assert_eq!(*lock, 10); +// } - #[inline(always)] - unsafe fn unlock_upgradable(&self) { - drop(RwLockUpgradableGuard { - inner: self, - data: &(), - phantom: PhantomData, - }); - } +// #[test] +// fn test_rw_access_in_unwind() { +// let arc = Arc::new(RwLock::new(1)); +// let arc2 = arc.clone(); +// let _ = thread::spawn(move || -> () { +// struct Unwinder { +// i: Arc>, +// } +// impl Drop for Unwinder { +// fn drop(&mut self) { +// let mut lock = self.i.write(); +// *lock += 1; +// } +// } +// let _u = Unwinder { i: arc2 }; +// panic!(); +// }) +// .join(); +// let lock = arc.read(); +// assert_eq!(*lock, 2); +// } - #[inline(always)] - unsafe fn upgrade(&self) { - let tmp_guard = 
RwLockUpgradableGuard { - inner: self, - data: &(), - phantom: PhantomData, - }; - core::mem::forget(tmp_guard.upgrade()); - } +// #[test] +// fn test_rwlock_unsized() { +// let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); +// { +// let b = &mut *rw.write(); +// b[0] = 4; +// b[2] = 5; +// } +// let comp: &[i32] = &[4, 2, 5]; +// assert_eq!(&*rw.read(), comp); +// } - #[inline(always)] - unsafe fn try_upgrade(&self) -> bool { - let tmp_guard = RwLockUpgradableGuard { - inner: self, - data: &(), - phantom: PhantomData, - }; - tmp_guard - .try_upgrade() - .map(|g| core::mem::forget(g)) - .is_ok() - } -} +// #[test] +// fn test_rwlock_try_write() { +// use std::mem::drop; -#[cfg(feature = "lock_api")] -unsafe impl lock_api_crate::RawRwLockDowngrade for RwLock<(), R> { - unsafe fn downgrade(&self) { - let tmp_guard = RwLockWriteGuard { - inner: self, - data: &mut (), - phantom: PhantomData, - }; - core::mem::forget(tmp_guard.downgrade()); - } -} +// let lock = RwLock::new(0isize); +// let read_guard = lock.read(); -#[cfg(feature = "lock_api1")] -unsafe impl lock_api::RawRwLockUpgradeDowngrade for RwLock<()> { - unsafe fn downgrade_upgradable(&self) { - let tmp_guard = RwLockUpgradableGuard { - inner: self, - data: &(), - phantom: PhantomData, - }; - core::mem::forget(tmp_guard.downgrade()); - } +// let write_result = lock.try_write(); +// match write_result { +// None => (), +// Some(_) => assert!( +// false, +// "try_write should not succeed while read_guard is in scope" +// ), +// } - unsafe fn downgrade_to_upgradable(&self) { - let tmp_guard = RwLockWriteGuard { - inner: self, - data: &mut (), - phantom: PhantomData, - }; - core::mem::forget(tmp_guard.downgrade_to_upgradeable()); - } -} +// drop(read_guard); +// } -#[cfg(test)] -mod tests { - use std::prelude::v1::*; +// #[test] +// fn test_rw_try_read() { +// let m = RwLock::new(0); +// ::std::mem::forget(m.write()); +// assert!(m.try_read().is_none()); +// } - use std::sync::atomic::{AtomicUsize, Ordering}; - use 
std::sync::mpsc::channel; - use std::sync::Arc; - use std::thread; +// #[test] +// fn test_into_inner() { +// let m = RwLock::new(NonCopy(10)); +// assert_eq!(m.into_inner(), NonCopy(10)); +// } - type RwLock = super::RwLock; +// #[test] +// fn test_into_inner_drop() { +// struct Foo(Arc); +// impl Drop for Foo { +// fn drop(&mut self) { +// self.0.fetch_add(1, Ordering::SeqCst); +// } +// } +// let num_drops = Arc::new(AtomicUsize::new(0)); +// let m = RwLock::new(Foo(num_drops.clone())); +// assert_eq!(num_drops.load(Ordering::SeqCst), 0); +// { +// let _inner = m.into_inner(); +// assert_eq!(num_drops.load(Ordering::SeqCst), 0); +// } +// assert_eq!(num_drops.load(Ordering::SeqCst), 1); +// } - #[derive(Eq, PartialEq, Debug)] - struct NonCopy(i32); +// #[test] +// fn test_force_read_decrement() { +// let m = RwLock::new(()); +// ::std::mem::forget(m.read()); +// ::std::mem::forget(m.read()); +// ::std::mem::forget(m.read()); +// assert!(m.try_write().is_none()); +// unsafe { +// m.force_read_decrement(); +// m.force_read_decrement(); +// } +// assert!(m.try_write().is_none()); +// unsafe { +// m.force_read_decrement(); +// } +// assert!(m.try_write().is_some()); +// } - #[test] - fn smoke() { - let l = RwLock::new(()); - drop(l.read()); - drop(l.write()); - drop((l.read(), l.read())); - drop(l.write()); - } +// #[test] +// fn test_force_write_unlock() { +// let m = RwLock::new(()); +// ::std::mem::forget(m.write()); +// assert!(m.try_read().is_none()); +// unsafe { +// m.force_write_unlock(); +// } +// assert!(m.try_read().is_some()); +// } - // TODO: needs RNG - //#[test] - //fn frob() { - // static R: RwLock = RwLock::new(); - // const N: usize = 10; - // const M: usize = 1000; - // - // let (tx, rx) = channel::<()>(); - // for _ in 0..N { - // let tx = tx.clone(); - // thread::spawn(move|| { - // let mut rng = rand::thread_rng(); - // for _ in 0..M { - // if rng.gen_weighted_bool(N) { - // drop(R.write()); - // } else { - // drop(R.read()); - // } - // } - // 
drop(tx); - // }); - // } - // drop(tx); - // let _ = rx.recv(); - // unsafe { R.destroy(); } - //} +// #[test] +// fn test_upgrade_downgrade() { +// let m = RwLock::new(()); +// { +// let _r = m.read(); +// let upg = m.try_upgradeable_read().unwrap(); +// assert!(m.try_read().is_none()); +// assert!(m.try_write().is_none()); +// assert!(upg.try_upgrade().is_err()); +// } +// { +// let w = m.write(); +// assert!(m.try_upgradeable_read().is_none()); +// let _r = w.downgrade(); +// assert!(m.try_upgradeable_read().is_some()); +// assert!(m.try_read().is_some()); +// assert!(m.try_write().is_none()); +// } +// { +// let _u = m.upgradeable_read(); +// assert!(m.try_upgradeable_read().is_none()); +// } - #[test] - fn test_rw_arc() { - let arc = Arc::new(RwLock::new(0)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - - thread::spawn(move || { - let mut lock = arc2.write(); - for _ in 0..10 { - let tmp = *lock; - *lock = -1; - thread::yield_now(); - *lock = tmp + 1; - } - tx.send(()).unwrap(); - }); - - // Readers try to catch the writer in the act - let mut children = Vec::new(); - for _ in 0..5 { - let arc3 = arc.clone(); - children.push(thread::spawn(move || { - let lock = arc3.read(); - assert!(*lock >= 0); - })); - } - - // Wait for children to pass their asserts - for r in children { - assert!(r.join().is_ok()); - } - - // Wait for writer to finish - rx.recv().unwrap(); - let lock = arc.read(); - assert_eq!(*lock, 10); - } - - #[test] - fn test_rw_access_in_unwind() { - let arc = Arc::new(RwLock::new(1)); - let arc2 = arc.clone(); - let _ = thread::spawn(move || -> () { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - let mut lock = self.i.write(); - *lock += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.read(); - assert_eq!(*lock, 2); - } - - #[test] - fn test_rwlock_unsized() { - let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); - { - let b = &mut *rw.write(); - b[0] = 4; - 
b[2] = 5; - } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*rw.read(), comp); - } - - #[test] - fn test_rwlock_try_write() { - use std::mem::drop; - - let lock = RwLock::new(0isize); - let read_guard = lock.read(); - - let write_result = lock.try_write(); - match write_result { - None => (), - Some(_) => assert!( - false, - "try_write should not succeed while read_guard is in scope" - ), - } - - drop(read_guard); - } - - #[test] - fn test_rw_try_read() { - let m = RwLock::new(0); - ::std::mem::forget(m.write()); - assert!(m.try_read().is_none()); - } - - #[test] - fn test_into_inner() { - let m = RwLock::new(NonCopy(10)); - assert_eq!(m.into_inner(), NonCopy(10)); - } - - #[test] - fn test_into_inner_drop() { - struct Foo(Arc); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = RwLock::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); - } - - #[test] - fn test_force_read_decrement() { - let m = RwLock::new(()); - ::std::mem::forget(m.read()); - ::std::mem::forget(m.read()); - ::std::mem::forget(m.read()); - assert!(m.try_write().is_none()); - unsafe { - m.force_read_decrement(); - m.force_read_decrement(); - } - assert!(m.try_write().is_none()); - unsafe { - m.force_read_decrement(); - } - assert!(m.try_write().is_some()); - } - - #[test] - fn test_force_write_unlock() { - let m = RwLock::new(()); - ::std::mem::forget(m.write()); - assert!(m.try_read().is_none()); - unsafe { - m.force_write_unlock(); - } - assert!(m.try_read().is_some()); - } - - #[test] - fn test_upgrade_downgrade() { - let m = RwLock::new(()); - { - let _r = m.read(); - let upg = m.try_upgradeable_read().unwrap(); - assert!(m.try_read().is_none()); - assert!(m.try_write().is_none()); - assert!(upg.try_upgrade().is_err()); - } 
- { - let w = m.write(); - assert!(m.try_upgradeable_read().is_none()); - let _r = w.downgrade(); - assert!(m.try_upgradeable_read().is_some()); - assert!(m.try_read().is_some()); - assert!(m.try_write().is_none()); - } - { - let _u = m.upgradeable_read(); - assert!(m.try_upgradeable_read().is_none()); - } - - assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok()); - } -} +// assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok()); +// } +// } diff --git a/zCore/src/memory.rs b/zCore/src/memory.rs index b7d82072..7c30d155 100644 --- a/zCore/src/memory.rs +++ b/zCore/src/memory.rs @@ -5,7 +5,7 @@ use core::ops::Range; use bitmap_allocator::BitAlloc; use kernel_hal::PhysAddr; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use super::platform::consts::*; diff --git a/zircon-object/src/debuglog.rs b/zircon-object/src/debuglog.rs index 742a8c8d..6d8e5ead 100644 --- a/zircon-object/src/debuglog.rs +++ b/zircon-object/src/debuglog.rs @@ -5,7 +5,7 @@ use { alloc::{sync::Arc, vec::Vec}, lazy_static::lazy_static, // spin::Mutex, - lock::spinlock::Mutex, + lock::mutex::Mutex, }; lazy_static! { diff --git a/zircon-object/src/dev/bti.rs b/zircon-object/src/dev/bti.rs index acc9c879..2c9ec5c1 100644 --- a/zircon-object/src/dev/bti.rs +++ b/zircon-object/src/dev/bti.rs @@ -5,7 +5,7 @@ use { alloc::{sync::Arc, vec::Vec}, dev::Iommu, // spin::Mutex, - lock::spinlock::Mutex, + lock::mutex::Mutex, }; /// Bus Transaction Initiator. 
diff --git a/zircon-object/src/dev/interrupt/event_interrupt.rs b/zircon-object/src/dev/interrupt/event_interrupt.rs index 24a4c3c4..2758719b 100644 --- a/zircon-object/src/dev/interrupt/event_interrupt.rs +++ b/zircon-object/src/dev/interrupt/event_interrupt.rs @@ -1,5 +1,5 @@ use kernel_hal::interrupt; -use {super::*, /*spin::Mutex*/lock::spinlock::Mutex,}; +use {super::*, /*spin::Mutex*/ lock::mutex::Mutex}; pub struct EventInterrupt { vector: usize, diff --git a/zircon-object/src/dev/interrupt/mod.rs b/zircon-object/src/dev/interrupt/mod.rs index e66b752d..b5ba38b7 100644 --- a/zircon-object/src/dev/interrupt/mod.rs +++ b/zircon-object/src/dev/interrupt/mod.rs @@ -8,7 +8,7 @@ use { alloc::{boxed::Box, sync::Arc}, bitflags::bitflags, // spin::Mutex, - lock::spinlock::Mutex, + lock::mutex::Mutex, }; mod event_interrupt; diff --git a/zircon-object/src/dev/interrupt/pci_interrupt.rs b/zircon-object/src/dev/interrupt/pci_interrupt.rs index 86f44a7f..08ea6e8e 100644 --- a/zircon-object/src/dev/interrupt/pci_interrupt.rs +++ b/zircon-object/src/dev/interrupt/pci_interrupt.rs @@ -1,6 +1,6 @@ use alloc::{boxed::Box, sync::Arc}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use super::InterruptTrait; use crate::dev::pci::{constants::PCIE_IRQRET_MASK, IPciNode}; diff --git a/zircon-object/src/dev/pci/bus.rs b/zircon-object/src/dev/pci/bus.rs index 9c47bcfa..3cad5958 100644 --- a/zircon-object/src/dev/pci/bus.rs +++ b/zircon-object/src/dev/pci/bus.rs @@ -18,7 +18,7 @@ use core::marker::{Send, Sync}; use lazy_static::*; use region_alloc::RegionAllocator; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; /// PCIE Bus Driver. 
pub struct PCIeBusDriver { diff --git a/zircon-object/src/dev/pci/caps.rs b/zircon-object/src/dev/pci/caps.rs index 71835ff4..d2d8c77e 100644 --- a/zircon-object/src/dev/pci/caps.rs +++ b/zircon-object/src/dev/pci/caps.rs @@ -4,7 +4,8 @@ use crate::{ZxError, ZxResult}; use alloc::boxed::Box; use core::convert::TryFrom; use kernel_hal::interrupt; -use spin::*; +// use spin::*; +use lock::mutex::Mutex; /// Enumeration for PCI capabilities. #[derive(Debug)] diff --git a/zircon-object/src/dev/pci/nodes.rs b/zircon-object/src/dev/pci/nodes.rs index 900ee0fb..d15de6c6 100644 --- a/zircon-object/src/dev/pci/nodes.rs +++ b/zircon-object/src/dev/pci/nodes.rs @@ -17,7 +17,7 @@ use kernel_hal::interrupt; use numeric_enum_macro::numeric_enum; use region_alloc::RegionAllocator; // use spin::{Mutex, MutexGuard}; -use lock::spinlock::{Mutex, MutexGuard}; +use lock::mutex::{Mutex, MutexGuard}; numeric_enum! { #[repr(u8)] diff --git a/zircon-object/src/dev/pci/pio.rs b/zircon-object/src/dev/pci/pio.rs index f614ba0e..b5a73b17 100644 --- a/zircon-object/src/dev/pci/pio.rs +++ b/zircon-object/src/dev/pci/pio.rs @@ -14,7 +14,7 @@ cfg_if::cfg_if! { if #[cfg(all(target_arch = "x86_64", target_os = "none"))] { use kernel_hal::x86_64::{pio_read, pio_write}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; static PIO_LOCK: Mutex<()> = Mutex::new(()); const PCI_CONFIG_ADDR: u16 = 0xcf8; diff --git a/zircon-object/src/hypervisor/vcpu.rs b/zircon-object/src/hypervisor/vcpu.rs index 9b891df8..7a2772e4 100644 --- a/zircon-object/src/hypervisor/vcpu.rs +++ b/zircon-object/src/hypervisor/vcpu.rs @@ -8,7 +8,8 @@ use { alloc::sync::Arc, core::convert::TryInto, rvm::{self, Vcpu as VcpuInner}, - spin::Mutex, + //spin::Mutex, + lock::mutex::Mutex, }; /// Virtual CPU within a Guest, which allows for execution within the virtual machine. 
diff --git a/zircon-object/src/ipc/channel.rs b/zircon-object/src/ipc/channel.rs index 10694364..043c037e 100644 --- a/zircon-object/src/ipc/channel.rs +++ b/zircon-object/src/ipc/channel.rs @@ -8,7 +8,7 @@ use { futures::channel::oneshot::{self, Sender}, hashbrown::HashMap, // spin::Mutex, - lock::spinlock::Mutex, + lock::mutex::Mutex, }; /// Bidirectional interprocess communication diff --git a/zircon-object/src/ipc/fifo.rs b/zircon-object/src/ipc/fifo.rs index 121f3dd5..ccbb1c90 100644 --- a/zircon-object/src/ipc/fifo.rs +++ b/zircon-object/src/ipc/fifo.rs @@ -3,7 +3,7 @@ use { alloc::collections::VecDeque, alloc::sync::{Arc, Weak}, // spin::Mutex, - lock::spinlock::Mutex, + lock::mutex::Mutex, }; /// First-In First-Out inter-process queue. diff --git a/zircon-object/src/ipc/socket.rs b/zircon-object/src/ipc/socket.rs index ed1c5d32..00cc6155 100644 --- a/zircon-object/src/ipc/socket.rs +++ b/zircon-object/src/ipc/socket.rs @@ -3,7 +3,7 @@ use { alloc::collections::VecDeque, alloc::sync::{Arc, Weak}, bitflags::bitflags, - spin::Mutex, + lock::mutex::Mutex, // spin::Mutex, }; /// Bidirectional streaming IPC transport. diff --git a/zircon-object/src/object/mod.rs b/zircon-object/src/object/mod.rs index 97ba83f3..cc5f078c 100644 --- a/zircon-object/src/object/mod.rs +++ b/zircon-object/src/object/mod.rs @@ -55,7 +55,7 @@ //! ``` //! use zircon_object::object::*; //! use std::sync::Arc; -//! use lock::spinlock::Mutex; +//! use lock::mutex::Mutex; //! //! pub struct SampleObject { //! 
base: KObjectBase, @@ -105,7 +105,7 @@ use { task::{Context, Poll}, }, downcast_rs::{impl_downcast, DowncastSync}, - lock::spinlock::Mutex, // spin::Mutex, + lock::mutex::Mutex, // spin::Mutex, }; pub use {super::*, handle::*, rights::*, signal::*}; diff --git a/zircon-object/src/signal/futex.rs b/zircon-object/src/signal/futex.rs index 710919ac..14f52771 100644 --- a/zircon-object/src/signal/futex.rs +++ b/zircon-object/src/signal/futex.rs @@ -7,7 +7,7 @@ use core::pin::Pin; use core::sync::atomic::*; use core::task::{Context, Poll, Waker}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; /// A primitive for creating userspace synchronization tools. /// diff --git a/zircon-object/src/signal/port.rs b/zircon-object/src/signal/port.rs index 0693ef8f..0ad5daf6 100644 --- a/zircon-object/src/signal/port.rs +++ b/zircon-object/src/signal/port.rs @@ -5,7 +5,7 @@ use alloc::collections::{BTreeSet, VecDeque}; use alloc::sync::Arc; use bitflags::bitflags; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; #[path = "port_packet.rs"] mod port_packet; diff --git a/zircon-object/src/signal/timer.rs b/zircon-object/src/signal/timer.rs index 96da81f7..abf0e181 100644 --- a/zircon-object/src/signal/timer.rs +++ b/zircon-object/src/signal/timer.rs @@ -4,7 +4,7 @@ use alloc::boxed::Box; use alloc::sync::Arc; use core::time::Duration; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; /// An object that may be signaled at some point in the future /// diff --git a/zircon-object/src/task/exception.rs b/zircon-object/src/task/exception.rs index 37b59a98..e0e180bd 100644 --- a/zircon-object/src/task/exception.rs +++ b/zircon-object/src/task/exception.rs @@ -4,7 +4,7 @@ use core::mem::size_of; use futures::channel::oneshot; use kernel_hal::context::{TrapReason, UserContext}; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use super::{Job, Task, Thread}; use crate::ipc::{Channel, MessagePacket}; 
diff --git a/zircon-object/src/task/job.rs b/zircon-object/src/task/job.rs index ed0a25d1..04ca0b0d 100644 --- a/zircon-object/src/task/job.rs +++ b/zircon-object/src/task/job.rs @@ -6,7 +6,7 @@ use { crate::task::Task, alloc::sync::{Arc, Weak}, alloc::vec::Vec, - lock::spinlock::Mutex, //spin::Mutex, + lock::mutex::Mutex, //spin::Mutex, }; /// Control a group of processes diff --git a/zircon-object/src/task/process.rs b/zircon-object/src/task/process.rs index 5bd3a557..5932d4cb 100644 --- a/zircon-object/src/task/process.rs +++ b/zircon-object/src/task/process.rs @@ -4,7 +4,7 @@ use core::{any::Any, sync::atomic::AtomicI32}; use futures::channel::oneshot::{self, Receiver, Sender}; use hashbrown::HashMap; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use super::exception::{ExceptionChannelType, Exceptionate}; use super::job_policy::{JobPolicy, PolicyAction, PolicyCondition}; diff --git a/zircon-object/src/task/thread.rs b/zircon-object/src/task/thread.rs index 5949299e..d1224524 100644 --- a/zircon-object/src/task/thread.rs +++ b/zircon-object/src/task/thread.rs @@ -11,7 +11,7 @@ use bitflags::bitflags; use futures::{channel::oneshot::*, future::FutureExt, pin_mut, select_biased}; use kernel_hal::context::UserContext; // use spin::Mutex; -use lock::spinlock::Mutex; +use lock::mutex::Mutex; use self::thread_state::ContextAccessState; use super::{exception::*, Process, Task}; diff --git a/zircon-object/src/vm/stream.rs b/zircon-object/src/vm/stream.rs index 72e74c41..9a324aed 100644 --- a/zircon-object/src/vm/stream.rs +++ b/zircon-object/src/vm/stream.rs @@ -1,5 +1,5 @@ use { - super::*, crate::object::*, alloc::sync::Arc, lock::spinlock::Mutex, /*spin::Mutex*/ + super::*, crate::object::*, alloc::sync::Arc, lock::mutex::Mutex, /*spin::Mutex*/ numeric_enum_macro::numeric_enum, }; diff --git a/zircon-object/src/vm/vmar.rs b/zircon-object/src/vm/vmar.rs index f8873724..d498c9d7 100644 --- a/zircon-object/src/vm/vmar.rs +++ 
b/zircon-object/src/vm/vmar.rs @@ -6,7 +6,7 @@ use { kernel_hal::vm::{ GenericPageTable, IgnoreNotMappedErr, Page, PageSize, PageTable, PagingError, }, - spin::Mutex, + lock::mutex::Mutex, //spin::Mutex, }; bitflags! { diff --git a/zircon-object/src/vm/vmo/mod.rs b/zircon-object/src/vm/vmo/mod.rs index 1e756704..7326c105 100644 --- a/zircon-object/src/vm/vmo/mod.rs +++ b/zircon-object/src/vm/vmo/mod.rs @@ -9,7 +9,7 @@ use { bitflags::bitflags, core::ops::Deref, kernel_hal::CachePolicy, - spin::Mutex, + lock::mutex::Mutex, //spin::Mutex, }; mod paged; diff --git a/zircon-object/src/vm/vmo/paged.rs b/zircon-object/src/vm/vmo/paged.rs index 416889a8..4fdd2fa6 100644 --- a/zircon-object/src/vm/vmo/paged.rs +++ b/zircon-object/src/vm/vmo/paged.rs @@ -9,7 +9,8 @@ use { core::sync::atomic::*, hashbrown::HashMap, kernel_hal::{mem::PhysFrame, PAGE_SIZE}, - spin::{Mutex, MutexGuard}, + // spin::{Mutex, MutexGuard}, + lock::mutex::{Mutex, MutexGuard}, }; enum VMOType { diff --git a/zircon-object/src/vm/vmo/physical.rs b/zircon-object/src/vm/vmo/physical.rs index 7be94273..819b88be 100644 --- a/zircon-object/src/vm/vmo/physical.rs +++ b/zircon-object/src/vm/vmo/physical.rs @@ -1,4 +1,4 @@ -use {super::*, alloc::sync::Arc, lock::spinlock::Mutex /*spin::Mutex*/}; +use {super::*, alloc::sync::Arc, lock::mutex::Mutex /*spin::Mutex*/}; /// VMO representing a physical range of memory. pub struct VMObjectPhysical { diff --git a/zircon-syscall/src/channel.rs b/zircon-syscall/src/channel.rs index 45087496..84e84621 100644 --- a/zircon-syscall/src/channel.rs +++ b/zircon-syscall/src/channel.rs @@ -1,7 +1,7 @@ use { super::*, alloc::{string::String, vec::Vec}, - lock::spinlock::Mutex, //spin::Mutex, + lock::mutex::Mutex, //spin::Mutex, zircon_object::{ ipc::{Channel, MessagePacket}, object::{obj_type, HandleInfo},