diff --git a/src/libgreen/sched.rs b/src/libgreen/sched.rs
index 4b1c4e3b425d4..ad32ba7ba6d1c 100644
--- a/src/libgreen/sched.rs
+++ b/src/libgreen/sched.rs
@@ -15,7 +15,7 @@ use std::rt::rtio::{RemoteCallback, PausableIdleCallback, Callback, EventLoop};
 use std::rt::task::BlockedTask;
 use std::rt::task::Task;
 use std::sync::deque;
-use std::unstable::mutex::Mutex;
+use std::unstable::mutex::NativeMutex;
 use std::unstable::raw;
 
 use TaskState;
@@ -669,8 +669,7 @@ impl Scheduler {
         // is acquired here. This is the resumption points and the "bounce"
         // that it is referring to.
         unsafe {
-            current_task.nasty_deschedule_lock.lock();
-            current_task.nasty_deschedule_lock.unlock();
+            let _guard = current_task.nasty_deschedule_lock.lock();
         }
         return current_task;
     }
@@ -765,10 +764,11 @@ impl Scheduler {
         // to it, but we're guaranteed that the task won't exit until we've
        // unlocked the lock so there's no worry of this memory going away.
         let cur = self.change_task_context(cur, next, |sched, mut task| {
-            let lock: *mut Mutex = &mut task.nasty_deschedule_lock;
-            unsafe { (*lock).lock() }
-            f(sched, BlockedTask::block(task.swap()));
-            unsafe { (*lock).unlock() }
+            let lock: *mut NativeMutex = &mut task.nasty_deschedule_lock;
+            unsafe {
+                let _guard = (*lock).lock();
+                f(sched, BlockedTask::block(task.swap()));
+            }
         });
         cur.put();
     }
@@ -1453,8 +1453,8 @@ mod test {
 
     #[test]
     fn test_spawn_sched_blocking() {
-        use std::unstable::mutex::{Mutex, MUTEX_INIT};
-        static mut LOCK: Mutex = MUTEX_INIT;
+        use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+        static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
 
         // Testing that a task in one scheduler can block in foreign code
         // without affecting other schedulers
@@ -1466,12 +1466,11 @@ mod test {
 
             let mut handle = pool.spawn_sched();
             handle.send(PinnedTask(pool.task(TaskOpts::new(), proc() {
                 unsafe {
-                    LOCK.lock();
+                    let mut guard = LOCK.lock();
                     start_ch.send(());
-                    LOCK.wait();   // block the scheduler thread
-                    LOCK.signal(); // let them know we have the lock
-                    LOCK.unlock();
+                    guard.wait();   // block the scheduler thread
+                    guard.signal(); // let them know we have the lock
                 }
 
                 fin_ch.send(());
@@ -1503,10 +1502,9 @@ mod test {
             child_ch.send(20);
             pingpong(&parent_po, &child_ch);
             unsafe {
-                LOCK.lock();
-                LOCK.signal(); // wakeup waiting scheduler
-                LOCK.wait();   // wait for them to grab the lock
-                LOCK.unlock();
+                let mut guard = LOCK.lock();
+                guard.signal(); // wakeup waiting scheduler
+                guard.wait();   // wait for them to grab the lock
             }
         })));
         drop(handle);
diff --git a/src/libgreen/simple.rs b/src/libgreen/simple.rs
index 8db95f55d18db..297c22e2cd6ce 100644
--- a/src/libgreen/simple.rs
+++ b/src/libgreen/simple.rs
@@ -17,10 +17,10 @@ use std::rt::local::Local;
 use std::rt::rtio;
 use std::rt::task::{Task, BlockedTask};
 use std::task::TaskOpts;
-use std::unstable::sync::LittleLock;
+use std::unstable::mutex::NativeMutex;
 
 struct SimpleTask {
-    lock: LittleLock,
+    lock: NativeMutex,
     awoken: bool,
 }
 
@@ -59,9 +59,9 @@ impl Runtime for SimpleTask {
         to_wake.put_runtime(self as ~Runtime);
         unsafe {
             cast::forget(to_wake);
-            let _l = (*me).lock.lock();
+            let mut guard = (*me).lock.lock();
             (*me).awoken = true;
-            (*me).lock.signal();
+            guard.signal();
         }
     }
 
@@ -83,7 +83,7 @@ impl Runtime for SimpleTask {
 pub fn task() -> ~Task {
     let mut task = ~Task::new();
     task.put_runtime(~SimpleTask {
-        lock: LittleLock::new(),
+        lock: unsafe {NativeMutex::new()},
         awoken: false,
     } as ~Runtime);
     return task;
diff --git a/src/libgreen/task.rs b/src/libgreen/task.rs
index 2aca72e35f19d..74d93b4b2db9a 100644
--- a/src/libgreen/task.rs
+++ b/src/libgreen/task.rs
@@ -25,7 +25,7 @@ use std::rt::local::Local;
 use std::rt::rtio;
 use std::rt::task::{Task, BlockedTask, SendMessage};
 use std::task::TaskOpts;
-use std::unstable::mutex::Mutex;
+use std::unstable::mutex::NativeMutex;
 use std::unstable::raw;
 
 use context::Context;
@@ -65,7 +65,7 @@ pub struct GreenTask {
     pool_id: uint,
 
     // See the comments in the scheduler about why this is necessary
-    nasty_deschedule_lock: Mutex,
+    nasty_deschedule_lock: NativeMutex,
 }
 
 pub enum TaskType {
@@ -163,7 +163,7 @@ impl GreenTask {
             task_type: task_type,
             sched: None,
             handle: None,
-            nasty_deschedule_lock: unsafe { Mutex::new() },
+            nasty_deschedule_lock: unsafe { NativeMutex::new() },
             task: Some(~Task::new()),
         }
     }
@@ -322,11 +322,10 @@ impl GreenTask {
     // uncontended except for when the task is rescheduled).
     fn reawaken_remotely(mut ~self) {
         unsafe {
-            let mtx = &mut self.nasty_deschedule_lock as *mut Mutex;
+            let mtx = &mut self.nasty_deschedule_lock as *mut NativeMutex;
             let handle = self.handle.get_mut_ref() as *mut SchedHandle;
-            (*mtx).lock();
+            let _guard = (*mtx).lock();
             (*handle).send(RunOnce(self));
-            (*mtx).unlock();
         }
     }
 }
@@ -479,12 +478,6 @@ impl Runtime for GreenTask {
     fn wrap(~self) -> ~Any { self as ~Any }
 }
 
-impl Drop for GreenTask {
-    fn drop(&mut self) {
-        unsafe { self.nasty_deschedule_lock.destroy(); }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use std::rt::Runtime;
diff --git a/src/libnative/bookkeeping.rs b/src/libnative/bookkeeping.rs
index 868586b36911e..b1addc5cda531 100644
--- a/src/libnative/bookkeeping.rs
+++ b/src/libnative/bookkeeping.rs
@@ -17,10 +17,10 @@
 //! The green counterpart for this is bookkeeping on sched pools.
 
 use std::sync::atomics;
-use std::unstable::mutex::{Mutex, MUTEX_INIT};
+use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
 
 static mut TASK_COUNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-static mut TASK_LOCK: Mutex = MUTEX_INIT;
+static mut TASK_LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
 
 pub fn increment() {
     let _ = unsafe { TASK_COUNT.fetch_add(1, atomics::SeqCst) };
@@ -29,9 +29,8 @@
 pub fn decrement() {
     unsafe {
         if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
-            TASK_LOCK.lock();
-            TASK_LOCK.signal();
-            TASK_LOCK.unlock();
+            let mut guard = TASK_LOCK.lock();
+            guard.signal();
         }
     }
 }
@@ -40,11 +39,12 @@
 /// the entry points of native programs
 pub fn wait_for_other_tasks() {
     unsafe {
-        TASK_LOCK.lock();
-        while TASK_COUNT.load(atomics::SeqCst) > 0 {
-            TASK_LOCK.wait();
+        {
+            let mut guard = TASK_LOCK.lock();
+            while TASK_COUNT.load(atomics::SeqCst) > 0 {
+                guard.wait();
+            }
         }
-        TASK_LOCK.unlock();
         TASK_LOCK.destroy();
     }
 }
diff --git a/src/libnative/io/net.rs b/src/libnative/io/net.rs
index ec6a5c5cb9bc9..b33b54862dc2b 100644
--- a/src/libnative/io/net.rs
+++ b/src/libnative/io/net.rs
@@ -218,11 +218,11 @@ pub fn init() {
     }
 
     unsafe {
-        use std::unstable::mutex::{Mutex, MUTEX_INIT};
+        use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
         static mut INITIALIZED: bool = false;
-        static mut LOCK: Mutex = MUTEX_INIT;
+        static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
 
-        LOCK.lock();
+        let _guard = LOCK.lock();
         if !INITIALIZED {
             let mut data: WSADATA = mem::init();
             let ret = WSAStartup(0x202, // version 2.2
@@ -230,7 +230,6 @@ pub fn init() {
             assert_eq!(ret, 0);
             INITIALIZED = true;
         }
-        LOCK.unlock();
     }
 }
 
diff --git a/src/libnative/io/timer_helper.rs b/src/libnative/io/timer_helper.rs
index 2c976e67d25b3..004cd6f311452 100644
--- a/src/libnative/io/timer_helper.rs
+++ b/src/libnative/io/timer_helper.rs
@@ -22,7 +22,7 @@
 
 use std::cast;
 use std::rt;
-use std::unstable::mutex::{Mutex, MUTEX_INIT};
+use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
 
 use bookkeeping;
 use io::timer::{Req, Shutdown};
@@ -37,11 +37,11 @@ static mut HELPER_CHAN: *mut Chan<Req> = 0 as *mut Chan<Req>;
 static mut HELPER_SIGNAL: imp::signal = 0 as imp::signal;
 
 pub fn boot(helper: fn(imp::signal, Port<Req>)) {
-    static mut LOCK: Mutex = MUTEX_INIT;
+    static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
     static mut INITIALIZED: bool = false;
 
     unsafe {
-        LOCK.lock();
+        let mut _guard = LOCK.lock();
         if !INITIALIZED {
             let (msgp, msgc) = Chan::new();
             // promote this to a shared channel
@@ -58,7 +58,6 @@ pub fn boot(helper: fn(imp::signal, Port<Req>)) {
             rt::at_exit(proc() { shutdown() });
             INITIALIZED = true;
         }
-        LOCK.unlock();
     }
 }
 
diff --git a/src/libnative/task.rs b/src/libnative/task.rs
index a9c3afbbb16c8..d8f410834f252 100644
--- a/src/libnative/task.rs
+++ b/src/libnative/task.rs
@@ -22,7 +22,7 @@ use std::rt::task::{Task, BlockedTask, SendMessage};
 use std::rt::thread::Thread;
 use std::rt;
 use std::task::TaskOpts;
-use std::unstable::mutex::Mutex;
+use std::unstable::mutex::NativeMutex;
 use std::unstable::stack;
 
 use io;
@@ -40,7 +40,7 @@ pub fn new(stack_bounds: (uint, uint)) -> ~Task {
 
 fn ops() -> ~Ops {
     ~Ops {
-        lock: unsafe { Mutex::new() },
+        lock: unsafe { NativeMutex::new() },
         awoken: false,
         io: io::IoFactory::new(),
         // these *should* get overwritten
@@ -109,7 +109,7 @@ pub fn spawn_opts(opts: TaskOpts, f: proc()) {
 // This structure is the glue between channels and the 1:1 scheduling mode. This
 // structure is allocated once per task.
 struct Ops {
-    lock: Mutex,       // native synchronization
+    lock: NativeMutex, // native synchronization
     awoken: bool,      // used to prevent spurious wakeups
     io: io::IoFactory, // local I/O factory
 
@@ -191,20 +191,19 @@ impl rt::Runtime for Ops {
             let task = BlockedTask::block(cur_task);
 
             if times == 1 {
-                (*me).lock.lock();
+                let mut guard = (*me).lock.lock();
                 (*me).awoken = false;
                 match f(task) {
                     Ok(()) => {
                         while !(*me).awoken {
-                            (*me).lock.wait();
+                            guard.wait();
                         }
                     }
                     Err(task) => { cast::forget(task.wake()); }
                 }
-                (*me).lock.unlock();
             } else {
                 let mut iter = task.make_selectable(times);
-                (*me).lock.lock();
+                let mut guard = (*me).lock.lock();
                 (*me).awoken = false;
                 let success = iter.all(|task| {
                     match f(task) {
@@ -216,9 +215,8 @@
                     }
                 });
                 while success && !(*me).awoken {
-                    (*me).lock.wait();
+                    guard.wait();
                 }
-                (*me).lock.unlock();
             }
             // re-acquire ownership of the task
             cur_task = cast::transmute::<uint, ~Task>(cur_task_dupe);
@@ -235,10 +233,9 @@
             let me = &mut *self as *mut Ops;
             to_wake.put_runtime(self as ~rt::Runtime);
             cast::forget(to_wake);
-            (*me).lock.lock();
+            let mut guard = (*me).lock.lock();
             (*me).awoken = true;
-            (*me).lock.signal();
-            (*me).lock.unlock();
+            guard.signal();
         }
     }
 
@@ -254,12 +251,6 @@
     }
 }
 
-impl Drop for Ops {
-    fn drop(&mut self) {
-        unsafe { self.lock.destroy() }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use std::rt::Runtime;
diff --git a/src/librustuv/queue.rs b/src/librustuv/queue.rs
index 5b697e0d73d08..da502ca72de57 100644
--- a/src/librustuv/queue.rs
+++ b/src/librustuv/queue.rs
@@ -23,7 +23,7 @@
 use std::cast;
 use std::libc::{c_void, c_int};
 use std::rt::task::BlockedTask;
-use std::unstable::sync::LittleLock;
+use std::unstable::mutex::NativeMutex;
 use std::sync::arc::UnsafeArc;
 
 use mpsc = std::sync::mpsc_queue;
@@ -39,7 +39,7 @@ enum Message {
 
 struct State {
     handle: *uvll::uv_async_t,
-    lock: LittleLock, // see comments in async_cb for why this is needed
+    lock: NativeMutex, // see comments in async_cb for why this is needed
     queue: mpsc::Queue<Message>,
 }
 
@@ -112,7 +112,7 @@ impl QueuePool {
         let handle = UvHandle::alloc(None::<AsyncWatcher>, uvll::UV_ASYNC);
         let state = UnsafeArc::new(State {
             handle: handle,
-            lock: LittleLock::new(),
+            lock: unsafe {NativeMutex::new()},
             queue: mpsc::Queue::new(),
         });
         let q = ~QueuePool {
diff --git a/src/libstd/comm/shared.rs b/src/libstd/comm/shared.rs
index 77bf2d7a68d36..031ce991ba47b 100644
--- a/src/libstd/comm/shared.rs
+++ b/src/libstd/comm/shared.rs
@@ -28,7 +28,7 @@ use rt::local::Local;
 use rt::task::{Task, BlockedTask};
 use rt::thread::Thread;
 use sync::atomics;
-use unstable::mutex::Mutex;
+use unstable::mutex::NativeMutex;
 use vec::OwnedVector;
 
 use mpsc = sync::mpsc_queue;
@@ -53,7 +53,7 @@ pub struct Packet<T> {
 
     // this lock protects various portions of this implementation during
     // select()
-    select_lock: Mutex,
+    select_lock: NativeMutex,
 }
 
 pub enum Failure {
@@ -72,10 +72,10 @@ impl<T: Send> Packet<T> {
             channels: atomics::AtomicInt::new(2),
             port_dropped: atomics::AtomicBool::new(false),
             sender_drain: atomics::AtomicInt::new(0),
-            select_lock: unsafe { Mutex::new() },
+            select_lock: unsafe { NativeMutex::new() },
         };
         // see comments in inherit_blocker about why we grab this lock
-        unsafe { p.select_lock.lock() }
+        unsafe { p.select_lock.lock_noguard() }
        return p;
     }
 
@@ -124,7 +124,7 @@ impl<T: Send> Packet<T> {
         // interfere with this method. After we unlock this lock, we're
         // signifying that we're done modifying self.cnt and self.to_wake and
         // the port is ready for the world to continue using it.
-        unsafe { self.select_lock.unlock() }
+        unsafe { self.select_lock.unlock_noguard() }
     }
 
     pub fn send(&mut self, t: T) -> bool {
@@ -438,8 +438,7 @@ impl<T: Send> Packet<T> {
         // about looking at and dealing with to_wake. Once we have acquired the
         // lock, we are guaranteed that inherit_blocker is done.
         unsafe {
-            self.select_lock.lock();
-            self.select_lock.unlock();
+            let _guard = self.select_lock.lock();
         }
 
         // Like the stream implementation, we want to make sure that the count
@@ -487,7 +486,6 @@ impl<T: Send> Drop for Packet<T> {
             assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
             assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
             assert_eq!(self.channels.load(atomics::SeqCst), 0);
-            self.select_lock.destroy();
         }
     }
 }
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index 20d1ae2f3e221..719ed62d03d0a 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -44,7 +44,6 @@ use ptr;
 use str;
 use str::{Str, StrSlice};
 use fmt;
-use unstable::finally::Finally;
 use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
 use path::{Path, GenericPath};
 use iter::Iterator;
@@ -145,16 +144,13 @@ Accessing environment variables is not generally threadsafe.
 Serialize access through a global lock.
 */
 fn with_env_lock<T>(f: || -> T) -> T {
-    use unstable::mutex::{Mutex, MUTEX_INIT};
-    use unstable::finally::Finally;
+    use unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
 
-    static mut lock: Mutex = MUTEX_INIT;
+    static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
 
     unsafe {
-        return (|| {
-            lock.lock();
-            f()
-        }).finally(|| lock.unlock());
+        let _guard = lock.lock();
+        f()
     }
 }
 
diff --git a/src/libstd/rt/args.rs b/src/libstd/rt/args.rs
index c417ea375fd38..6f73265978bf4 100644
--- a/src/libstd/rt/args.rs
+++ b/src/libstd/rt/args.rs
@@ -68,12 +68,11 @@ mod imp {
     use option::{Option, Some, None};
     use ptr::RawPtr;
     use iter::Iterator;
-    use unstable::finally::Finally;
-    use unstable::mutex::{Mutex, MUTEX_INIT};
+    use unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
     use mem;
 
     static mut global_args_ptr: uint = 0;
-    static mut lock: Mutex = MUTEX_INIT;
+    static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
 
     #[cfg(not(test))]
     pub unsafe fn init(argc: int, argv: **u8) {
@@ -111,16 +110,10 @@ mod imp {
     }
 
     fn with_lock<T>(f: || -> T) -> T {
-        (|| {
-            unsafe {
-                lock.lock();
-                f()
-            }
-        }).finally(|| {
-            unsafe {
-                lock.unlock();
-            }
-        })
+        unsafe {
+            let _guard = lock.lock();
+            f()
+        }
     }
 
     fn get_global_ptr() -> *mut Option<~~[~[u8]]> {
diff --git a/src/libstd/unstable/dynamic_lib.rs b/src/libstd/unstable/dynamic_lib.rs
index 8529b69c6eb8a..84fa528ebf130 100644
--- a/src/libstd/unstable/dynamic_lib.rs
+++ b/src/libstd/unstable/dynamic_lib.rs
@@ -152,12 +152,12 @@ pub mod dl {
     }
 
     pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, ~str> {
-        use unstable::mutex::{Mutex, MUTEX_INIT};
-        static mut lock: Mutex = MUTEX_INIT;
+        use unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
         unsafe {
             // dlerror isn't thread safe, so we need to lock around this entire
             // sequence
-            lock.lock();
+            let _guard = lock.lock();
             let _old_error = dlerror();
 
             let result = f();
@@ -168,7 +168,7 @@ pub mod dl {
             } else {
                 Err(str::raw::from_c_str(last_error))
             };
-            lock.unlock();
+
             ret
         }
     }
diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs
index 3122e925e82d9..34ddee46d350e 100644
--- a/src/libstd/unstable/mutex.rs
+++ b/src/libstd/unstable/mutex.rs
@@ -8,78 +8,157 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! A native mutex and condition variable type
+//! A native mutex and condition variable type.
 //!
 //! This module contains bindings to the platform's native mutex/condition
-//! variable primitives. It provides a single type, `Mutex`, which can be
-//! statically initialized via the `MUTEX_INIT` value. This object serves as both a
-//! mutex and a condition variable simultaneously.
+//! variable primitives. It provides two types: `StaticNativeMutex`, which can
+//! be statically initialized via the `NATIVE_MUTEX_INIT` value, and a simple
+//! wrapper `NativeMutex` that has a destructor to clean up after itself. These
+//! objects serve as both mutexes and condition variables simultaneously.
 //!
-//! The lock is lazily initialized, but it can only be unsafely destroyed. A
-//! statically initialized lock doesn't necessarily have a time at which it can
-//! get deallocated. For this reason, there is no `Drop` implementation of the
-//! mutex, but rather the `destroy()` method must be invoked manually if
-//! destruction of the mutex is desired.
+//! The static lock is lazily initialized, but it can only be unsafely
+//! destroyed. A statically initialized lock doesn't necessarily have a time at
+//! which it can get deallocated. For this reason, there is no `Drop`
+//! implementation of the static mutex, but rather the `destroy()` method must
+//! be invoked manually if destruction of the mutex is desired.
 //!
-//! It is not recommended to use this type for idiomatic rust use. This type is
-//! appropriate where no other options are available, but other rust concurrency
-//! primitives should be used before this type.
+//! The non-static `NativeMutex` type does have a destructor, but cannot be
+//! statically initialized.
+//!
+//! It is not recommended to use this type for idiomatic rust use. These types
+//! are appropriate where no other options are available, but other rust
+//! concurrency primitives should be used before them: the `sync` crate defines
+//! `StaticMutex` and `Mutex` types.
 //!
 //! # Example
 //!
-//! use std::unstable::mutex::{Mutex, MUTEX_INIT};
+//! ```rust
+//! use std::unstable::mutex::{NativeMutex, StaticNativeMutex, NATIVE_MUTEX_INIT};
+//!
+//! // Use a statically initialized mutex
+//! static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+//!
+//! unsafe {
+//!     let _guard = LOCK.lock();
+//! } // automatically unlocked here
 //!
-//! // Use a statically initialized mutex
-//! static mut lock: Mutex = MUTEX_INIT;
+//! // Use a normally initialized mutex
+//! unsafe {
+//!     let mut lock = NativeMutex::new();
 //!
-//! unsafe {
-//!     lock.lock();
-//!     lock.unlock();
-//! }
+//!     {
+//!         let _guard = lock.lock();
+//!     } // unlocked here
 //!
-//! // Use a normally initialized mutex
-//! let mut lock = Mutex::new();
-//! unsafe {
-//!     lock.lock();
-//!     lock.unlock();
-//!     lock.destroy();
-//! }
+//!     // sometimes the RAII guard isn't appropriate
+//!     lock.lock_noguard();
+//!     lock.unlock_noguard();
+//! } // `lock` is deallocated here
+//! ```
 
 #[allow(non_camel_case_types)];
 
-pub struct Mutex {
+use option::{Option, None, Some};
+use ops::Drop;
+
+/// A native mutex suitable for storing in statics (that is, it has
+/// the `destroy` method rather than a destructor).
+///
+/// Prefer the `NativeMutex` type where possible, since that does not
+/// require manual deallocation.
+pub struct StaticNativeMutex {
     priv inner: imp::Mutex,
 }
 
-pub static MUTEX_INIT: Mutex = Mutex {
+/// A native mutex with a destructor for clean-up.
+///
+/// See `StaticNativeMutex` for a version that is suitable for storing in
+/// statics.
+pub struct NativeMutex {
+    priv inner: StaticNativeMutex
+}
+
+/// Automatically unlocks the mutex that it was created from on
+/// destruction.
+///
+/// Using this makes lock-based code resilient to unwinding/task
+/// failure, because the lock will be automatically unlocked even
+/// then.
+#[must_use]
+pub struct LockGuard<'a> {
+    priv lock: &'a mut StaticNativeMutex
+}
+
+pub static NATIVE_MUTEX_INIT: StaticNativeMutex = StaticNativeMutex {
     inner: imp::MUTEX_INIT,
 };
 
-impl Mutex {
-    /// Creates a new mutex
-    pub unsafe fn new() -> Mutex {
-        Mutex { inner: imp::Mutex::new() }
+impl StaticNativeMutex {
+    /// Creates a new mutex.
+    ///
+    /// Note that a mutex created in this way needs to be explicit
+    /// freed with a call to `destroy` or it will leak.
+    pub unsafe fn new() -> StaticNativeMutex {
+        StaticNativeMutex { inner: imp::Mutex::new() }
     }
 
     /// Acquires this lock. This assumes that the current thread does not
     /// already hold the lock.
-    pub unsafe fn lock(&mut self) { self.inner.lock() }
+    ///
+    /// # Example
+    /// ```rust
+    /// use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};
+    /// static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;
+    /// unsafe {
+    ///     let _guard = LOCK.lock();
+    ///     // critical section...
+    /// } // automatically unlocked in `_guard`'s destructor
+    /// ```
+    pub unsafe fn lock<'a>(&'a mut self) -> LockGuard<'a> {
+        self.inner.lock();
+
+        LockGuard { lock: self }
+    }
+
+    /// Attempts to acquire the lock. The value returned is `Some` if
+    /// the attempt succeeded.
+    pub unsafe fn trylock<'a>(&'a mut self) -> Option<LockGuard<'a>> {
+        if self.inner.trylock() {
+            Some(LockGuard { lock: self })
+        } else {
+            None
+        }
+    }
 
-    /// Attempts to acquire the lock. The value returned is whether the lock was
-    /// acquired or not
-    pub unsafe fn trylock(&mut self) -> bool { self.inner.trylock() }
+    /// Acquire the lock without creating a `LockGuard`.
+    ///
+    /// These needs to be paired with a call to `.unlock_noguard`. Prefer using
+    /// `.lock`.
+    pub unsafe fn lock_noguard(&mut self) { self.inner.lock() }
+
+    /// Attempts to acquire the lock without creating a
+    /// `LockGuard`. The value returned is whether the lock was
+    /// acquired or not.
+    ///
+    /// If `true` is returned, this needs to be paired with a call to
+    /// `.unlock_noguard`. Prefer using `.trylock`.
+    pub unsafe fn trylock_noguard(&mut self) -> bool {
+        self.inner.trylock()
+    }
 
     /// Unlocks the lock. This assumes that the current thread already holds the
     /// lock.
-    pub unsafe fn unlock(&mut self) { self.inner.unlock() }
+    pub unsafe fn unlock_noguard(&mut self) { self.inner.unlock() }
 
     /// Block on the internal condition variable.
     ///
-    /// This function assumes that the lock is already held
-    pub unsafe fn wait(&mut self) { self.inner.wait() }
+    /// This function assumes that the lock is already held. Prefer
+    /// using `LockGuard.wait` since that guarantees that the lock is
+    /// held.
+    pub unsafe fn wait_noguard(&mut self) { self.inner.wait() }
 
     /// Signals a thread in `wait` to wake up
-    pub unsafe fn signal(&mut self) { self.inner.signal() }
+    pub unsafe fn signal_noguard(&mut self) { self.inner.signal() }
 
     /// This function is especially unsafe because there are no guarantees made
     /// that no other thread is currently holding the lock or waiting on the
@@ -87,6 +166,96 @@
     pub unsafe fn destroy(&mut self) { self.inner.destroy() }
 }
 
+impl NativeMutex {
+    /// Creates a new mutex.
+    ///
+    /// The user must be careful to ensure the mutex is not locked when its is
+    /// being destroyed.
+    pub unsafe fn new() -> NativeMutex {
+        NativeMutex { inner: StaticNativeMutex::new() }
+    }
+
+    /// Acquires this lock. This assumes that the current thread does not
+    /// already hold the lock.
+    ///
+    /// # Example
+    /// ```rust
+    /// use std::unstable::mutex::NativeMutex;
+    /// unsafe {
+    ///     let mut lock = NativeMutex::new();
+    ///
+    ///     {
+    ///         let _guard = lock.lock();
+    ///         // critical section...
+    ///     } // automatically unlocked in `_guard`'s destructor
+    /// }
+    /// ```
+    pub unsafe fn lock<'a>(&'a mut self) -> LockGuard<'a> {
+        self.inner.lock()
+    }
+
+    /// Attempts to acquire the lock. The value returned is `Some` if
+    /// the attempt succeeded.
+    pub unsafe fn trylock<'a>(&'a mut self) -> Option<LockGuard<'a>> {
+        self.inner.trylock()
+    }
+
+    /// Acquire the lock without creating a `LockGuard`.
+    ///
+    /// These needs to be paired with a call to `.unlock_noguard`. Prefer using
+    /// `.lock`.
+    pub unsafe fn lock_noguard(&mut self) { self.inner.lock_noguard() }
+
+    /// Attempts to acquire the lock without creating a
+    /// `LockGuard`. The value returned is whether the lock was
+    /// acquired or not.
+    ///
+    /// If `true` is returned, this needs to be paired with a call to
+    /// `.unlock_noguard`. Prefer using `.trylock`.
+    pub unsafe fn trylock_noguard(&mut self) -> bool {
+        self.inner.trylock_noguard()
+    }
+
+    /// Unlocks the lock. This assumes that the current thread already holds the
+    /// lock.
+    pub unsafe fn unlock_noguard(&mut self) { self.inner.unlock_noguard() }
+
+    /// Block on the internal condition variable.
+    ///
+    /// This function assumes that the lock is already held. Prefer
+    /// using `LockGuard.wait` since that guarantees that the lock is
+    /// held.
+    pub unsafe fn wait_noguard(&mut self) { self.inner.wait_noguard() }
+
+    /// Signals a thread in `wait` to wake up
+    pub unsafe fn signal_noguard(&mut self) { self.inner.signal_noguard() }
+}
+
+impl Drop for NativeMutex {
+    fn drop(&mut self) {
+        unsafe {self.inner.destroy()}
+    }
+}
+
+impl<'a> LockGuard<'a> {
+    /// Block on the internal condition variable.
+    pub unsafe fn wait(&mut self) {
+        self.lock.wait_noguard()
+    }
+
+    /// Signals a thread in `wait` to wake up.
+    pub unsafe fn signal(&mut self) {
+        self.lock.signal_noguard()
+    }
+}
+
+#[unsafe_destructor]
+impl<'a> Drop for LockGuard<'a> {
+    fn drop(&mut self) {
+        unsafe {self.lock.unlock_noguard()}
+    }
+}
+
 #[cfg(unix)]
 mod imp {
     use libc;
@@ -382,30 +551,56 @@ mod imp {
 mod test {
     use prelude::*;
 
-    use super::{Mutex, MUTEX_INIT};
+    use mem::drop;
+    use super::{StaticNativeMutex, NATIVE_MUTEX_INIT};
     use rt::thread::Thread;
 
     #[test]
-    fn somke_lock() {
-        static mut lock: Mutex = MUTEX_INIT;
+    fn smoke_lock() {
+        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
         unsafe {
-            lock.lock();
-            lock.unlock();
+            let _guard = lock.lock();
         }
     }
 
     #[test]
-    fn somke_cond() {
-        static mut lock: Mutex = MUTEX_INIT;
+    fn smoke_cond() {
+        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
         unsafe {
-            lock.lock();
+            let mut guard = lock.lock();
             let t = Thread::start(proc() {
-                lock.lock();
-                lock.signal();
-                lock.unlock();
+                let mut guard = lock.lock();
+                guard.signal();
             });
-            lock.wait();
-            lock.unlock();
+            guard.wait();
+            drop(guard);
+
+            t.join();
+        }
+    }
+
+    #[test]
+    fn smoke_lock_noguard() {
+        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            lock.lock_noguard();
+            lock.unlock_noguard();
+        }
+    }
+
+    #[test]
+    fn smoke_cond_noguard() {
+        static mut lock: StaticNativeMutex = NATIVE_MUTEX_INIT;
+        unsafe {
+            lock.lock_noguard();
+            let t = Thread::start(proc() {
+                lock.lock_noguard();
+                lock.signal_noguard();
+                lock.unlock_noguard();
+            });
+            lock.wait_noguard();
+            lock.unlock_noguard();
             t.join();
         }
     }
@@ -413,7 +608,7 @@
     #[test]
     fn destroy_immediately() {
         unsafe {
-            let mut m = Mutex::new();
+            let mut m = StaticNativeMutex::new();
             m.destroy();
         }
     }
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index c2fa168a47825..93322977bc121 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -10,63 +10,11 @@
 use clone::Clone;
 use kinds::Send;
-use ops::Drop;
-use option::{Option,Some,None};
 use sync::arc::UnsafeArc;
-use unstable::mutex::Mutex;
-
-pub struct LittleLock {
-    priv l: Mutex,
-}
-
-pub struct LittleGuard<'a> {
-    priv l: &'a mut Mutex,
-}
-
-impl Drop for LittleLock {
-    fn drop(&mut self) {
-        unsafe { self.l.destroy(); }
-    }
-}
-
-#[unsafe_destructor]
-impl<'a> Drop for LittleGuard<'a> {
-    fn drop(&mut self) {
-        unsafe { self.l.unlock(); }
-    }
-}
-
-impl LittleLock {
-    pub fn new() -> LittleLock {
-        unsafe { LittleLock { l: Mutex::new() } }
-    }
-
-    pub unsafe fn lock<'a>(&'a mut self) -> LittleGuard<'a> {
-        self.l.lock();
-        LittleGuard { l: &mut self.l }
-    }
-
-    pub unsafe fn try_lock<'a>(&'a mut self) -> Option<LittleGuard<'a>> {
-        if self.l.trylock() {
-            Some(LittleGuard { l: &mut self.l })
-        } else {
-            None
-        }
-    }
-
-    pub unsafe fn signal(&mut self) {
-        self.l.signal();
-    }
-}
-
-impl<'a> LittleGuard<'a> {
-    pub unsafe fn wait(&mut self) {
-        self.l.wait();
-    }
-}
+use unstable::mutex::NativeMutex;
 
 struct ExData<T> {
-    lock: LittleLock,
+    lock: NativeMutex,
     failed: bool,
     data: T,
 }
@@ -95,7 +43,7 @@ impl<T: Send> Clone for Exclusive<T> {
 impl<T: Send> Exclusive<T> {
     pub fn new(user_data: T) -> Exclusive<T> {
         let data = ExData {
-            lock: LittleLock::new(),
+            lock: unsafe {NativeMutex::new()},
             failed: false,
             data: user_data
         };
@@ -104,8 +52,8 @@ impl<T: Send> Exclusive<T> {
         }
     }
 
-    // Exactly like std::arc::MutexArc,access(), but with the LittleLock
-    // instead of a proper mutex. Same reason for being unsafe.
+    // Exactly like sync::MutexArc.access(). Same reason for being
+    // unsafe.
     //
     // Currently, scheduling operations (i.e., descheduling, receiving on a pipe,
     // accessing the provided condition variable) are prohibited while inside
@@ -131,14 +79,14 @@ impl<T: Send> Exclusive<T> {
     #[inline]
     pub unsafe fn hold_and_signal(&self, f: |x: &mut T|) {
         let rec = self.x.get();
-        let _l = (*rec).lock.lock();
+        let mut guard = (*rec).lock.lock();
         if (*rec).failed {
             fail!("Poisoned Exclusive::new - another task failed inside!");
         }
         (*rec).failed = true;
         f(&mut (*rec).data);
         (*rec).failed = false;
-        (*rec).lock.signal();
+        guard.signal();
     }
 
     #[inline]
diff --git a/src/libsync/sync/mutex.rs b/src/libsync/sync/mutex.rs
index 8489773bd8814..9ca2f1fd7ffe3 100644
--- a/src/libsync/sync/mutex.rs
+++ b/src/libsync/sync/mutex.rs
@@ -133,7 +133,7 @@ pub struct StaticMutex {
     /// uint-cast of the native thread waiting for this mutex
     priv native_blocker: uint,
     /// an OS mutex used by native threads
-    priv lock: mutex::Mutex,
+    priv lock: mutex::StaticNativeMutex,
 
     /// A concurrent mpsc queue used by green threads, along with a count used
     /// to figure out when to dequeue and enqueue.
@@ -143,6 +143,7 @@ pub struct StaticMutex {
 
 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
 /// dropped (falls out of scope), the lock will be unlocked.
+#[must_use]
 pub struct Guard<'a> {
     priv lock: &'a mut StaticMutex,
 }
@@ -150,7 +151,7 @@ pub struct Guard<'a> {
 /// Static initialization of a mutex. This constant can be used to initialize
 /// other mutex constants.
 pub static MUTEX_INIT: StaticMutex = StaticMutex {
-    lock: mutex::MUTEX_INIT,
+    lock: mutex::NATIVE_MUTEX_INIT,
     state: atomics::INIT_ATOMIC_UINT,
     flavor: Unlocked,
     green_blocker: 0,
@@ -288,11 +289,11 @@ impl StaticMutex {
     // `lock()` function on an OS mutex
     fn native_lock(&mut self, t: ~Task) {
         Local::put(t);
-        unsafe { self.lock.lock(); }
+        unsafe { self.lock.lock_noguard(); }
     }
 
     fn native_unlock(&mut self) {
-        unsafe { self.lock.unlock(); }
+        unsafe { self.lock.unlock_noguard(); }
     }
 
     fn green_lock(&mut self, t: ~Task) {
@@ -441,7 +442,7 @@ impl Mutex {
             native_blocker: 0,
             green_cnt: atomics::AtomicUint::new(0),
             q: q::Queue::new(),
-            lock: unsafe { mutex::Mutex::new() },
+            lock: unsafe { mutex::StaticNativeMutex::new() },
             }
         }
     }
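Illustrative note (not part of the patch): the sketch below mirrors the calling convention this change converts callers to, using only the API shown in the diff above — `StaticNativeMutex`/`NATIVE_MUTEX_INIT`, the RAII `LockGuard` returned by `lock()`, and the explicit `lock_noguard()`/`unlock_noguard()` escape hatch. It is written in the same 2014-era Rust dialect as the patch and is a usage sketch rather than code from the repository; the function names are hypothetical.

```rust
use std::unstable::mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};

static mut LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;

unsafe fn with_guard() {
    // New style: the guard unlocks the mutex when it goes out of scope,
    // and the condition-variable operations hang off the guard.
    let mut guard = LOCK.lock();
    guard.signal();
} // unlocked here, even if the task fails inside the critical section

unsafe fn without_guard() {
    // Escape hatch kept for callers that cannot hold a guard across the
    // region (e.g. comm::shared::Packet above): lock_noguard() must be
    // paired with an explicit unlock_noguard().
    LOCK.lock_noguard();
    // ... critical section ...
    LOCK.unlock_noguard();
}
```

The design point of the patch: `StaticNativeMutex` keeps the manual `destroy()` model so it can live in a `static`, while the non-static `NativeMutex` wrapper and `LockGuard` take over the cleanup that callers previously did by hand — which is why the `Drop` impls and trailing `unlock()` calls in `libgreen`, `libnative`, and `libstd` are removed above.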