From 05d4e15940cc5a99d3e6cb86389cca70b0ac7f79 Mon Sep 17 00:00:00 2001 From: Huanyi Chen Date: Sun, 28 Jan 2024 16:25:03 -0500 Subject: [PATCH] Tweak L10 & L11 flipped notes and drop STM content from L12 --- lectures/flipped/L10.md | 1 + lectures/flipped/L11.md | 29 +++++++++++----- lectures/flipped/L12.md | 76 ----------------------------------------- 3 files changed, 22 insertions(+), 84 deletions(-) diff --git a/lectures/flipped/L10.md b/lectures/flipped/L10.md index 036c275..910278e 100644 --- a/lectures/flipped/L10.md +++ b/lectures/flipped/L10.md @@ -42,6 +42,7 @@ lazy_static! { static ref MUTEX1: Mutex = Mutex::new(0); static ref MUTEX2: Mutex = Mutex::new(0); } +// ^ std::sync::OnceLock can do the trick now, but that requires Rust version 1.70.0 fn main() { // Spawn thread and store handles diff --git a/lectures/flipped/L11.md b/lectures/flipped/L11.md index b1d7234..b92eec9 100644 --- a/lectures/flipped/L11.md +++ b/lectures/flipped/L11.md @@ -1,10 +1,15 @@ # Lecture 11 — Lock Convoys, Atomics, Lock-Freedom +## Roadmap + +We will talk about the lock convoy problem, and some things we can use to avoid +using locks entirely. + ## Lock Convoys -Activity: given a lock (say a piece of paper), 3 students play as threads -with the same priority, of which 2/3 need the lock and 1/3 doesn't; -one additional student plays as the scheduler. +Activity: given a lock (say a piece of paper), 3 students play as threads with +the same priority, of which 2/3 need the lock and 1/3 doesn't; one additional +student plays as the scheduler. - Once the lock is free, a random thread picks up the lock then informs the scheduler to pick the next thread to run. @@ -12,13 +17,14 @@ one additional student plays as the scheduler. thread randomly. The possible actions for the threads are: -- Threads that need the lock: attempt to acquire the lock/wait for the lock, - and eventually release the lock. -- Thread that doesn't need the lock: just runs normally (gets a bunch of execution.) 
+- Threads that need the lock: attempt to acquire the lock/wait for the lock, and + eventually release the lock. +- Thread that doesn't need the lock: just runs normally (gets a bunch of + execution.) Let's see how many times the scheduler picks the wrong thread to run. -## Try-lock +## Trylock ```rust use std::sync::Mutex; @@ -105,7 +111,14 @@ fn main() { Discuss about non-blocking, blocking, lock-free, and wait-free. See -Talk about the lock-free stack (see the lecture notes) if there's time +A quick summary: + +- non-blocking/blocking: blocking does not cost CPU time, but non-blocking does +- lock-free: other threads can still proceed even when the thread holding the + lock is suspended. Spinlock is not lock-free, but it is non-blocking. +- wait-free: a lock-free data structure with the additional property that + every thread accessing the data structure can complete its operation + within a bounded number of steps Question: Are lock-free programming techniques always better for performance? diff --git a/lectures/flipped/L12.md b/lectures/flipped/L12.md index 13d5e37..a0773ab 100644 --- a/lectures/flipped/L12.md +++ b/lectures/flipped/L12.md @@ -40,82 +40,6 @@ fn do_other_work(x: i32, y: i32) -> i32 { Question: when is your modified code faster? When is it slower? How could you improve the use of threads? -## Software Transactional Memory - -The idea resembles database transactions. The code in the atomic block either -executes completely, or aborts/rolls back in the event of a conflict with -another transaction (which triggers a retry later on, and repeated retries if -necessary to get it applied). - -### Benefits - -- simple programming model. - -### Problems - -- Rollback is key to STM. But, some things cannot be rolled back. (write to the - screen, send packet over network) -- Nested transactions. -- Limited transaction size. Most implementations (especially all-hardware) have - a limited transaction size.
-- Currently slow, but a lot of research is going into improving it. -- Intermediate states may still become visible in real implementation. (Maybe - this doesn't happen in the Rust implementation because of fearless - concurrency, but it certainly can in C/C++ versions of STM.) - -### Example - -Talk about this if time permits - -```rust -/* - -// unfornately, not runnable on rustexplorer - -[dependencies] -stm = "0.4.0" -*/ - -use std::thread; -use std::time::Duration; -use stm::atomically; -use stm::TVar; - -fn main() { - // create var - let var = TVar::new(0); - let varc = var.clone(); // Clone for other thread. - - // spawn a thread - let t = thread::spawn(move || { - atomically(|tx| { - // read the var - let x = varc.read(tx)?; - // ensure that x varc changes in between - thread::sleep(Duration::from_millis(500)); - - // write back modified data this should only - // happen when the value has not changed - varc.write(tx, x + 10) - }); - }); - - // ensure that the thread has started and already read the var - thread::sleep(Duration::from_millis(100)); - // now change it before the transaction finishes - atomically(|tx| var.write(tx, 32)); - - // finish and compare - let _ = t.join(); - - println!("{:?}", var.read_atomic()); - // ^ `read_atomic` reads a value atomically, without starting a transaction. - - // it turns out var = 42, because the thread will rerun its transaction when - // the var changes while executing -} -``` - # After-action report, plam, 10Feb23 Gave examples of memory-carried dependencies, and loop-carried dependencies (Lab 2).