Complete the threads exercises up to 06

This commit is contained in:
2025-04-15 16:25:01 +05:30
parent 9bc063a05b
commit 4fbee2d894
9 changed files with 67 additions and 19 deletions

View File

@@ -1,6 +1,6 @@
/// Returns the chapter's readiness banner.
/// The exercise asked us to replace the `_` placeholder with what we're
/// about to build; the stale pre-image line and TODO marker are removed.
fn intro() -> &'static str {
    "I'm ready to build a concurrent ticket management system!"
}
#[cfg(test)]

View File

@@ -15,7 +15,14 @@
use std::thread;
/// Sums `v` by splitting it in half and summing each half on its own thread.
///
/// Each half is copied into an owned `Vec` so the spawned closures can
/// satisfy the `'static` bound required by `thread::spawn`.
pub fn sum(v: Vec<i32>) -> i32 {
    let (left, right) = v.split_at(v.len() / 2);
    let left = left.to_vec();
    let right = right.to_vec();
    // `move` hands each owned half to its thread.
    let t1 = std::thread::spawn(move || left.iter().sum::<i32>());
    let t2 = std::thread::spawn(move || right.iter().sum::<i32>());
    // `join` propagates a panic from either worker via `unwrap`.
    t1.join().unwrap() + t2.join().unwrap()
}
#[cfg(test)]

View File

@@ -4,7 +4,10 @@
use std::thread;
/// Sums a `'static` slice by splitting it in half and summing each half
/// on its own thread.
///
/// `move` is required on the closures: without it they would borrow the
/// local `left`/`right` bindings, which cannot outlive this stack frame.
/// The sub-slices themselves are `&'static`, so moving them in satisfies
/// the `'static` bound on `thread::spawn`.
pub fn sum(slice: &'static [i32]) -> i32 {
    let (left, right) = slice.split_at(slice.len() / 2);
    let t1 = std::thread::spawn(move || left.iter().sum::<i32>());
    let t2 = std::thread::spawn(move || right.iter().sum::<i32>());
    t1.join().unwrap() + t2.join().unwrap()
}
#[cfg(test)]

View File

@@ -6,7 +6,11 @@
use std::thread;
/// Sums `v` on two threads by leaking the vector to obtain a `'static`
/// slice (the exercise explicitly permits leaking; the allocation is
/// never reclaimed).
///
/// `move` is required so the closures own the `&'static` sub-slices
/// rather than borrowing the local bindings.
pub fn sum(v: Vec<i32>) -> i32 {
    let leaked: &'static [i32] = v.leak();
    let (left, right) = leaked.split_at(leaked.len() / 2);
    let t1 = std::thread::spawn(move || left.iter().sum::<i32>());
    let t2 = std::thread::spawn(move || right.iter().sum::<i32>());
    t1.join().unwrap() + t2.join().unwrap()
}
#[cfg(test)]

View File

@@ -3,7 +3,13 @@
// Don't perform any heap allocation. Don't leak any memory.
/// Sums `v` on two threads with no extra heap allocation and no leak.
///
/// Scoped threads (`std::thread::scope`) may borrow `v` directly: the
/// scope guarantees both workers are joined before `v` goes out of
/// scope, so no clone or `Vec::leak` is needed.
pub fn sum(v: Vec<i32>) -> i32 {
    let mid = v.len() / 2;
    let (left, right) = v.split_at(mid);
    std::thread::scope(|scope| {
        let t1 = scope.spawn(|| left.iter().sum::<i32>());
        let t2 = scope.spawn(|| right.iter().sum::<i32>());
        t1.join().unwrap() + t2.join().unwrap()
    })
}
#[cfg(test)]

View File

@@ -1,10 +1,13 @@
use std::sync::mpsc::{Receiver, Sender};
use data::TicketDraft;
use store::TicketStore;
pub mod data;
pub mod store;
pub enum Command {
Insert(todo!()),
Insert(TicketDraft),
}
// Start the system by spawning the server thread.
@@ -20,4 +23,14 @@ pub fn launch() -> Sender<Command> {
// Enter a loop: wait for a command to show up in
// the channel, then execute it, then start waiting
// for the next command.
pub fn server(receiver: Receiver<Command>) {}
pub fn server(receiver: Receiver<Command>) {
let mut tickStor = TicketStore::new();
'aLoop: loop {
let command = receiver.recv().ok().unwrap();
match command {
Command::Insert(a) => {
tickStor.add_ticket(a);
}
}
}
}

View File

@@ -26,7 +26,7 @@ fn ready() {
// since our server doesn't expose any **read** actions.
// We have no way to know if the inserts are actually happening and if they
// are happening correctly.
let move_forward = false;
let move_forward = true;
assert!(move_forward);
}

View File

@@ -6,18 +6,18 @@ use std::rc::Rc;
/// Wraps a value and increments a shared counter when dropped, so tests
/// can verify exactly how many values were destroyed.
pub struct DropTracker<T> {
    value: T,
    // Shared, single-threaded drop counter (Rc + RefCell: this tracker
    // is not meant to cross threads).
    counter: Rc<RefCell<usize>>,
}

impl<T> DropTracker<T> {
    /// Creates a tracker around `value` that reports its drop into `counter`.
    pub fn new(value: T, counter: Rc<RefCell<usize>>) -> Self {
        Self { value, counter }
    }
}

impl<T> Drop for DropTracker<T> {
    fn drop(&mut self) {
        // Record this drop in the shared counter.
        *self.counter.borrow_mut() += 1;
    }
}

View File

@@ -1,4 +1,7 @@
use data::{Ticket, TicketDraft};
use std::sync::mpsc::{Receiver, Sender};
use store::TicketId;
use crate::store::TicketStore;
pub mod data;
@@ -6,8 +9,14 @@ pub mod store;
// Refer to the tests to understand the expected schema.
pub enum Command {
Insert { todo!() },
Get { todo!() }
Insert {
draft: TicketDraft,
response_sender: Sender<TicketId>,
},
Get {
id: TicketId,
response_sender: Sender<Option<Ticket>>,
},
}
pub fn launch() -> Sender<Command> {
@@ -21,19 +30,25 @@ pub fn server(receiver: Receiver<Command>) {
let mut store = TicketStore::new();
loop {
match receiver.recv() {
Ok(Command::Insert {}) => {
todo!()
Ok(Command::Insert {
draft,
response_sender,
}) => {
let id = store.add_ticket(draft);
response_sender.send(id);
}
Ok(Command::Get {
todo!()
id,
response_sender,
}) => {
todo!()
let tick = store.get(id);
response_sender.send(tick.cloned());
}
Err(_) => {
// There are no more senders, so we can safely break
// and shut down the server.
break
},
break;
}
}
}
}