Showing 25 changed files with 1,503 additions and 64 deletions. The new multi-GPU Llama model implementation is shown below.
use std::{
    io::ErrorKind,
    net::TcpListener,
    path::PathBuf,
    str::FromStr,
    sync::{Arc, Barrier, Mutex},
    thread,
    time::Duration,
};

use atoma_types::{AtomaChildMessage, AtomaInferenceMessage, TextModelInput, TextModelOutput};
use hf_hub::{api::sync::ApiBuilder, Repo, RepoType};
use tracing::info;
use tungstenite::accept;

use crate::models::{config::ModelConfig, types::ModelType, ModelError, ModelTrait};

use super::hub_load_safetensors;

/// Drives a Llama model that is sharded across several GPU worker processes.
/// Each worker connects back to the parent over a local WebSocket; the parent
/// coordinates NCCL setup, weight loading, and inference over that channel.
pub struct MultiGpuLlamaModel {
    model_type: ModelType,
    /// One WebSocket per GPU shard, shared between `run` and the per-shard
    /// reader threads, hence the inner `Mutex`.
    gpu_instances: Arc<Mutex<Vec<Arc<Mutex<tungstenite::WebSocket<std::net::TcpStream>>>>>>,
    /// Rendezvous between the reader threads and `load`/`run`.
    sync_barrier: Arc<Barrier>,
    /// Slot where a reader thread deposits the latest inference output.
    result: Arc<Mutex<Option<TextModelOutput>>>,
}

pub struct MultiGpuLlamaLoadData {
    /// `[0]` is `config.json`, `[1]` is `tokenizer.json`, the rest are the
    /// safetensors weight files.
    file_paths: Vec<PathBuf>,
    dtype: String,
    model_type: ModelType,
}

impl ModelTrait for MultiGpuLlamaModel {
    type Input = TextModelInput;
    type Output = TextModelOutput;
    type LoadData = MultiGpuLlamaLoadData;

    fn fetch(
        api_key: String,
        cache_dir: PathBuf,
        config: ModelConfig,
    ) -> Result<Self::LoadData, ModelError> {
        let api = ApiBuilder::new()
            .with_progress(true)
            .with_token(Some(api_key))
            .with_cache_dir(cache_dir)
            .build()?;

        let model_type = ModelType::from_str(&config.model_id())?;
        let repo_id = model_type.repo().to_string();
        let revision = model_type.default_revision().to_string();

        let repo = api.repo(Repo::with_revision(
            repo_id.clone(),
            RepoType::Model,
            revision,
        ));
        let config_file_path = repo.get("config.json")?;
        let tokenizer_file_path = repo.get("tokenizer.json")?;

        // TinyLlama ships a single weight file; larger checkpoints are
        // sharded and indexed by model.safetensors.index.json.
        let model_weights_file_paths = if repo_id == "TinyLlama/TinyLlama-1.1B-Chat-v1.0" {
            vec![repo.get("model.safetensors")?]
        } else {
            hub_load_safetensors(&repo, "model.safetensors.index.json")?
        };

        let mut file_paths = Vec::with_capacity(2 + model_weights_file_paths.len());
        file_paths.extend([config_file_path, tokenizer_file_path]);
        file_paths.extend(model_weights_file_paths);

        Ok(Self::LoadData {
            file_paths,
            model_type,
            dtype: config.dtype(),
        })
    }

    fn model_type(&self) -> ModelType {
        self.model_type.clone()
    }

    fn load(load_data: Self::LoadData) -> Result<Self, ModelError> {
        // Bind an ephemeral local port for the GPU worker processes to dial
        // back into, one WebSocket connection per shard.
        let server = TcpListener::bind("127.0.0.1:0").unwrap();
        let port = server.local_addr().unwrap().port();
        info!("Server listening on port {port}");
        let num_shards = 2;
        let mut num_connections = 0;
        let gpu_instances = Arc::new(Mutex::new(Vec::with_capacity(num_shards)));
        // Keeps the reader threads in lockstep with one another.
        let init_barrier = Arc::new(Barrier::new(num_shards));
        // Includes this thread as well, so `load`/`run` can wait on all shards.
        let sync_barrier = Arc::new(Barrier::new(num_shards + 1));
        let result = Arc::new(Mutex::new(None));
        for stream in server.incoming() {
            let gpu_instances = Arc::clone(&gpu_instances);
            let init_barrier = Arc::clone(&init_barrier);
            let sync_barrier = Arc::clone(&sync_barrier);
            let config_file_path = load_data.file_paths[0].clone();
            let tokenizer_filename = load_data.file_paths[1].clone();
            let filenames = load_data.file_paths[2..].to_vec();
            let dtype = load_data.dtype.clone();
            let result = Arc::clone(&result);
            thread::spawn(move || {
                // The socket is shared with `run` and with the thread that
                // broadcasts the NCCL id, so it lives behind a `Mutex`
                // (`Arc::get_mut` would panic once the `Arc` has been cloned
                // into `gpu_instances`).
                let websocket = Arc::new(Mutex::new(accept(stream.unwrap()).unwrap()));
                // A short read timeout lets the loop below release the lock
                // periodically instead of blocking inside `read` while
                // holding it, which would deadlock senders on other threads.
                websocket
                    .lock()
                    .unwrap()
                    .get_ref()
                    .set_read_timeout(Some(Duration::from_millis(50)))
                    .unwrap();
                let index = {
                    let mut gpu_instances = gpu_instances.lock().unwrap();
                    gpu_instances.push(Arc::clone(&websocket));
                    gpu_instances.len() - 1
                };
                loop {
                    init_barrier.wait();
                    // Poll for the next message, releasing the socket lock
                    // between attempts so other threads can send on it.
                    let msg = loop {
                        match websocket.lock().unwrap().read() {
                            Ok(msg) => break msg,
                            Err(tungstenite::Error::Io(err))
                                if err.kind() == ErrorKind::WouldBlock
                                    || err.kind() == ErrorKind::TimedOut =>
                            {
                                thread::sleep(Duration::from_millis(10));
                            }
                            Err(err) => panic!("websocket read failed: {err}"),
                        }
                    };
                    if msg.is_text() {
                        let message: AtomaChildMessage =
                            serde_json::from_str(msg.to_string().as_str()).unwrap();
                        match message {
                            AtomaChildMessage::Initialized(nccl_id) => {
                                // Exactly one shard reports the NCCL unique
                                // id; forward it to all the other shards.
                                if let Some(nccl_id) = nccl_id {
                                    let gpu_instances = gpu_instances.lock().unwrap();
                                    for (i, websocket) in gpu_instances.iter().enumerate() {
                                        if i != index {
                                            websocket
                                                .lock()
                                                .unwrap()
                                                .send(tungstenite::Message::Text(
                                                    serde_json::to_string(
                                                        &AtomaInferenceMessage::InitializeComm(
                                                            nccl_id.clone(),
                                                        ),
                                                    )
                                                    .unwrap(),
                                                ))
                                                .unwrap();
                                        }
                                    }
                                }
                            }
                            AtomaChildMessage::CommsReady => {
                                // The shard has joined the NCCL communicator;
                                // tell it to load its share of the weights.
                                websocket
                                    .lock()
                                    .unwrap()
                                    .send(tungstenite::Message::Text(
                                        serde_json::to_string(&AtomaInferenceMessage::LoadModel(
                                            config_file_path.clone(),
                                            dtype.clone(),
                                            filenames.clone(),
                                            tokenizer_filename.clone(),
                                        ))
                                        .unwrap(),
                                    ))
                                    .unwrap();
                            }
                            AtomaChildMessage::Loaded => {
                                // Unblocks `load` once every shard is ready.
                                sync_barrier.wait();
                            }
                            AtomaChildMessage::InferenceResult(output) => {
                                // Every shard reports the same output, so the
                                // last writer wins; then `run` is unblocked.
                                *result.lock().unwrap() = Some(output);
                                sync_barrier.wait();
                            }
                        }
                    }
                }
            });
            num_connections += 1;
            if num_connections == num_shards {
                break;
            }
        }
        // Wait until every shard has reported `Loaded`.
        sync_barrier.wait();
        Ok(MultiGpuLlamaModel {
            model_type: load_data.model_type,
            gpu_instances,
            sync_barrier,
            result,
        })
    }

    fn run(&mut self, input: Self::Input) -> Result<Self::Output, ModelError> {
        // Fan the request out to every shard, then wait for a reader thread
        // to deposit the output.
        for websocket in self.gpu_instances.lock().unwrap().iter() {
            websocket
                .lock()
                .unwrap()
                .send(tungstenite::Message::Text(
                    serde_json::to_string(&AtomaInferenceMessage::Inference(input.clone()))
                        .unwrap(),
                ))
                .unwrap();
        }
        self.sync_barrier.wait();
        Ok(self.result.lock().unwrap().take().unwrap())
    }
}
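
The worker side of this protocol is not part of the diff. The sketch below shows what a shard process might look like, inferred solely from the messages the parent sends and expects above. It is a minimal sketch, not the project's actual worker: the local ChildMessage/InferenceMessage enums are hypothetical stand-ins for the real atoma_types definitions, the NCCL id payload is modeled as raw bytes, and the port (9001) and rank handling are placeholders for whatever the parent passes when spawning the worker.

use std::{error::Error, path::PathBuf};

use serde::{Deserialize, Serialize};
use tungstenite::{connect, Message};

// Hypothetical mirrors of the atoma_types enums; variant names match the
// parent code above, but the payload types are guesses.
#[derive(Serialize)]
enum ChildMessage {
    Initialized(Option<Vec<u8>>),
    CommsReady,
    Loaded,
    InferenceResult(String),
}

#[derive(Deserialize)]
enum InferenceMessage {
    InitializeComm(Vec<u8>),
    LoadModel(PathBuf, String, Vec<PathBuf>, PathBuf),
    Inference(String),
}

fn main() -> Result<(), Box<dyn Error>> {
    // Dial the ephemeral port the parent logged when it bound 127.0.0.1:0.
    let (mut socket, _response) = connect("ws://127.0.0.1:9001")?;
    // Rank 0 would generate and report the NCCL unique id here; the other
    // ranks report None and wait for InitializeComm from the parent.
    socket.send(Message::Text(serde_json::to_string(
        &ChildMessage::Initialized(None),
    )?))?;
    loop {
        let msg = socket.read()?;
        if !msg.is_text() {
            continue;
        }
        let reply = match serde_json::from_str::<InferenceMessage>(&msg.to_string())? {
            // Join the NCCL communicator with the received id, then ack.
            InferenceMessage::InitializeComm(_id) => ChildMessage::CommsReady,
            // Load this shard's slice of the weights, then ack.
            InferenceMessage::LoadModel(_config, _dtype, _weights, _tokenizer) => {
                ChildMessage::Loaded
            }
            // Run the forward pass and return the decoded output.
            InferenceMessage::Inference(_input) => ChildMessage::InferenceResult(String::new()),
        };
        socket.send(Message::Text(serde_json::to_string(&reply)?))?;
    }
}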