From a87b63fce3cc3d24dc71ae170a8d431440025565 Mon Sep 17 00:00:00 2001
From: Haotian Zhang
Date: Mon, 27 Nov 2023 13:04:41 -0500
Subject: [PATCH] Add try and exception for MM Model runs (#9163)

Co-authored-by: haotian zhang
---
 .../multi_modal/replicate_multi_modal.ipynb   | 47 +++++++++----------
 1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/docs/examples/multi_modal/replicate_multi_modal.ipynb b/docs/examples/multi_modal/replicate_multi_modal.ipynb
index 46ae9e1e30231..369350874f048 100644
--- a/docs/examples/multi_modal/replicate_multi_modal.ipynb
+++ b/docs/examples/multi_modal/replicate_multi_modal.ipynb
@@ -23,15 +23,7 @@ "execution_count": null,
    "id": "fc691ca8",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "UsageError: Line magic function `%` not found.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "% pip install replicate"
    ]
   },
@@ -186,26 +178,33 @@
     "for prompt_idx, prompt in enumerate(prompts):\n",
     "    for image_idx, image_doc in enumerate(image_documents):\n",
     "        for llm_idx, llm_model in enumerate(REPLICATE_MULTI_MODAL_LLM_MODELS):\n",
-    "            ## Initialize the MultiModal LLM model\n",
-    "            llava_multi_modal_llm = ReplicateMultiModal(\n",
-    "                model=REPLICATE_MULTI_MODAL_LLM_MODELS[llm_model],\n",
-    "                max_new_tokens=100,\n",
-    "                temperature=0.1,\n",
-    "                num_input_files=1,\n",
-    "                top_p=0.9,\n",
-    "                num_beams=1,\n",
-    "                repetition_penalty=1,\n",
-    "            )\n",
+    "            try:\n",
+    "                ## Initialize the MultiModal LLM model\n",
+    "                multi_modal_llm = ReplicateMultiModal(\n",
+    "                    model=REPLICATE_MULTI_MODAL_LLM_MODELS[llm_model],\n",
+    "                    max_new_tokens=100,\n",
+    "                    temperature=0.1,\n",
+    "                    num_input_files=1,\n",
+    "                    top_p=0.9,\n",
+    "                    num_beams=1,\n",
+    "                    repetition_penalty=1,\n",
+    "                )\n",
     "\n",
-    "            llava_resp = llava_multi_modal_llm.complete(\n",
-    "                prompt=prompt,\n",
-    "                image_documents=[image_doc],\n",
-    "            )\n",
+    "                mm_resp = multi_modal_llm.complete(\n",
+    "                    prompt=prompt,\n",
+    "                    image_documents=[image_doc],\n",
+    "                )\n",
+    "            except Exception as e:\n",
+    "                print(\n",
+    "                    f\"Error with LLM model inference with prompt {prompt}, image {image_idx}, and MM model {llm_model}\"\n",
+    "                )\n",
+    "                print(\"Inference Failed due to: \", e)\n",
+    "                continue\n",
     "            res.append(\n",
     "                {\n",
     "                    \"model\": llm_model,\n",
     "                    \"prompt\": prompt,\n",
-    "                    \"response\": llava_resp,\n",
+    "                    \"response\": mm_resp,\n",
     "                    \"image\": str(image_doc.image_path),\n",
     "                }\n",
     "            )"
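For reference, the patched cell decodes from the notebook JSON to the plain Python below. This is a readability sketch, not part of the commit: the import path is assumed from the 0.9-era `llama_index` used by this notebook, and `prompts`, `image_documents`, and `REPLICATE_MULTI_MODAL_LLM_MODELS` are assumed to be defined by the notebook's earlier cells (a list of prompt strings, a list of loaded image documents, and a dict mapping model names to Replicate model versions).

```python
# Patched loop extracted from the notebook cell. Assumes the 0.9-era
# import path and that earlier cells defined `prompts`, `image_documents`,
# and REPLICATE_MULTI_MODAL_LLM_MODELS.
from llama_index.multi_modal_llms import ReplicateMultiModal  # assumed import path

res = []
for prompt_idx, prompt in enumerate(prompts):
    for image_idx, image_doc in enumerate(image_documents):
        for llm_idx, llm_model in enumerate(REPLICATE_MULTI_MODAL_LLM_MODELS):
            try:
                ## Initialize the MultiModal LLM model
                multi_modal_llm = ReplicateMultiModal(
                    model=REPLICATE_MULTI_MODAL_LLM_MODELS[llm_model],
                    max_new_tokens=100,
                    temperature=0.1,
                    num_input_files=1,
                    top_p=0.9,
                    num_beams=1,
                    repetition_penalty=1,
                )

                mm_resp = multi_modal_llm.complete(
                    prompt=prompt,
                    image_documents=[image_doc],
                )
            except Exception as e:
                # A failed Replicate run (cold start, rate limit, model
                # error) no longer aborts the whole sweep: log and move on.
                print(
                    f"Error with LLM model inference with prompt {prompt}, image {image_idx}, and MM model {llm_model}"
                )
                print("Inference Failed due to: ", e)
                continue
            # Only reached when inference succeeded for this combination.
            res.append(
                {
                    "model": llm_model,
                    "prompt": prompt,
                    "response": mm_resp,
                    "image": str(image_doc.image_path),
                }
            )
```

Wrapping both the model initialization and the `complete` call in a single `try` means one flaky model skips only that (prompt, image, model) combination; the `continue` keeps the failed run out of `res`, so every collected entry has a real response.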