From 2d156f487832958887500f78a40e2b2fa73bdd0e Mon Sep 17 00:00:00 2001
From: Dongfu
Date: Thu, 7 Dec 2023 12:27:29 -0500
Subject: [PATCH] update

---
 README.md           |  8 ++++----
 blender_usage.ipynb | 33 +++------------------------------
 2 files changed, 7 insertions(+), 34 deletions(-)

diff --git a/README.md b/README.md
index 602a9d1..e7df1e5 100755
--- a/README.md
+++ b/README.md
@@ -71,14 +71,14 @@ blender.loadranker("llm-blender/PairRM") # load ranker checkpoint
 - Then you can rank with the following function
 ```python
-inputs = ["hello!", "I love you!"]
-candidates_texts = [["get out!", "hi! nice to meet you!", "bye"],
+inputs = ["hello, how are you!", "I love you!"]
+candidates_texts = [["get out!", "hi! I am fine, thanks!", "bye!"],
     ["I love you too!", "I hate you!", "Thanks! You're a good guy!"]]
-ranks = blender.rank(inputs, candidates_texts, return_scores=False, batch_size=2)
+ranks = blender.rank(inputs, candidates_texts, return_scores=False, batch_size=1)
 # ranks is a list of ranks where ranks[i][j] represents the ranks of candidate-j for input-i
 """
 ranks -->
-array([[3, 1, 2], # it means "hi! nice to meet you!" ranks the 1st, "bye" ranks the 2nd, and "get out!" ranks the 3rd.
+array([[3, 1, 2], # it means "hi! I am fine, thanks!" ranks the 1st, "bye!" ranks the 2nd, and "get out!" ranks the 3rd.
 [1, 3, 2]], # it means "I love you too!" ranks the 1st, and "I hate you!" ranks the 3rd.
 dtype=int32)
diff --git a/blender_usage.ipynb b/blender_usage.ipynb
index d8b71f2..7a709b2 100755
--- a/blender_usage.ipynb
+++ b/blender_usage.ipynb
@@ -16,39 +16,12 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "2023-12-06 00:10:33.392190: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n",
-      "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
-      "2023-12-06 00:10:34.093177: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory\n",
-      "2023-12-06 00:10:34.093246: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory\n",
-      "2023-12-06 00:10:34.093251: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n",
-      "WARNING:root:No ranker config provided, no ranker loaded, please load ranker first through load_ranker()\n",
-      "WARNING:root:No fuser config provided, no fuser loaded, please load fuser first through load_fuser()\n",
-      "/home/dongfu/miniconda3/envs/llm-blender/lib/python3.9/site-packages/dataclasses_json/core.py:187: RuntimeWarning: 'NoneType' object value of non-optional type load_checkpoint detected when decoding RankerConfig.\n",
-      " warnings.warn(\n",
-      "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n",
-      "/home/dongfu/miniconda3/envs/llm-blender/lib/python3.9/site-packages/transformers/convert_slow_tokenizer.py:470: UserWarning: The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option which is not implemented in the fast tokenizers. In practice this means that the fast version of the tokenizer can produce unknown tokens whereas the sentencepiece version would have converted these unknown tokens into a sequence of byte tokens matching the original piece of text.\n",
-      " warnings.warn(\n",
-      "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Successfully loaded ranker from /home/dongfu/.cache/huggingface/hub/llm-blender/PairRM\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import os\n",
-    "# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
+    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
     "import llm_blender\n",
     "blender = llm_blender.Blender()\n",
     "# Load Ranker\n",
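
For quick reference, the usage this patch documents can be run end to end with the sketch below. This is illustrative only and not part of the patch; it assumes `llm_blender` is installed and the `llm-blender/PairRM` checkpoint can be downloaded. The calls (`Blender()`, `loadranker`, `rank`) are exactly the ones shown in the diff above.

```python
# Minimal sketch of the updated README example (assumes llm_blender is installed
# and the llm-blender/PairRM checkpoint is reachable).
import llm_blender

blender = llm_blender.Blender()
blender.loadranker("llm-blender/PairRM")  # load the PairRM ranker checkpoint

inputs = ["hello, how are you!", "I love you!"]
candidates_texts = [
    ["get out!", "hi! I am fine, thanks!", "bye!"],
    ["I love you too!", "I hate you!", "Thanks! You're a good guy!"],
]

# ranks[i][j] is the rank of candidate j for input i (1 = best).
ranks = blender.rank(inputs, candidates_texts, return_scores=False, batch_size=1)
print(ranks)  # e.g. array([[3, 1, 2], [1, 3, 2]], dtype=int32)
```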