From a00f9659ef69a855104e9469e9e02835936a0474 Mon Sep 17 00:00:00 2001 From: Chen Lai Date: Thu, 18 Apr 2024 20:59:44 -0700 Subject: [PATCH] add cpu device to run eval on cpu (#3133) Summary: `HFLM` from `lm_eval` can take cpu device. https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/models/huggingface.py#L95 Currently, running `eval_llama` fails on CPU. Reviewed By: lucylq Differential Revision: D56313161 --- examples/models/llama2/eval_llama_lib.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/models/llama2/eval_llama_lib.py b/examples/models/llama2/eval_llama_lib.py index 8bb653739b3..d8c6c7bf1d4 100644 --- a/examples/models/llama2/eval_llama_lib.py +++ b/examples/models/llama2/eval_llama_lib.py @@ -42,12 +42,11 @@ def __init__( tokenizer: Union[SentencePieceTokenizer, Tiktoken], max_seq_length: Optional[int] = None, ): - super().__init__() + device = "cuda" if torch.cuda.is_available() else "cpu" + super().__init__(device=device) self._model = model self._tokenizer = tokenizer - self._device = ( - torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - ) + self._device = torch.device(device) self._max_seq_length = 2048 if max_seq_length is None else max_seq_length @property