[Llama2 recipes] Adding a WebUI with Python Gradio
Many existing LLM projects already ship a web UI built with Gradio, but no matter how hard I looked, llama-recipes didn't have one, so I built it myself. When I first ran into the Gradio Python package I was surprised: you can build a web UI this easily and share it with other people. It also made me appreciate that the Python community actually builds tools like this. It makes sense, of course: scientists and engineers who aren't professional programmers have no reason to spend their time implementing a web service. When you just need a WebUI that works, you can reach straight for the Gradio package.
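To give a sense of how little code that takes, here is a minimal sketch that is not part of llama-recipes: it wraps an arbitrary Python function (a placeholder echo here) in a browser UI.

import gradio as gr

def echo(prompt: str) -> str:
    # Placeholder for any Python function, e.g. a model's generate() call
    return f"You said: {prompt}"

# fn becomes a web form: one textbox in, one textbox out
gr.Interface(
    fn=echo,
    inputs=gr.components.Textbox(lines=3, label="Prompt"),
    outputs=gr.components.Textbox(label="Response"),
    title="Minimal Gradio demo",
).launch()  # share=True would additionally create a temporary public URL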
For now, I've opened a Pull Request to the project, and fortunately it is already getting reviews, so it should be merged soon.
The code is as follows.
examples/inference.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
# from accelerate import init_empty_weights, load_checkpoint_and_dispatch
import fire
import os
import sys
import time
import gradio as gr
import torch
from transformers import LlamaTokenizer
from llama_recipes.inference.safety_utils import get_safety_checker, AgentType
from llama_recipes.inference.model_utils import load_model, load_peft_model
from accelerate.utils import is_xpu_available
def main(
    model_name,
    peft_model: str=None,
    quantization: bool=False,
    max_new_tokens =100, #The maximum numbers of tokens to generate
    prompt_file: str=None,
    seed: int=42, #seed value for reproducibility
    do_sample: bool=True, #Whether or not to use sampling ; use greedy decoding otherwise.
    min_length: int=None, #The minimum length of the sequence to be generated, input prompt + min_new_tokens
    use_cache: bool=True, #[optional] Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.
    top_p: float=1.0, # [optional] If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation.
    temperature: float=1.0, # [optional] The value used to modulate the next token probabilities.
    top_k: int=50, # [optional] The number of highest probability vocabulary tokens to keep for top-k-filtering.
    repetition_penalty: float=1.0, #The parameter for repetition penalty. 1.0 means no penalty.
    length_penalty: int=1, #[optional] Exponential penalty to the length that is used with beam-based generation.
    enable_azure_content_safety: bool=False, # Enable safety check with Azure content safety api
    enable_sensitive_topics: bool=False, # Enable check for sensitive topics using AuditNLG APIs
    enable_salesforce_content_safety: bool=True, # Enable safety check with Salesforce safety flan t5
    enable_llamaguard_content_safety: bool=False,
    max_padding_length: int=None, # the max padding length to be used with tokenizer padding the prompts.
    use_fast_kernels: bool = False, # Enable using SDPA from PyTorch Accelerated Transformers, making use of Flash Attention and Xformer memory-efficient kernels
    **kwargs
):
    def evaluate(user_prompt, temperature, top_p, top_k, max_new_tokens, **kwargs):
        safety_checker = get_safety_checker(enable_azure_content_safety,
                                            enable_sensitive_topics,
                                            enable_salesforce_content_safety,
                                            enable_llamaguard_content_safety
                                            )

        # Safety check of the user prompt
        safety_results = [check(user_prompt) for check in safety_checker]
        are_safe = all([r[1] for r in safety_results])
        if are_safe:
            print("User prompt deemed safe.")
            print(f"User prompt:\n{user_prompt}")
        else:
            print("User prompt deemed unsafe.")
            for method, is_safe, report in safety_results:
                if not is_safe:
                    print(method)
                    print(report)
            print("Skipping the inference as the prompt is not safe.")
            sys.exit(1)  # Exit the program with an error status

        # Set the seeds for reproducibility
        if is_xpu_available():
            torch.xpu.manual_seed(seed)
        else:
            torch.cuda.manual_seed(seed)
        torch.manual_seed(seed)

        model = load_model(model_name, quantization, use_fast_kernels)
        if peft_model:
            model = load_peft_model(model, peft_model)

        model.eval()

        tokenizer = LlamaTokenizer.from_pretrained(model_name)
        tokenizer.pad_token = tokenizer.eos_token

        batch = tokenizer(user_prompt, padding='max_length', truncation=True, max_length=max_padding_length, return_tensors="pt")
        if is_xpu_available():
            batch = {k: v.to("xpu") for k, v in batch.items()}
        else:
            batch = {k: v.to("cuda") for k, v in batch.items()}

        start = time.perf_counter()
        with torch.no_grad():
            outputs = model.generate(
                **batch,
                max_new_tokens=max_new_tokens,
                do_sample=do_sample,
                top_p=top_p,
                temperature=temperature,
                min_length=min_length,
                use_cache=use_cache,
                top_k=top_k,
                repetition_penalty=repetition_penalty,
                length_penalty=length_penalty,
                **kwargs
            )
        e2e_inference_time = (time.perf_counter()-start)*1000
        print(f"the inference time is {e2e_inference_time} ms")
        output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Safety check of the model output
        safety_results = [check(output_text, agent_type=AgentType.AGENT, user_prompt=user_prompt) for check in safety_checker]
        are_safe = all([r[1] for r in safety_results])
        if are_safe:
            print("User input and model output deemed safe.")
            print(f"Model output:\n{output_text}")
        else:
            print("Model output deemed unsafe.")
            for method, is_safe, report in safety_results:
                if not is_safe:
                    print(method)
                    print(report)
        return output_text
    if prompt_file is not None:
        assert os.path.exists(
            prompt_file
        ), f"Provided Prompt file does not exist {prompt_file}"
        with open(prompt_file, "r") as f:
            user_prompt = "\n".join(f.readlines())
        evaluate(user_prompt, temperature, top_p, top_k, max_new_tokens)
    elif not sys.stdin.isatty():
        user_prompt = "\n".join(sys.stdin.readlines())
        evaluate(user_prompt, temperature, top_p, top_k, max_new_tokens)
    else:
        gr.Interface(
            fn=evaluate,
            inputs=[
                gr.components.Textbox(
                    lines=9,
                    label="User Prompt",
                    placeholder="none",
                ),
                gr.components.Slider(
                    minimum=0, maximum=1, value=1.0, label="Temperature"
                ),
                gr.components.Slider(
                    minimum=0, maximum=1, value=1.0, label="Top p"
                ),
                gr.components.Slider(
                    minimum=0, maximum=100, step=1, value=50, label="Top k"
                ),
                gr.components.Slider(
                    minimum=1, maximum=2000, step=1, value=200, label="Max tokens"
                ),
            ],
            outputs=[
                gr.components.Textbox(
                    lines=5,
                    label="Output",
                )
            ],
            title="Llama2 Playground",
            description="https://github.com/facebookresearch/llama-recipes",
        ).queue().launch(server_name="0.0.0.0", share=True)


if __name__ == "__main__":
    fire.Fire(main)
As you can see, an evaluate function has been added. Until now only the CLI was supported, so it was used like this:
python examples/inference.py --model_name '../models/Llama-2-7b-hf' --peft_model 'outputs/7b' --max_new_tokens 580 --quantization true --prompt_file examples/samsum_prompt.txt
or
cat examples/samsum_prompt.txt | python examples/inference.py --model_name '../models/Llama-2-7b-hf' --peft_model 'outputs/7b' --max_new_tokens 580 --quantization true
Now, if you just run it (without a prompt file or piped stdin):
python examples/inference.py --model_name '../models/Llama-2-7b-hf' --peft_model 'outputs/7b' --max_new_tokens 580 --quantization true
Running on local URL: http://0.0.0.0:7860
Running on public URL: https://???????????.gradio.live
This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)
What's interesting is that Gradio provides a unique public URL, which makes the local machine reachable from outside. If you paste the contents of examples/samsum_prompt.txt into the User Prompt box and click the Submit button, you can see that the inference runs fine.
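If you would rather not open a temporary public tunnel, a small change to the launch call keeps the UI reachable only on your own network. This is just a sketch of that variant, not part of the patch; the run_local_demo wrapper and port 7860 are illustrative.

import gradio as gr

def run_local_demo(fn):
    # Same Interface pattern as in inference.py, but share=False means Gradio
    # serves only on the local address and no gradio.live link is created.
    gr.Interface(
        fn=fn,
        inputs=gr.components.Textbox(lines=9, label="User Prompt"),
        outputs=gr.components.Textbox(lines=5, label="Output"),
        title="Llama2 Playground (local only)",
    ).queue().launch(server_name="0.0.0.0", server_port=7860, share=False)

With share=False you would then open http://<machine-ip>:7860 from another machine on the same network instead of using the gradio.live URL.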