modified model path

app.py CHANGED
@@ -4,19 +4,26 @@ import os
 import torch
 
 theme = "darkgrass"
-title = "
-model_name = "EleutherAI/
+title = "Polyglot(Korean) Demo"
+model_name = "EleutherAI/polyglot-ko-1.3b"
+
+bad_words = [
+    '...',
+    '....',
+    '(중략)',
+    'http'
+]
 
-description = "
-
+description = "polyglot (1.3B 파라미터 사이즈) 한국어 모델을 시연하는 데모페이지 입니다."
+article = "<p style='text-align: center'><a href='https://github.com/EleutherAI/polyglot' target='_blank'>Polyglot: Large Language Models of Well-balanced Competence in Multi-languages</a></p>"
 examples = [
-    ["
-    ["질문:
+    ["CPU와 GPU의 차이는,"],
+    ["질문: 우크라이나 전쟁이 세계3차대전으로 확전이 될까요? \n답변:"],
     ["2040년 미국은, "]
 ]
-tokenizer = AutoTokenizer.from_pretrained(
+tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(
-
+    model_name
 )
 model.eval()
 
@@ -30,10 +37,7 @@ def predict(text):
         tokens, do_sample=True, temperature=0.8, max_new_tokens=64, top_k=50, top_p=0.8,
         no_repeat_ngram_size=3, repetition_penalty=1.2,
         bad_words_ids=[
-            tokenizer.encode(
-            tokenizer.encode('....'),
-            tokenizer.encode('(중략)'),
-            tokenizer.encode('http')
+            tokenizer.encode(bad_word) for bad_word in bad_words
         ],
         eos_token_id=tokenizer.eos_token_id,
         pad_token_id=tokenizer.pad_token_id
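
For context, a minimal sketch of what app.py does after this commit, assuming the transformers and torch packages are installed. The diff only shows the generate() call, so the prompt encoding and output decoding around it are assumptions here, not the Space's exact code. The single model_name constant now feeds both from_pretrained calls, and bad_words_ids is built from the shared bad_words list: '...', '....', '(중략)' (meaning "(omitted)"), and 'http'.

# Sketch of the refactored flow; running this downloads the 1.3B model weights.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "EleutherAI/polyglot-ko-1.3b"
bad_words = ['...', '....', '(중략)', 'http']

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

def predict(text):
    # The encode/decode wrapper is assumed; the diff shows only generate().
    tokens = tokenizer(text, return_tensors="pt").input_ids
    with torch.no_grad():
        generated = model.generate(
            tokens, do_sample=True, temperature=0.8, max_new_tokens=64,
            top_k=50, top_p=0.8,
            no_repeat_ngram_size=3, repetition_penalty=1.2,
            # Each banned string is tokenized once; generate() then refuses to
            # emit those token-id sequences in the sampled continuation.
            bad_words_ids=[tokenizer.encode(bad_word) for bad_word in bad_words],
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )
    return tokenizer.decode(generated[0])

print(predict("CPU와 GPU의 차이는,"))  # "The difference between CPU and GPU is,"

bad_words_ids expects a list of token-id lists, which is why each string is passed through tokenizer.encode individually; keeping the strings in one bad_words list means the banned set and the encoding logic live in a single place instead of four hand-written encode calls.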
|