add readme (#10)
* Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * Update Readme.md * remove submodule * add mPLUG MiniGPT4 * Update Readme.md * Update Readme.md * Update Readme.md --------- Co-authored-by: Yuliang Liu <34134635+Yuliang-Liu@users.noreply.github.com>
This commit is contained in:
0
models/mPLUG_Owl/serve/__init__.py
Normal file
0
models/mPLUG_Owl/serve/__init__.py
Normal file
181
models/mPLUG_Owl/serve/conversation.py
Normal file
181
models/mPLUG_Owl/serve/conversation.py
Normal file
@@ -0,0 +1,181 @@
|
||||
import dataclasses
|
||||
from enum import auto, Enum
|
||||
from typing import List, Tuple
|
||||
import os
|
||||
from decord import VideoReader
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
class SeparatorStyle(Enum):
    """Different separator style."""
    # SINGLE: one separator string (``Conversation.sep``) after every turn.
    SINGLE = auto()
    # TWO: alternate between ``Conversation.sep`` and ``Conversation.sep2``.
    TWO = auto()
|
||||
@dataclasses.dataclass
class Conversation:
    """A class that keeps all conversation history.

    Holds the system preamble, role names, the running ``[role, message]``
    history, and separator configuration used to serialize the history into
    a model prompt.  A message may be either plain text or a
    ``(text, image)`` tuple for multimodal turns, where ``image`` is a PIL
    image or a string video path.
    """
    system: str                 # system preamble prepended to every prompt
    roles: List[str]            # role display names, e.g. ("Human", "AI")
    messages: List[List[str]]   # [role, message] pairs; message may be (text, image)
    offset: int                 # index of the first message to render/encode
    sep_style: SeparatorStyle = SeparatorStyle.SINGLE
    sep: str = "\n "
    sep2: str = None            # second separator, only used with SeparatorStyle.TWO

    skip_next: bool = False     # UI flag: skip the next generation step

    def get_prompt(self):
        """Serialize the conversation into a single prompt string.

        NOTE: this intentionally overwrites ``self.system`` and ``self.sep``
        with the canonical values before serializing; downstream code relies
        on this side effect, so it is preserved.
        """
        self.system = "The following is a conversation between a curious human and AI. The AI gives helpful, detailed, and polite answers to the human's questions."
        self.sep = "\n"
        if self.sep_style == SeparatorStyle.SINGLE:
            ret = self.system + self.sep
            for role, message in self.messages:
                if message:
                    if type(message) is tuple:
                        # Multimodal message: keep only the text part.
                        message, _ = message
                    ret += role + ": " + message + self.sep
                else:
                    if role != "":
                        # Empty message: emit a bare "Role:" so the model continues it.
                        ret += role + ":"
            return ret
        elif self.sep_style == SeparatorStyle.TWO:
            seps = [self.sep, self.sep2]
            ret = self.system + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _ = message
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def append_message(self, role, message):
        """Append a ``[role, message]`` pair to the history."""
        self.messages.append([role, message])

    def get_index(self, num_frames, num_segments):
        """Return ``num_segments`` frame indices evenly spread over ``num_frames``."""
        seg_size = float(num_frames - 1) / num_segments
        start = int(seg_size / 2)
        offsets = np.array([
            start + int(np.round(seg_size * idx)) for idx in range(num_segments)
        ])
        return offsets

    def load_video(self, path, num_frames=4):
        """Decode ``num_frames`` evenly spaced 224x224 RGB frames from a video file."""
        vr = VideoReader(path, height=224, width=224)
        total_frames = len(vr)
        frame_indices = self.get_index(total_frames, num_frames)
        images_group = list()
        for frame_index in frame_indices:
            img = Image.fromarray(vr[frame_index].asnumpy()).convert('RGB')
            images_group.append(img)
        return images_group

    def get_images(self, log_dir=None):
        """Return all user-side images as base64-encoded JPEG strings.

        String image values are treated as video paths and expanded into
        sampled frames; PIL images are encoded directly.  ``log_dir`` is
        unused and kept for interface compatibility.
        """
        # Hoisted out of the loop (the original re-imported per iteration).
        import base64
        from io import BytesIO
        images = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            # Even indices are user turns; only those may carry images.
            if i % 2 == 0 and type(msg) is tuple:
                msg, image = msg
                if isinstance(image, str):
                    image_pils = self.load_video(image)
                else:
                    image_pils = [image]
                for image_pil in image_pils:
                    buffered = BytesIO()
                    image_pil.save(buffered, format="JPEG")
                    images.append(base64.b64encode(buffered.getvalue()).decode())
        return images

    def to_gradio_chatbot(self):
        """Convert the history into Gradio's list-of-[user, assistant] pairs,
        inlining images/videos as base64 HTML tags."""
        import base64
        from io import BytesIO
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset:]):
            if i % 2 == 0:
                if type(msg) is tuple:
                    msg, image = msg
                    if isinstance(image, str):
                        # A string image is a video path: embed the raw file
                        # and replace the 4-frame placeholder block.
                        with open(image, 'rb') as f:
                            data = f.read()
                        img_b64_str = base64.b64encode(data).decode()
                        image_str = f'<video src="data:video/mp4;base64,{img_b64_str}" controls width="426" height="240"></video>'
                        msg = msg.replace('\n'.join(['<image>']*4), image_str)
                    else:
                        # Downscale so the longest edge is <= 800px and the
                        # shortest <= 400px, preserving aspect ratio.
                        max_hw, min_hw = max(image.size), min(image.size)
                        aspect_ratio = max_hw / min_hw
                        max_len, min_len = 800, 400
                        shortest_edge = int(min(max_len / aspect_ratio, min_len, min_hw))
                        longest_edge = int(shortest_edge * aspect_ratio)
                        W, H = image.size
                        if H > W:
                            H, W = longest_edge, shortest_edge
                        else:
                            H, W = shortest_edge, longest_edge
                        image = image.resize((W, H))
                        buffered = BytesIO()
                        image.save(buffered, format="JPEG")
                        img_b64_str = base64.b64encode(buffered.getvalue()).decode()
                        # NOTE(review): the payload is JPEG but the URI says
                        # image/png; browsers sniff the real type, so this is
                        # left unchanged to avoid altering frontend behavior.
                        img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="user upload image" />'
                        msg = msg.replace('<image>', img_str)
                ret.append([msg, None])
            else:
                # Odd index: assistant reply fills the second slot of the pair.
                ret[-1][-1] = msg
        return ret

    def copy(self):
        """Return a copy with a freshly duplicated messages list."""
        return Conversation(
            system=self.system,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2)

    def dict(self):
        """Return a JSON-serializable snapshot of the conversation."""
        # Encode once -- the original called get_images() twice, which
        # re-decoded and re-encoded every image/video.
        images = self.get_images()
        if len(images) > 0:
            return {
                "system": self.system,
                "roles": self.roles,
                "messages": [[x, y[0] if type(y) is tuple else y] for x, y in self.messages],
                "offset": self.offset,
                "images": images,
                "sep": self.sep,
                "sep2": self.sep2,
            }
        return {
            "system": self.system,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
            "sep": self.sep,
            "sep2": self.sep2,
        }
|
||||
|
||||
# Default conversation template for the mPLUG-Owl demo server: SINGLE
# separator style with "###" between turns and an empty initial history.
mplug_owl_v0 = Conversation(
    system="The following is a conversation between a curious human and assistant AI. The assistant AI gives helpful, detailed, and polite answers to the human's questions.",
    roles=("Human", "AI"),
    messages=(),
    offset=0,
    sep_style=SeparatorStyle.SINGLE,
    sep="###",
)

default_conversation = mplug_owl_v0
|
||||
|
||||
if __name__ == "__main__":
    # Smoke test: print the default template's serialized prompt.
    print(default_conversation.get_prompt())
73
models/mPLUG_Owl/serve/gradio_css.py
Normal file
73
models/mPLUG_Owl/serve/gradio_css.py
Normal file
@@ -0,0 +1,73 @@
|
||||
# Pygments-style syntax-highlighting colour rules, scoped to the #chatbot
# element so fenced code blocks rendered inside the chat window get colours.
code_highlight_css = (
    """
#chatbot .hll { background-color: #ffffcc }
#chatbot .c { color: #408080; font-style: italic }
#chatbot .err { border: 1px solid #FF0000 }
#chatbot .k { color: #008000; font-weight: bold }
#chatbot .o { color: #666666 }
#chatbot .ch { color: #408080; font-style: italic }
#chatbot .cm { color: #408080; font-style: italic }
#chatbot .cp { color: #BC7A00 }
#chatbot .cpf { color: #408080; font-style: italic }
#chatbot .c1 { color: #408080; font-style: italic }
#chatbot .cs { color: #408080; font-style: italic }
#chatbot .gd { color: #A00000 }
#chatbot .ge { font-style: italic }
#chatbot .gr { color: #FF0000 }
#chatbot .gh { color: #000080; font-weight: bold }
#chatbot .gi { color: #00A000 }
#chatbot .go { color: #888888 }
#chatbot .gp { color: #000080; font-weight: bold }
#chatbot .gs { font-weight: bold }
#chatbot .gu { color: #800080; font-weight: bold }
#chatbot .gt { color: #0044DD }
#chatbot .kc { color: #008000; font-weight: bold }
#chatbot .kd { color: #008000; font-weight: bold }
#chatbot .kn { color: #008000; font-weight: bold }
#chatbot .kp { color: #008000 }
#chatbot .kr { color: #008000; font-weight: bold }
#chatbot .kt { color: #B00040 }
#chatbot .m { color: #666666 }
#chatbot .s { color: #BA2121 }
#chatbot .na { color: #7D9029 }
#chatbot .nb { color: #008000 }
#chatbot .nc { color: #0000FF; font-weight: bold }
#chatbot .no { color: #880000 }
#chatbot .nd { color: #AA22FF }
#chatbot .ni { color: #999999; font-weight: bold }
#chatbot .ne { color: #D2413A; font-weight: bold }
#chatbot .nf { color: #0000FF }
#chatbot .nl { color: #A0A000 }
#chatbot .nn { color: #0000FF; font-weight: bold }
#chatbot .nt { color: #008000; font-weight: bold }
#chatbot .nv { color: #19177C }
#chatbot .ow { color: #AA22FF; font-weight: bold }
#chatbot .w { color: #bbbbbb }
#chatbot .mb { color: #666666 }
#chatbot .mf { color: #666666 }
#chatbot .mh { color: #666666 }
#chatbot .mi { color: #666666 }
#chatbot .mo { color: #666666 }
#chatbot .sa { color: #BA2121 }
#chatbot .sb { color: #BA2121 }
#chatbot .sc { color: #BA2121 }
#chatbot .dl { color: #BA2121 }
#chatbot .sd { color: #BA2121; font-style: italic }
#chatbot .s2 { color: #BA2121 }
#chatbot .se { color: #BB6622; font-weight: bold }
#chatbot .sh { color: #BA2121 }
#chatbot .si { color: #BB6688; font-weight: bold }
#chatbot .sx { color: #008000 }
#chatbot .sr { color: #BB6688 }
#chatbot .s1 { color: #BA2121 }
#chatbot .ss { color: #19177C }
#chatbot .bp { color: #008000 }
#chatbot .fm { color: #0000FF }
#chatbot .vc { color: #19177C }
#chatbot .vg { color: #19177C }
#chatbot .vi { color: #19177C }
#chatbot .vm { color: #19177C }
#chatbot .il { color: #666666 }
""")
#.highlight { background: #f8f8f8; }
|
||||
|
169
models/mPLUG_Owl/serve/gradio_patch.py
Normal file
169
models/mPLUG_Owl/serve/gradio_patch.py
Normal file
@@ -0,0 +1,169 @@
|
||||
"""
|
||||
Adopted from https://github.com/gradio-app/gradio/blob/main/gradio/components.py
|
||||
Fix a markdown render problem.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from gradio.components import *
|
||||
from markdown2 import Markdown
|
||||
|
||||
|
||||
class _Keywords(Enum):
    """Internal sentinel values used by Gradio's component update machinery."""
    NO_VALUE = "NO_VALUE"  # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()`
    FINISHED_ITERATING = "FINISHED_ITERATING"  # Used to skip processing of a component's value (needed for generators + state)
||||
|
||||
|
||||
@document("style")
# class Chatbot(Changeable, Selectable, IOComponent, JSONSerializable):
class Chatbot(Changeable, IOComponent, JSONSerializable):
    """
    Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
    Preprocessing: this component does *not* accept input.
    Postprocessing: expects function to return a {List[Tuple[str | None | Tuple, str | None | Tuple]]}, a list of tuples with user message and response messages. Messages should be strings, tuples, or Nones. If the message is a string, it can include Markdown. If it is a tuple, it should consist of (string filepath to image/video/audio, [optional string alt text]). Messages that are `None` are not displayed.

    Demos: chatbot_simple, chatbot_multimodal
    """

    def __init__(
        self,
        value: List[Tuple[str | None, str | None]] | Callable | None = None,
        color_map: Dict[str, str] | None = None,  # Parameter moved to Chatbot.style()
        *,
        label: str | None = None,
        every: float | None = None,
        show_label: bool = True,
        visible: bool = True,
        elem_id: str | None = None,
        elem_classes: List[str] | str | None = None,
        **kwargs,
    ):
        """
        Parameters:
            value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
            label: component name in interface.
            every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
            show_label: if True, will display label.
            visible: If False, component will be hidden.
            elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
            elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
        """
        if color_map is not None:
            warnings.warn(
                "The 'color_map' parameter has been deprecated.",
            )
        #self.md = utils.get_markdown_parser()
        # markdown2 with fenced code blocks, tables, and newline-as-break --
        # this is the "markdown render problem" fix this patched class exists for.
        self.md = Markdown(extras=["fenced-code-blocks", "tables", "break-on-newline"])
        self.select: EventListenerMethod
        """
        Event listener for when the user selects message from Chatbot.
        Uses event data gradio.SelectData to carry `value` referring to text of selected message, and `index` tuple to refer to [message, participant] index.
        See EventData documentation on how to use this event data.
        """

        IOComponent.__init__(
            self,
            label=label,
            every=every,
            show_label=show_label,
            visible=visible,
            elem_id=elem_id,
            elem_classes=elem_classes,
            value=value,
            **kwargs,
        )

    def get_config(self):
        # Component config sent to the frontend; merges IOComponent's base config.
        return {
            "value": self.value,
            # "selectable": self.selectable,
            **IOComponent.get_config(self),
        }

    @staticmethod
    def update(
        value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
        label: str | None = None,
        show_label: bool | None = None,
        visible: bool | None = None,
    ):
        # Build a Gradio update dict; NO_VALUE sentinel means "leave value as is".
        updated_config = {
            "label": label,
            "show_label": show_label,
            "visible": visible,
            "value": value,
            "__type__": "update",
        }
        return updated_config

    def _process_chat_messages(
        self, chat_message: str | Tuple | List | Dict | None
    ) -> str | Dict | None:
        # Normalize a single message into rendered HTML or a media-file dict.
        if chat_message is None:
            return None
        elif isinstance(chat_message, (tuple, list)):
            # (filepath, [alt_text]) -> media descriptor for the frontend.
            mime_type = processing_utils.get_mimetype(chat_message[0])
            return {
                "name": chat_message[0],
                "mime_type": mime_type,
                "alt_text": chat_message[1] if len(chat_message) > 1 else None,
                "data": None,  # These last two fields are filled in by the frontend
                "is_file": True,
            }
        elif isinstance(
            chat_message, dict
        ):  # This happens for previously processed messages
            return chat_message
        elif isinstance(chat_message, str):
            #return self.md.render(chat_message)
            return str(self.md.convert(chat_message))
        else:
            raise ValueError(f"Invalid message for Chatbot component: {chat_message}")

    def postprocess(
        self,
        y: List[
            Tuple[str | Tuple | List | Dict | None, str | Tuple | List | Dict | None]
        ],
    ) -> List[Tuple[str | Dict | None, str | Dict | None]]:
        """
        Parameters:
            y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
        Returns:
            List of tuples representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information.
        """
        if y is None:
            return []
        processed_messages = []
        for message_pair in y:
            assert isinstance(
                message_pair, (tuple, list)
            ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
            assert (
                len(message_pair) == 2
            ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
            processed_messages.append(
                (
                    #self._process_chat_messages(message_pair[0]),
                    # User-side message is shown verbatim inside <pre> rather than
                    # markdown-rendered (see the commented-out call above).
                    '<pre style="font-family: var(--font)">' +
                    message_pair[0] + "</pre>",
                    self._process_chat_messages(message_pair[1]),
                )
            )
        return processed_messages

    def style(self, height: int | None = None, **kwargs):
        """
        This method can be used to change the appearance of the Chatbot component.
        """
        if height is not None:
            self._style["height"] = height
        if kwargs.get("color_map") is not None:
            warnings.warn("The 'color_map' parameter has been deprecated.")

        Component.style(
            self,
            **kwargs,
        )
        return self
|
||||
|
||||
|
449
models/mPLUG_Owl/serve/io_utils.py
Normal file
449
models/mPLUG_Owl/serve/io_utils.py
Normal file
@@ -0,0 +1,449 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
@Time : 2023-03-23 11:42
|
||||
@Author : zhimiao.chh
|
||||
@Desc :
|
||||
"""
|
||||
|
||||
import re
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import hashlib
|
||||
from io import StringIO, BytesIO
|
||||
from contextlib import contextmanager
|
||||
from typing import List
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
|
||||
class IO:
    """Abstract file-system interface.

    Concrete backends (local filesystem, OSS, ...) override the primitive
    operations; ``md5`` and ``islocal`` are shared helpers built on top.
    """

    @staticmethod
    def register(options):
        pass

    def open(self, path: str, mode: str):
        raise NotImplementedError

    def exists(self, path: str) -> bool:
        raise NotImplementedError

    def move(self, src: str, dst: str):
        raise NotImplementedError

    def copy(self, src: str, dst: str):
        raise NotImplementedError

    def makedirs(self, path: str, exist_ok=True):
        raise NotImplementedError

    def remove(self, path: str):
        raise NotImplementedError

    def listdir(self, path: str, recursive=False, full_path=False, contains=None):
        raise NotImplementedError

    def isdir(self, path: str) -> bool:
        raise NotImplementedError

    def isfile(self, path: str) -> bool:
        raise NotImplementedError

    def abspath(self, path: str) -> str:
        raise NotImplementedError

    def last_modified(self, path: str) -> datetime:
        raise NotImplementedError

    def md5(self, path: str) -> str:
        """Stream the file through MD5 in 4 KiB chunks; return the hex digest."""
        digest = hashlib.md5()
        with self.open(path, 'rb') as fh:
            while True:
                chunk = fh.read(4096)
                if not chunk:
                    break
                digest.update(chunk)
        return digest.hexdigest()

    # Matches remote schemes this package knows about (oss, http, https).
    re_remote = re.compile(r'(oss|https?)://')

    def islocal(self, path: str) -> bool:
        """True unless the (left-stripped) path starts with a remote scheme."""
        return self.re_remote.match(path.lstrip()) is None
|
||||
|
||||
|
||||
class DefaultIO(IO):
    """Local-filesystem backend; rejects any remote (oss://, http://) path."""
    __name__ = 'DefaultIO'

    def _check_path(self, path):
        # Guard: this backend cannot serve remote paths.
        if not self.islocal(path):
            raise RuntimeError(
                'Credentials must be provided to use oss path. '
                'Make sure you have created "user/modules/oss_credentials.py" according to ReadMe.')

    def open(self, path, mode='r'):
        self._check_path(path)
        path = self.abspath(path)
        return open(path, mode=mode)

    def exists(self, path):
        self._check_path(path)
        path = self.abspath(path)
        return os.path.exists(path)

    def move(self, src, dst):
        self._check_path(src)
        self._check_path(dst)
        src = self.abspath(src)
        dst = self.abspath(dst)
        shutil.move(src, dst)

    def copy(self, src, dst):
        self._check_path(src)
        self._check_path(dst)
        src = self.abspath(src)
        dst = self.abspath(dst)
        try:
            shutil.copyfile(src, dst)
        except shutil.SameFileError:
            # Copying a file onto itself is a no-op, not an error.
            pass

    def makedirs(self, path, exist_ok=True):
        self._check_path(path)
        path = self.abspath(path)
        os.makedirs(path, exist_ok=exist_ok)

    def remove(self, path):
        """Remove a file, or recursively remove a directory tree."""
        self._check_path(path)
        path = self.abspath(path)
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)

    def listdir(self, path, recursive=False, full_path=False, contains=None):
        """List entries under ``path``; ``contains`` filters names by substring.

        BUGFIX: the original applied the ``contains`` filter in the
        non-recursive branch only when ``full_path`` was also set; it is now
        applied consistently in every branch.
        """
        self._check_path(path)
        path = self.abspath(path)
        contains = contains or ''
        if recursive:
            files = (os.path.join(dp, f) if full_path else f for dp, dn, fn in os.walk(path) for f in fn)
            files = [file for file in files if contains in file]
        else:
            files = [file for file in os.listdir(path) if contains in file]
            if full_path:
                files = [os.path.join(path, file) for file in files]
        return files

    def isdir(self, path):
        return os.path.isdir(path)

    def isfile(self, path):
        return os.path.isfile(path)

    def abspath(self, path):
        return os.path.abspath(path)

    def last_modified(self, path):
        return datetime.fromtimestamp(os.path.getmtime(path))
|
||||
|
||||
|
||||
class OSS(DefaultIO):
    "Mixed IO module to support both system-level and OSS IO methods"
    __name__ = 'OSS'

    def __init__(self, access_key_id: str, access_key_secret: str, region_bucket: List[List[str]]):
        """
        the value of "region_bucket" should be something like [["cn-hangzhou", "<yourBucketName>"], ["cn-zhangjiakou", "<yourBucketName>"]],
        specifying your buckets and corresponding regions
        """
        from oss2 import Auth, Bucket, ObjectIterator
        super().__init__()
        self.ObjectIterator = ObjectIterator
        self.auth = Auth(access_key_id, access_key_secret)
        # One Bucket client per configured (region, bucket) pair.
        self.buckets = {
            bucket_name: Bucket(self.auth, f'http://oss-{region}.aliyuncs.com', bucket_name)
            for region, bucket_name in region_bucket
        }
        self.oss_pattern = re.compile(r'oss://([^/]+)/(.+)')

    def _split_name(self, path):
        # "oss://bucket/key" -> ("bucket", "key")
        m = self.oss_pattern.match(path)
        if not m:
            raise IOError(f'invalid oss path: "{path}", should be "oss://<bucket_name>/path"')
        bucket_name, path = m.groups()
        return bucket_name, path

    def _split(self, path):
        # Resolve an oss:// path to its registered Bucket client and object key.
        bucket_name, path = self._split_name(path)
        try:
            bucket = self.buckets[bucket_name]
        except KeyError:
            raise IOError(f'Bucket {bucket_name} not registered in oss_credentials.py')
        return bucket, path

    def open(self, full_path, mode='r'):
        """Open a local or oss:// path; returns a file-like usable in ``with``."""
        if not full_path.startswith('oss://'):
            return super().open(full_path, mode)

        bucket, path = self._split(full_path)
        with mute_stderr():
            path_exists = bucket.object_exists(path)
        if 'w' in mode:
            # Truncate-on-write semantics: drop any existing object first.
            if path_exists:
                bucket.delete_object(path)
            if 'b' in mode:
                return BinaryOSSFile(bucket, path)
            return OSSFile(bucket, path)
        elif mode == 'a':
            position = bucket.head_object(path).content_length if path_exists else 0
            return OSSFile(bucket, path, position=position)
        else:
            if not path_exists:
                raise FileNotFoundError(full_path)
            obj = bucket.get_object(path)
            # auto cache large files to avoid memory issues
            # if obj.content_length > 30 * 1024 ** 2:  # 30M
            #     from da.utils import cache_file
            #     path = cache_file(full_path)
            #     return super().open(path, mode)
            if mode == 'rb':
                # TODO for a large file, this will load the whole file into memory
                return NullContextWrapper(BytesIO(obj.read()))
            else:
                assert mode == 'r'
                return NullContextWrapper(StringIO(obj.read().decode()))

    def exists(self, path):
        """True if ``path`` exists as an object or as a (pseudo-)directory prefix."""
        if not path.startswith('oss://'):
            return super().exists(path)

        bucket, _path = self._split(path)
        # if file exists
        exists = self._file_exists(bucket, _path)
        # if directory exists
        if not exists:
            try:
                self.listdir(path)
                exists = True
            except FileNotFoundError:
                pass
        return exists

    def _file_exists(self, bucket, path):
        with mute_stderr():
            return bucket.object_exists(path)

    def move(self, src, dst):
        """Move src to dst; implemented as copy + remove when OSS is involved."""
        if not src.startswith('oss://') and not dst.startswith('oss://'):
            return super().move(src, dst)
        self.copy(src, dst)
        self.remove(src)

    def copy(self, src, dst):
        """Copy local<->local, download, upload, or server-side oss->oss."""
        cloud_src = src.startswith('oss://')
        cloud_dst = dst.startswith('oss://')
        if not cloud_src and not cloud_dst:
            return super().copy(src, dst)

        # download
        if cloud_src and not cloud_dst:
            bucket, src = self._split(src)
            obj = bucket.get_object(src)
            if obj.content_length > 100 * 1024 ** 2:  # 100M: show a progress bar
                from tqdm import tqdm
                progress = None

                def callback(i, n):
                    nonlocal progress
                    if progress is None:
                        progress = tqdm(total=n, unit='B', unit_scale=True, unit_divisor=1024, leave=False,
                                        desc='downloading')
                    progress.update(i - progress.n)

                bucket.get_object_to_file(src, dst, progress_callback=callback)
                if progress is not None:
                    progress.close()
            else:
                bucket.get_object_to_file(src, dst)
            return
        bucket, dst = self._split(dst)
        # upload
        if cloud_dst and not cloud_src:
            bucket.put_object_from_file(dst, src)
            return
        # copy between oss paths
        if src != dst:
            src_bucket_name, src = self._split_name(src)
            bucket.copy_object(src_bucket_name, src, dst)
            # TODO: support large file copy
            # https://help.aliyun.com/document_detail/88465.html?spm=a2c4g.11174283.6.882.4d157da2mgp3xc

    def listdir(self, path, recursive=False, full_path=False, contains=None):
        """List entries under a prefix; raises FileNotFoundError for an empty prefix."""
        if not path.startswith('oss://'):
            return super().listdir(path, recursive, full_path, contains)

        bucket, path = self._split(path)
        path = path.rstrip('/') + '/'
        files = [obj.key for obj in self.ObjectIterator(bucket, prefix=path, delimiter='' if recursive else '/')]
        try:
            # OSS may return the directory placeholder object itself; drop it.
            files.remove(path)
        except ValueError:
            pass
        if full_path:
            files = [f'oss://{bucket.bucket_name}/{file}' for file in files]
        else:
            files = [file[len(path):] for file in files]
        if not files:
            raise FileNotFoundError(f'No such directory: oss://{bucket.bucket_name}/{path}')
        files = [file for file in files if (contains or '') in file]
        return files

    def remove(self, path):
        """Delete a file, or recursively delete every object under a prefix."""
        if not path.startswith('oss://'):
            return super().remove(path)

        if self.isfile(path):
            paths = [path]
        else:
            paths = self.listdir(path, recursive=True, full_path=True)
        for path in paths:
            bucket, path = self._split(path)
            bucket.delete_object(path)

    def makedirs(self, path, exist_ok=True):
        # there is no need to create directory in oss
        if not path.startswith('oss://'):
            # BUGFIX: forward exist_ok (the original dropped it, so callers
            # passing exist_ok=False silently got exist_ok=True semantics).
            return super().makedirs(path, exist_ok=exist_ok)

    def isdir(self, path):
        if not path.startswith('oss://'):
            return super().isdir(path)
        # An OSS "directory" is any prefix with at least one object under it.
        return self.exists(path.rstrip('/') + '/')

    def isfile(self, path):
        if not path.startswith('oss://'):
            # BUGFIX: the original delegated to super().isdir() here, which
            # misclassified every local regular file.
            return super().isfile(path)
        return self.exists(path) and not self.isdir(path)

    def abspath(self, path):
        if not path.startswith('oss://'):
            return super().abspath(path)
        # oss:// paths are already absolute.
        return path

    def authorize(self, path):
        """Make an oss object publicly readable."""
        if not path.startswith('oss://'):
            raise ValueError('Only oss path can use "authorize"')
        import oss2
        bucket, path = self._split(path)
        bucket.put_object_acl(path, oss2.OBJECT_ACL_PUBLIC_READ)

    def last_modified(self, path):
        """Last-modified time; OSS reports GMT, shifted +8h (Beijing time)."""
        if not path.startswith('oss://'):
            return super().last_modified(path)
        bucket, path = self._split(path)
        return datetime.strptime(
            bucket.get_object_meta(path).headers['Last-Modified'],
            r'%a, %d %b %Y %H:%M:%S %Z'
        ) + timedelta(hours=8)
|
||||
|
||||
|
||||
class OSSFile:
    """Append-only text handle backed by an OSS object.

    Outside a ``with`` block every ``write`` issues an append request
    immediately; inside a ``with`` block writes go to an in-memory buffer
    that is flushed once on exit.
    """

    def __init__(self, bucket, path, position=0):
        self.bucket = bucket
        self.path = path
        self.position = position
        self.buffer = StringIO()

    def write(self, content):
        # without a "with" statement, the content is written immediately without buffer
        # when writing a large batch of contents at a time, this will be quite slow
        import oss2
        pending = self.buffer.getvalue()
        if pending:
            # Prepend anything left over from a previously failed write.
            content = pending + content
            self.buffer.close()
            self.buffer = StringIO()
        try:
            result = self.bucket.append_object(self.path, self.position, content)
            self.position = result.next_position
        except oss2.exceptions.PositionNotEqualToLength:
            raise RuntimeError(
                f'Race condition detected. It usually means multiple programs were writing to the same file'
                f'oss://{self.bucket.bucket_name}/{self.path} (Error 409: PositionNotEqualToLength)')
        except (oss2.exceptions.RequestError, oss2.exceptions.ServerError) as e:
            # Transient network/server failure: keep content buffered for retry.
            self.buffer.write(content)
            sys.stderr.write(str(e) + f'when writing to oss://{self.bucket.bucket_name}/{self.path}. Content buffered.')

    def flush(self):
        "Dummy method for compatibility."
        pass

    def close(self):
        "Dummy method for compatibility."
        pass

    def seek(self, position):
        self.position = position

    def __enter__(self):
        return self.buffer

    def __exit__(self, *args):
        import oss2
        try:
            self.bucket.append_object(self.path, self.position, self.buffer.getvalue())
        except oss2.exceptions.RequestError as e:
            # TODO test whether this works
            if 'timeout' not in str(e):
                raise e
            # retry if timeout
            import time
            time.sleep(5)
            self.bucket.append_object(self.path, self.position, self.buffer.getvalue())
|
||||
|
||||
|
||||
class BinaryOSSFile:
    """Context manager that collects binary writes in memory and uploads the
    whole payload to the OSS object on exit."""

    def __init__(self, bucket, path):
        self.bucket = bucket
        self.path = path
        self.buffer = BytesIO()

    def __enter__(self):
        return self.buffer

    def __exit__(self, *args):
        payload = self.buffer.getvalue()
        self.bucket.put_object(self.path, payload)
|
||||
|
||||
|
||||
class NullContextWrapper:
    """Wrap an object so it works as a no-op context manager while delegating
    attribute access and iteration to the wrapped object."""

    def __init__(self, obj):
        self._obj = obj

    def __getattr__(self, name):
        # Forward any unknown attribute to the wrapped object.
        return getattr(self._obj, name)

    def __iter__(self):
        return iter(self._obj)

    def __next__(self):
        return next(self._obj)

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass
|
||||
|
||||
|
||||
@contextmanager
def ignore_io_error(msg=''):
    # Best-effort guard: swallow transient OSS network/server errors from the
    # wrapped block, logging them to stderr with an optional context message.
    import oss2
    try:
        yield
    except (oss2.exceptions.RequestError, oss2.exceptions.ServerError) as e:
        sys.stderr.write(str(e) + ' ' + msg)
|
||||
|
||||
|
||||
@contextmanager
def mute_stderr():
    """Temporarily redirect ``sys.stderr`` to an in-memory sink.

    The original stream is always restored, even if the body raises.
    """
    original = sys.stderr
    sys.stderr = StringIO()
    try:
        yield None
    finally:
        sys.stderr = original
|
92
models/mPLUG_Owl/serve/model_utils.py
Normal file
92
models/mPLUG_Owl/serve/model_utils.py
Normal file
@@ -0,0 +1,92 @@
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import torch
|
||||
import transformers
|
||||
import traceback
|
||||
|
||||
from queue import Queue
|
||||
from threading import Thread
|
||||
|
||||
|
||||
# Special tokens emitted by the various tokenizers; stripped from decoded
# generations. Compiled once at import time instead of on every call.
_SPECIAL_TOKEN_PATTERN = re.compile(
    r"<unk>|<pad>|<s>|</s>|\[PAD\]|<\|endoftext\|>|\[UNK\]|\[CLS\]|\[MASK\]|<\|startofpiece\|>|<\|endofpiece\|>|\[gMASK\]|\[sMASK\]"
)


def post_process_output(text):
    """Strip tokenizer special tokens and surrounding whitespace from a
    decoded generation.

    Args:
        text: raw decoded model output.

    Returns:
        The cleaned text with all special tokens removed and whitespace
        trimmed from both ends.
    """
    return _SPECIAL_TOKEN_PATTERN.sub("", text.strip()).strip()
|
||||
|
||||
|
||||
def post_process_code(code):
    """Undo escaped underscores ("\\_") inside fenced code blocks.

    The text is split on the fence marker; odd-indexed segments are the
    fenced contents and only those are rewritten. Nothing changes unless
    the fences are balanced (an odd number of segments after the split).
    """
    fence = "\n```"
    if fence not in code:
        return code
    segments = code.split(fence)
    if len(segments) % 2 == 1:
        segments = [
            seg.replace("\\_", "_") if idx % 2 == 1 else seg
            for idx, seg in enumerate(segments)
        ]
        code = fence.join(segments)
    return code
|
||||
|
||||
|
||||
class Stream(transformers.StoppingCriteria):
    """Stopping criterion that never stops generation but invokes a callback
    with the tokens produced so far — used to stream partial output out of
    ``model.generate``."""

    def __init__(self, callback_func=None):
        self.callback_func = callback_func

    def __call__(self, input_ids, scores) -> bool:
        if self.callback_func is not None:
            # Forward the first (and only) sequence in the batch.
            self.callback_func(input_ids[0])
        return False  # never ask generation to stop
|
||||
|
||||
|
||||
class Iteratorize:
    """Transforms a function that takes a callback into a lazy iterator
    (generator).

    The wrapped function runs in a background thread; every value it passes
    to its callback is forwarded through a queue and yielded by this
    iterator. Leaving the context (``__exit__``) requests a cooperative
    stop at the worker's next callback invocation.
    """

    def __init__(self, func, kwargs=None, callback=None):
        self.mfunc = func
        self.c_callback = callback
        self.q = Queue()
        self.sentinel = object()
        # BUG FIX: the original used a mutable default ``kwargs={}`` which is
        # shared across all instances; use None and create a fresh dict.
        self.kwargs = {} if kwargs is None else kwargs
        self.stop_now = False

        def _callback(val):
            # Raising inside the callback aborts the wrapped function once a
            # stop has been requested.
            if self.stop_now:
                raise ValueError
            self.q.put(val)

        def gentask():
            # BUG FIX: ``ret`` was unbound when mfunc raised before returning,
            # which made the c_callback call below crash with NameError.
            ret = None
            try:
                ret = self.mfunc(callback=_callback, **self.kwargs)
            except ValueError:
                pass  # expected: cooperative cancellation via _callback
            except Exception:
                traceback.print_exc()
            finally:
                # Always unblock the consumer, whatever happened above.
                self.q.put(self.sentinel)
            if self.c_callback:
                self.c_callback(ret)

        self.thread = Thread(target=gentask)
        self.thread.start()

    def __iter__(self):
        return self

    def __next__(self):
        obj = self.q.get(True, None)
        if obj is self.sentinel:
            raise StopIteration
        return obj

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Ask the worker to stop the next time it produces a value.
        self.stop_now = True
|
171
models/mPLUG_Owl/serve/model_worker.py
Normal file
171
models/mPLUG_Owl/serve/model_worker.py
Normal file
@@ -0,0 +1,171 @@
|
||||
from PIL import Image
|
||||
import torch
|
||||
import gradio as gr
|
||||
import logging
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
import datetime
|
||||
import uuid
|
||||
import base64
|
||||
from io import BytesIO
|
||||
import time
|
||||
import sys
|
||||
sys.path.append("..")
|
||||
|
||||
from .io_utils import IO, DefaultIO, OSS
|
||||
|
||||
import transformers
|
||||
from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor
|
||||
from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration
|
||||
from mplug_owl.configuration_mplug_owl import MplugOwlConfig
|
||||
from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer
|
||||
from transformers import GenerationConfig
|
||||
|
||||
from .model_utils import post_process_output, Stream, Iteratorize
|
||||
|
||||
server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
|
||||
|
||||
class mPLUG_Owl_Server:
    """Wraps an mPLUG-Owl checkpoint and exposes (streaming) generation for
    the web demo.

    NOTE(review): assumes the mplug_owl package classes and a CUDA device
    are available — confirm in deployment.
    """

    def __init__(
        self,
        base_model='MAGAer13/mplug-owl-llama-7b',
        log_dir='./',
        load_in_8bit=False,
        bf16=True,
        device="cuda",
        io=None
    ):
        self.log_dir = log_dir
        self.image_processor = MplugOwlImageProcessor.from_pretrained(base_model)
        self.tokenizer = MplugOwlTokenizer.from_pretrained(base_model)
        self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer)
        self.model = MplugOwlForConditionalGeneration.from_pretrained(
            base_model,
            load_in_8bit=load_in_8bit,
            torch_dtype=torch.bfloat16 if bf16 else torch.half,
            device_map="auto"
        )
        # Keep the processor's tokenizer as the canonical one.
        self.tokenizer = self.processor.tokenizer
        self.bf16 = bf16
        self.load_in_8bit = load_in_8bit

        # 8-bit weights already carry their own dtype; otherwise cast the
        # whole model to the requested precision.
        if not load_in_8bit:
            if bf16:
                self.model.bfloat16()
            else:
                self.model.half()
        self.model.eval()

        self.io = io

    def evaluate(
        self,
        pixel_values=None,
        input_ids=None,
        temperature=1.0,
        top_p=0.9,
        top_k=5,
        num_beams=3,
        max_new_tokens=256,
        stream_output=True,
        length_penalty=1.0,
        no_repeat_ngram_size=2,
        do_sample=False,
        early_stopping=True,
        **kwargs
    ):
        """Generate a reply.

        Yields decoded text: incrementally (one update per new token) when
        ``stream_output`` is True, otherwise a single final string.
        """
        generation_config = dict(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            no_repeat_ngram_size=no_repeat_ngram_size,
            do_sample=do_sample,
            early_stopping=early_stopping,
            length_penalty=length_penalty,
        )

        generate_params = {
            "pixel_values": pixel_values,
            "input_ids": input_ids,
            "return_dict_in_generate": True,
            "output_scores": True,
            "max_new_tokens": max_new_tokens,
        }
        generate_params.update(generation_config)

        if stream_output:
            # Stream the reply 1 token at a time, using 'stopping_criteria'
            # to turn generate() into an iterator — trick borrowed from
            # oobabooga/text-generation-webui (modules/text_generation.py).

            def generate_with_callback(callback=None, **kwargs):
                kwargs.setdefault(
                    "stopping_criteria", transformers.StoppingCriteriaList()
                )
                kwargs["stopping_criteria"].append(Stream(callback_func=callback))
                with torch.no_grad():
                    self.model.generate(**kwargs)

            def generate_with_streaming(**kwargs):
                return Iteratorize(generate_with_callback, kwargs, callback=None)

            with generate_with_streaming(**generate_params) as generator:
                for output in generator:
                    decoded_output = self.tokenizer.decode(output)
                    # Stop as soon as the model emits EOS.
                    if output[-1] in [self.tokenizer.eos_token_id]:
                        break
                    yield post_process_output(decoded_output)
            return  # early return for stream_output

        # Non-streaming path: one shot, one decoded answer.
        with torch.no_grad():
            generation_output = self.model.generate(
                pixel_values=pixel_values,
                input_ids=input_ids,
                return_dict_in_generate=True,
                output_scores=True,
                max_new_tokens=max_new_tokens,
                **generation_config
            )
        sequence = generation_output.sequences[0].cpu()
        decoded = self.tokenizer.decode(sequence)
        yield post_process_output(decoded)

    def predict(self, data):
        """Decode a request dict ``{text_input, images (base64), generation_config}``
        and stream ``(text, ok_flag)`` tuples; ``ok_flag`` is False on error.
        """
        prompt = [data['text_input']]
        images = data['images'] if len(data['images']) > 0 else None
        if images:
            images = [Image.open(BytesIO(base64.b64decode(image))) for image in images]
        inputs = self.processor(text=prompt, images=images, return_tensors='pt')

        input_ids = inputs['input_ids'].to(self.model.device)
        if 'pixel_values' in inputs:
            # Match the pixel dtype to the model precision.
            if self.load_in_8bit:
                pixel_values = inputs['pixel_values'].half().to(self.model.device)
            elif self.bf16:
                pixel_values = inputs['pixel_values'].bfloat16().to(self.model.device)
            else:
                pixel_values = inputs['pixel_values'].half().to(self.model.device)
        else:
            pixel_values = None

        cache = None

        try:
            for x in self.evaluate(pixel_values, input_ids, stream_output=True, **data['generation_config']):
                cache = x
                yield (x, True)
        except ValueError as e:
            print("Caught ValueError:", e)
            yield (server_error_msg, False)
        except torch.cuda.CudaError as e:
            print("Caught torch.cuda.CudaError:", e)
            yield (server_error_msg, False)

        return
|
117
models/mPLUG_Owl/serve/serve_utils.py
Normal file
117
models/mPLUG_Owl/serve/serve_utils.py
Normal file
@@ -0,0 +1,117 @@
|
||||
from PIL import Image
|
||||
import torch
|
||||
import gradio as gr
|
||||
import logging
|
||||
import sys
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
from .conversation import default_conversation
|
||||
from .gradio_patch import Chatbot as grChatbot
|
||||
from .gradio_css import code_highlight_css
|
||||
import datetime
|
||||
import uuid
|
||||
import base64
|
||||
from io import BytesIO
|
||||
import time
|
||||
|
||||
from .io_utils import IO, DefaultIO, OSS
|
||||
|
||||
|
||||
handler = None
|
||||
|
||||
|
||||
class _IOWrapper:
    """Mutable indirection layer over an IO backend (DefaultIO or OSS).

    Handlers keep a reference to this wrapper; the backend can be swapped
    at runtime with ``set_io`` without invalidating those references.
    """

    def __init__(self):
        # Local-filesystem backend by default.
        self._io = DefaultIO()

    def set_io(self, new_io):
        """Swap in a different IO backend."""
        self._io = new_io

    def __getattr__(self, name):
        # Delegate unknown attributes to the wrapped backend.
        if hasattr(self._io, name):
            return getattr(self._io, name)
        # BUG FIX: the original fell through to ``super().__getattr__(name)``,
        # which does not exist on object and raised a confusing AttributeError
        # about 'super'; raise the conventional error instead.
        raise AttributeError(name)

    def __str__(self):
        # BUG FIX: instances do not carry ``__name__`` (only classes do), so
        # the original always raised AttributeError here; report the
        # backend's class name instead.
        return type(self._io).__name__
|
||||
|
||||
def init():
    """Create the process-wide IO wrapper (filesystem-backed by default)."""
    return _IOWrapper()
|
||||
|
||||
|
||||
def vote_last_response(state, vote_type, model_selector, request: gr.Request):
    """Placeholder for vote logging; intentionally a no-op."""
    pass
|
||||
|
||||
def upvote_last_response(state, model_selector, request: gr.Request):
    """Record an upvote, clear the textbox and disable the three vote buttons."""
    vote_last_response(state, "upvote", model_selector, request)
    return ("",) + (disable_btn,) * 3
|
||||
|
||||
def downvote_last_response(state, model_selector, request: gr.Request):
    """Record a downvote, clear the textbox and disable the three vote buttons."""
    vote_last_response(state, "downvote", model_selector, request)
    return ("",) + (disable_btn,) * 3
|
||||
|
||||
def flag_last_response(state, model_selector, request: gr.Request):
    """Record a flag, clear the textbox and disable the three vote buttons."""
    vote_last_response(state, "flag", model_selector, request)
    return ("",) + (disable_btn,) * 3
|
||||
|
||||
def regenerate(state, request: gr.Request):
    """Drop the last assistant reply so it can be generated again."""
    state.messages[-1][-1] = None
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
|
||||
|
||||
def clear_history(request: gr.Request):
    """Reset the conversation and wipe every input widget."""
    state = default_conversation.copy()
    return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
|
||||
|
||||
|
||||
def add_text(state, text, image, video, request: gr.Request):
    """Append the user's turn (text plus optional image/video) to the
    conversation state and clear the input widgets."""
    # NOTE(review): this skips only when text is empty AND at least one of
    # image/video is missing — it looks like it was meant to be
    # `image is None and video is None`; behavior preserved as-is.
    if len(text) <= 0 and (image is None or video is None):
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5

    if image is not None:
        if '<image>' not in text:
            text = text + '\n<image>'
        text = (text, image)

    if video is not None:
        num_frames = 4
        # NOTE(review): if an image was also supplied, `text` is already a
        # tuple here, so the `in` check inspects tuple elements — confirm
        # whether image+video together is a supported combination.
        if '<image>' not in text:
            text = text + '\n<image>' * num_frames
        text = (text, video)

    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False
    return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
|
||||
|
||||
def after_process_image(prompt):
    """Normalise <image> placeholders into dedicated "Human: <image>" turns.

    Every line containing an <image> tag becomes an image turn followed by
    the remaining text of that line (with the tag removed). A trailing
    space replaces the final newline, and stray indentation before role
    markers is removed.
    """
    prompt = prompt.replace("\n<image>", "<image>")
    rebuilt = []
    for line in prompt.split("\n"):
        if "<image>" in line:
            rebuilt.append("Human: <image>")
            if line != "":
                rebuilt.append(line.replace("<image>", ""))
        else:
            rebuilt.append(line)
    result = "\n".join(rebuilt) + " "
    return result.replace("\n Human", "\nHuman").replace("\n AI", "\nAI")
|
||||
|
||||
|
||||
# Default HTTP headers for outgoing requests from the demo client.
headers = {"User-Agent": "mPLUG-Owl Client"}

# Pre-built gradio button state updates shared by the event handlers.
no_change_btn = gr.Button.update()
enable_btn = gr.Button.update(interactive=True)
disable_btn = gr.Button.update(interactive=False)

# Client-side JS that forwards the page's query parameters into gradio.
get_window_url_params = """
function() {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    console.log(url_params);
    return url_params;
    }
"""
|
400
models/mPLUG_Owl/serve/web_server.py
Normal file
400
models/mPLUG_Owl/serve/web_server.py
Normal file
@@ -0,0 +1,400 @@
|
||||
import argparse
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import torch
|
||||
|
||||
import gradio as gr
|
||||
import requests
|
||||
|
||||
from .conversation import default_conversation
|
||||
from .gradio_css import code_highlight_css
|
||||
from .gradio_patch import Chatbot as grChatbot
|
||||
from .serve_utils import (
|
||||
add_text, after_process_image, disable_btn, no_change_btn,
|
||||
downvote_last_response, enable_btn, flag_last_response,
|
||||
get_window_url_params, init, regenerate, upvote_last_response
|
||||
)
|
||||
from .model_worker import mPLUG_Owl_Server
|
||||
from .model_utils import post_process_code
|
||||
|
||||
# Banner shown on the shared HF space, inviting users to duplicate it.
SHARED_UI_WARNING = f'''### [NOTE] You can duplicate and use it with a paid private GPU.
<a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/MAGAer13/mPLUG-Owl?duplicate=true"><img style="margin-top:0;margin-bottom:0" src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-md.svg" alt="Duplicate Space"></a>
'''
|
||||
|
||||
def load_demo(url_params, request: gr.Request):
    """Initialise per-session state and reveal the main UI widgets."""
    dropdown_update = gr.Dropdown.update(visible=True)
    state = default_conversation.copy()

    return (state,
            dropdown_update,
            gr.Chatbot.update(visible=True),
            gr.Textbox.update(visible=True),
            gr.Button.update(visible=True),
            gr.Row.update(visible=True),
            gr.Accordion.update(visible=True))
|
||||
|
||||
def clear_history(request: gr.Request):
    """Reset the conversation, wipe all inputs and disable action buttons."""
    state = default_conversation.copy()
    return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
|
||||
|
||||
def http_bot(state, max_output_tokens, temperature, top_k, top_p,
             num_beams, no_repeat_ngram_size, length_penalty,
             do_sample, request: gr.Request):
    """Run generation for the current conversation and stream partial
    replies into the chatbot.

    Yields ``(state, chatbot, *button_updates)`` tuples for gradio.
    """
    if state.skip_next:
        # This generate call is skipped due to invalid inputs.
        yield (state, state.to_gradio_chatbot()) + (no_change_btn,) * 5
        return

    prompt = after_process_image(state.get_prompt())
    images = state.get_images()

    payload = {
        "text_input": prompt,
        "images": images if len(images) > 0 else [],
        "generation_config": {
            "top_k": int(top_k),
            "top_p": float(top_p),
            "num_beams": int(num_beams),
            "no_repeat_ngram_size": int(no_repeat_ngram_size),
            "length_penalty": float(length_penalty),
            "do_sample": bool(do_sample),
            "temperature": float(temperature),
            # Hard cap to protect the worker from runaway generations.
            "max_new_tokens": min(int(max_output_tokens), 1536),
        }
    }

    # Show a cursor while the first tokens are produced.
    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5

    try:
        for chunk in model.predict(payload):
            if chunk:
                if chunk[1]:
                    # Successful partial output: keep the cursor visible.
                    output = post_process_code(chunk[0].strip())
                    state.messages[-1][-1] = output + "▌"
                    yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
                else:
                    # Worker reported an error message; stop streaming.
                    state.messages[-1][-1] = chunk[0].strip()
                    yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:
        state.messages[-1][-1] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
        yield (state, state.to_gradio_chatbot()) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
        return

    # Drop the trailing cursor character and re-enable all buttons.
    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
|
||||
|
||||
|
||||
def add_text_http_bot(
        state, text, image, video,
        max_output_tokens, temperature, top_k, top_p,
        num_beams, no_repeat_ngram_size, length_penalty,
        do_sample, request: gr.Request):
    """Append the user's turn, then immediately run generation —
    a combined add_text + http_bot handler that streams chatbot updates.
    """
    # NOTE(review): same suspicious `or` condition as add_text in
    # serve_utils; behavior preserved as-is.
    if len(text) <= 0 and (image is None or video is None):
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5

    if image is not None:
        if '<image>' not in text:
            text = text + '\n<image>'
        text = (text, image)

    if video is not None:
        num_frames = 4
        if '<image>' not in text:
            text = text + '\n<image>' * num_frames
        text = (text, video)

    state.append_message(state.roles[0], text)
    state.append_message(state.roles[1], None)
    state.skip_next = False

    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5

    if state.skip_next:
        # This generate call is skipped due to invalid inputs.
        yield (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5
        return

    prompt = after_process_image(state.get_prompt())
    images = state.get_images()

    payload = {
        "text_input": prompt,
        "images": images if len(images) > 0 else [],
        "generation_config": {
            "top_k": int(top_k),
            "top_p": float(top_p),
            "num_beams": int(num_beams),
            "no_repeat_ngram_size": int(no_repeat_ngram_size),
            "length_penalty": float(length_penalty),
            "do_sample": bool(do_sample),
            "temperature": float(temperature),
            # Hard cap to protect the worker from runaway generations.
            "max_new_tokens": min(int(max_output_tokens), 1536),
        }
    }

    # Show a cursor while the first tokens are produced.
    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5

    try:
        for chunk in model.predict(payload):
            if chunk:
                if chunk[1]:
                    output = post_process_code(chunk[0].strip())
                    state.messages[-1][-1] = output + "▌"
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
                else:
                    state.messages[-1][-1] = chunk[0].strip()
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:
        state.messages[-1][-1] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
        yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
        return

    # Drop the trailing cursor character and re-enable all buttons.
    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5
|
||||
|
||||
|
||||
def regenerate_http_bot(state,
                        max_output_tokens, temperature, top_k, top_p,
                        num_beams, no_repeat_ngram_size, length_penalty,
                        do_sample, request: gr.Request):
    """Discard the last assistant reply and regenerate it, streaming
    chatbot updates (combined regenerate + http_bot handler)."""
    state.messages[-1][-1] = None
    state.skip_next = False
    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5

    prompt = after_process_image(state.get_prompt())
    images = state.get_images()

    payload = {
        "text_input": prompt,
        "images": images if len(images) > 0 else [],
        "generation_config": {
            "top_k": int(top_k),
            "top_p": float(top_p),
            "num_beams": int(num_beams),
            "no_repeat_ngram_size": int(no_repeat_ngram_size),
            "length_penalty": float(length_penalty),
            "do_sample": bool(do_sample),
            "temperature": float(temperature),
            # Hard cap to protect the worker from runaway generations.
            "max_new_tokens": min(int(max_output_tokens), 1536),
        }
    }

    # Show a cursor while the first tokens are produced.
    state.messages[-1][-1] = "▌"
    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5

    try:
        for chunk in model.predict(payload):
            if chunk:
                if chunk[1]:
                    output = post_process_code(chunk[0].strip())
                    state.messages[-1][-1] = output + "▌"
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
                else:
                    state.messages[-1][-1] = chunk[0].strip()
                    yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
                    return
                time.sleep(0.03)
    except requests.exceptions.RequestException as e:
        state.messages[-1][-1] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**"
        yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn, disable_btn, disable_btn, enable_btn, enable_btn)
        return

    # Drop the trailing cursor character and re-enable all buttons.
    state.messages[-1][-1] = state.messages[-1][-1][:-1]
    yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5
|
||||
|
||||
# [](https://github.com/X-PLUG/mPLUG-Owl/stargazers)
|
||||
# **If you are facing ERROR, it might be Out-Of-Memory (OOM) issue due to the limited GPU memory, please refresh the page to restart.** Besides, we recommand you to duplicate the space with a single A10 GPU to have a better experience. Or you can visit our demo hosted on [Modelscope](https://www.modelscope.cn/studios/damo/mPLUG-Owl/summary) which is hosted on a V100 machine.
|
||||
|
||||
title_markdown = ("""
|
||||
<h1 align="center"><a href="https://github.com/X-PLUG/mPLUG-Owl"><img src="https://s1.ax1x.com/2023/05/12/p9yGA0g.png", alt="mPLUG-Owl" border="0" style="margin: 0 auto; height: 200px;" /></a> </h1>
|
||||
|
||||
<h2 align="center"> mPLUG-Owl🦉: Modularization Empowers Large Language Models with Multimodality </h2>
|
||||
|
||||
<h5 align="center"> If you like our project, please give us a star ✨ on Github for latest update. </h2>
|
||||
|
||||
<div align="center">
|
||||
<div style="display:flex; gap: 0.25rem;" align="center">
|
||||
<a href='https://github.com/X-PLUG/mPLUG-Owl'><img src='https://img.shields.io/badge/Github-Code-blue'></a>
|
||||
<a href="https://arxiv.org/abs/2304.14178"><img src="https://img.shields.io/badge/Arxiv-2304.14178-red"></a>
|
||||
<a href='https://github.com/X-PLUG/mPLUG-Owl/stargazers'><img src='https://img.shields.io/github/stars/X-PLUG/mPLUG-Owl.svg?style=social'></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
**Notice**: The output is generated by top-k sampling scheme and may involve some randomness. For multiple images and video, we cannot ensure it's performance since only image-text pairs are used during training. For Video inputs, we recommand use the video **less than 10 seconds**.
|
||||
""")
|
||||
|
||||
# Terms-of-use text shown under the parameter panel.
tos_markdown = ("""
### Terms of use
By using this service, users are required to agree to the following terms:
The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.

**Copyright 2023 Alibaba DAMO Academy.**
""")
|
||||
|
||||
# License footer displayed at the bottom of the page.
learn_more_markdown = ("""
### License
The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
""")
|
||||
|
||||
# Page CSS: syntax-highlight theme plus soft-wrapping for <pre> blocks.
css = code_highlight_css + """
pre {
    white-space: pre-wrap;       /* Since CSS 2.1 */
    white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
    white-space: -pre-wrap;      /* Opera 4-6 */
    white-space: -o-pre-wrap;    /* Opera 7 */
    word-wrap: break-word;       /* Internet Explorer 5.5+ */
}
"""
|
||||
|
||||
def build_demo():
    """Assemble the gradio Blocks UI and wire every widget to its handler."""
    # with gr.Blocks(title="mPLUG-Owl🦉", theme=gr.themes.Base(), css=css) as demo:
    with gr.Blocks(title="mPLUG-Owl🦉", css=css) as demo:
        state = gr.State()
        gr.Markdown(SHARED_UI_WARNING)

        gr.Markdown(title_markdown)

        with gr.Row():
            # Left column: media inputs plus the sampling controls.
            with gr.Column(scale=3):
                imagebox = gr.Image(type="pil")
                videobox = gr.Video()

                with gr.Accordion("Parameters", open=True, visible=False) as parameter_row:
                    max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
                    temperature = gr.Slider(minimum=0, maximum=1, value=1, step=0.1, interactive=True, label="Temperature",)
                    top_k = gr.Slider(minimum=1, maximum=5, value=3, step=1, interactive=True, label="Top K",)
                    top_p = gr.Slider(minimum=0, maximum=1, value=0.9, step=0.1, interactive=True, label="Top p",)
                    length_penalty = gr.Slider(minimum=1, maximum=5, value=1, step=0.1, interactive=True, label="length_penalty",)
                    num_beams = gr.Slider(minimum=1, maximum=5, value=1, step=1, interactive=True, label="Beam Size",)
                    no_repeat_ngram_size = gr.Slider(minimum=1, maximum=5, value=2, step=1, interactive=True, label="no_repeat_ngram_size",)
                    do_sample = gr.Checkbox(interactive=True, value=True, label="do_sample")

                gr.Markdown(tos_markdown)

            # Right column: chat window, text input and action buttons.
            with gr.Column(scale=6):
                chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=1000)
                with gr.Row():
                    with gr.Column(scale=8):
                        textbox = gr.Textbox(show_label=False,
                                             placeholder="Enter text and press ENTER", visible=False).style(container=False)
                    with gr.Column(scale=1, min_width=60):
                        submit_btn = gr.Button(value="Submit", visible=False)
                with gr.Row(visible=False) as button_row:
                    upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
                    downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
                    flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
                    regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
                    clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)

        gr.Examples(examples=[
            [f"examples/monday.jpg", "Explain why this meme is funny."],
            [f'examples/rap.jpeg', 'Can you write me a master rap song that rhymes very well based on this image?'],
            [f'examples/titanic.jpeg', 'What happened at the end of this movie?'],
            [f'examples/vga.jpeg', 'What is funny about this image? Describe it panel by panel.'],
            [f'examples/mug_ad.jpeg', 'We design new mugs shown in the image. Can you help us write an advertisement?'],
            [f'examples/laundry.jpeg', 'Why this happens and how to fix it?'],
            [f'examples/ca.jpeg', "What do you think about the person's behavior?"],
            [f'examples/monalisa-fun.jpg', 'Do you know who drew this painting?'],
        ], inputs=[imagebox, textbox])

        gr.Markdown(learn_more_markdown)
        url_params = gr.JSON(visible=False)

        btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
        parameter_list = [
            max_output_tokens, temperature, top_k, top_p,
            num_beams, no_repeat_ngram_size, length_penalty,
            do_sample
        ]

        upvote_btn.click(upvote_last_response,
                         [state], [textbox, upvote_btn, downvote_btn, flag_btn])
        downvote_btn.click(downvote_last_response,
                           [state], [textbox, upvote_btn, downvote_btn, flag_btn])
        flag_btn.click(flag_last_response,
                       [state], [textbox, upvote_btn, downvote_btn, flag_btn])

        # Regeneration and submission each use a single combined handler
        # instead of a chained add_text -> http_bot event pair.
        regenerate_btn.click(regenerate_http_bot, [state] + parameter_list,
                             [state, chatbot, textbox, imagebox, videobox] + btn_list)

        clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox, videobox] + btn_list)

        textbox.submit(add_text_http_bot,
                       [state, textbox, imagebox, videobox] + parameter_list,
                       [state, chatbot, textbox, imagebox, videobox] + btn_list
                       )

        submit_btn.click(add_text_http_bot,
                         [state, textbox, imagebox, videobox] + parameter_list,
                         [state, chatbot, textbox, imagebox, videobox] + btn_list
                         )

        demo.load(load_demo, [url_params], [state,
                                            chatbot, textbox, submit_btn, button_row, parameter_row],
                  _js=get_window_url_params)

    return demo
|
||||
|
||||
if __name__ == "__main__":
    io = init()
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    # NOTE(review): assumes this file lives in a directory whose path ends
    # with a 9-character suffix (".../serve/..."-style layout) so slicing
    # reaches the repo root — confirm before moving the file.
    log_dir = cur_dir[:-9] + "log"

    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="0.0.0.0")
    parser.add_argument("--debug", action="store_true", help="using debug mode")
    parser.add_argument("--port", type=int)
    parser.add_argument("--concurrency-count", type=int, default=100)
    parser.add_argument("--base-model", type=str, default='MAGAer13/mplug-owl-llama-7b')
    parser.add_argument("--load-8bit", action="store_true", help="using 8bit mode")
    # BUG FIX: help text previously said "using 8bit mode" (copy-paste error).
    parser.add_argument("--bf16", action="store_true", help="using bf16 mode")
    args = parser.parse_args()

    device = "cuda" if torch.cuda.is_available() else "cpu"

    model = mPLUG_Owl_Server(
        base_model=args.base_model,
        log_dir=log_dir,
        load_in_8bit=args.load_8bit,
        bf16=args.bf16,
        device=device,
        io=io
    )
    demo = build_demo()
    demo.queue(concurrency_count=args.concurrency_count, status_update_rate=10, api_open=False).launch(server_name=args.host, debug=args.debug, server_port=args.port, share=False)
|
||||
|
Reference in New Issue
Block a user