Compare commits

2ac57a64340013d326473a2f2921e2b64d1e7c21..63340f25948f8055590dd16bfbea2d8b2ec527d0

No commits in common. "2ac57a64340013d326473a2f2921e2b64d1e7c21" and "63340f25948f8055590dd16bfbea2d8b2ec527d0" have entirely different histories.

4 changed files with 25 additions and 93 deletions

.gitignore (vendored): 1 line changed

@@ -1,7 +1,6 @@
 deepseek.txt
 passcode.txt
 config.toml
-secret-config.toml
 .idea
 target
 local_file_storage/
config.py (deleted): 44 lines removed

@@ -1,44 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import dataclass
-from pathlib import Path
-from typing import Any, Mapping
-
-import tomllib
-
-
-@dataclass(frozen=True)
-class Config:
-    listening_addr: str
-    listening_port: int
-    secret: str
-
-
-def _read_toml(path: Path) -> Mapping[str, Any]:
-    with path.open("rb") as handle:
-        return tomllib.load(handle)
-
-
-def read_config(
-    config_path: str = "config.toml",
-    secret_config_path: str = "secret-config.toml",
-) -> Config:
-    config_data = _read_toml(Path(config_path))
-    secret_data = _read_toml(Path(secret_config_path))
-
-    addr = str(config_data.get("listening_addr", "127.0.0.1"))
-    port_raw = config_data.get("listening_port", 9000)
-    try:
-        port = int(port_raw)
-    except (TypeError, ValueError) as exc:
-        raise ValueError(f"Invalid listening_port value {port_raw!r}") from exc
-
-    secret = secret_data.get("secret")
-    if not secret:
-        raise ValueError("Missing secret in secret-config.toml")
-
-    return Config(
-        listening_addr=addr,
-        listening_port=port,
-        secret=str(secret),
-    )
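
For orientation, the removed module above read its settings from two TOML files in the working directory. A minimal usage sketch follows; the TOML contents shown in the comments are assumptions that match read_config's defaults and are not part of either commit.

# Sketch only: assumes the old config.py is still importable and that the two
# TOML files use the key names read_config() looks up.
#
#   config.toml:         listening_addr = "127.0.0.1"
#                        listening_port = 9000
#   secret-config.toml:  secret = "change-me"
from config import read_config

cfg = read_config()
print(cfg.listening_addr, cfg.listening_port)  # the shared secret is in cfg.secret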

Server module (file name not shown in this capture)

@@ -10,12 +10,16 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 from api import Request, Response
-from config import Config, read_config
 from secret_stream_socket import ProtocolError, SecretStreamSocket, wrap_connection_socket
 
+SERVER_HOST = "127.0.0.1"
+SERVER_PORT = 9000
+SHARED_SECRET = "change-me"
+
 MODEL_ID = "zai-org/GLM-4.7-Flash"
 MAX_NEW_TOKENS = 256
+PIECE_CHUNK_SIZE = 64
 
 
 @dataclass
@@ -31,6 +35,9 @@ class PendingChatCompletionRecord:
     was_cancelled: bool = False
     _lock: threading.Lock = field(default_factory=threading.Lock, repr=False)
 
     def mark_cancelled(self) -> None:
         with self._lock:
             self.was_cancelled = True
@@ -47,7 +54,6 @@ class WorkItem:
     record: PendingChatCompletionRecord
 
 
 @dataclass
 class ModelBundle:
     tokenizer: Any
@@ -108,46 +114,21 @@ def generate_llm_pieces(bundle: ModelBundle, messages: list) -> Iterable[str]:
     inputs = {name: tensor.to(bundle.model.device) for name, tensor in inputs.items()}
 
     input_ids = inputs.get("input_ids")
-    attention_mask = inputs.get("attention_mask")
-    past_key_values = None
-
-    eos_token_id = bundle.model.config.eos_token_id
-    if eos_token_id is None:
-        eos_token_id = bundle.tokenizer.eos_token_id
-    if eos_token_id is None:
-        eos_token_ids = set()
-    elif isinstance(eos_token_id, (list, tuple, set)):
-        eos_token_ids = set(int(x) for x in eos_token_id)
-    else:
-        eos_token_ids = {int(eos_token_id)}
-
-    for _ in range(MAX_NEW_TOKENS):
-        with torch.inference_mode():
-            outputs = bundle.model(
-                input_ids=input_ids,
-                attention_mask=attention_mask,
-                past_key_values=past_key_values,
-                use_cache=True,
-            )
-
-        logits = outputs.logits[:, -1, :]
-        next_token_id = torch.argmax(logits, dim=-1)
-        token_id = int(next_token_id.item())
-
-        if token_id in eos_token_ids:
-            break
-
-        token_text = bundle.tokenizer.decode([token_id], skip_special_tokens=True)
-        if token_text:
-            yield token_text
-
-        past_key_values = outputs.past_key_values
-        input_ids = next_token_id.unsqueeze(1)
-        if attention_mask is not None:
-            attention_mask = torch.cat(
-                [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))],
-                dim=1,
-            )
+    input_len = int(input_ids.shape[1]) if input_ids is not None else 0
+
+    with torch.inference_mode():
+        output_ids = bundle.model.generate(
+            **inputs,
+            max_new_tokens=MAX_NEW_TOKENS,
+            do_sample=False,
+        )
+
+    generated_ids = output_ids[0][input_len:]
+    text = bundle.tokenizer.decode(generated_ids, skip_special_tokens=True)
+
+    if not text:
+        return []
+    return [text[i:i + PIECE_CHUNK_SIZE] for i in range(0, len(text), PIECE_CHUNK_SIZE)]
 
 
 def worker_loop(
@@ -332,7 +313,7 @@ async def handle_client(
         await transport.close()
 
 
-async def run_server(model_bundle: ModelBundle, config: Config) -> None:
+async def run_server(model_bundle: ModelBundle) -> None:
     pending: Dict[int, PendingChatCompletionRecord] = {}
     pending_lock = threading.Lock()
     work_queue: queue.Queue[WorkItem | None] = queue.Queue()
@@ -349,17 +330,13 @@ async def run_server(model_bundle: ModelBundle, config: Config) -> None:
         await handle_client(
             reader,
             writer,
-            config.secret,
+            SHARED_SECRET,
             work_queue,
             pending,
             pending_lock,
         )
 
-    server = await asyncio.start_server(
-        client_handler,
-        config.listening_addr,
-        config.listening_port,
-    )
+    server = await asyncio.start_server(client_handler, SERVER_HOST, SERVER_PORT)
 
     addr = ", ".join(str(sock.getsockname()) for sock in server.sockets or [])
     print(f"[listening] {addr}")
@@ -368,10 +345,9 @@ async def run_server(model_bundle: ModelBundle, config: Config) -> None:
 
 
 def main() -> None:
-    config = read_config()
     model_bundle = load_local_model()
     print("[model] loaded", flush=True)
-    asyncio.run(run_server(model_bundle, config))
+    asyncio.run(run_server(model_bundle))
 
 
 if __name__ == "__main__":

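The largest hunk above replaces the hand-rolled token-by-token loop with a single model.generate() call: the completion is decoded once and then returned as fixed-size pieces of PIECE_CHUNK_SIZE characters. The slicing step can be checked in isolation with a standalone sketch; split_into_pieces is a hypothetical helper, not code from either commit.

PIECE_CHUNK_SIZE = 64  # mirrors the constant added in this compare

def split_into_pieces(text: str) -> list[str]:
    # Same slicing pattern as the new return statement in generate_llm_pieces.
    if not text:
        return []
    return [text[i:i + PIECE_CHUNK_SIZE] for i in range(0, len(text), PIECE_CHUNK_SIZE)]

print([len(piece) for piece in split_into_pieces("x" * 150)])  # [64, 64, 22]
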
secret-config.toml (new file): 1 line added

@@ -0,0 +1 @@
+dedicated_ai_server_secret="change-me"
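
Nothing in the hunks shown above reads this new file; the reworked server hardcodes SHARED_SECRET instead. If a caller did want to load the key, a minimal tomllib reader could look like the sketch below (an assumption, not code from either commit).

import tomllib
from pathlib import Path

# Assumes secret-config.toml sits in the current working directory.
with Path("secret-config.toml").open("rb") as handle:
    data = tomllib.load(handle)

secret = data["dedicated_ai_server_secret"]  # "change-me" in this compare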