coreagent/config.py (4 additions, 2 deletions)
@@ -15,12 +15,14 @@ class Config:
     temperature: Optional[float] = None
     frequency_penalty: float = None  # generally don't set this, may cause problems.
     generation_limit: int = 5000
-    show_generation: bool = False  # Don't use it for now, a bug in vLLM (tested as of <= v0.8.0) caused random junks to be streamed, check out vLLM Issue #15188.
     # ---- optional settings ----
-    progressbar_length: int = 50  # Not used for now
+    use_guided_generation: bool = False  # Disable if you're using non vLLM deployments
     guided_decoding_backend: str = 'xgrammar:no-fallback'  # Tested with vLLM with Engine v0.
     use_stop_token: bool = False  # Tested not working with vLLM <= 0.8.0, since stop tokens are also considered during reasoning, see vLLM Issue #14170.
     chat_template_type: Optional[str] = None  # modified chat templates, only for vLLM, one of ["qwq" or None]
+    # ---- display only ----
+    show_generation: bool = False  # Don't use it for now, a bug in vLLM (tested as of <= v0.8.0) caused random junks to be streamed, check out vLLM Issue #15188.
+    progressbar_length: int = 50  # Not used for now


 # Default configuration (used internally, do NOT modify directly! )
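For context, a minimal usage sketch (not part of this PR): it assumes Config is a dataclass-style class importable from coreagent/config.py whose fields can be overridden as keyword arguments. The field names below come from the diff above; the specific values and the non-vLLM scenario are illustrative assumptions only.

# Hypothetical override for a non-vLLM (e.g. OpenAI-compatible) backend:
# the comments in the diff suggest turning the vLLM-specific knobs off there.
from coreagent.config import Config  # assumed import path, matching the file shown above

cfg = Config(
    temperature=0.7,               # optional sampling temperature
    generation_limit=5000,         # cap on generated tokens
    use_guided_generation=False,   # "Disable if you're using non vLLM deployments"
    chat_template_type=None,       # modified chat templates are vLLM-only
    show_generation=False,         # display-only flag; streaming noted as buggy (vLLM Issue #15188)
)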