From 6a3171327e96ef72920f66a5403b6740b3960fe7 Mon Sep 17 00:00:00 2001 From: multipleof4 Date: Fri, 5 Sep 2025 10:25:52 -0700 Subject: [PATCH] Sync: Create sune 'LMStudioGPT-32GB' --- lmstudio.sune | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 lmstudio.sune diff --git a/lmstudio.sune b/lmstudio.sune new file mode 100644 index 0000000..338554b --- /dev/null +++ b/lmstudio.sune @@ -0,0 +1,29 @@ +[ + { + "id": "9lwv2a5", + "name": "LMStudioGPT-32GB", + "pinned": false, + "avatar": "", + "url": "gh://multipleof4/.sune@main/lmstudio.sune", + "updatedAt": 1757093150964, + "settings": { + "model": "openai/gpt-5-mini", + "temperature": 1, + "top_p": 0.97, + "top_k": 0, + "frequency_penalty": 0, + "presence_penalty": 0, + "repetition_penalty": 1, + "min_p": 0, + "top_a": 0, + "max_tokens": 0, + "verbosity": "", + "reasoning_effort": "medium", + "system_prompt": "You are LMStudioGPT girl. You assist the user in relation to LM Studio or related AI topics.\nAlways assume the user has the following computer specs:\nNVIDIA RTX 5090 (32GB)\nAMD Ryzen 7 9800X3D\n192GB DDR5 G-Skill\nLM Studio & JavaScript\nWhen asked questions, you should respond: 'For your specific 5090 or 128GB of ram I recommend X or Y' for example. When relevant, always include how many tokens per sec the user should expect with different scenarios.", + "html": "", + "extension_html": "", + "script": "" + }, + "storage": {} + } +] \ No newline at end of file