Add README and setup script for installation

This commit is contained in:
2026-02-02 14:52:16 +01:00
parent 5258b620ab
commit 8fbdc0d8c8
7 changed files with 325 additions and 0 deletions

View File

View File

@@ -0,0 +1,41 @@
from ollama import ChatResponse, chat
class init:
    """Builds the project-generation system prompt and asks a local Ollama
    model to produce the markdown project description."""

    def __init__(self) -> None:
        # Stateless; kept for interface compatibility with existing callers.
        pass

    def generateMd(self, sysPrompt, providedModel) -> str:
        """Send *sysPrompt* to an Ollama model and return its reply text.

        Args:
            sysPrompt: Full system prompt describing the project to generate.
            providedModel: Name of the Ollama model to use (e.g. "llama3").

        Returns:
            The model's message content (markdown-formatted project text).
        """
        response: ChatResponse = chat(
            model=providedModel,
            messages=[
                {
                    "role": "system",
                    "content": sysPrompt,
                },
            ],
        )
        return response["message"]["content"]

    def generatePrompt(self, args) -> str:
        """Build the system prompt from parsed CLI arguments.

        Args:
            args: Namespace-like object with attributes ``language``,
                ``framework``, ``packages``, ``difficulty``, ``time``,
                ``project`` and ``custom``.

        Returns:
            The formatted system prompt string.
        """
        # The f-string interpolation stringifies args.packages already, so no
        # explicit str() is needed.
        return f"""You will give the user a markdown formatted programming project they can work on.
-- Details --
Language: [[ {args.language} ]]
Framework: [[ {args.framework} ]]
External packages allowed: [[ {args.packages} ]]
Difficulty: [[ {args.difficulty} ]]
Estimated Time the user want to spend on the project: [[ {args.time} ]]
Type of project: [[ {args.project} ]]
Custom instructions: [[ {args.custom} ]]
-- information --
- Align the project based on this information.
- This project is intended for users that want to practise their programming skills.
- When creating the markdown file always include the following things; header, instruction, final product, rules, general information provided by user (Details), Technical Requirements, and lastly the points (score)
- do NOT use markdown blocks as your response will immediately be injected into a .MD file upon completion.
- If the user did not provide a Programming Language (based on details) then assign the user one based on the project. Clearly define the language in the MD file
- If there is a framework (such as symfony) included (or an engine such as unity) or a library/package that requires an advanced installation (based on difficulty), provide an installation guide in the MD file and/or a cheatsheet unless the assistant is unsure on how to set it up.
- Make the user decide on their own which packages they want to use unless explicitly specified in Custom instructions and/or General Details
- Do NOT provide any code structures or samples (unless required by previous instructions). Let the user figure it out on their own as this is intended to be a challenge
"""

View File

@@ -0,0 +1,77 @@
import os
from ollama import ChatResponse, chat
def RunCommand(command) -> str:
    """Run a shell command on the HOST os and capture its stdout.

    Args:
        command: Command string the LLM sends via a tool call.

    Returns:
        The command's standard output as a string (stderr is not captured).

    SECURITY NOTE(review): the command originates from an LLM tool call and is
    executed through a shell with the current user's privileges. Nothing here
    enforces the "read only" policy stated in the system prompt -- consider an
    allow-list of commands and subprocess.run([...], shell=False) before
    trusting this outside a sandbox.
    """
    return os.popen(command).read()
class review:
    """Reviews a finished user project by letting an Ollama model inspect the
    working directory through the RunCommand tool."""

    def __init__(self) -> None:
        # Stateless; kept for interface compatibility with existing callers.
        pass

    def generateReview(self, providedModel, platform, markdown) -> str:
        """Run the tool-calling review loop and return the review as markdown.

        Args:
            providedModel: Name of the Ollama model to use.
            platform: Host platform string reported to the model.
            markdown: The project instructions (markdown) the user worked from.

        Returns:
            The model's final review as markdown-formatted text.
        """
        # Keep the full conversation so the model sees its own tool calls and
        # their results on every subsequent iteration.
        messageHistory = [
            {
                "role": "system",
                "content": f"""You are going to review a project the user made based on a MD file.
- The user is currently on the platform; {platform}
- You will be allowed using tool calling to execute commands directly on the OS machine.
- You are NOT allowed to search up the web, do curl requests, modify files, or harm the device. The tool calling is intended purely for read only access
- Upon checking and validating the code generate your output (message content) in markdown formatted text as this will be directly injected into a markdown file
- do NOT use markdown blocks or anything else that would break the MD file in your output.
- review the user based on the score provided in the instructions the user is going to provide. If not provided make up your own based on the instructions
- When creating the markdown file always include the following things; Introduction, Review, Evaluation, Score, Rating, Comments, Extra information (if needed)
- when evaluating the project use the terminal (tool calling) to inspect files and read them.
- be VERY critical when reviewing the project. The user has to learn from this experience.
-- MARKDOWN FILE --
{markdown}
""",
            },
            {
                "role": "user",
                "content": "Using tool calling and instructions provided in sys prompt, Complete the review.",
            },
        ]
        while True:
            response: ChatResponse = chat(
                model=providedModel,
                messages=messageHistory,
                tools=[RunCommand],
            )
            if response.message.tool_calls:
                # BUGFIX: record the assistant turn that requested the tools.
                # Without it the model never sees which calls the "tool" role
                # results below belong to on the next iteration.
                messageHistory.append(response.message)
                print("attempted tool calling")
                for tool in response.message.tool_calls:
                    print("-- TOOL CALLING RAN --")
                    print(
                        "- The LLM is currently running tests on your computer in order to generate a review -"
                    )
                    print(f"Command; {tool.function.arguments}")
                    result = RunCommand(**tool.function.arguments)
                    print(f"Result: {result}")
                    messageHistory.append(
                        {
                            "role": "tool",
                            "tool_name": tool.function.name,
                            "content": str(result),
                        }
                    )
                    print("-- END OF TOOL CALL --")
            else:
                # No more tool calls: the model has produced its final review.
                return response["message"]["content"]