added readme and setup for install

This commit is contained in:
2026-02-02 14:52:16 +01:00
parent 5258b620ab
commit 8fbdc0d8c8
7 changed files with 325 additions and 0 deletions

0
ProjectCli/__init__.py Normal file
View File

171
ProjectCli/main.py Normal file
View File

@@ -0,0 +1,171 @@
# import libraries
import argparse
import os
from .modules.init import init
from .modules.review import review
# Instantiate the command handlers once at import time so the parser
# callbacks below can share them.
initClass = init()  # assignment-generation helper (ProjectCli.modules.init)
reviewClass = review()  # review helper (ProjectCli.modules.review)
"""
Parser functions
"""
# init funtion
def initParser(args):
    """Handle the ``init`` subcommand: generate a project assignment.

    Builds a system prompt from the CLI arguments, asks the LLM to turn it
    into a markdown assignment, and writes the result to ``Instructions.md``
    in the current working directory.

    Args:
        args: Parsed argparse namespace produced by the ``init`` subparser.
    """
    print("Creating project idea. This may take some time based on hardware.")
    # Compute the output path once instead of re-deriving it three times.
    instructions_path = f"{os.path.abspath(os.getcwd())}/Instructions.md"
    print("Creating system prompt. (1/3)")
    systemPrompt: str = initClass.generatePrompt(args)
    # Let the LLM expand the system prompt into the full assignment markdown.
    print("Generating Instructions (.MD) file. (2/3)")
    instructions = initClass.generateMd(systemPrompt, args.model)
    print("Creating instructions file. (3/3)")
    with open(instructions_path, "w") as f:
        f.write(instructions)
    print(f"-- COMPLETED --\nInstructions: {instructions_path}")
def reviewParser(args):
    """Handle the ``review`` subcommand: review an existing assignment.

    Reads ``Instructions.md`` from the current working directory, asks the
    LLM (with tool-calling access to the shell) to review the project, and
    writes the result to ``Review.md``.

    Args:
        args: Parsed argparse namespace produced by the ``review`` subparser.

    Raises:
        FileNotFoundError: If ``Instructions.md`` does not exist in the
            current working directory.
    """
    print("Generating review. This may take some time based on hardware.")
    print(
        "THE LLM MAY ACCESS FILES AND IF NOT HANDLED PROPERLY MODIFY THEM. YOU WILL BE NOTIFIED WHEN THE LLM IS DOING SUCH ACTIONS."
    )
    # Compute the working-directory path once and reuse it for both files.
    cwd = os.path.abspath(os.getcwd())
    print("Getting instructions. (1/3)")
    with open(f"{cwd}/Instructions.md", "r") as f:
        markdownFile = f.read()
    print("Generating Review (.MD) file. (2/3)")
    # Renamed local from `review` to avoid shadowing the imported `review` name.
    reviewText = reviewClass.generateReview(
        providedModel=args.model, platform=args.platform, markdown=markdownFile
    )
    print("Creating Review MD file. (3/3)")
    with open(f"{cwd}/Review.md", "w") as f:
        f.write(reviewText)
    print(f"-- COMPLETED --\nReview path: {cwd}/Review.md")
"""
Main class
"""
def _str2bool(value) -> bool:
    """Convert a command-line string to a real boolean.

    The original code used ``type=bool``, which is a classic argparse bug:
    ``bool("False")`` is ``True`` (any non-empty string is truthy), so the
    flag could never be switched off. This helper accepts common spellings
    case-insensitively and raises ``ArgumentTypeError`` otherwise so argparse
    reports a clean usage error.
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).strip().lower()
    if lowered in ("true", "1", "yes", "y"):
        return True
    if lowered in ("false", "0", "no", "n"):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean value, got {value!r}")


def main():
    """CLI entry point: build the argument parser and dispatch subcommands."""
    # Create the parser and its subcommand registry.
    parser = argparse.ArgumentParser(
        description="Use LLM's to create project ideas. Powered by ollama!"
    )
    subparsers = parser.add_subparsers(required=True)
    # `init` subcommand: generate a new project assignment.
    parser_init = subparsers.add_parser(
        "init", help="Creates a new project using LLM's"
    )
    parser_init.add_argument(
        "-m",
        "--model",
        help="Defines the ollama model. [Default; ministral-3]",
        default="ministral-3",
        required=False,
        type=str,
    )
    parser_init.add_argument(
        "-l",
        "--language",
        help="Defines which programming language you want to create the project in. This will be sent to the LLM [default: Any]",
        required=False,
        default="Any",
        type=str,
    )
    parser_init.add_argument(
        "-f",
        "--framework",
        help="Defines a framework if preferred. [default; Not specified]",
        required=False,
        default="Not specified",
        type=str,
    )
    parser_init.add_argument(
        "-e",
        "--packages",
        help="Defines if external packages are allowed",
        required=False,
        default=True,
        # type=bool would treat any non-empty string (even "False") as True.
        type=_str2bool,
    )
    parser_init.add_argument(
        "-d",
        "--difficulty",
        help="Set your difficulty to the LLM [Default; Beginner]",
        default="Beginner",
        required=False,
        type=str,
    )
    parser_init.add_argument(
        "-t",
        "--time",
        help="Set the estimated time you want to work on the project. [Default; 1 Hour]",
        default="1 Hour",
        type=str,
    )
    parser_init.add_argument(
        "-p",
        "--project",
        help="Set the type of project. (For example; CLI-app, Website, Etc) [Default; Any]",
        default="Any",
        type=str,
    )
    parser_init.add_argument(
        "-c",
        "--custom",
        help="Give the llm custom instructions if needed. [Default; None]",
        default="none",
        type=str,
    )
    parser_init.set_defaults(func=initParser)
    # `review` subcommand: review an existing assignment.
    parser_review = subparsers.add_parser(
        "review", help="Reviews project assignment [REQUIRES 'Instructions.md' FILE]"
    )
    parser_review.add_argument(
        "-m",
        "--model",
        help="Defines the ollama model. [Default; ministral-3]",
        default="ministral-3",
        required=False,
        type=str,
    )
    parser_review.add_argument(
        "-p",
        "--platform",
        help="Defines the platform the user is on. [Default; Ubuntu]",
        default="Ubuntu",
        required=False,
        type=str,
    )
    parser_review.set_defaults(func=reviewParser)
    args = parser.parse_args()
    # Dispatch to whichever handler the chosen subparser registered.
    args.func(args)


if __name__ == "__main__":
    main()

View File

View File

@@ -0,0 +1,41 @@
from ollama import ChatResponse, chat
class init:
    """Generates project assignments via an ollama-hosted LLM.

    ``generatePrompt`` turns parsed CLI arguments into a system prompt;
    ``generateMd`` sends that prompt to the model and returns the raw
    markdown the model produces.
    """

    def __init__(self) -> None:
        pass

    def generateMd(self, sysPrompt, providedModel) -> str:
        """Ask the model to produce the assignment markdown.

        Args:
            sysPrompt: System prompt produced by :meth:`generatePrompt`.
            providedModel: Name of the ollama model to query.

        Returns:
            The model's message content (expected to be markdown text).
        """
        conversation = [
            {
                "role": "system",
                "content": sysPrompt,
            },
        ]
        reply: ChatResponse = chat(model=providedModel, messages=conversation)
        return reply["message"]["content"]

    def generatePrompt(self, args) -> str:
        """Build the system prompt from the parsed ``init`` CLI arguments.

        Args:
            args: argparse namespace with ``language``, ``framework``,
                ``packages``, ``difficulty``, ``time``, ``project`` and
                ``custom`` attributes.

        Returns:
            The fully interpolated system-prompt string.
        """
        return f"""You will give the user a markdown formatted programming project they can work on.
-- Details --
Language: [[ {args.language} ]]
Framework: [[ {args.framework} ]]
External packages allowed: [[ {str(args.packages)} ]]
Difficulty: [[ {args.difficulty} ]]
Estimated Time the user want to spend on the project: [[ {args.time} ]]
Type of project: [[ {args.project} ]]
Custom instructions: [[ {args.custom} ]]
-- information --
- Align the project based on this information.
- This project is intended for users that want to practise their programming skills.
- When creating the markdown file always include the following things; header, instruction, final product, rules, general information provided by user (Details), Technical Requirements, and lastly the points (score)
- do NOT use markdown blocks as your response will immediately be injected into a .MD file upon completion.
- If the user did not provide a Programming Language (based on details) the assign the user one based on the project. Clearly define the language in the MD file
- If there is a framework (such as symfony) included (or a engine such as unity) or a library/package that requires a advanced instalation (based on difficulty), provide a installation guide in the MD file and/or a cheatsheet unless the assistant is unsure on how to set it up.
- Make the user decide on their own which packages they want to use unless explicitly specified in Custom instructions and/or General Details
- Do NOT provide any code structures or samples (unless required by previous instructions). Let the user figure it out on its own as this is intended to be a challenge
"""

View File

@@ -0,0 +1,77 @@
import os
from ollama import ChatResponse, chat
def RunCommand(command) -> str:
    """Run a shell command on the HOST os and return its output.

    The original version had a second, free-standing triple-quoted string
    after the docstring; that string was a no-op expression statement and
    never appeared in ``help(RunCommand)`` — merged here into one docstring.

    WARNING: this executes arbitrary shell strings (it is exposed to the LLM
    as a tool). Nothing here enforces the "read-only" rule stated in the
    review system prompt.

    Args:
        command: Parsed command the LLM sends.

    Returns:
        The command's standard output as a string (stderr and the exit
        status are not captured by ``os.popen``).
    """
    return os.popen(command).read()
class review:
    """Reviews a user's project via an ollama-hosted LLM with shell tool access."""

    def __init__(self) -> None:
        pass

    def generateReview(self, providedModel, platform, markdown) -> str:
        """Ask the model to review the project described by *markdown*.

        Runs a chat loop: while the model keeps requesting tool calls, each
        requested command is executed on the host via ``RunCommand`` and the
        output is appended to the conversation; the first response without
        tool calls is returned as the review text.

        Args:
            providedModel: Name of the ollama model to query.
            platform: OS name interpolated into the system prompt.
            markdown: Contents of the user's ``Instructions.md``.

        Returns:
            The model's final message content (markdown review text).
        """
        # save the messageHistory while the model is reviewing the project
        messageHistory = [
            {
                "role": "system",
                "content": f"""You are going to review a project the user made based on a MD file.
- The user is currently on the platform; {platform}
- You will be allowed using tool calling to execute commands directly on the OS machine.
- You are NOT allowed to search up the web, do curl requests, modify files, or harm the device. The tool calling is intended purely for read only access
- Upon checking and validating the code generate your output (message content) in markdown formatted text as this will be directly injected into A markdown file
- do NOT use markdown blocks or anything else that would break the MD file in your output.
- review the user based on the score provided in the instuctions the user is going to provide. If not provided make up your own based on the instuctions
- When creating the markdown file always include the following things; Introduction, Review, Evaluation, Score, Rating, Comments, Extra information (if needed)
- when your evalution the project use the terminal (tool calling) to inspect files and read them.
- be VERY critical when reviewing the project. The user has to learn fropm this experience.
-- MARKDOWN FILE --
{markdown}
""",
            },
            {
                "role": "user",
                "content": "Using tool calling and instructions provided in sys prompt, Complete the review.",
            },
        ]
        # Loop until the model answers without requesting any tool calls.
        while True:
            # print("generate")
            response: ChatResponse = chat(
                model=providedModel,
                messages=messageHistory,
                tools=[RunCommand],
            )
            # print("Content: ", response.message.content)
            if response.message.tool_calls:
                print("attempted tool calling")
                # Execute every command the model asked for and feed the
                # output back as "tool" messages.
                # NOTE(review): the assistant message carrying the tool calls
                # (response.message) is never appended to messageHistory before
                # the tool results; many chat APIs expect it to be — confirm
                # against the ollama tool-calling docs.
                # NOTE(review): commands run unsandboxed via RunCommand; the
                # system prompt's read-only rule is not enforced in code.
                for tool in response.message.tool_calls:
                    print("-- TOOL CALLING RAN --")
                    print(
                        "- The LLM is currently running tests on your computer in order to generate a review -"
                    )
                    print(f"Command; {tool.function.arguments}")
                    result = RunCommand(**tool.function.arguments)
                    print(f"Result: {result}")
                    messageHistory.append(
                        {
                            "role": "tool",
                            "tool_name": tool.function.name,
                            "content": str(result),
                        }
                    )
                    print("-- END OF TOOL CALL --")
            else:
                # No tool calls: the model's content is the finished review.
                return response["message"]["content"]

27
README.md Normal file
View File

@@ -0,0 +1,27 @@
# ProjectCreator
An easier way to create projects for educational purposes
> [!IMPORTANT]
> This entire project is still being worked on and is in early development. Please report any issues you've seen! This app is currently tested on MacOS.
> [!NOTE]
> The readme is still an early WIP. Changes may be made.
## getting started
Make sure you have python installed. The currently supported/tested version is [python 3.11.9](https://www.python.org/downloads/release/python-3119/)
1. clone this repository and navigate to the folder.
2. run the command ``pip install .``
## usage
Using this CLI tool is really easy. If you're stuck somewhere just use ``projectcreator -h``
### Creating an assignment
To create an assignment, navigate to the folder of your choice and run ``projectcreator init``. For parameters you can add, run ``projectcreator init -h``. Default params are shown as well. Upon finishing, it will create an MD file in the folder you're working in.
### Reviewing an assignment
> [!CAUTION]
> Reviewing allows the LLM to gain full access to your computer's terminal. While I'm planning to change that soon, **use it at your own risk**
To review an assignment, navigate to the folder you're working in and run ``projectcreator review``. For parameters you can add, run ``projectcreator review -h``. Default params are shown as well.
ProjectCreator assumes, when reviewing code, that you're running on Linux. If you're on another OS such as Windows, run ``projectcreator review -p windows``.

9
setup.py Normal file
View File

@@ -0,0 +1,9 @@
from setuptools import find_packages, setup

# Package metadata for `pip install .`.
setup(
    name="ProjectCreator",
    version="0.1",
    install_requires=["ollama"],
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            # Original capitalised command, kept for backward compatibility.
            "ProjectCreator=ProjectCli.main:main",
            # Lowercase alias matching the README's `projectcreator` examples
            # (console-script names are case-sensitive on Linux/macOS).
            "projectcreator=ProjectCli.main:main",
        ]
    },
)