end of project
main.py | 92
@@ -5,28 +5,92 @@ from dotenv import load_dotenv
from google import genai
from google.genai import types

from config import SYSTEM_PROMPT, MAX_ITERS
from call_function import available_functions, call_function


def main():
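    # Split argv into flags and prompt words; --verbose enables extra logging.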
    verbose = "--verbose" in sys.argv
    args = []
    for arg in sys.argv[1:]:
        if not arg.startswith("--"):
            args.append(arg)

    if not args:
        print("AI Code Assistant")
        print('\nUsage: python main.py "your prompt here" [--verbose]')
        print('Example: python main.py "How do I fix the calculator?"')
        sys.exit(1)

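    # Load .env so GEMINI_API_KEY is available, then build the client.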
    load_dotenv()
    api_key = os.environ.get("GEMINI_API_KEY")
    client = genai.Client(api_key=api_key)

    user_prompt = " ".join(args)

    if verbose:
        print(f"User prompt: {user_prompt}\n")

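    # Conversation history; model turns and tool results are appended so
    # each request carries the full context.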
    messages = [
        types.Content(role="user", parts=[types.Part(text=user_prompt)]),
    ]
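    # Agent loop: call the model until it returns a final text answer
    # (no pending function calls) or the iteration cap is reached.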
    iters = 0
    while True:
        iters += 1
        if iters > MAX_ITERS:
            print(f"Maximum iterations ({MAX_ITERS}) reached.")
            sys.exit(1)

        try:
            final_response = generate_content(client, messages, verbose)
            if final_response:
                print("Final response:")
                print(final_response)
                break
        except Exception as e:
            print(f"Error in generate_content: {e}")


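# One round of the loop: send the whole conversation plus the declared tools.
# A plain-text reply means the agent is done; otherwise every requested
# function is executed and its result appended for the next round.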
def generate_content(client, messages, verbose):
    response = client.models.generate_content(
        model="gemini-2.0-flash-001",
        contents=messages,
        config=types.GenerateContentConfig(
            tools=[available_functions], system_instruction=SYSTEM_PROMPT
        ),
    )
    if verbose:
        print("Prompt tokens:", response.usage_metadata.prompt_token_count)
        print("Response tokens:", response.usage_metadata.candidates_token_count)

    # Record the model's turn(s) in the conversation history.
    if response.candidates:
        for candidate in response.candidates:
            function_call_content = candidate.content
            messages.append(function_call_content)

    # No function calls means the model has produced its final answer.
    if not response.function_calls:
        return response.text

    # Run each requested tool; call_function must return a types.Content
    # whose first part carries a function_response.
    function_responses = []
    for function_call_part in response.function_calls:
        function_call_result = call_function(function_call_part, verbose)
        if (
            not function_call_result.parts
            or not function_call_result.parts[0].function_response
        ):
            raise Exception("empty function call result")
        if verbose:
            print(f"-> {function_call_result.parts[0].function_response.response}")
        function_responses.append(function_call_result.parts[0])

    if not function_responses:
        raise Exception("no function responses generated, exiting.")

    # Feed the tool results back as a user turn for the next iteration.
    messages.append(types.Content(role="user", parts=function_responses))


if __name__ == "__main__":
    main()
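
The hunk imports SYSTEM_PROMPT and MAX_ITERS from config, which this commit does not show. A minimal sketch of what config.py could contain, assuming only what the imports require (the actual prompt text and cap live in the repo's own config.py):

    # config.py -- hypothetical sketch; only the two names come from the diff
    SYSTEM_PROMPT = "You are a helpful AI coding agent. Use the provided tools to answer."  # placeholder
    MAX_ITERS = 20  # assumed value; any positive cap on loop iterations works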
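
Likewise, call_function is only imported here, but generate_content depends on its return shape: a types.Content whose parts[0].function_response holds the tool output. A minimal stub of that contract, assuming the google-genai types API (the dispatch to the real tools is omitted):

    # call_function.py -- hypothetical stub showing only the return shape
    from google.genai import types

    def call_function(function_call_part, verbose=False):
        if verbose:
            print(f"Calling function: {function_call_part.name}({function_call_part.args})")
        result = {"result": "tool output would go here"}  # placeholder payload
        return types.Content(
            role="tool",
            parts=[
                types.Part.from_function_response(
                    name=function_call_part.name,
                    response=result,
                )
            ],
        )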