Mirror of https://github.com/ION606/ML-pipeline.git (synced 2026-05-14 21:06:54 +00:00)
quality of life upgrades and bug fixes
+33 -5
@@ -3,10 +3,12 @@ from pathlib import Path
 import re
 from types import FunctionType
 import docker
+import json
 
 import debug as debugMod
 import conversation_store
 from config import Config
+from queries import show_thinking
 
 
 class UserEnvironment:
@@ -14,6 +16,30 @@ class UserEnvironment:
         self.user_id = user_id
         self.client = docker.from_env()
         self.temp_dir = tempfile.TemporaryDirectory(prefix=f"{user_id}_code_")
+        self._ensure_sandbox_image()
+
+    def _ensure_sandbox_image(self):
+        try:
+            self.client.images.get("code-sandbox")
+        except docker.errors.ImageNotFound:
+            debugMod.log("building code-sandbox image from Dockerfile.sandbox...")
+
+            try:
+                self.client.images.build(
+                    path=".",
+                    dockerfile="Dockerfile.sandbox",
+                    tag="code-sandbox",
+                    rm=True,
+                    forcerm=True
+                )
+
+                debugMod.log("successfully built code-sandbox image")
+
+            except docker.errors.BuildError as e:
+                raise RuntimeError(f"Failed to build Docker image: {str(e)}") from e
+
+            except docker.errors.APIError as e:
+                raise RuntimeError(f"Docker API error: {str(e)}") from e
 
     def execute_code(self, code: str, context=None, timeout=15, memory_limit=100):
         # Validate input
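A note on the new _ensure_sandbox_image hunk: the check-then-build-on-miss pattern is plain docker SDK for Python. Below is a minimal self-contained sketch of the same pattern; only the image tag and Dockerfile name are taken from the diff, and the ensure_image helper name is illustrative, not the repo's.

import docker

def ensure_image(client: docker.DockerClient, tag: str = "code-sandbox") -> None:
    # images.get() raises ImageNotFound when the tag is absent locally
    try:
        client.images.get(tag)
        return
    except docker.errors.ImageNotFound:
        pass

    # images.build() returns (image, build_log_generator); rm=True removes
    # intermediate containers after a successful build, forcerm=True removes
    # them even when the build fails
    image, _logs = client.images.build(
        path=".",
        dockerfile="Dockerfile.sandbox",
        tag=tag,
        rm=True,
        forcerm=True,
    )

if __name__ == "__main__":
    ensure_image(docker.from_env())

Re-raising BuildError and APIError as RuntimeError, as the diff does, keeps docker as an implementation detail of the environment class rather than part of its error contract.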
@@ -48,7 +74,6 @@ class UserEnvironment:
             detach=True,
             stdout=True,
             stderr=True,
-            timeout=timeout
         )
 
         # Wait for completion
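The single deletion here is consistent with docker-py's containers.run() signature, which has no timeout keyword; with detach=True the call returns a Container handle immediately. A hedged sketch of how a wall-clock limit can be enforced instead, via Container.wait(), which does accept one (the kill/remove cleanup choice is illustrative, not taken from this repo):

import docker
import requests

client = docker.from_env()
container = client.containers.run("code-sandbox", detach=True)

try:
    # wait() takes a timeout in seconds and raises
    # requests.exceptions.ReadTimeout when it elapses
    status = container.wait(timeout=15)
    print("exit code:", status["StatusCode"])
except requests.exceptions.ReadTimeout:
    container.kill()
finally:
    container.remove(force=True)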
@@ -98,10 +123,10 @@ def orchestrate_code(orchestrate: FunctionType, vector_store, chunks, user_env:
         execution_result = user_env.execute_code(
             current_code, context=chunks if chunks else None)
 
-        if isinstance(execution_result, dict) and 'err' in execution_result:
+        if isinstance(execution_result, dict) and execution_result['error']:
             # hard code to let user know the program didn't explode
-            debugMod.log(
-                "\n\nhmmm...looks like this code didn't work properly, I'll try debugging it now!\n")
+            show_thinking(
+                "[hmmm...looks like this code didn't work properly, I'll try debugging it now!]")
 
             last_error = execution_result['err']
             debugMod.log(f"\nExecution error: {last_error}\n")
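For orientation while reading this branch: between this hunk and the next, the result dict is read as execution_result['error'], execution_result['err'], and execution_result['output']. A sketch of the return shape that would satisfy all three reads; the key set is inferred from these hunks alone, since execute_code's actual return statement sits outside the diff:

from typing import TypedDict

class ExecutionResult(TypedDict, total=False):
    # inferred shape, not the repo's declared type
    error: bool   # truthiness gate on the failure branch above
    err: str      # message copied into last_error
    output: str   # printed on the success branch in the next hunk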
@@ -128,7 +153,9 @@ def orchestrate_code(orchestrate: FunctionType, vector_store, chunks, user_env:
             else:
                 break
         else:
-            debugMod.log("\nCode Execution Result:\n", execution_result)
+            debugMod.log("\nCode Execution Result:\n", json.dumps(execution_result))
+            print("\nCode Execution Result:\n", execution_result['output'].strip())
+
             if execution_result:
                 # Get current conversation ID after saving conversation
                 conv_id = conversation_store.save_conversation(query, response, links)
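One stdlib caveat on the new json.dumps log line: it raises TypeError if the result dict ever carries a value json cannot encode (bytes, sets, exceptions). A defensive variant, purely illustrative:

import json

result = {"output": "done", "err": None, "raw": b"\x00"}  # bytes break json.dumps
print(json.dumps(result, default=str))  # default=str stringifies anything unencodable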
@@ -142,6 +169,7 @@ def orchestrate_code(orchestrate: FunctionType, vector_store, chunks, user_env:
                     retries=retry_count,
+                    conversation_id=conv_id
                 )
 
                 break
 
     if last_error and retry_count >= Config.MAX_CODE_RETRIES:
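Taken together, the orchestrate_code hunks patch the inside of a bounded retry loop. A compressed skeleton of that control flow as these fragments imply it; the loop form, helper names, and error handling here are a reconstruction under those assumptions, not the file's actual code:

def run_with_retries(execute, max_retries: int):
    last_error = None
    result = None
    for retry_count in range(max_retries + 1):
        result = execute()
        if isinstance(result, dict) and result.get("err"):
            last_error = result["err"]   # failed attempt: remember and retry
            continue
        last_error = None                # success: stop retrying
        break
    if last_error and retry_count >= max_retries:
        raise RuntimeError(f"still failing after {max_retries} retries: {last_error}")
    return result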