diff --git a/README.md b/README.md
index 37f70ed..773f4b2 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,7 @@
 # rubber ducky
+
+<img src="ducky_img.webp" alt="Ducky Image">
+
 
 ## tl;dr
 - `pip install rubber-ducky`
diff --git a/ducky/ducky.py b/ducky/ducky.py
index 5922797..b4725f7 100644
--- a/ducky/ducky.py
+++ b/ducky/ducky.py
@@ -25,9 +25,14 @@ async def call_llama(self, code: str = "", prompt: Optional[str] = None, chain:
         while True:
             # Include previous responses in the prompt for context
             context_prompt = "\n".join(responses) + "\n" + prompt
-            response = await self.client.generate(model=self.model, prompt=context_prompt)
-            print(response['response'])
-            responses.append(response['response'])
+            stream = await self.client.generate(model=self.model, prompt=context_prompt, stream=True)
+            response_text = ""
+            async for chunk in stream:
+                if 'response' in chunk:
+                    print(chunk['response'], end='', flush=True)
+                    response_text += chunk['response']
+            print()  # New line after response completes
+            responses.append(response_text)
             if not chain:
                 break
             prompt = input("\nAny questions? \n")
@@ -67,7 +72,7 @@ async def ducky() -> None:
 
     # Handle direct question from CLI
     if args.question is not None:
-        question = " ".join(args.question)
+        question = " ".join(args.question) + ". Be as concise as possible."
         await rubber_ducky.call_llama(prompt=question, chain=args.chain)
         return
 
diff --git a/ducky_img.webp b/ducky_img.webp
new file mode 100644
index 0000000..045582b
Binary files /dev/null and b/ducky_img.webp differ
diff --git a/setup.py b/setup.py
index 19a4d16..f81b435 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
 
 setup(
     name='rubber-ducky',
-    version='1.1.2',
+    version='1.1.3',
     description='AI Companion for Pair Programming',
     long_description=long_description,
     long_description_content_type='text/markdown',
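
For context, here is the streaming pattern from the `call_llama` change pulled out of the class so it can be run standalone. This is a minimal sketch assuming the `ollama` Python client; the model name and prompt are illustrative (`codellama` stands in for whatever `self.model` is configured to), not taken from this repo.

```python
# Standalone sketch of the streaming change above. Assumes the `ollama`
# package and a locally pulled model; model name and prompt are illustrative.
import asyncio

from ollama import AsyncClient


async def main() -> None:
    client = AsyncClient()
    # stream=True makes generate() return an async iterator of partial
    # responses, so tokens print as they arrive rather than all at once.
    stream = await client.generate(
        model="codellama",
        prompt="Explain rubber-duck debugging in one paragraph.",
        stream=True,
    )
    response_text = ""
    async for chunk in stream:
        if "response" in chunk:
            print(chunk["response"], end="", flush=True)
            response_text += chunk["response"]
    print()  # finish the line once the stream is exhausted


asyncio.run(main())
```

Accumulating `response_text` while printing mirrors the diff: the full reply is still collected after the stream ends, so chained follow-up prompts keep the same conversational context they had before streaming was introduced.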