Skip to content

Commit

Permalink
Added boilerplate code
Browse files Browse the repository at this point in the history
  • Loading branch information
mayurvir committed Mar 24, 2024
1 parent 3bee937 commit d3c63ee
Show file tree
Hide file tree
Showing 15 changed files with 990 additions and 0 deletions.
5 changes: 5 additions & 0 deletions .env.sample
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
OPENAI_API_KEY=
OPENAI_MODEL_ID=gpt-3.5-turbo-1106
OPEN_AI_EMBEDDINGS_MODEL=text-embedding-ada-002
PORT=8080
SERVER_PORT=3001
7 changes: 7 additions & 0 deletions .eslintignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
node_modules/*
migrations/*
seeders/*
models/*
package*.json
LICENSE
*.md
29 changes: 29 additions & 0 deletions .eslintrc.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
{
"extends": ["eslint:recommended", "plugin:node/recommended", "prettier"],
"env": {
"node": true,
"mocha": true,
"es2021": true
},
"parserOptions": {
"ecmaVersion": "latest",
"sourceType": "module"
},
"rules": {
"node/no-unpublished-require": 0,
"node/exports-style": ["error", "module.exports"],
"node/prefer-global/buffer": ["error", "always"],
"node/prefer-global/console": ["error", "always"],
"node/prefer-global/process": ["error", "always"],
"node/prefer-global/url-search-params": ["error", "always"],
"node/prefer-global/url": ["error", "always"],
"node/no-unsupported-features/es-syntax": [
"error",
{
"version": ">=12",
"ignores": ["modules"]
}
],
"no-console": "error"
}
}
35 changes: 35 additions & 0 deletions .github/workflows/checks.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# CI workflow: lint and test every push to any branch except main
# (main is covered by the deploy workflow instead).
name: Run linting and tests

on:
  push:
    branches-ignore:
      - main

jobs:
  test-lint:
    name: Test
    # ubuntu-20.04 hosted runners are retired; 22.04 is the nearest supported image.
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout code
        # v2 runs on a deprecated Node runtime removed from GitHub-hosted runners; v4 is current.
        uses: actions/checkout@v4

      # Materialize the app's .env from repository secrets so the
      # docker-based lint/test scripts see the same config as production.
      - name: Set up environment variables
        run: |
          echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> .env
          echo "OPENAI_MODEL_ID=${{ secrets.OPENAI_MODEL_ID }}" >> .env
          echo "OPEN_AI_EMBEDDINGS_MODEL=${{ secrets.OPEN_AI_EMBEDDINGS_MODEL }}" >> .env
          echo "PORT=${{ secrets.PORT }}" >> .env
          echo "SERVER_PORT=${{ secrets.SERVER_PORT }}" >> .env

      - name: Set up Node.js
        # v2 is deprecated for the same Node-runtime reason; v4 is current.
        uses: actions/setup-node@v4
        with:
          # Quoted so YAML keeps it a string, matching the Dockerfile's node:20 base.
          node-version: '20'

      - name: Install dependencies
        run: npm install

      - name: Run linting
        run: npm run docker:lint

      - name: Run tests
        run: npm run docker:test
58 changes: 58 additions & 0 deletions .github/workflows/deploy.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# CD workflow: on every push to main, build and push Docker images,
# then deploy them to an EC2 host over SSH.
name: Docker Build, Push and Deploy

on:
  push:
    branches:
      - main

jobs:
  build-push-deploy:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        # v2 runs on a deprecated Node runtime removed from GitHub-hosted runners; v4 is current.
        uses: actions/checkout@v4

      # Materialize the app's .env from repository secrets; it is copied to
      # the server below so the containers get the same configuration.
      - name: Set up environment variables
        run: |
          echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}" >> .env
          echo "OPENAI_MODEL_ID=${{ secrets.OPENAI_MODEL_ID }}" >> .env
          echo "OPEN_AI_EMBEDDINGS_MODEL=${{ secrets.OPEN_AI_EMBEDDINGS_MODEL }}" >> .env
          echo "PORT=${{ secrets.PORT }}" >> .env
          echo "SERVER_PORT=${{ secrets.SERVER_PORT }}" >> .env

      - name: Create SSH key file
        # NOTE(review): -e interprets backslash escapes — only needed if the secret
        # stores "\n" sequences instead of real newlines; confirm how the secret is stored.
        run: echo -e "${{ secrets.EC2_SSH_KEY }}" > ~/ec2_key

      - name: Set permissions for SSH key file
        # ssh refuses keys that are group/world readable.
        run: chmod 600 ~/ec2_key

      - name: Login to Docker Hub
        # --password-stdin keeps the token out of the process argument list.
        run: echo "${{ secrets.DOCKERHUB_TOKEN }}" | docker login -u "${{ secrets.DOCKERHUB_USER }}" --password-stdin

      - name: Build and push Docker images
        run: |
          docker-compose build
          docker-compose push

      - name: Copy environment file
        run: scp -i ~/ec2_key -o StrictHostKeyChecking=no ./.env ec2-user@${{ secrets.EC2_IP }}:~/.env

      - name: Copy docker-compose.yml to the server
        run: scp -i ~/ec2_key -o StrictHostKeyChecking=no ./docker-compose.yml ec2-user@${{ secrets.EC2_IP }}:~/docker-compose.yml

      - name: SSH into EC2 instance and deploy
        # StrictHostKeyChecking=no is set explicitly: previously this step relied on
        # the earlier scp calls having populated known_hosts, which is fragile.
        run: |
          ssh -i ~/ec2_key -o StrictHostKeyChecking=no ec2-user@${{ secrets.EC2_IP }} '
            echo "${{ secrets.DOCKERHUB_TOKEN }}" | docker login -u "${{ secrets.DOCKERHUB_USER }}" --password-stdin &&
            docker-compose pull &&
            docker-compose up -d'
131 changes: 131 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Comment in the public line in if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
.DS_Store
7 changes: 7 additions & 0 deletions .prettierrc.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
    "trailingComma": "es5",
    "tabWidth": 4,
    "semi": false,
    "singleQuote": true
}
11 changes: 11 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
FROM node:20-alpine3.17

# PORT must be declared before use: an undeclared ${PORT} expands to an empty
# string and "EXPOSE" with no argument fails the build. Default matches
# .env.sample; override with --build-arg PORT=...
ARG PORT=8080
ENV PORT=${PORT}

WORKDIR /app

# Copy only the package manifests first so the npm install layer is cached
# and re-run only when dependencies change, not on every source edit.
COPY package*.json ./
RUN npm install

# Copy the rest of the application source after dependencies are installed.
COPY . .

EXPOSE ${PORT}

CMD ["npm", "run", "prod"]
1 change: 1 addition & 0 deletions config/GPT/becknGPT_instructions.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
BecknGPT is designed to assist users with shopping needs, efficiently managing tasks from product searches to order confirmations. When a user indicates a desire to init an order, BecknGPT will ask for essential details such as name, email, phone number, and addresses before making the API call. During the init call, always mention that 'fulfillment details same as billing details' in the context. This information is then carefully included under the context field in API calls, ensuring a personalized and secure shopping experience. After init, BecknGPT should ask for confirmation to place the order. BecknGPT remains versatile, capable of searching for a wide array of products or services as requested by the user. It upholds the highest standards of user privacy and data security, ensuring that personal data is neither stored nor misused. With clear, concise instructions and a friendly, professional demeanor, BecknGPT aims to provide users with a smooth and efficient shopping journey, always ready to seek clarification to deliver precise assistance. BecknGPT must call select at least once before the init call. It should send sessionId for subsequent search actions as well. If the search call does not yield any results or incorrect results, BecknGPT will now ask the user if they would like to widen the search criteria and look into other networks
64 changes: 64 additions & 0 deletions config/GPT/becknGPT_schema.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
# OpenAPI schema exposed to the custom GPT: a single /act endpoint that
# relays the user's message plus a Beckn action to the middleware.
openapi: 3.0.0
info:
  title: Beckn API middleware
  version: 1.0.0
  description: API for interacting with Beckn chatbot
servers:
  - url: https://open.network.becknprotocol.io
paths:
  /act:
    post:
      operationId: postAct
      # Lets the GPT call this action without a per-request user confirmation.
      x-openai-isConsequential: false
      summary: Send a context string to get a response.
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              required:
                - context
                - action
                - recipient
              properties:
                recipient:
                  type: string
                  description: Who is using the API, bap or bpp.
                  # NOTE(review): description mentions bpp, but the enum only
                  # allows bap — confirm whether bpp should be listed here.
                  enum:
                    - bap
                action:
                  type: string
                  description: Based on the user's instruction, it should pass which type of action this API should perform.
                  enum:
                    - search
                    - select
                    - init
                    - confirm
                sessionId:
                  type: string
                  description: This should not be sent for the first API call and should be sent for subsequent calls. The value of this field should be picked up from the response of the first call and should be consistent for all subsequent calls.
                context:
                  type: string
                  description: User message should be sent here. It should include any important information shared earlier in the chat by the user that can help with the instructions of this context.
                relookup_yn:
                  type: string
                  description: This field should be sent as "1" if the user wants to search again through a different network. The default value should be "0".
                  # Quoted so YAML keeps the enum members strings; bare 0/1
                  # parse as integers and contradict "type: string".
                  enum:
                    - "0"
                    - "1"
      responses:
        '200':
          description: Successful response
          content:
            application/json:
              schema:
                type: object
                properties:
                  result:
                    type: string
                    description: The response from the server.
        '400':
          description: Bad request
        '500':
          description: Server error
Loading

0 comments on commit d3c63ee

Please sign in to comment.