Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remove unused packages, remove installation of third-party libs #38

Merged
merged 2 commits into from
Jan 16, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 3 additions & 4 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
FROM python:3.9-slim-buster
FROM python:3.10-slim

WORKDIR /app

RUN apt-get update && apt-get -y install curl build-essential
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="$PATH:/root/.cargo/bin"
RUN apt-get update
RUN pip install --upgrade pip setuptools

COPY requirements.txt .
RUN pip3 install -r requirements.txt
Expand Down
7 changes: 3 additions & 4 deletions custom.Dockerfile
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
FROM python:3.9-slim-buster
FROM python:3.10-slim

WORKDIR /app

RUN apt-get update && apt-get -y install curl build-essential
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="$PATH:/root/.cargo/bin"
RUN apt-get update
RUN pip install --upgrade pip setuptools

COPY requirements.txt .
RUN pip3 install -r requirements.txt
Expand Down
9 changes: 7 additions & 2 deletions download.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,14 @@
#!/usr/bin/env python3

from transformers import AutoModel, AutoTokenizer, AutoConfig
import nltk
import os
import sys
import nltk
from transformers import (
AutoModel,
AutoTokenizer,
AutoConfig,
)


model_name = os.getenv('MODEL_NAME', None)
force_automodel = os.getenv('FORCE_AUTOMODEL', False)
Expand Down
1 change: 1 addition & 0 deletions meta.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from transformers import AutoConfig


class Meta:
config: AutoConfig

Expand Down
16 changes: 7 additions & 9 deletions requirements-test.txt
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
requests==2.28.1
tokenizers==0.11.6
transformers==4.20.1
fastapi==0.78.0
uvicorn==0.18.2
nltk==3.7
torch==1.12.0
sentencepiece==0.1.96
protobuf==4.21.2
requests==2.28.2
transformers==4.25.1
fastapi==0.89.1
uvicorn==0.20.0
nltk==3.8.1
torch==1.13.1
sentencepiece==0.1.97
pytest
2 changes: 0 additions & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
tokenizers==0.13.2
transformers==4.25.1
fastapi==0.89.1
uvicorn==0.20.0
nltk==3.8.1
torch==1.13.1
sentencepiece==0.1.97
protobuf==4.21.12
4 changes: 2 additions & 2 deletions smoke_test.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import time
import unittest
import requests
import time


class SmokeTest(unittest.TestCase):
Expand All @@ -18,7 +18,7 @@ def _waitForStartup(self):
"status code is {}".format(res.status_code))
except Exception as e:
print("Attempt {}: {}".format(i, e))
time.sleep(1)
time.sleep(2)

raise Exception("did not start up")

Expand Down
2 changes: 1 addition & 1 deletion test_app.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import os
import subprocess
import time
import subprocess
from multiprocessing import Process

import pytest
Expand Down
12 changes: 9 additions & 3 deletions vectorizer.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,17 @@
import math
from typing import Optional

import torch
from nltk.tokenize import sent_tokenize
from pydantic import BaseModel
from transformers import AutoModel, AutoTokenizer, T5ForConditionalGeneration, T5Tokenizer, DPRContextEncoder, \
DPRQuestionEncoder
from transformers import (
AutoModel,
AutoTokenizer,
T5ForConditionalGeneration,
T5Tokenizer,
DPRContextEncoder,
DPRQuestionEncoder,
)


# limit transformer batch size to limit parallel inference, otherwise we run
# into memory problems
Expand Down