Skip to content

Commit

Permalink
Merge pull request #437 from KCaverly/token_counting
Browse files Browse the repository at this point in the history
  • Loading branch information
CyrusNuevoDia authored Feb 23, 2024
2 parents a6042e5 + a0bf926 commit 1fb1f85
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 2 deletions.
5 changes: 5 additions & 0 deletions dspy/backends/lm/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,3 +15,8 @@ def __call__(
) -> list[str]:
"""Generates `n` predictions for the signature output."""
...

@abstractmethod
def count_tokens(self, prompt: str) -> int:
    """Counts the number of tokens for a specific prompt.

    Concrete backends implement this using their model's own
    tokenizer, so counts match what the provider will bill/limit.

    Args:
        prompt: The raw prompt text to tokenize.

    Returns:
        The number of tokens the backend's tokenizer produces for
        ``prompt``.
    """
    ...
10 changes: 8 additions & 2 deletions dspy/backends/lm/litellm.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
import typing as t

from litellm import completion
from litellm import completion, token_counter
from pydantic import Field


from .base import BaseLM


class LiteLLM(BaseLM):
STANDARD_PARAMS = {
STANDARD_PARAMS: t.Dict[str, t.Union[float, int]] = {
"temperature": 0.0,
"max_tokens": 150,
"top_p": 1,
Expand All @@ -33,3 +33,9 @@ def __call__(
)
choices = [c for c in response["choices"] if c["finish_reason"] != "length"]
return [c["message"]["content"] for c in choices]

def count_tokens(self, prompt: str) -> int:
    """Counts the number of tokens for a specific prompt.

    Delegates to ``litellm.token_counter`` so the count is produced by
    the tokenizer that matches ``self.model``.
    """
    # token_counter expects a chat-style message list, so wrap the raw
    # prompt as a single user turn.
    chat_messages = [{"role": "user", "content": prompt}]
    return token_counter(model=self.model, messages=chat_messages)

0 comments on commit 1fb1f85

Please sign in to comment.