diff --git a/gemini_worker/src/gemini_sdk/constants.py b/gemini_worker/src/gemini_sdk/constants.py
index 45ff68f..e69de29 100644
--- a/gemini_worker/src/gemini_sdk/constants.py
+++ b/gemini_worker/src/gemini_sdk/constants.py
@@ -1,3 +0,0 @@
-
-
-GEMINI_TOKENS_LIMIT: int = 500_000
\ No newline at end of file
diff --git a/gemini_worker/src/service/limter_checker.py b/gemini_worker/src/service/limter_checker.py
index c2e4fd1..4b558a7 100644
--- a/gemini_worker/src/service/limter_checker.py
+++ b/gemini_worker/src/service/limter_checker.py
@@ -2,8 +2,8 @@ import json
 
 from google.generativeai import GenerativeModel
 
-from src.gemini_sdk.constants import GEMINI_TOKENS_LIMIT
 from src.service.storage import MESSAGES_STORAGE_SCHEMA
+from src.settings.base import settings
 
 
 def check_current_token_limit(
@@ -13,7 +13,7 @@ def check_current_token_limit(
     tokens_response_count = model.count_tokens(raw_response).total_tokens
     print("tokens", model.count_tokens(raw_response).total_tokens)
 
-    if tokens_response_count >= GEMINI_TOKENS_LIMIT:
+    if tokens_response_count >= settings.GEMINI.TOKENS_LIMIT:
        return False
 
     return True
diff --git a/gemini_worker/src/settings/gemini.py b/gemini_worker/src/settings/gemini.py
index c78d83c..2bfa7d1 100644
--- a/gemini_worker/src/settings/gemini.py
+++ b/gemini_worker/src/settings/gemini.py
@@ -4,4 +4,5 @@ from pydantic import BaseModel
 
 class GeminiSettings(BaseModel):
     API_KEY: str
-    MODEL_NAME: str = "gemini-1.5-flash"
\ No newline at end of file
+    MODEL_NAME: str = "gemini-1.5-flash"
+    TOKENS_LIMIT: int
\ No newline at end of file
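
The diff moves the hard-coded GEMINI_TOKENS_LIMIT constant into GeminiSettings and reads it via settings.GEMINI.TOKENS_LIMIT, but src/settings/base.py is not shown. The sketch below is a minimal, assumed shape for that module: it presumes pydantic-settings (v2) with a nested GEMINI group and an env_nested_delimiter of "__"; the class name Settings and the environment variable names are illustrative, not taken from this diff.

# Hypothetical src/settings/base.py -- assumption, not part of this change.
from pydantic_settings import BaseSettings, SettingsConfigDict

from src.settings.gemini import GeminiSettings


class Settings(BaseSettings):
    # Nested groups would be filled from the environment, e.g.
    # GEMINI__API_KEY=...  GEMINI__TOKENS_LIMIT=500000
    model_config = SettingsConfigDict(env_nested_delimiter="__")

    GEMINI: GeminiSettings


settings = Settings()

With a layout like this, check_current_token_limit can compare against settings.GEMINI.TOKENS_LIMIT as the diff does; note that TOKENS_LIMIT is declared without a default, so the value (previously 500_000) must now be supplied through configuration or the settings object will fail validation at startup.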