Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test coverage #8

Merged
merged 1 commit into from
Apr 21, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions src/analyzer/analyzer.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,13 @@
from analyzer.models import LogChunk, Template

from config.config import SentryLogLevel, SENTRY_LOG_LEVEL
from handler.handlers import MessageSender
from connectors.connectors import Connector

from parser.models import ResponseItems
from parser.parser import parse_llm_response


async def analyze(logs: LogChunk, template: Template, sender: MessageSender, model: LLMModel,
async def analyze(logs: LogChunk, template: Template, sender: Connector, model: LLMModel,
min_level: SentryLogLevel = SENTRY_LOG_LEVEL) -> None:
message = await model.run_chunk(logs, template)
parsed_output = parse_llm_response(message)
Expand Down
5 changes: 1 addition & 4 deletions src/analyzer/anthropic_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,10 @@

from analyzer.llm_model import LLMModel
from analyzer.models import LogChunk, Template, Prompt
from config.config import ANTHROPIC_API_KEY, ANTHROPIC_MODEL_ID, logger
from config.config import ANTHROPIC_MODEL_ID

from anthropic import Anthropic

from handler.handlers import MessageSender
from parser.parser import parse_llm_response


def create_prompt(logs: LogChunk, template: Template) -> Prompt:
prompt = template.template.replace("YOUR_LOG_CHUNK_HERE", "\n".join(logs.lines))
Expand Down
2 changes: 1 addition & 1 deletion src/analyzer/groq_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from analyzer.llm_model import LLMModel
from analyzer.models import LogChunk, Template
from config.config import GROQ_MODEL_ID, logger
from config.config import GROQ_MODEL_ID


# client = Groq()
Expand Down
File renamed without changes.
4 changes: 2 additions & 2 deletions src/handler/handlers.py → src/connectors/connectors.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
from parser.models import ResponseItems, SentryLogLevel


class MessageSender(ABC):
class Connector(ABC):

@abstractmethod
def post_message(self, message: str) -> None:
Expand All @@ -24,7 +24,7 @@ def post_parsed_object(self, response_items: ResponseItems, logs: LogChunk) -> N
pass


class Slack(MessageSender):
class SlackConnector(Connector):
def __init__(self, token: str, channel: str):
self.client = WebClient(token=token)
self.channel = channel
Expand Down
6 changes: 3 additions & 3 deletions src/sentry.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
CONTAINER_TO_WATCH, logger, ModelType, MODEL_TO_USE

from analyzer.models import Template
from handler.handlers import Slack, MessageSender
from connectors.connectors import SlackConnector, Connector
from utils.prepocessing import log_chunk_preprocessor

client = docker.from_env()
Expand Down Expand Up @@ -48,7 +48,7 @@ def load_template() -> Template:
return Template(name=TEMPLATE, template=content)


async def watch_container_logs(container_name: str, template: Template, sender: MessageSender, model: LLMModel):
async def watch_container_logs(container_name: str, template: Template, sender: Connector, model: LLMModel):
try:
container = client.containers.get(container_name)
logger.info(f"Starting to watch logs from {container.name}...")
Expand All @@ -68,7 +68,7 @@ async def watch_container_logs(container_name: str, template: Template, sender:
async def main():
nginx = find_container(CONTAINER_TO_WATCH)
template = load_template()
sender = Slack(SLACK_TOKEN, SLACK_CHANNEL)
sender = SlackConnector(SLACK_TOKEN, SLACK_CHANNEL)
model = create_model()
await watch_container_logs(nginx, template, sender, model)

Expand Down
57 changes: 57 additions & 0 deletions src/tests/test_connectors.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
import pytest

from analyzer.models import LogChunk
from config.config import SentryLogLevel
from parser.models import ResponseItems, ResponseItem
from connectors.connectors import SlackConnector


# The fixture below builds one ResponseItem per severity spelling.
# ResponseItem fields: category (SentryLogLevel), type, origin,
# relevant_log, recommendation.

@pytest.fixture
def log_data() -> tuple[ResponseItems, LogChunk]:
    """Provide a parsed LLM response plus its originating log chunk.

    The three items deliberately use mixed-case category spellings
    ("INFO", "Critical", "warning") so formatter tests can confirm how
    severities are rendered regardless of input casing.

    Returns:
        A ``(ResponseItems, LogChunk)`` pair suitable for unpacking into
        ``SlackConnector.format_to_markdown``.
    """
    # NOTE: the original annotation was `-> (ResponseItems, LogChunk)`,
    # which is a tuple *expression*, not a valid type; fixed to tuple[...].
    items = ResponseItems(
        items=[
            ResponseItem(category="INFO", type="Type1", origin="Origin1",
                         relevant_log="Log1", recommendation="Recommendation1"),
            ResponseItem(category="Critical", type="Type2", origin="Origin2",
                         relevant_log="Log2", recommendation="Recommendation2"),
            ResponseItem(category="warning", type="Type3", origin="Origin3",
                         relevant_log="Log3", recommendation="Recommendation3"),
        ],
        parsing_errors=2,  # non-zero so the report's error count is visible
    )
    chunk = LogChunk(start_time="2024-01-01 00:00:00",
                     end_time="2024-01-01 00:01:23",
                     lines=["Line1", "Line2"])
    return items, chunk


def test_markdown_formatter(log_data):
    """The Slack markdown report lists the chunk's time window, each
    response item with its severity emoji, and the parsing-error count."""
    response_items, chunk = log_data
    expected = """*Report:*

*Start time:* 2024-01-01 00:00:00
*End time:* 2024-01-01 00:01:23

✅ *INFO*
*Type:* Type1
*Origin:* Origin1
*Relevant Log:* Log1
*Recommendation:* Recommendation1

🔥 *CRITICAL*
*Type:* Type2
*Origin:* Origin2
*Relevant Log:* Log2
*Recommendation:* Recommendation2

⚠️ *WARNING*
*Type:* Type3
*Origin:* Origin3
*Relevant Log:* Log3
*Recommendation:* Recommendation3

*Parsing Errors:* 2
"""
    rendered = SlackConnector.format_to_markdown(response_items, chunk)
    assert rendered == expected


Loading