
metrics

Request metrics tracking for cocapi

MetricsTracker

MetricsTracker(max_metrics: int = 1000)

Tracks API request metrics and statistics

Initialize metrics tracker

Parameters:

    max_metrics (int, default: 1000): Maximum number of metrics to store (oldest are removed)

Source code in cocapi/metrics.py
def __init__(self, max_metrics: int = 1000):
    """
    Initialize metrics tracker

    Args:
        max_metrics: Maximum number of metrics to store (oldest are removed)
    """
    self.max_metrics = max_metrics
    self.metrics: list[RequestMetric] = []
    self._enabled = False
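
A minimal usage sketch. It assumes the class is importable as cocapi.metrics.MetricsTracker, which matches the source path shown above.

from cocapi.metrics import MetricsTracker

# Keep at most 500 metrics in memory; older entries are discarded first.
tracker = MetricsTracker(max_metrics=500)

# Tracking starts disabled; nothing is recorded until enable() is called.
tracker.enable()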

enable

enable() -> None

Enable metrics tracking

Source code in cocapi/metrics.py
def enable(self) -> None:
    """Enable metrics tracking"""
    self._enabled = True

disable

disable() -> None

Disable metrics tracking

Source code in cocapi/metrics.py
def disable(self) -> None:
    """Disable metrics tracking"""
    self._enabled = False

is_enabled

is_enabled() -> bool

Check if metrics tracking is enabled

Source code in cocapi/metrics.py
def is_enabled(self) -> bool:
    """Check if metrics tracking is enabled"""
    return self._enabled
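
A short sketch of toggling collection, using only the three methods above; the import path is assumed from the source location.

from cocapi.metrics import MetricsTracker

tracker = MetricsTracker()

tracker.enable()
assert tracker.is_enabled()       # tracking is on; record_request() will store metrics

tracker.disable()
assert not tracker.is_enabled()   # tracking is off; record_request() returns immediately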

record_request

record_request(
    endpoint: str,
    method: str,
    status_code: int,
    response_time: float,
    cache_hit: bool,
    error_type: str | None = None,
) -> None

Record metrics for a request if metrics are enabled

Source code in cocapi/metrics.py
def record_request(
    self,
    endpoint: str,
    method: str,
    status_code: int,
    response_time: float,
    cache_hit: bool,
    error_type: str | None = None,
) -> None:
    """Record metrics for a request if metrics are enabled"""
    if not self._enabled:
        return

    # Format endpoint for better grouping
    formatted_endpoint = format_endpoint_for_metrics(endpoint)

    metric = RequestMetric(
        endpoint=formatted_endpoint,
        method=method,
        status_code=status_code,
        response_time=response_time,
        timestamp=time.time(),
        cache_hit=cache_hit,
        error_type=error_type,
    )

    self.metrics.append(metric)

    # Keep only the most recent metrics
    if len(self.metrics) > self.max_metrics:
        self.metrics = self.metrics[-self.max_metrics :]
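
A hedged sketch of recording two calls by hand. The endpoint strings and the "NotFound" error label are illustrative placeholders, not values mandated by cocapi; endpoints are normalized through format_endpoint_for_metrics before being stored, as the source above shows.

from cocapi.metrics import MetricsTracker

tracker = MetricsTracker()
tracker.enable()

# A successful, uncached request (all values are illustrative).
tracker.record_request(
    endpoint="/clans/%23ABC123",  # placeholder clan endpoint
    method="GET",
    status_code=200,
    response_time=0.142,          # seconds
    cache_hit=False,
)

# A failed request with an error classification.
tracker.record_request(
    endpoint="/clans/%23ABC123",
    method="GET",
    status_code=404,
    response_time=0.098,
    cache_hit=False,
    error_type="NotFound",        # placeholder error label
)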

get_metrics_summary

get_metrics_summary() -> dict[str, Any]

Get comprehensive metrics summary

Source code in cocapi/metrics.py
def get_metrics_summary(self) -> dict[str, Any]:
    """Get comprehensive metrics summary"""
    if not self.metrics:
        return {"total_requests": 0, "message": "No metrics available"}

    total_requests = len(self.metrics)

    # Calculate success rate
    successful_requests = sum(1 for m in self.metrics if 200 <= m.status_code < 300)
    success_rate = (successful_requests / total_requests) * 100

    # Calculate cache hit rate
    cache_hits = sum(1 for m in self.metrics if m.cache_hit)
    cache_hit_rate = (cache_hits / total_requests) * 100

    # Calculate average response time
    avg_response_time = sum(m.response_time for m in self.metrics) / total_requests

    # Most used endpoints
    endpoint_counts: dict[str, int] = defaultdict(int)
    for metric in self.metrics:
        endpoint_counts[metric.endpoint] += 1

    most_used_endpoints = sorted(
        endpoint_counts.items(), key=lambda x: x[1], reverse=True
    )[:5]

    # Error breakdown
    error_counts: dict[str, int] = defaultdict(int)
    for metric in self.metrics:
        if metric.error_type:
            error_counts[metric.error_type] += 1

    # Status code breakdown
    status_counts: dict[int, int] = defaultdict(int)
    for metric in self.metrics:
        status_counts[metric.status_code] += 1

    # Response time percentiles
    response_times = sorted(m.response_time for m in self.metrics)
    percentiles = self._calculate_percentiles(response_times)

    return {
        "total_requests": total_requests,
        "success_rate": round(success_rate, 2),
        "cache_hit_rate": round(cache_hit_rate, 2),
        "average_response_time": round(avg_response_time, 3),
        "response_time_percentiles": percentiles,
        "most_used_endpoints": most_used_endpoints,
        "status_code_breakdown": dict(status_counts),
        "error_breakdown": dict(error_counts),
        "timespan": {
            "start": min(m.timestamp for m in self.metrics),
            "end": max(m.timestamp for m in self.metrics),
            "duration_seconds": max(m.timestamp for m in self.metrics)
            - min(m.timestamp for m in self.metrics),
        },
    }
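
A sketch of consuming the summary, continuing with the tracker populated in the record_request sketch above. The keys mirror the return dict in the source; the shape of response_time_percentiles depends on _calculate_percentiles, which is not shown on this page, so it is printed as-is.

# tracker: the MetricsTracker populated in the record_request sketch above
summary = tracker.get_metrics_summary()

if summary["total_requests"]:
    print(f"requests:       {summary['total_requests']}")
    print(f"success rate:   {summary['success_rate']}%")
    print(f"cache hit rate: {summary['cache_hit_rate']}%")
    print(f"avg latency:    {summary['average_response_time']}s")
    print(f"percentiles:    {summary['response_time_percentiles']}")
else:
    print(summary["message"])   # "No metrics available"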

get_endpoint_metrics

get_endpoint_metrics(endpoint: str) -> dict[str, Any]

Get metrics for a specific endpoint

Source code in cocapi/metrics.py
def get_endpoint_metrics(self, endpoint: str) -> dict[str, Any]:
    """Get metrics for a specific endpoint"""
    formatted_endpoint = format_endpoint_for_metrics(endpoint)
    endpoint_metrics = [m for m in self.metrics if m.endpoint == formatted_endpoint]

    if not endpoint_metrics:
        return {
            "endpoint": endpoint,
            "total_requests": 0,
            "message": "No metrics available for this endpoint",
        }

    total_requests = len(endpoint_metrics)
    successful_requests = sum(
        1 for m in endpoint_metrics if 200 <= m.status_code < 300
    )
    success_rate = (successful_requests / total_requests) * 100

    cache_hits = sum(1 for m in endpoint_metrics if m.cache_hit)
    cache_hit_rate = (cache_hits / total_requests) * 100

    avg_response_time = (
        sum(m.response_time for m in endpoint_metrics) / total_requests
    )

    response_times = sorted(m.response_time for m in endpoint_metrics)
    percentiles = self._calculate_percentiles(response_times)

    return {
        "endpoint": endpoint,
        "formatted_endpoint": formatted_endpoint,
        "total_requests": total_requests,
        "success_rate": round(success_rate, 2),
        "cache_hit_rate": round(cache_hit_rate, 2),
        "average_response_time": round(avg_response_time, 3),
        "response_time_percentiles": percentiles,
    }
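
A sketch of querying a single endpoint, again continuing from the populated tracker above. The argument is normalized with format_endpoint_for_metrics before matching, so the same string passed to record_request can be passed here.

# tracker: the MetricsTracker populated in the record_request sketch above
stats = tracker.get_endpoint_metrics("/clans/%23ABC123")   # placeholder endpoint

if stats["total_requests"]:
    print(stats["success_rate"], stats["cache_hit_rate"], stats["average_response_time"])
else:
    print(stats["message"])   # "No metrics available for this endpoint"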

get_recent_errors

get_recent_errors(limit: int = 10) -> list[dict[str, Any]]

Get recent error requests

Source code in cocapi/metrics.py
def get_recent_errors(self, limit: int = 10) -> list[dict[str, Any]]:
    """Get recent error requests"""
    error_metrics = [
        m for m in self.metrics if m.status_code >= 400 or m.error_type
    ]

    # Sort by timestamp, most recent first
    error_metrics.sort(key=lambda x: x.timestamp, reverse=True)

    return [
        {
            "endpoint": m.endpoint,
            "method": m.method,
            "status_code": m.status_code,
            "error_type": m.error_type,
            "response_time": m.response_time,
            "timestamp": m.timestamp,
        }
        for m in error_metrics[:limit]
    ]
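
A sketch of listing recent failures, continuing from the populated tracker above. Each entry is a plain dict with the keys shown in the source.

# tracker: the MetricsTracker populated in the record_request sketch above
for err in tracker.get_recent_errors(limit=5):
    print(err["timestamp"], err["method"], err["endpoint"],
          err["status_code"], err["error_type"])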

clear_metrics

clear_metrics() -> None

Clear all stored metrics

Source code in cocapi/metrics.py
def clear_metrics(self) -> None:
    """Clear all stored metrics"""
    self.metrics.clear()

export_metrics_csv

export_metrics_csv() -> str

Export metrics as CSV string

Source code in cocapi/metrics.py
def export_metrics_csv(self) -> str:
    """Export metrics as CSV string"""
    if not self.metrics:
        return "No metrics to export"

    lines = [
        "endpoint,method,status_code,response_time,timestamp,cache_hit,error_type"
    ]

    for metric in self.metrics:
        lines.append(
            f"{metric.endpoint},{metric.method},{metric.status_code},"
            f"{metric.response_time},{metric.timestamp},{metric.cache_hit},"
            f"{metric.error_type or ''}"
        )

    return "\n".join(lines)

get_performance_insights

get_performance_insights() -> dict[str, Any]

Get performance insights and recommendations

Source code in cocapi/metrics.py
def get_performance_insights(self) -> dict[str, Any]:
    """Get performance insights and recommendations"""
    if not self.metrics:
        return {"message": "No metrics available for insights"}

    insights = []
    summary = self.get_metrics_summary()

    # Check cache hit rate
    if summary["cache_hit_rate"] < 30:
        insights.append(
            {
                "type": "performance",
                "message": f"Low cache hit rate ({summary['cache_hit_rate']}%). Consider increasing cache TTL or reviewing caching strategy.",
                "severity": "medium",
            }
        )
    elif summary["cache_hit_rate"] > 80:
        insights.append(
            {
                "type": "performance",
                "message": f"Excellent cache hit rate ({summary['cache_hit_rate']}%)!",
                "severity": "info",
            }
        )

    # Check success rate
    if summary["success_rate"] < 95:
        insights.append(
            {
                "type": "reliability",
                "message": f"Success rate is {summary['success_rate']}%. Review error patterns.",
                "severity": "high" if summary["success_rate"] < 90 else "medium",
            }
        )

    # Check average response time
    if summary["average_response_time"] > 2.0:
        insights.append(
            {
                "type": "performance",
                "message": f"High average response time ({summary['average_response_time']}s). Consider optimizing requests.",
                "severity": "medium",
            }
        )

    # Check for frequent errors
    error_breakdown = summary.get("error_breakdown", {})
    if error_breakdown:
        most_common_error = max(error_breakdown.items(), key=lambda x: x[1])
        if (
            most_common_error[1] > len(self.metrics) * 0.1
        ):  # More than 10% of requests
            insights.append(
                {
                    "type": "reliability",
                    "message": f"Frequent {most_common_error[0]} errors ({most_common_error[1]} occurrences). Investigate root cause.",
                    "severity": "high",
                }
            )

    return {
        "insights": insights,
        "recommendations": self._generate_recommendations(insights),
    }
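
A sketch of acting on the insights, continuing from the populated tracker above. The shape of "recommendations" comes from _generate_recommendations, which is not documented on this page, so it is printed without further assumptions.

# tracker: the MetricsTracker populated in the record_request sketch above
report = tracker.get_performance_insights()

for item in report.get("insights", []):
    print(f"[{item['severity']}] {item['type']}: {item['message']}")

print(report.get("recommendations"))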