add load balancer metrics

Marcos Uchoa 2025-08-13 23:12:10 -03:00
parent 78f477c70d
commit de2e425fae
6 changed files with 27 additions and 9 deletions

View file

@@ -1,4 +1,4 @@
-FROM alpine:latest as build
+FROM alpine:latest AS build
 RUN apk update
 RUN apk add --no-cache zig
@@ -9,7 +9,7 @@ COPY ./build.zig .
 RUN zig build --release=fast
-FROM alpine:latest as app
+FROM alpine:latest AS app
 WORKDIR /app
 COPY --from=build /app/zig-out/bin/zigpay .

View file

@@ -19,7 +19,7 @@ services:
     deploy:
       resources:
         limits:
-          cpus: "0.30"
+          cpus: "0.20"
           memory: "50MB"
   zig1: &zig
     container_name: zig-pay-1
@@ -40,7 +40,7 @@ services:
     deploy:
       resources:
         limits:
-          cpus: "0.60"
+          cpus: "0.20"
           memory: "50MB"
   zig2:
     <<: *zig

View file

@@ -95,7 +95,6 @@ const template_json_summary: []const u8 =
 ;
 fn getSummary(req: *Request, res: *Response) void {
-    std.Thread.sleep(1_000_000 * 10);
     var from: ?DateTime = null;
     var to: ?DateTime = null;

View file

@@ -13,6 +13,10 @@ const Stream = net.Stream;
 const BUFFER_SIZE = 1024;
+var metrics_mutex = std.Thread.Mutex{};
+var metrics_num_requests: u64 = 0;
+var metrics_sum_req_time: u64 = 0;
 const UpstreamConnectionState = enum { inactive, available, occupied };
 const UpstreamConnection = struct {
@@ -147,6 +151,8 @@ pub const LoadBalancer = struct {
         buffer_request[0] = 0;
+        var timer = std.time.Timer.start() catch return;
         while (true) {
             var req_len: usize = 1;
@@ -168,6 +174,7 @@
                     break;
                 }
             }
+            timer.reset();
             upstream.stream.writeAll(buffer_request[0..req_len]) catch |err| {
                 log.err("Error when writing to upstream {}\n", .{err});
@@ -187,6 +194,18 @@
                 log.err("Error when write from connection {}\n", .{err});
                 return;
             };
+            const req_time_ns = timer.lap();
+            metrics_mutex.lock();
+            metrics_num_requests += 1;
+            metrics_sum_req_time += req_time_ns;
+            if (metrics_num_requests % 5000 == 0) {
+                std.debug.print("average requests time ns: {d}\n", .{metrics_sum_req_time / metrics_num_requests});
+            }
+            metrics_mutex.unlock();
         }
     }
 };
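Note: the pattern this commit adds (timing each proxied request with std.time.Timer, then folding the result into mutex-guarded counters) is shown below as a self-contained sketch. The worker loop, thread count, and sleep workload are illustrative stand-ins, not code from this repository.

const std = @import("std");

// Shared counters, guarded by a mutex as in the commit.
var metrics_mutex = std.Thread.Mutex{};
var metrics_num_requests: u64 = 0;
var metrics_sum_req_time: u64 = 0;

fn recordRequestTime(req_time_ns: u64) void {
    metrics_mutex.lock();
    defer metrics_mutex.unlock();
    metrics_num_requests += 1;
    metrics_sum_req_time += req_time_ns;
    // Print a running average every N requests (the commit uses 5000).
    if (metrics_num_requests % 1000 == 0) {
        std.debug.print("average request time ns: {d}\n", .{metrics_sum_req_time / metrics_num_requests});
    }
}

fn worker() void {
    // Timer.start() fails only when no monotonic clock is available.
    var timer = std.time.Timer.start() catch return;
    var i: usize = 0;
    while (i < 2000) : (i += 1) {
        timer.reset();
        std.Thread.sleep(100_000); // stand-in for proxying one request
        recordRequestTime(timer.lap());
    }
}

pub fn main() !void {
    var threads: [4]std.Thread = undefined;
    for (&threads) |*t| t.* = try std.Thread.spawn(.{}, worker, .{});
    for (threads) |t| t.join();
}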

View file

@@ -53,7 +53,7 @@ pub const PaymentIntegrator = struct {
     fn startProcess(self: *PaymentIntegrator) void {
         while (true) {
-            Thread.sleep(10_000_000);
+            Thread.sleep(50_000_000);
             self.verifyTailSize();
             self.processPayments();
@@ -112,8 +112,8 @@
 }
 pub fn newPaymentEvent(payment: *payments.Payment) void {
-    payment_integrator.mutex.lock();
-    defer payment_integrator.mutex.unlock();
+    //payment_integrator.mutex.lock();
+    //defer payment_integrator.mutex.unlock();
     const head = payment_integrator.head % MAX_QUEUE_SIZE;
     payment_integrator.queue[head].payment = payment;
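Note: commenting out the mutex in newPaymentEvent is only safe if exactly one thread ever enqueues; with concurrent producers, the read-then-use of head races. A hedged alternative, not part of the commit: reserve the slot with an atomic fetchAdd instead of a full lock. The sketch assumes the Zig 0.12+ std.atomic.Value API; QueueSlot, MAX_QUEUE_SIZE, and the producer loop are stand-ins for the repository's types.

const std = @import("std");

const MAX_QUEUE_SIZE = 1024;
const QueueSlot = struct { payment_id: u64 = 0 }; // stand-in for the repo's queue entry

var queue = [_]QueueSlot{.{}} ** MAX_QUEUE_SIZE;
var head = std.atomic.Value(u64).init(0);

fn newPaymentEvent(payment_id: u64) void {
    // fetchAdd hands each caller a unique index, so concurrent producers
    // never claim the same slot; overwrite-on-wraparound is elided here,
    // matching the original's head % MAX_QUEUE_SIZE scheme.
    const slot = head.fetchAdd(1, .monotonic) % MAX_QUEUE_SIZE;
    queue[slot].payment_id = payment_id;
}

fn producer(base: u64) void {
    var i: u64 = 0;
    while (i < 1000) : (i += 1) newPaymentEvent(base + i);
}

pub fn main() !void {
    var threads: [4]std.Thread = undefined;
    for (&threads, 0..) |*t, i| t.* = try std.Thread.spawn(.{}, producer, .{@as(u64, i * 1000)});
    for (threads) |t| t.join();
    std.debug.print("enqueued {d} events\n", .{head.load(.monotonic)});
}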

View file

@@ -194,7 +194,7 @@ pub const HttpService = struct {
     pub fn startMessenger(self: *HttpService, connections: []ServiceConnection, tickets: []ServiceTicket, thread_id: usize) void {
         while (true) {
-            Thread.sleep(20_000_000);
+            Thread.sleep(30_000_000);
             if (self.thread_stop[thread_id]) {
                 var conn_open = false;