@@ -1,8 +1,8 @@
 const std = @import("std");
+const builtin = @import("builtin");
 const zap = @import("zap");
 const pg = @import("pg");
 const regex = @import("regex");
-const dns = @import("dns");
 const pool = @import("pool.zig");
 
 const endpoints = @import("endpoints.zig");
@@ -23,6 +23,24 @@ pub fn main() !void {
 
     const allocator = tsa.allocator();
 
+    var zap_port: []u8 = undefined;
+    var arg_string = try std.fmt.allocPrint(allocator, "{s}", .{"0"});
+    defer allocator.free(arg_string);
+
+    var args = try std.process.argsWithAllocator(allocator);
+    defer args.deinit();
+    while (args.next()) |arg| {
+        arg_string = try std.fmt.allocPrint(allocator, "{s}", .{arg});
+
+        zap_port = arg_string; // keep the last argument as the port string
+    }
+
+    var port = try std.fmt.parseInt(u16, zap_port, 0);
+
+    if (port == 0) {
+        port = 3000;
+    }
+
     var pg_pool = try pool.initPool(allocator);
     defer pg_pool.deinit();
@@ -68,7 +86,7 @@ pub fn main() !void {
     var listener = try zap.Middleware.Listener(middleware.Context).init(
         .{
             .on_request = null, // must be null
-            .port = 3000,
+            .port = port,
             .log = false,
             .max_clients = 100000,
         },
@@ -78,13 +96,15 @@ pub fn main() !void {
     );
     try listener.listen();
 
-    const cpuCount = @as(i16, @intCast(std.Thread.getCpuCount() catch 1));
+    //const cpuCount = @as(i16, @intCast(std.Thread.getCpuCount() catch 1));
+    //const workers = if (builtin.mode == .Debug) 1 else cpuCount;
+    const threads = 128;
 
-    std.debug.print("Listening on 0.0.0.0:3000 on {d} threads\n", .{cpuCount});
+    std.debug.print("Listening at 0.0.0.0:{d} on {d} threads\n", .{ port, threads });
 
     // start worker threads
     zap.start(.{
-        .threads = 16 * cpuCount,
+        .threads = threads,
         .workers = 1,
     });
 }
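
For comparison, a minimal sketch (not part of the commit) of the same port-from-argv idea without the per-iteration allocPrint, whose intermediate allocations the loop above never frees. The helper name portFromArgs is made up here; it assumes the port is passed as the first argument after the program name and falls back to 3000 when that argument is missing or not a valid u16.

const std = @import("std");

/// Hypothetical helper, not in the repository: returns the port given as the
/// first CLI argument, or 3000 if it is absent or not a valid u16.
fn portFromArgs(allocator: std.mem.Allocator) !u16 {
    var args = try std.process.argsWithAllocator(allocator);
    defer args.deinit();

    _ = args.next(); // skip argv[0], the program path
    const arg = args.next() orelse return 3000; // no port argument given
    return std.fmt.parseInt(u16, arg, 10) catch 3000; // non-numeric argument
}

Under that assumption, main() would obtain the port with a single call such as try portFromArgs(allocator) before configuring the listener.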