Mirror of https://github.com/garrytan/gstack.git (synced 2026-05-18 02:22:04 +08:00)
fix(browse): apply codex adversarial findings on the new lifecycle
Codex outside-voice review caught five real production-failure modes in
the v1.28.0.0 proxy/headed lifecycle. Fixed:
1) `browse disconnect` skip-graceful for proxy-only daemons
(browse/src/cli.ts). The graceful /command POST went out with stray
`domains,` shorthand and (even fixed) the server's disconnect handler
only tears down headed mode — proxy-only daemons returned 200 "Not
in headed mode" while leaving the bridge running. Now disconnect
short-circuits to force-cleanup for non-headed daemons, which kicks
process.on('exit') in server.ts to close the bridge + Xvfb.
2) sendCommand crash retry preserves --proxy / --headed
(browse/src/cli.ts). The ECONNRESET retry path called startServer()
with no extraEnv, silently dropping the proxied flags. A daemon that
died mid-command would silently restart in default direct/headless
mode and bypass the SOCKS bridge. Now reapplies BROWSE_PROXY_URL,
BROWSE_HEADED, and BROWSE_CONFIG_HASH from the resolved global flags.
3) `connect` honors --proxy (browse/src/cli.ts). The headed-mode
`connect` command built its own serverEnv that didn't include
BROWSE_PROXY_URL, so `browse --proxy <url> connect` launched headed
Chromium without the proxy. Now threads proxyUrl + configHash into
the connect serverEnv.
4) SOCKS5 bridge handles fragmented TCP frames
(browse/src/socks-bridge.ts). Previously used once('data') and
parsed each chunk as a complete SOCKS5 frame — TCP doesn't preserve
message boundaries and split greetings/CONNECT requests caused
intermittent handshake failures. Replaced with a single state
machine that buffers chunks and uses size predicates on the SOCKS5
header to know when a complete frame has arrived. Pauses the client
socket during upstream connect and replays any remainder bytes
into the upstream on success.
5) Xvfb cleanup-then-state-delete ordering
(browse/src/server.ts). emergencyCleanup() previously deleted the
state file BEFORE any Xvfb cleanup could read it, orphaning Xvfb
on uncaughtException / unhandledRejection. Now reads the state
file first, calls cleanupXvfb() (which validates cmdline +
start-time before kill), then deletes the state file.
Adds a regression test for #4: writes the SOCKS5 greeting + CONNECT
one byte at a time with 5ms ticks, asserts a clean round trip after
the fragmented handshake.
Codex's sixth finding (bridge advertises NO_AUTH on 127.0.0.1, so any
co-located process can use the authenticated upstream) is documented
as a known limitation — gstack's threat model assumes single-user
hosts. Adding bridge-side auth is a separate change.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -283,6 +283,87 @@ describe('startSocksBridge', () => {
|
||||
}
|
||||
});
|
||||
|
||||
test('handles SOCKS5 handshake split across multiple TCP packets (codex finding)', async () => {
|
||||
// TCP doesn't preserve message boundaries — production networks regularly
|
||||
// fragment small writes. This test simulates that by writing the greeting
|
||||
// and CONNECT request one byte at a time. If the bridge uses once('data')
|
||||
// and assumes each event is a complete frame, this test fails because
|
||||
// it parses the first byte as a frame.
|
||||
const echo = await startEcho();
|
||||
const upstream = await startMockUpstream({ expectedUser: 'u', expectedPass: 'p' });
|
||||
const bridge = await startSocksBridge({
|
||||
upstream: { host: '127.0.0.1', port: upstream.port, userId: 'u', password: 'p' },
|
||||
});
|
||||
|
||||
try {
|
||||
// Build the greeting + CONNECT request manually.
|
||||
const greeting = Buffer.from([0x05, 0x01, 0x00]);
|
||||
const hostBuf = Buffer.from(echo.host);
|
||||
const connect = Buffer.alloc(7 + hostBuf.length);
|
||||
connect[0] = 0x05; connect[1] = 0x01; connect[2] = 0x00; connect[3] = 0x03;
|
||||
connect[4] = hostBuf.length;
|
||||
hostBuf.copy(connect, 5);
|
||||
connect.writeUInt16BE(echo.port, 5 + hostBuf.length);
|
||||
|
||||
const sock = net.createConnection({ host: '127.0.0.1', port: bridge.port });
|
||||
await new Promise<void>((r, rej) => {
|
||||
sock.once('connect', () => r());
|
||||
sock.once('error', rej);
|
||||
});
|
||||
|
||||
// Persistent buffered reader. Using a single long-lived 'data'
|
||||
// listener avoids the bytes-dropped race that happens when you
|
||||
// attach `sock.once('data')`, get one event, and re-attach later —
|
||||
// any data arriving between those two attaches gets dropped because
|
||||
// the socket is in flowing mode without a listener.
|
||||
const inbox: Buffer[] = [];
|
||||
sock.on('data', (chunk) => inbox.push(chunk));
|
||||
const readAtLeast = async (n: number, timeoutMs = 2000): Promise<Buffer> => {
|
||||
const deadline = Date.now() + timeoutMs;
|
||||
while (Date.now() < deadline) {
|
||||
const total = inbox.reduce((s, b) => s + b.length, 0);
|
||||
if (total >= n) {
|
||||
const all = Buffer.concat(inbox);
|
||||
inbox.length = 0;
|
||||
if (all.length > n) inbox.push(all.subarray(n));
|
||||
return all.subarray(0, n);
|
||||
}
|
||||
await new Promise((r) => setTimeout(r, 10));
|
||||
}
|
||||
throw new Error(`timeout waiting for ${n} bytes (have ${inbox.reduce((s, b) => s + b.length, 0)})`);
|
||||
};
|
||||
|
||||
// Write greeting one byte at a time.
|
||||
for (let i = 0; i < greeting.length; i++) {
|
||||
sock.write(Buffer.from([greeting[i]]));
|
||||
await new Promise((r) => setTimeout(r, 5));
|
||||
}
|
||||
const greetingReply = await readAtLeast(2);
|
||||
expect(greetingReply[0]).toBe(0x05);
|
||||
expect(greetingReply[1]).toBe(0x00);
|
||||
|
||||
// Write CONNECT one byte at a time.
|
||||
for (let i = 0; i < connect.length; i++) {
|
||||
sock.write(Buffer.from([connect[i]]));
|
||||
await new Promise((r) => setTimeout(r, 5));
|
||||
}
|
||||
const connectReply = await readAtLeast(10);
|
||||
expect(connectReply[0]).toBe(0x05);
|
||||
expect(connectReply[1]).toBe(0x00);
|
||||
|
||||
// Round trip should still work after the fragmented handshake.
|
||||
const payload = Buffer.from('payload-after-split-handshake');
|
||||
sock.write(payload);
|
||||
const received = await readAtLeast(payload.length);
|
||||
expect(received.toString()).toBe(payload.toString());
|
||||
sock.destroy();
|
||||
} finally {
|
||||
await bridge.close();
|
||||
await upstream.close();
|
||||
await echo.close();
|
||||
}
|
||||
});
|
||||
|
||||
test('close() tears down listener and in-flight clients', async () => {
|
||||
const upstream = await startMockUpstream({ expectedUser: 'u', expectedPass: 'p' });
|
||||
const bridge = await startSocksBridge({
|
||||
|
||||
Reference in New Issue
Block a user