refactor with async/await

Trying to be more robust about error handling and failure.
Roman Shtylman 2016-07-09 17:06:13 -07:00
parent f12f1c81b3
commit a2a58f4c6f
10 changed files with 290 additions and 221 deletions
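The core of the change is the contract of next_socket in proxy.js: handlers used to receive (socket, done) and had to call done exactly once, while the refactor hands them just the socket and expects a promise, so failures funnel into a single .catch and the socket is recycled in .finally. A rough before/after sketch of that contract (handler bodies are illustrative, not from the diff):

// before: callback style — every call site is responsible for calling done() exactly once
client.next_socket(function (socket, done) {
    doWork(socket, function (err) {
        done();                      // forgetting this leaks the socket; calling it twice throws
    });
});

// after: promise style — a rejection lands in one .catch inside proxy.js,
// and the socket goes back to the pool in .finally
client.next_socket(async (socket) => {
    await doWork(socket);            // doWork assumed to return a promise (illustrative)
});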

.babelrc (new file, 6 lines)

@@ -0,0 +1,6 @@
{
"plugins": [
"transform-es2015-modules-commonjs",
"transform-async-to-generator",
]
}
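For context: these two plugins are what let the rest of the commit use import/export and async/await on Node 6.3, which supports neither natively. transform-es2015-modules-commonjs rewrites module syntax to require/module.exports, and transform-async-to-generator compiles async functions into generator-based code. Roughly the kind of source they target (names here are illustrative):

// written with the new syntax used across this commit...
export default async function firstFreeSocket(client) {
    const socket = await client.acquire();   // client.acquire() is a made-up promise API
    return socket;
}
// ...and emitted by the plugins as CommonJS plus a generator wrapped in a promise helper,
// which stock Node 6.3 can run.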

.npmrc (new file, 1 line)

@@ -0,0 +1 @@
save-exact = true

Dockerfile

@@ -1,13 +1,11 @@
FROM mhart/alpine-node:4.2.1
FROM mhart/alpine-node:6.3.0
RUN mkdir -p /app
WORKDIR /app
ADD package.json /app/
RUN apk add --update make git g++ python && \
npm install --production && \
apk del git make g++ python && \
RUN npm install --production && \
rm -rf /tmp/* /root/.npm /root/.node-gyp
ADD . /app

bin launcher script

@@ -1,12 +1,13 @@
#!/usr/bin/env node
require('stackup');
var log = require('bookrc');
var localenv = require('localenv');
var debug = require('debug')('localtunnel');
var optimist = require('optimist');
require('localenv');
require('babel-register');
var argv = optimist
const log = require('bookrc');
const debug = require('debug')('localtunnel');
const optimist = require('optimist');
const argv = optimist
.usage('Usage: $0 --port [num]')
.options('secure', {
default: false,
@@ -27,14 +28,18 @@ if (argv.help) {
process.exit();
}
var server = require('../server')({
const server = require('../server')({
max_tcp_sockets: argv['max-sockets'],
secure: argv.secure
});
server.listen(argv.port, function() {
server.listen(argv.port, () => {
debug('server listening on port: %d', server.address().port);
});
process.on('SIGINT', () => {
process.exit();
});
// vim: ft=javascript

lib/BindingAgent.js

@@ -1,6 +1,6 @@
var http = require('http');
var util = require('util');
var assert = require('assert');
import http from 'http';
import util from 'util';
import assert from 'assert';
// binding agent will return a given options.socket as the socket for the agent
// this is useful if you already have a socket established and want the request
@@ -20,4 +20,4 @@ function create_connection(port, host, options) {
return this.socket;
}
module.exports = BindingAgent;
export default BindingAgent;
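BindingAgent's whole job is to make http.request reuse a socket that already exists (the tunnel connection handed out by proxy.js) instead of opening a new one; server.js below passes it via the agent option. A small usage sketch along those lines (the requestOverSocket wrapper is mine, not from the repo):

import http from 'http';
import BindingAgent from './lib/BindingAgent';

// `socket` is an already-connected net.Socket, e.g. a tunnel socket from next_socket
function requestOverSocket(socket, path) {
    const agent = new BindingAgent({ socket });        // the agent hands back exactly this socket
    return new Promise((resolve, reject) => {
        const req = http.request({ agent, path, method: 'GET' }, resolve);
        req.on('error', reject);
        req.end();
    });
}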

lib/rand_id.js

@@ -1,11 +1,11 @@
// all url safe
// can't use uppercase because hostnames are lowercased
var chars = 'abcdefghijklmnopqrstuvwxyz';
const chars = 'abcdefghijklmnopqrstuvwxyz';
module.exports = function rand_id() {
var randomstring = '';
export default function rand_id() {
let randomstring = '';
for (var i=0; i<10; ++i) {
var rnum = Math.floor(Math.random() * chars.length);
const rnum = Math.floor(Math.random() * chars.length);
randomstring += chars[rnum];
}

package.json

@@ -6,24 +6,28 @@
"license": "MIT",
"repository": {
"type": "git",
"url": "git://github.com/shtylman/localtunnel-server.git"
"url": "git://github.com/localtunnel/server.git"
},
"dependencies": {
"babel-plugin-transform-async-to-generator": "6.8.0",
"babel-plugin-transform-es2015-modules-commonjs": "6.10.3",
"babel-polyfill": "6.9.1",
"babel-register": "6.9.0",
"bluebird": "3.4.1",
"book": "1.3.1",
"book-git": "0.0.2",
"book-raven": "1.1.0",
"book-raven": "1.2.0",
"bookrc": "0.0.1",
"debug": "2.2.0",
"express": "4.13.3",
"http-proxy": "1.12.0",
"express": "4.14.0",
"http-proxy": "1.14.0",
"localenv": "0.2.2",
"on-finished": "2.3.0",
"optimist": "0.6.1",
"stackup": "1.0.1",
"tldjs": "1.6.1"
"tldjs": "1.6.2"
},
"devDependencies": {
"mocha": "2.0.1",
"mocha": "2.5.3",
"localtunnel": "1.8.0",
"ws": "0.8.0"
},

proxy.js (267 lines changed)

@@ -1,28 +1,51 @@
var net = require('net');
var EventEmitter = require('events').EventEmitter;
import net from 'net';
import EventEmitter from 'events';
import log from 'bookrc';
import Debug from 'debug';
var log = require('bookrc');
var debug = require('debug')('localtunnel-server');
const debug = Debug('localtunnel:server');
var Proxy = function(opt, cb) {
const Proxy = function(opt) {
if (!(this instanceof Proxy)) {
return new Proxy(opt, cb);
return new Proxy(opt);
}
var self = this;
const self = this;
self.sockets = [];
self.waiting = [];
var id = opt.id;
self.id = opt.id;
// default max is 10
var max_tcp_sockets = opt.max_tcp_sockets || 10;
self.max_tcp_sockets = opt.max_tcp_sockets || 10;
// new tcp server to service requests for this client
var client_server = net.createServer();
self.server = net.createServer();
client_server.on('error', function(err) {
// track initial user connection setup
self.conn_timeout = undefined;
self.debug = Debug(`localtunnel:server:${self.id}`);
};
Proxy.prototype.__proto__ = EventEmitter.prototype;
Proxy.prototype.start = function(cb) {
const self = this;
const server = self.server;
if (self.started) {
cb(new Error('already started'));
return;
}
self.started = true;
server.on('close', self._cleanup.bind(self));
server.on('connection', self._handle_socket.bind(self));
server.on('error', function(err) {
// where do these errors come from?
// other side creates a connection and then is killed?
if (err.code == 'ECONNRESET' || err.code == 'ETIMEDOUT') {
return;
}
@@ -30,129 +53,121 @@ var Proxy = function(opt, cb) {
log.error(err);
});
// track initial user connection setup
var conn_timeout;
// user has 5 seconds to connect before their slot is given up
function maybe_tcp_close() {
clearTimeout(conn_timeout);
conn_timeout = setTimeout(function() {
// sometimes the server is already closed but the event has not fired?
try {
clearTimeout(conn_timeout);
client_server.close();
} catch (err) {
cleanup();
}
}, 5000);
}
maybe_tcp_close();
function cleanup() {
debug('closed tcp socket for client(%s)', id);
clearTimeout(conn_timeout);
// clear waiting by ending responses, (requests?)
self.waiting.forEach(function(waiting) {
waiting(null);
});
self.emit('end');
}
// no longer accepting connections for this id
client_server.on('close', cleanup);
// new tcp connection from lt client
client_server.on('connection', function(socket) {
// no more socket connections allowed
if (self.sockets.length >= max_tcp_sockets) {
return socket.end();
}
debug('new connection on port: %s', id);
// a single connection is enough to keep client id slot open
clearTimeout(conn_timeout);
socket.once('close', function(had_error) {
debug('client %s closed socket (error: %s)', id, had_error);
// what if socket was servicing a request at this time?
// then it will be put back in available after right?
// remove this socket
var idx = self.sockets.indexOf(socket);
if (idx >= 0) {
self.sockets.splice(idx, 1);
}
// need to track total sockets, not just active available
debug('remaining client sockets: %s', self.sockets.length);
// no more sockets for this ident
if (self.sockets.length === 0) {
debug('all client(%s) sockets disconnected', id);
maybe_tcp_close();
}
});
// close will be emitted after this
socket.on('error', function(err) {
// we don't log here to avoid logging crap for misbehaving clients
socket.destroy();
});
self.sockets.push(socket);
var wait_cb = self.waiting.shift();
if (wait_cb) {
debug('handling queued request');
self.next_socket(wait_cb);
}
});
client_server.listen(function() {
var port = client_server.address().port;
debug('tcp server listening on port: %d', port);
server.listen(function() {
const port = server.address().port;
self.debug('tcp server listening on port: %d', port);
cb(null, {
// port for lt client tcp connections
port: port,
// maximum number of tcp connections allowed by lt client
max_conn_count: max_tcp_sockets
max_conn_count: self.max_tcp_sockets
});
});
self._maybe_destroy();
};
Proxy.prototype.__proto__ = EventEmitter.prototype;
Proxy.prototype._maybe_destroy = function() {
const self = this;
Proxy.prototype.next_socket = function(cb) {
var self = this;
clearTimeout(self.conn_timeout);
self.conn_timeout = setTimeout(function() {
// sometimes the server is already closed but the event has not fired?
try {
clearTimeout(self.conn_timeout);
self.server.close();
}
catch (err) {
self._cleanup();
}
}, 5000);
}
// socket is a tcp connection back to the user hosting the site
var sock = self.sockets.shift();
// new socket connection from client for tunneling requests to client
Proxy.prototype._handle_socket = function(socket) {
const self = this;
// TODO how to handle queue?
// queue request
if (!sock) {
debug('no more client, queue callback');
return self.waiting.push(cb);
// no more socket connections allowed
if (self.sockets.length >= self.max_tcp_sockets) {
return socket.end();
}
var done_called = false;
// put the socket back
function done() {
if (done_called) {
throw new Error('done called multiple times');
self.debug('new connection from: %s:%s', socket.address().address, socket.address().port);
// a single connection is enough to keep client id slot open
clearTimeout(self.conn_timeout);
socket.once('close', function(had_error) {
self.debug('closed socket (error: %s)', had_error);
// what if socket was servicing a request at this time?
// then it will be put back in available after right?
// we need a list of sockets servicing requests?
// remove this socket
const idx = self.sockets.indexOf(socket);
if (idx >= 0) {
self.sockets.splice(idx, 1);
}
done_called = true;
// need to track total sockets, not just active available
self.debug('remaining client sockets: %s', self.sockets.length);
// no more sockets for this ident
if (self.sockets.length === 0) {
self.debug('all sockets disconnected');
self._maybe_destroy();
}
});
// close will be emitted after this
socket.on('error', function(err) {
// we don't log here to avoid logging crap for misbehaving clients
socket.destroy();
});
self.sockets.push(socket);
const wait_cb = self.waiting.shift();
if (wait_cb) {
self.debug('handling queued request');
self.next_socket(wait_cb);
}
};
Proxy.prototype._cleanup = function() {
const self = this;
self.debug('closed tcp socket for client(%s)', self.id);
clearTimeout(self.conn_timeout);
// clear waiting by ending responses, (requests?)
self.waiting.forEach(handler => handler(null));
self.emit('end');
};
Proxy.prototype.next_socket = function(handler) {
const self = this;
// socket is a tcp connection back to the user hosting the site
const sock = self.sockets.shift();
if (!sock) {
self.debug('no more client, queue callback');
self.waiting.push(handler);
return;
}
self.debug('processing request');
handler(sock)
.catch((err) => {
log.error(err);
})
.finally(() => {
if (!sock.destroyed) {
debug('retuning socket');
self.debug('retuning socket');
self.sockets.push(sock);
}
@@ -161,19 +176,17 @@ Proxy.prototype.next_socket = function(cb) {
return;
}
var wait = self.waiting.shift();
debug('processing queued cb');
const wait = self.waiting.shift();
self.debug('processing queued cb');
if (wait) {
return self.next_socket(cb);
self.next_socket(cb);
return;
}
};
debug('processing request');
cb(sock, done);
});
};
Proxy.prototype._done = function() {
var self = this;
const self = this;
};
module.exports = Proxy;
export default Proxy;
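Summary of the new Proxy surface as refactored here: the constructor only sets state, start(cb) brings up the per-client TCP server and reports { port, max_conn_count }, _handle_socket pools incoming tunnel sockets, _cleanup emits 'end', and next_socket(handler) now expects a promise-returning handler, logging rejections and recycling the socket in .finally. A rough caller-side sketch under those assumptions:

import Promise from 'bluebird';   // assumption: mirrors server.js, so async functions in this module
                                  // compile to bluebird promises, which have the .finally proxy.js calls
import Proxy from './proxy';

const client = Proxy({ id: 'abcd123456', max_tcp_sockets: 10 });

client.start((err, info) => {
    if (err) throw err;
    // info.port: where the lt client should connect; info.max_conn_count: its socket cap
    console.log('client tcp server listening on %d', info.port);
});

client.on('end', () => {
    // every tunnel socket is gone and the 5 second grace timer fired; forget this client
});

// for each incoming HTTP request routed to this client:
client.next_socket(async (socket) => {
    // talk to the tunnelled client over `socket`; once this resolves,
    // proxy.js pushes the socket back into the pool (unless it was destroyed)
});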

server.js (172 lines changed)

@@ -1,14 +1,19 @@
var log = require('bookrc');
var express = require('express');
var tldjs = require('tldjs');
var on_finished = require('on-finished');
var debug = require('debug')('localtunnel-server');
var http_proxy = require('http-proxy');
var http = require('http');
import log from 'bookrc';
import express from 'express';
import tldjs from 'tldjs';
import on_finished from 'on-finished';
import Debug from 'debug';
import http_proxy from 'http-proxy';
import http from 'http';
import Promise from 'bluebird';
var BindingAgent = require('./lib/BindingAgent');
import Proxy from './proxy';
import rand_id from './lib/rand_id';
import BindingAgent from './lib/BindingAgent';
var proxy = http_proxy.createProxyServer({
const debug = Debug('localtunnel:server');
const proxy = http_proxy.createProxyServer({
target: 'http://localtunnel.github.io'
});
@@ -23,51 +28,54 @@ proxy.on('proxyReq', function(proxyReq, req, res, options) {
proxyReq.setHeader('host', 'localtunnel.github.io');
});
var Proxy = require('./proxy');
var rand_id = require('./lib/rand_id');
var PRODUCTION = process.env.NODE_ENV === 'production';
const PRODUCTION = process.env.NODE_ENV === 'production';
// id -> client http server
var clients = Object.create(null);
const clients = Object.create(null);
// proxy statistics
var stats = {
const stats = {
tunnels: 0
};
// handle proxying a request to a client
// will wait for a tunnel socket to become available
function maybe_bounce(req, res, sock, head) {
// without a hostname, we won't know who the request is for
var hostname = req.headers.host;
const hostname = req.headers.host;
if (!hostname) {
return false;
}
var subdomain = tldjs.getSubdomain(hostname);
const subdomain = tldjs.getSubdomain(hostname);
if (!subdomain) {
return false;
}
var client_id = subdomain;
var client = clients[client_id];
const client = clients[subdomain];
// no such subdomain
// we use 502 error to the client to signify we can't service the request
if (!client) {
res.statusCode = 502;
res.end('localtunnel error: no active client for \'' + client_id + '\'');
req.connection.destroy();
if (res) {
res.statusCode = 502;
res.end(`no active client for '${subdomain}'`);
req.connection.destroy();
}
else if (sock) {
sock.destroy();
}
return true;
}
var finished = false;
let finished = false;
if (sock) {
sock.once('end', function() {
finished = true;
});
}
if (res) {
else if (res) {
// flag if we already finished before we get a socket
// we can't respond to these requests
on_finished(res, function(err) {
@@ -75,16 +83,19 @@ function maybe_bounce(req, res, sock, head) {
req.connection.destroy();
});
}
// not something we are expecting, need a sock or a res
else {
req.connection.destroy();
return true;
}
// TODO add a timeout, if we run out of sockets, then just 502
// get client port
client.next_socket(function(socket, done) {
done = done || function() {};
client.next_socket(async (socket) => {
// the request already finished or client disconnected
if (finished) {
return done();
return;
}
// happens when client upstream is disconnected
@@ -103,9 +114,9 @@
// and directly pipe the socket data
// avoids having to rebuild the request and handle upgrades via the http client
if (res === null) {
var arr = [req.method + ' ' + req.url + ' HTTP/' + req.httpVersion];
for (var i=0 ; i < (req.rawHeaders.length-1) ; i+=2) {
arr.push(req.rawHeaders[i] + ': ' + req.rawHeaders[i+1]);
const arr = [`${req.method} ${req.url} HTTP/${req.httpVersion}`];
for (let i=0 ; i < (req.rawHeaders.length-1) ; i+=2) {
arr.push(`${req.rawHeaders[i]}: ${req.rawHeaders[i+1]}`);
}
arr.push('');
@@ -113,40 +124,55 @@
socket.pipe(sock).pipe(socket);
socket.write(arr.join('\r\n'));
socket.once('end', function() {
done();
await new Promise((resolve) => {
socket.once('end', resolve);
});
return;
}
var agent = new BindingAgent({
// regular http request
const agent = new BindingAgent({
socket: socket
});
var opt = {
const opt = {
path: req.url,
agent: agent,
method: req.method,
headers: req.headers
};
var client_req = http.request(opt, function(client_res) {
// write response code and headers
res.writeHead(client_res.statusCode, client_res.headers);
client_res.pipe(res);
on_finished(client_res, function(err) {
done();
});
});
await new Promise((resolve) => {
// what if error making this request?
const client_req = http.request(opt, function(client_res) {
// write response code and headers
res.writeHead(client_res.statusCode, client_res.headers);
req.pipe(client_req);
client_res.pipe(res);
on_finished(client_res, function(err) {
resolve();
});
});
// happens if the other end dies while we are making the request
// so we just end the req and move on
// we can't really do more with the response here because headers
// may already be sent
client_req.on('error', (err) => {
req.connection.destroy();
});
req.pipe(client_req);
});
});
return true;
}
// create a new tunnel with `id`
function new_client(id, opt, cb) {
// can't ask for id already is use
@@ -155,43 +181,49 @@ function new_client(id, opt, cb) {
id = rand_id();
}
var popt = {
const popt = {
id: id,
max_tcp_sockets: opt.max_tcp_sockets
};
var client = Proxy(popt, function(err, info) {
if (err) {
return cb(err);
}
const client = Proxy(popt);
++stats.tunnels;
clients[id] = client;
info.id = id;
cb(err, info);
});
// add to clients map immediately
// avoiding races with other clients requesting same id
clients[id] = client;
client.on('end', function() {
--stats.tunnels;
delete clients[id];
});
client.start((err, info) => {
if (err) {
delete clients[id];
cb(err);
return;
}
++stats.tunnels;
info.id = id;
cb(err, info);
});
}
module.exports = function(opt) {
opt = opt || {};
var schema = opt.secure ? 'https' : 'http';
const schema = opt.secure ? 'https' : 'http';
var app = express();
const app = express();
app.get('/', function(req, res, next) {
if (req.query['new'] === undefined) {
return next();
}
var req_id = rand_id();
const req_id = rand_id();
debug('making new client with id %s', req_id);
new_client(req_id, opt, function(err, info) {
if (err) {
@@ -199,7 +231,7 @@ module.exports = function(opt) {
return res.end(err.message);
}
var url = schema + '://' + req_id + '.' + req.headers.host;
const url = schema + '://' + req_id + '.' + req.headers.host;
info.url = url;
res.json(info);
});
@@ -217,12 +249,18 @@ module.exports = function(opt) {
proxy.web(req, res);
});
app.get('/api/status', function(req, res, next) {
res.json({
tunnels: stats.tunnels,
});
});
app.get('/:req_id', function(req, res, next) {
var req_id = req.params.req_id;
const req_id = req.params.req_id;
// limit requested hostnames to 63 characters
if (! /^[a-z0-9]{4,63}$/.test(req_id)) {
var err = new Error('Invalid subdomain. Subdomains must be lowercase and between 4 and 63 alphanumeric characters.');
const err = new Error('Invalid subdomain. Subdomains must be lowercase and between 4 and 63 alphanumeric characters.');
err.statusCode = 403;
return next(err);
}
@@ -233,7 +271,7 @@ module.exports = function(opt) {
return next(err);
}
var url = schema + '://' + req_id + '.' + req.headers.host;
const url = schema + '://' + req_id + '.' + req.headers.host;
info.url = url;
res.json(info);
});
@@ -241,13 +279,13 @@
});
app.use(function(err, req, res, next) {
var status = err.statusCode || err.status || 500;
const status = err.statusCode || err.status || 500;
res.status(status).json({
message: err.message
});
});
var server = http.createServer();
const server = http.createServer();
server.on('request', function(req, res) {
debug('request %s', req.url);
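The other half of the async rewrite in maybe_bounce covers upgrade requests where there is no res: the handler splices the user socket and the tunnel socket together, replays the reconstructed request head, and then awaits the tunnel socket's 'end' event, which is what keeps the socket checked out for the lifetime of the connection. The same wrap-an-event-in-a-Promise move in isolation (helper and names are mine, not from the diff):

// helper (not in the diff): turn a one-shot event into something awaitable
function onceEvent(emitter, name) {
    return new Promise((resolve) => emitter.once(name, resolve));
}

// shape of the raw-socket branch inside the async next_socket handler
async function tunnelRaw(userSock, tunnelSock, headText) {
    tunnelSock.pipe(userSock).pipe(tunnelSock);   // splice user <-> tunnel
    tunnelSock.write(headText);                   // replay the reconstructed request head
    await onceEvent(tunnelSock, 'end');           // resolve (and release the socket) when the tunnel side ends
}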

test/mocha.opts (new file, 4 lines)

@@ -0,0 +1,4 @@
--check-leaks
--reporter spec
--ui qunit
--compilers js:babel-register
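--compilers js:babel-register runs the test files through the same .babelrc, so they can use import and async/await directly, and --ui qunit gives flat suite()/test() blocks. A hypothetical test in that style (the actual test files are not part of this view; the '../lib/rand_id' path assumes tests live under test/):

import assert from 'assert';
import rand_id from '../lib/rand_id';

suite('rand_id');

test('returns a 10 character lowercase id', () => {
    assert.ok(/^[a-z]{10}$/.test(rand_id()));
});

test('async tests resolve under the async-to-generator transform', async () => {
    const value = await Promise.resolve(42);
    assert.equal(value, 42);
});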