AUTH-2105: Adds support for local forwarding. Refactor auditlogger creation.
AUTH-2088: Adds dynamic destination routing
vendor/golang.org/x/net/http2/hpack/encode.go (generated, vendored): 2 lines changed

@@ -150,7 +150,7 @@ func appendIndexed(dst []byte, i uint64) []byte {
 // extended buffer.
 //
 // If f.Sensitive is true, "Never Indexed" representation is used. If
-// f.Sensitive is false and indexing is true, "Inremental Indexing"
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
 // representation is used.
 func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
 	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
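The comment being fixed describes HPACK's header representations. For reference, a minimal sketch of how a caller selects between them through the public hpack package (the header names and values are made up for illustration):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// An ordinary field may be added to the dynamic table
	// ("Incremental Indexing" representation).
	enc.WriteField(hpack.HeaderField{Name: "x-request-id", Value: "abc123"})

	// Sensitive: true forces the "Never Indexed" representation so that
	// intermediaries are told not to cache the value.
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "token", Sensitive: true})

	fmt.Printf("encoded %d bytes of HPACK\n", buf.Len())
}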
vendor/golang.org/x/net/http2/server.go (generated, vendored): 76 lines changed

@@ -52,10 +52,11 @@ import (
 )
 
 const (
-	prefaceTimeout        = 10 * time.Second
-	firstSettingsTimeout  = 2 * time.Second // should be in-flight with preface anyway
-	handlerChunkWriteSize = 4 << 10
-	defaultMaxStreams     = 250 // TODO: make this 100 as the GFE seems to?
+	prefaceTimeout         = 10 * time.Second
+	firstSettingsTimeout   = 2 * time.Second // should be in-flight with preface anyway
+	handlerChunkWriteSize  = 4 << 10
+	defaultMaxStreams      = 250 // TODO: make this 100 as the GFE seems to?
+	maxQueuedControlFrames = 10000
 )
 
 var (
@@ -163,6 +164,15 @@ func (s *Server) maxConcurrentStreams() uint32 {
 	return defaultMaxStreams
 }
 
+// maxQueuedControlFrames is the maximum number of control frames like
+// SETTINGS, PING and RST_STREAM that will be queued for writing before
+// the connection is closed to prevent memory exhaustion attacks.
+func (s *Server) maxQueuedControlFrames() int {
+	// TODO: if anybody asks, add a Server field, and remember to define the
+	// behavior of negative values.
+	return maxQueuedControlFrames
+}
+
 type serverInternalState struct {
 	mu          sync.Mutex
 	activeConns map[*serverConn]struct{}
@@ -273,7 +283,20 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 		if testHookOnConn != nil {
 			testHookOnConn()
 		}
+		// The TLSNextProto interface predates contexts, so
+		// the net/http package passes down its per-connection
+		// base context via an exported but unadvertised
+		// method on the Handler. This is for internal
+		// net/http<=>http2 use only.
+		var ctx context.Context
+		type baseContexter interface {
+			BaseContext() context.Context
+		}
+		if bc, ok := h.(baseContexter); ok {
+			ctx = bc.BaseContext()
+		}
 		conf.ServeConn(c, &ServeConnOpts{
+			Context:    ctx,
 			Handler:    h,
 			BaseConfig: hs,
 		})
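This hunk is what lets an http.Server's BaseContext flow into HTTP/2 connections negotiated via TLS ALPN. A sketch of how the plumbing is exercised from user code (the context key and certificate paths are placeholders, not part of this change):

package main

import (
	"context"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

type ctxKey string

func main() {
	srv := &http.Server{
		Addr: ":8443",
		// BaseContext is consulted by net/http; with the change above the same
		// context now also reaches handlers served over HTTP/2.
		BaseContext: func(l net.Listener) context.Context {
			return context.WithValue(context.Background(), ctxKey("listener"), l.Addr().String())
		},
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// r.Context() is derived from the base context above.
			w.Write([]byte("hello"))
		}),
	}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder cert/key paths
}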
@@ -284,6 +307,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
 
 // ServeConnOpts are options for the Server.ServeConn method.
 type ServeConnOpts struct {
+	// Context is the base context to use.
+	// If nil, context.Background is used.
+	Context context.Context
+
 	// BaseConfig optionally sets the base configuration
 	// for values. If nil, defaults are used.
 	BaseConfig *http.Server
@@ -294,6 +321,13 @@ type ServeConnOpts struct {
 	Handler http.Handler
 }
 
+func (o *ServeConnOpts) context() context.Context {
+	if o != nil && o.Context != nil {
+		return o.Context
+	}
+	return context.Background()
+}
+
 func (o *ServeConnOpts) baseConfig() *http.Server {
 	if o != nil && o.BaseConfig != nil {
 		return o.BaseConfig
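Callers that drive ServeConn directly can now supply their own base context through the new field. A minimal sketch of that usage over a plain TCP listener (the listener address and handler are illustrative):

package main

import (
	"context"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// r.Context() is now derived from the Context passed below.
		w.Write([]byte("ok"))
	})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	h2 := &http2.Server{}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go h2.ServeConn(conn, &http2.ServeConnOpts{
			Context: ctx, // added by this change; context.Background() is used when nil
			Handler: mux,
		})
	}
}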
@@ -439,7 +473,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 }
 
 func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
-	ctx, cancel = context.WithCancel(context.Background())
+	ctx, cancel = context.WithCancel(opts.context())
 	ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
 	if hs := opts.baseConfig(); hs != nil {
 		ctx = context.WithValue(ctx, http.ServerContextKey, hs)
@@ -482,6 +516,7 @@ type serverConn struct {
 	sawFirstSettings      bool // got the initial SETTINGS frame after the preface
 	needToSendSettingsAck bool
 	unackedSettings       int    // how many SETTINGS have we sent without ACKs?
+	queuedControlFrames   int    // control frames in the writeSched queue
 	clientMaxStreams      uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
 	advMaxStreams         uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
 	curClientStreams      uint32 // number of open streams initiated by the client
@@ -870,6 +905,14 @@ func (sc *serverConn) serve() {
 			}
 		}
 
+		// If the peer is causing us to generate a lot of control frames,
+		// but not reading them from us, assume they are trying to make us
+		// run out of memory.
+		if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
+			sc.vlogf("http2: too many control frames in send queue, closing connection")
+			return
+		}
+
 		// Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
 		// with no error code (graceful shutdown), don't start the timer until
 		// all open streams have been completed.
@@ -1069,6 +1112,14 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
 	}
 
 	if !ignoreWrite {
+		if wr.isControl() {
+			sc.queuedControlFrames++
+			// For extra safety, detect wraparounds, which should not happen,
+			// and pull the plug.
+			if sc.queuedControlFrames < 0 {
+				sc.conn.Close()
+			}
+		}
 		sc.writeSched.Push(wr)
 	}
 	sc.scheduleFrameWrite()
@@ -1186,10 +1237,8 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
 // If a frame is already being written, nothing happens. This will be called again
 // when the frame is done being written.
 //
-// If a frame isn't being written we need to send one, the best frame
-// to send is selected, preferring first things that aren't
-// stream-specific (e.g. ACKing settings), and then finding the
-// highest priority stream.
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected by writeSched.
 //
 // If a frame isn't being written and there's nothing else to send, we
 // flush the write buffer.
@@ -1217,6 +1266,9 @@ func (sc *serverConn) scheduleFrameWrite() {
 		}
 		if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
 			if wr, ok := sc.writeSched.Pop(); ok {
+				if wr.isControl() {
+					sc.queuedControlFrames--
+				}
 				sc.startFrameWrite(wr)
 				continue
 			}
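Taken together, the serve, writeFrame and scheduleFrameWrite hunks implement a bounded counter: queuing a control frame increments it, popping one for writing decrements it, and the serve loop closes the connection once the count exceeds maxQueuedControlFrames. A simplified standalone sketch of that accounting (this is not the package's internal API):

package main

import "fmt"

// conn models only the counter the diff adds to serverConn.
type conn struct {
	queuedControlFrames int
	maxQueued           int
}

// enqueue mirrors writeFrame: control frames are counted as they are pushed.
func (c *conn) enqueue(isControl bool) {
	if isControl {
		c.queuedControlFrames++
	}
}

// dequeue mirrors scheduleFrameWrite: the count drops when a frame is popped for writing.
func (c *conn) dequeue(isControl bool) {
	if isControl {
		c.queuedControlFrames--
	}
}

// overLimit mirrors the check in serve: too many unsent control frames means
// the peer is probably not reading, so the connection should be torn down.
func (c *conn) overLimit() bool {
	return c.queuedControlFrames > c.maxQueued
}

func main() {
	c := &conn{maxQueued: 3}
	for i := 0; i < 5; i++ {
		c.enqueue(true) // e.g. a flood of PING or SETTINGS responses
	}
	fmt.Println("over limit:", c.overLimit()) // true: the peer never drained the queue
}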
@@ -1509,6 +1561,8 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 	if err := f.ForeachSetting(sc.processSetting); err != nil {
 		return err
 	}
+	// TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
+	// acknowledged individually, even if multiple are received before the ACK.
 	sc.needToSendSettingsAck = true
 	sc.scheduleFrameWrite()
 	return nil
@@ -2470,7 +2524,7 @@ const TrailerPrefix = "Trailer:"
 // trailers. That worked for a while, until we found the first major
 // user of Trailers in the wild: gRPC (using them only over http2),
 // and gRPC libraries permit setting trailers mid-stream without
-// predeclarnig them. So: change of plans. We still permit the old
+// predeclaring them. So: change of plans. We still permit the old
 // way, but we also permit this hack: if a Header() key begins with
 // "Trailer:", the suffix of that key is a Trailer. Because ':' is an
 // invalid token byte anyway, there is no ambiguity. (And it's already
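The comment describes the TrailerPrefix escape hatch for trailers that were never predeclared. A hedged sketch of a handler using it (the trailer name, value, and address are made up; net/http exposes the same prefix for HTTP/1.1 responses):

package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

func handler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("response body"))

	// Keys beginning with http2.TrailerPrefix ("Trailer:") have the prefix
	// stripped and are sent as trailers when the handler returns, even though
	// they were never listed in a "Trailer" header.
	w.Header().Set(http2.TrailerPrefix+"X-Body-Checksum", "abc123")
}

func main() {
	http.ListenAndServe(":8080", http.HandlerFunc(handler))
}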
@@ -2770,7 +2824,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
 	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
 	// is in either the "open" or "half-closed (remote)" state.
 	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
-		// responseWriter.Push checks that the stream is peer-initiaed.
+		// responseWriter.Push checks that the stream is peer-initiated.
 		msg.done <- errStreamClosed
 		return
 	}
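startPush is reached through the standard http.Pusher interface; a brief sketch of the caller side, with an illustrative pushed asset path and placeholder certificate files:

package main

import (
	"io"
	"log"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Push is only legal while the peer-initiated stream is still open or
	// half-closed (remote); otherwise an error comes back here.
	if pusher, ok := w.(http.Pusher); ok {
		if err := pusher.Push("/static/app.css", nil); err != nil {
			log.Printf("push failed: %v", err)
		}
	}
	io.WriteString(w, `<html><head><link rel="stylesheet" href="/static/app.css"></head></html>`)
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)) // placeholder cert/key paths
}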
vendor/golang.org/x/net/http2/transport.go (generated, vendored): 39 lines changed

@@ -28,6 +28,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"golang.org/x/net/http/httpguts"
@@ -199,6 +200,7 @@ type ClientConn struct {
 	t         *Transport
 	tconn     net.Conn             // usually *tls.Conn, except specialized impls
 	tlsState  *tls.ConnectionState // nil only for specialized impls
+	reused    uint32               // whether conn is being reused; atomic
 	singleUse bool                 // whether being used for a single http.Request
 
 	// readLoop goroutine fields:
@@ -440,7 +442,8 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
 			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
 			return nil, err
 		}
-		traceGotConn(req, cc)
+		reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+		traceGotConn(req, cc, reused)
 		res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
 		if err != nil && retry <= 6 {
 			if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
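The new reused field is a use-it-once atomic flag: the first CompareAndSwap from 0 to 1 succeeds, so only the first request on a connection reports reused == false. A minimal sketch of the idiom outside the package (the type name is made up):

package main

import (
	"fmt"
	"sync/atomic"
)

// connState stands in for ClientConn in this sketch.
type connState struct {
	reused uint32
}

// markUsed reports whether the connection had already been used: the first
// caller wins the CAS and sees false, every later caller sees true.
func (c *connState) markUsed() (reused bool) {
	return !atomic.CompareAndSwapUint32(&c.reused, 0, 1)
}

func main() {
	c := &connState{}
	fmt.Println(c.markUsed()) // false: first request on this connection
	fmt.Println(c.markUsed()) // true: subsequent requests reuse it
}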
@@ -989,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
 		req.Method != "HEAD" {
 		// Request gzip only, not deflate. Deflate is ambiguous and
 		// not as universally supported anyway.
-		// See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+		// See: https://zlib.net/zlib_faq.html#faq39
 		//
 		// Note that we don't request this for HEAD requests,
 		// due to a bug in nginx:
@@ -1213,6 +1216,8 @@ var (
 
 	// abort request body write, but send stream reset of cancel.
 	errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+
+	errReqBodyTooLong = errors.New("http2: request body larger than specified content length")
 )
 
 func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
@@ -1235,10 +1240,32 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
 
 	req := cs.req
 	hasTrailers := req.Trailer != nil
+	remainLen := actualContentLength(req)
+	hasContentLen := remainLen != -1
 
 	var sawEOF bool
 	for !sawEOF {
-		n, err := body.Read(buf)
+		n, err := body.Read(buf[:len(buf)-1])
+		if hasContentLen {
+			remainLen -= int64(n)
+			if remainLen == 0 && err == nil {
+				// The request body's Content-Length was predeclared and
+				// we just finished reading it all, but the underlying io.Reader
+				// returned the final chunk with a nil error (which is one of
+				// the two valid things a Reader can do at EOF). Because we'd prefer
+				// to send the END_STREAM bit early, double-check that we're actually
+				// at EOF. Subsequent reads should return (0, EOF) at this point.
+				// If either value is different, we return an error in one of two ways below.
+				var n1 int
+				n1, err = body.Read(buf[n:])
+				remainLen -= int64(n1)
+			}
+			if remainLen < 0 {
+				err = errReqBodyTooLong
+				cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+				return err
+			}
+		}
 		if err == io.EOF {
 			sawEOF = true
 			err = nil
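This hunk enforces the declared Content-Length while writing a request body: once the declared bytes are consumed but the reader has not yet returned EOF, one extra Read confirms EOF, and any surplus byte aborts the stream with errReqBodyTooLong. A standalone sketch of the same guard under those assumptions (the helper name is made up):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

var errBodyTooLong = errors.New("body larger than declared content length")

// readDeclared reads a body whose length was declared up front and fails as
// soon as the reader yields more bytes than declared, mirroring the check the
// diff adds to writeRequestBody.
func readDeclared(r io.Reader, declared int64) ([]byte, error) {
	buf := make([]byte, 4<<10)
	var out []byte
	remain := declared
	for {
		// Read one byte less than the buffer so a too-long body still has room
		// to reveal itself on the follow-up read below.
		n, err := r.Read(buf[:len(buf)-1])
		out = append(out, buf[:n]...)
		remain -= int64(n)
		if remain == 0 && err == nil {
			// Declared length reached with a nil error: double-check that the
			// next read really reports EOF before trusting the declaration.
			var n1 int
			n1, err = r.Read(buf[n:])
			out = append(out, buf[n:n+n1]...)
			remain -= int64(n1)
		}
		if remain < 0 {
			return nil, errBodyTooLong
		}
		if err == io.EOF {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
	}
}

func main() {
	_, err := readDeclared(strings.NewReader("exactly10b"), 10)
	fmt.Println(err) // <nil>
	_, err = readDeclared(strings.NewReader("way more than ten bytes"), 10)
	fmt.Println(err) // body larger than declared content length
}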
@@ -2559,15 +2586,15 @@ func traceGetConn(req *http.Request, hostPort string) {
 	trace.GetConn(hostPort)
 }
 
-func traceGotConn(req *http.Request, cc *ClientConn) {
+func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
 	trace := httptrace.ContextClientTrace(req.Context())
 	if trace == nil || trace.GotConn == nil {
 		return
 	}
 	ci := httptrace.GotConnInfo{Conn: cc.tconn}
+	ci.Reused = reused
 	cc.mu.Lock()
-	ci.Reused = cc.nextStreamID > 1
-	ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+	ci.WasIdle = len(cc.streams) == 0 && reused
 	if ci.WasIdle && !cc.lastActive.IsZero() {
 		ci.IdleTime = time.Now().Sub(cc.lastActive)
 	}
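These values surface to callers through net/http/httptrace; a small sketch that observes them (the URL is a placeholder):

package main

import (
	"log"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			// With this change, Reused reflects the per-connection atomic flag
			// rather than an inspection of nextStreamID under the lock.
			log.Printf("reused=%v wasIdle=%v idleTime=%v", info.Reused, info.WasIdle, info.IdleTime)
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}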
vendor/golang.org/x/net/http2/writesched.go (generated, vendored): 8 lines changed

@@ -32,7 +32,7 @@ type WriteScheduler interface {
 
 	// Pop dequeues the next frame to write. Returns false if no frames can
 	// be written. Frames with a given wr.StreamID() are Pop'd in the same
-	// order they are Push'd.
+	// order they are Push'd. No frames should be discarded except by CloseStream.
 	Pop() (wr FrameWriteRequest, ok bool)
 }
 
@@ -76,6 +76,12 @@ func (wr FrameWriteRequest) StreamID() uint32 {
 	return wr.stream.id
 }
 
+// isControl reports whether wr is a control frame for MaxQueuedControlFrames
+// purposes. That includes non-stream frames and RST_STREAM frames.
+func (wr FrameWriteRequest) isControl() bool {
+	return wr.stream == nil
+}
+
 // DataSize returns the number of flow control bytes that must be consumed
 // to write this entire frame. This is 0 for non-DATA frames.
 func (wr FrameWriteRequest) DataSize() int {
vendor/golang.org/x/net/http2/writesched_priority.go (generated, vendored): 2 lines changed

@@ -149,7 +149,7 @@ func (n *priorityNode) addBytes(b int64) {
 }
 
 // walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this funcion returns true and the
+// with a non-empty write queue. When f returns true, this function returns true and the
 // walk halts. tmp is used as scratch space for sorting.
 //
 // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
vendor/golang.org/x/net/http2/writesched_random.go (generated, vendored): 9 lines changed

@@ -19,7 +19,8 @@ type randomWriteScheduler struct {
 	zero writeQueue
 
 	// sq contains the stream-specific queues, keyed by stream ID.
-	// When a stream is idle or closed, it's deleted from the map.
+	// When a stream is idle, closed, or emptied, it's deleted
+	// from the map.
 	sq map[uint32]*writeQueue
 
 	// pool of empty queues for reuse.
@@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
 		return ws.zero.shift(), true
 	}
 	// Iterate over all non-idle streams until finding one that can be consumed.
-	for _, q := range ws.sq {
+	for streamID, q := range ws.sq {
 		if wr, ok := q.consume(math.MaxInt32); ok {
+			if q.empty() {
+				delete(ws.sq, streamID)
+				ws.queuePool.put(q)
+			}
 			return wr, true
 		}
 	}
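Deleting the current entry while ranging over a Go map is well defined, which is what lets Pop reclaim a per-stream queue the moment it empties. A tiny sketch of the pattern (the queue contents are made up):

package main

import "fmt"

func main() {
	queues := map[uint32][]string{
		1: {"HEADERS", "DATA"},
		3: {},
		5: {"DATA"},
	}
	for id, q := range queues {
		if len(q) == 0 {
			// Removing the entry currently being visited is allowed; the
			// iteration simply will not produce it again.
			delete(queues, id)
		}
	}
	fmt.Println(len(queues)) // 2
}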