TUN-3863: Consolidate header handling logic in the connection package; move header definitions from h2mux to the packages that manage them; clean up header conversions

All header transformation code from h2mux has been consolidated in the connection package since it's used by both h2mux and http2 logic.
Exported the headers used in proxying between the edge and cloudflared so they can be shared by the tunnel service on the edge.
Moved access-related headers to the packages whose code sets or uses them.
Removed tunnel hostname tracking from h2mux since it wasn't used by anything. We will continue to set the tunnel hostname header from the edge for backward compatibility, but it's no longer used by cloudflared.
Moved bastion-related logic into the carrier package and untangled dependencies between the carrier, origin, and websocket packages.
Igor Postelnik
2021-03-25 23:04:56 -05:00
parent ebf5292bf9
commit 8ca0d86c85
29 changed files with 541 additions and 713 deletions

retry/backoffhandler.go

@@ -0,0 +1,118 @@
package retry
import (
"context"
"math/rand"
"time"
)
// Redeclare time functions so they can be overridden in tests.
type clock struct {
Now func() time.Time
After func(d time.Duration) <-chan time.Time
}
var Clock = clock{
Now: time.Now,
After: time.After,
}
// BackoffHandler manages exponential backoff and limits the maximum number of retries.
// The base time period is 1 second, doubling with each retry.
// After initial success, a grace period can be set to reset the backoff timer if
// a connection is maintained successfully for a long enough period. The base grace period
// is 2 seconds, doubling with each retry.
type BackoffHandler struct {
// MaxRetries sets the maximum number of retries to perform. The default value
// of 0 disables retry completely.
MaxRetries uint
// RetryForever caps the exponential backoff period according to MaxRetries
// but allows you to retry indefinitely.
RetryForever bool
// BaseTime sets the initial backoff period.
BaseTime time.Duration
retries uint
resetDeadline time.Time
}
func (b BackoffHandler) GetMaxBackoffDuration(ctx context.Context) (time.Duration, bool) {
// Follows the same logic as Backoff, but without mutating the receiver.
// This select has to happen first to reflect the actual behaviour of the Backoff function.
select {
case <-ctx.Done():
return time.Duration(0), false
default:
}
if !b.resetDeadline.IsZero() && Clock.Now().After(b.resetDeadline) {
// b.retries would be set to 0 at this point
return time.Second, true
}
if b.retries >= b.MaxRetries && !b.RetryForever {
return time.Duration(0), false
}
maxTimeToWait := b.GetBaseTime() * 1 << (b.retries + 1)
return maxTimeToWait, true
}
// BackoffTimer returns a channel that sends the current time when the exponential backoff timeout expires.
// Returns nil if the maximum number of retries has been reached.
func (b *BackoffHandler) BackoffTimer() <-chan time.Time {
if !b.resetDeadline.IsZero() && Clock.Now().After(b.resetDeadline) {
b.retries = 0
b.resetDeadline = time.Time{}
}
if b.retries >= b.MaxRetries {
if !b.RetryForever {
return nil
}
} else {
b.retries++
}
maxTimeToWait := time.Duration(b.GetBaseTime() * 1 << (b.retries))
timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
return Clock.After(timeToWait)
}
// Backoff is used to wait according to exponential backoff. Returns false if the
// maximum number of retries has been reached or if the underlying context has been cancelled.
func (b *BackoffHandler) Backoff(ctx context.Context) bool {
c := b.BackoffTimer()
if c == nil {
return false
}
select {
case <-c:
return true
case <-ctx.Done():
return false
}
}
// SetGracePeriod sets a grace period within which the backoff timer is maintained. After the grace
// period expires, the number of retries and the backoff duration are reset.
func (b *BackoffHandler) SetGracePeriod() {
maxTimeToWait := b.GetBaseTime() * 2 << (b.retries + 1)
timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
b.resetDeadline = Clock.Now().Add(timeToWait)
}
func (b BackoffHandler) GetBaseTime() time.Duration {
if b.BaseTime == 0 {
return time.Second
}
return b.BaseTime
}
// Retries returns the number of retries consumed so far.
func (b *BackoffHandler) Retries() int {
return int(b.retries)
}
func (b *BackoffHandler) ReachedMaxRetries() bool {
return b.retries == b.MaxRetries
}
func (b *BackoffHandler) ResetNow() {
b.resetDeadline = time.Now()
}
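
To illustrate how this handler is typically driven, here is a minimal connection-retry loop. It is a sketch only: connect and serve are hypothetical stand-ins for real connection logic, and the import path is assumed from the repository layout shown above.

package main

import (
	"context"
	"errors"
	"log"

	// Import path assumed from the repository layout above.
	"github.com/cloudflare/cloudflared/retry"
)

// connect and serve are hypothetical stand-ins for real connection logic.
func connect(ctx context.Context) error { return errors.New("dial failed") }
func serve(ctx context.Context)         {}

func main() {
	ctx := context.Background()
	// Up to 5 retries; BaseTime defaults to 1 second when left as zero.
	backoff := retry.BackoffHandler{MaxRetries: 5}
	for {
		if err := connect(ctx); err != nil {
			log.Printf("connection attempt %d failed: %v", backoff.Retries()+1, err)
			// Backoff sleeps for a randomized exponential delay; it returns false
			// once MaxRetries is exhausted (and RetryForever is unset) or the
			// context is cancelled.
			if !backoff.Backoff(ctx) {
				log.Fatal("giving up after maximum retries")
			}
			continue
		}
		// Connected: start a grace period so the retry counter resets if the
		// connection stays healthy past the grace deadline.
		backoff.SetGracePeriod()
		serve(ctx) // blocks until the connection drops, then we retry
	}
}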

retry/backoffhandler_test.go

@@ -0,0 +1,147 @@
package retry
import (
"context"
"testing"
"time"
)
func immediateTimeAfter(time.Duration) <-chan time.Time {
c := make(chan time.Time, 1)
c <- time.Now()
return c
}
func TestBackoffRetries(t *testing.T) {
// make backoff return immediately
Clock.After = immediateTimeAfter
ctx := context.Background()
backoff := BackoffHandler{MaxRetries: 3}
if !backoff.Backoff(ctx) {
t.Fatalf("backoff failed immediately")
}
if !backoff.Backoff(ctx) {
t.Fatalf("backoff failed after 1 retry")
}
if !backoff.Backoff(ctx) {
t.Fatalf("backoff failed after 2 retry")
}
if backoff.Backoff(ctx) {
t.Fatalf("backoff allowed after 3 (max) retries")
}
}
func TestBackoffCancel(t *testing.T) {
// prevent backoff from returning normally
Clock.After = func(time.Duration) <-chan time.Time { return make(chan time.Time) }
ctx, cancelFunc := context.WithCancel(context.Background())
backoff := BackoffHandler{MaxRetries: 3}
cancelFunc()
if backoff.Backoff(ctx) {
t.Fatalf("backoff allowed after cancel")
}
if _, ok := backoff.GetMaxBackoffDuration(ctx); ok {
t.Fatalf("backoff allowed after cancel")
}
}
func TestBackoffGracePeriod(t *testing.T) {
currentTime := time.Now()
// make Clock.Now return whatever we like
Clock.Now = func() time.Time { return currentTime }
// make backoff return immediately
Clock.After = immediateTimeAfter
ctx := context.Background()
backoff := BackoffHandler{MaxRetries: 1}
if !backoff.Backoff(ctx) {
t.Fatalf("backoff failed immediately")
}
// the next call to Backoff would fail unless it's after the grace period
backoff.SetGracePeriod()
// advance time to after the grace period (~4 seconds) and see what happens
currentTime = currentTime.Add(time.Second * 5)
if !backoff.Backoff(ctx) {
t.Fatalf("backoff failed after the grace period expired")
}
// confirm we ignore grace period after backoff
if backoff.Backoff(ctx) {
t.Fatalf("backoff allowed after 1 (max) retry")
}
}
func TestGetMaxBackoffDurationRetries(t *testing.T) {
// make backoff return immediately
Clock.After = immediateTimeAfter
ctx := context.Background()
backoff := BackoffHandler{MaxRetries: 3}
if _, ok := backoff.GetMaxBackoffDuration(ctx); !ok {
t.Fatalf("backoff failed immediately")
}
backoff.Backoff(ctx) // noop
if _, ok := backoff.GetMaxBackoffDuration(ctx); !ok {
t.Fatalf("backoff failed after 1 retry")
}
backoff.Backoff(ctx) // noop
if _, ok := backoff.GetMaxBackoffDuration(ctx); !ok {
t.Fatalf("backoff failed after 2 retry")
}
backoff.Backoff(ctx) // noop
if _, ok := backoff.GetMaxBackoffDuration(ctx); ok {
t.Fatalf("backoff allowed after 3 (max) retries")
}
if backoff.Backoff(ctx) {
t.Fatalf("backoff allowed after 3 (max) retries")
}
}
func TestGetMaxBackoffDuration(t *testing.T) {
// make backoff return immediately
Clock.After = immediateTimeAfter
ctx := context.Background()
backoff := BackoffHandler{MaxRetries: 3}
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*2 {
t.Fatalf("backoff (%s) didn't return < 2 seconds on first retry", duration)
}
backoff.Backoff(ctx) // noop
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*4 {
t.Fatalf("backoff (%s) didn't return < 4 seconds on second retry", duration)
}
backoff.Backoff(ctx) // noop
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*8 {
t.Fatalf("backoff (%s) didn't return < 8 seconds on third retry", duration)
}
backoff.Backoff(ctx) // noop
if duration, ok := backoff.GetMaxBackoffDuration(ctx); ok || duration != 0 {
t.Fatalf("backoff (%s) didn't return 0 seconds on fourth retry (exceeding limit)", duration)
}
}
func TestBackoffRetryForever(t *testing.T) {
// make backoff return immediately
Clock.After = immediateTimeAfter
ctx := context.Background()
backoff := BackoffHandler{MaxRetries: 3, RetryForever: true}
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*2 {
t.Fatalf("backoff (%s) didn't return < 2 seconds on first retry", duration)
}
backoff.Backoff(ctx) // noop
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*4 {
t.Fatalf("backoff (%s) didn't return < 4 seconds on second retry", duration)
}
backoff.Backoff(ctx) // noop
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*8 {
t.Fatalf("backoff (%s) didn't return < 8 seconds on third retry", duration)
}
if !backoff.Backoff(ctx) {
t.Fatalf("backoff refused on fourth retry despire RetryForever")
}
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*16 {
t.Fatalf("backoff returned %v instead of 8 seconds on fourth retry", duration)
}
if !backoff.Backoff(ctx) {
t.Fatalf("backoff refused on fifth retry despire RetryForever")
}
if duration, ok := backoff.GetMaxBackoffDuration(ctx); !ok || duration > time.Second*16 {
t.Fatalf("backoff returned %v instead of 8 seconds on fifth retry", duration)
}
}
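
GetMaxBackoffDuration follows the same logic as Backoff but does not mutate the handler, which makes it useful for reporting the worst-case wait before actually sleeping. Below is a small sketch under the same assumed import path; the helper name and logging are illustrative, not part of the package.

package main

import (
	"context"
	"log"
	"time"

	// Import path assumed from the repository layout above.
	"github.com/cloudflare/cloudflared/retry"
)

// waitForRetry (hypothetical helper) logs the worst-case delay before committing
// to it, then performs the actual backoff. Returns false when the retry budget is
// exhausted or the context is cancelled.
func waitForRetry(ctx context.Context, backoff *retry.BackoffHandler) bool {
	if wait, ok := backoff.GetMaxBackoffDuration(ctx); ok {
		log.Printf("retrying in at most %s", wait)
	} else {
		log.Print("retry budget exhausted, not retrying")
	}
	return backoff.Backoff(ctx)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	backoff := retry.BackoffHandler{MaxRetries: 3}
	for waitForRetry(ctx, &backoff) {
		log.Printf("attempted retry %d", backoff.Retries())
	}
}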