I ran across a blog post by Josh Bleecher Snyder today, which has some beautiful backoff algorithms in Go.

Capturing them here in case his blog ever goes offline.

Algorithm 1

func do(ctx context.Context) error {
    const (
        maxAttempts = 10
        baseDelay   = 1 * time.Second
        maxDelay    = 60 * time.Second
    )

    delay := baseDelay
    for attempt := range maxAttempts {
        err := request(ctx)
        if err == nil {
            return nil
        }

        delay *= 2
        delay = min(delay, maxDelay)

        jitter := multiplyDuration(delay, rand.Float64()*0.5-0.25) // ±25%
        sleepTime := delay + jitter

        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(sleepTime):
        }
    }

    return fmt.Errorf("failed after %d attempts", maxAttempts)
}

func multiplyDuration(d time.Duration, mul float64) time.Duration {
    return time.Duration(float64(d) * mul)
}

Algorithm 2

I really like this one. It’s very easy to read.

The only drawback is that, as currently written, it gives up after a fixed number of attempts. Sometimes I don’t want that…

// do retries request using an explicit table of backoff delays, each
// jittered by ±25%. It returns nil on the first success, ctx.Err() if
// the context is canceled while waiting, and a descriptive error after
// every delay slot has been exhausted.
func do(ctx context.Context) error {
    delays := []time.Duration{
        1 * time.Second, 2 * time.Second,
        4 * time.Second, 8 * time.Second,
        16 * time.Second, 32 * time.Second,
        60 * time.Second, 60 * time.Second,
        60 * time.Second, 60 * time.Second,
    }

    for i, delay := range delays {
        err := request(ctx)
        if err == nil {
            return nil
        }
        if i == len(delays)-1 {
            // The final attempt failed; sleeping another ~60s would
            // only delay the error report.
            break
        }

        delay = multiplyDuration(delay, 0.75+rand.Float64()*0.5) // ±25%
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(delay):
        }
    }

    return fmt.Errorf("failed after %d attempts", len(delays))
}