Skip to content

Commit

Permalink
Some latency optimisations
Browse files Browse the repository at this point in the history
  • Loading branch information
lonelycode committed Oct 7, 2015
1 parent ea5e70b commit d3456e1
Show file tree
Hide file tree
Showing 4 changed files with 43 additions and 8 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

- Gateway Mongo Driver updated to be compatible with MongoDB v3.0
- Fixed OAuth client listings with redis cluster
- Some latency improvements
- Key detection now checks a local in-memory cache before reaching out to Redis; keys are cached for 10 seconds with a 5-second purge rate (so a maximum key existence of 15s). Policies will still take instant effect on keys

# 1.8.3.2

Expand Down
19 changes: 19 additions & 0 deletions handler_success.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package main

import (
"github.com/gorilla/context"
"github.com/pmylund/go-cache"
"net/http"
"runtime/pprof"
"strconv"
Expand All @@ -21,6 +22,8 @@ const (
VersionKeyContext = 3
)

// SessionCache is a process-local, in-memory cache of session records keyed
// by raw token. Entries expire after 10s and are purged every 5s, so a stale
// entry can live for at most ~15s before Redis is consulted again (policies
// are still re-applied on every cache hit — see CheckSessionAndIdentityForValidKey).
// The explicit *cache.Cache type is redundant with the initializer, so it is omitted.
var SessionCache = cache.New(10*time.Second, 5*time.Second)

// TykMiddleware wraps up the ApiSpec and Proxy objects to be included in a
// middleware handler, this can probably be handled better.
type TykMiddleware struct {
Expand Down Expand Up @@ -80,9 +83,21 @@ func (t TykMiddleware) CheckSessionAndIdentityForValidKey(key string) (SessionSt
var thisSession SessionState
var found bool

// Check in-memory cache
cachedVal, found := SessionCache.Get(key)
if found {
log.Debug("Key found in local cache")
thisSession = cachedVal.(SessionState)
t.ApplyPolicyIfExists(key, &thisSession)
return thisSession, true
}

// Check session store
thisSession, found = t.Spec.SessionManager.GetSessionDetail(key)
if found {
// If exists, assume it has been authorized and pass on
// cache it
go SessionCache.Set(key, thisSession, cache.DefaultExpiration)

// Check for a policy, if there is a policy, pull it and overwrite the session values
t.ApplyPolicyIfExists(key, &thisSession)
Expand All @@ -95,6 +110,10 @@ func (t TykMiddleware) CheckSessionAndIdentityForValidKey(key string) (SessionSt
if found {
// If not in Session, and got it from AuthHandler, create a session with a new TTL
log.Info("Recreating session for key: ", key)

// cache it
go SessionCache.Set(key, thisSession, cache.DefaultExpiration)

// Check for a policy, if there is a policy, pull it and overwrite the session values
t.ApplyPolicyIfExists(key, &thisSession)
t.Spec.SessionManager.UpdateSession(key, thisSession, t.Spec.APIDefinition.SessionLifetime)
Expand Down
7 changes: 4 additions & 3 deletions middleware_rate_limiting.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ import (
"github.com/gorilla/context"
)

// Package-level singletons reused across requests so ProcessRequest does not
// allocate a fresh limiter/monitor per call. SessionLimiter is an empty struct
// (see session_manager.go); Monitor is presumably stateless as well — confirm
// it carries no per-request state before sharing it across goroutines.
var sessionLimiter = SessionLimiter{}
var sessionMonitor = Monitor{}

// RateLimitAndQuotaCheck will check whether the incoming request and key are within the key's quota and
// rate limit; it makes use of the SessionLimiter object to do this
type RateLimitAndQuotaCheck struct {
Expand All @@ -24,7 +27,6 @@ func (k *RateLimitAndQuotaCheck) GetConfig() (interface{}, error) {

// ProcessRequest will run any checks on the request on the way through the system, return an error to have the chain fail
func (k *RateLimitAndQuotaCheck) ProcessRequest(w http.ResponseWriter, r *http.Request, configuration interface{}) (error, int) {
sessionLimiter := SessionLimiter{}
thisSessionState := context.Get(r, SessionData).(SessionState)
authHeaderValue := context.Get(r, AuthHeaderValue).(string)

Expand Down Expand Up @@ -92,8 +94,7 @@ func (k *RateLimitAndQuotaCheck) ProcessRequest(w http.ResponseWriter, r *http.R

// Run the trigger monitor
if config.Monitor.MonitorUserKeys {
mon := Monitor{}
mon.Check(&thisSessionState, authHeaderValue)
sessionMonitor.Check(&thisSessionState, authHeaderValue)
}

// Request is valid, carry on
Expand Down
23 changes: 18 additions & 5 deletions session_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -68,22 +68,35 @@ const (
// check if a message should pass through or not
type SessionLimiter struct{}

// ForwardMessage will enforce rate limiting, returning false if session limits have been exceeded.
// Key values to manage rate are Rate and Per, e.g. Rate of 10 messages Per 10 seconds
func (l SessionLimiter) ForwardMessage(currentSession *SessionState, key string, store StorageHandler) (bool, int) {

// doRollingWindowWrite records a hit against the session's rolling rate
// window and, when the configured rate is exceeded, sets a sentinel key with
// an expiry of currentSession.Per seconds so that ForwardMessage can reject
// subsequent requests with a single cheap GET. Intended to be run off-thread
// (ForwardMessage invokes it via `go`), which is why it returns nothing.
//
// NOTE: the original body re-derived rateLimiterKey with `:=`, which
// redeclares the parameter in the same scope and does not compile; the
// caller already supplies the computed key, so that line is removed.
func (l SessionLimiter) doRollingWindowWrite(key, rateLimiterKey, rateLimiterSentinelKey string, currentSession *SessionState, store StorageHandler) {
	log.Debug("[RATELIMIT] Inbound raw key is: ", key)
	log.Debug("[RATELIMIT] Rate limiter key is: ", rateLimiterKey)

	// SetRollingWindow both records this request and returns the number of
	// requests seen in the current window of currentSession.Per seconds.
	ratePerPeriodNow := store.SetRollingWindow(rateLimiterKey, int64(currentSession.Per), int64(currentSession.Per))

	log.Debug("Num Requests: ", ratePerPeriodNow)

	// Subtract by 1 because of the delayed add in the window
	if ratePerPeriodNow > (int(currentSession.Rate) - 1) {
		// Over the limit: set a sentinel value with expire so the fast
		// path in ForwardMessage blocks until it lapses.
		store.SetRawKey(rateLimiterSentinelKey, "1", int64(currentSession.Per))
	}
}

// ForwardMessage will enforce rate limiting, returning false if session limits have been exceeded.
// Key values to manage rate are Rate and Per, e.g. Rate of 10 messages Per 10 seconds
func (l SessionLimiter) ForwardMessage(currentSession *SessionState, key string, store StorageHandler) (bool, int) {
rateLimiterKey := RateLimitKeyPrefix + publicHash(key)
rateLimiterSentinelKey := RateLimitKeyPrefix + publicHash(key) + ".BLOCKED"
// Check sentinel
_, sentinelActive := store.GetRawKey(rateLimiterSentinelKey)
if sentinelActive == nil {
// Sentinel is set, fail
return false, 1
}

// if not - set rolling window (off thread)
go l.doRollingWindowWrite(key, rateLimiterKey, rateLimiterSentinelKey, currentSession, store)

currentSession.Allowance--
if !l.IsRedisQuotaExceeded(currentSession, key, store) {
return true, 0
Expand Down

0 comments on commit d3456e1

Please sign in to comment.