diff --git a/collects/web-server/dispatchers/limit.ss b/collects/web-server/dispatchers/limit.ss index 8cfe48cc6e..ad37adb9e1 100644 --- a/collects/web-server/dispatchers/limit.ss +++ b/collects/web-server/dispatchers/limit.ss @@ -2,10 +2,10 @@ (require "dispatch.ss") (provide/contract [interface-version dispatcher-interface-version/c] - [make (number? dispatcher/c . -> . dispatcher/c)]) + [make ((number? dispatcher/c) (#:over-limit (symbols 'block 'kill-new 'kill-old)) . ->* . dispatcher/c)]) (define interface-version 'v1) -(define (make num inner) +(define (make num inner #:over-limit [over-limit 'block]) (define-struct in-req (partner reply-ch)) (define in-ch (make-channel)) (define-struct out-req (partner)) @@ -16,17 +16,50 @@ (let loop ([i 0] [partners empty]) (apply sync + ; Do we have room for another... (if (< i num) + ; If so, allow them in (handle-evt in-ch (lambda (req) - (channel-put (in-req-reply-ch req) #t) + ; Reply asynchronously + (thread + (lambda () + (channel-put (in-req-reply-ch req) #t))) (loop (add1 i) (list* (in-req-partner req) partners)))) - never-evt) + ; Otherwise, decide what to do with new requests + (case over-limit + ; Make them block... 
+ [(block) + never-evt] + ; Instruct the new request to die + [(kill-new) + (handle-evt in-ch + (lambda (req) + ; Reply asynchronously + (thread + (lambda () + (channel-put (in-req-reply-ch req) #f))) + (loop i partners)))] + ; Kill an old request handler and allow this one + [(kill-old) + (handle-evt in-ch + (lambda (req) + (define oldest (last partners)) + (define remaining (take partners (sub1 (length partners)))) + ; Kill the oldest thread + (kill-thread oldest) + ; Reply asynchronously + (thread + (lambda () + (channel-put (in-req-reply-ch req) #t))) + (loop i (list* (in-req-partner req) remaining))))])) + ; Wait for partners to complete (handle-evt out-ch (lambda (req) (loop (sub1 i) (remq (out-req-partner req) partners)))) + ; Check if partners are dead (map (lambda (p) (handle-evt (thread-dead-evt p) (lambda _ @@ -35,7 +68,8 @@ (define (in) (define reply (make-channel)) (channel-put in-ch (make-in-req (current-thread) reply)) - (channel-get reply)) + (unless (channel-get reply) + (error 'limit "limit-manager requested load shedding"))) (define (out) (channel-put out-ch (make-out-req (current-thread)))) (lambda (conn req) diff --git a/collects/web-server/scribblings/dispatchers.scrbl b/collects/web-server/scribblings/dispatchers.scrbl index efb98175fb..20adde5454 100644 --- a/collects/web-server/scribblings/dispatchers.scrbl +++ b/collects/web-server/scribblings/dispatchers.scrbl @@ -395,9 +395,16 @@ a URL that refreshes the password file, servlet cache, etc.} @elem{provides a wrapper dispatcher that limits how many requests are serviced at once.}]{ @defproc[(make [limit number?] - [inner dispatcher/c]) + [inner dispatcher/c] + [#:over-limit over-limit (symbols 'block 'kill-new 'kill-old) 'block]) dispatcher/c]{ Returns a dispatcher that defers to @scheme[inner] for work, but will forward a maximum of @scheme[limit] requests concurrently. 
+ + If there is no additional space under the limit and a new request is received, the @scheme[over-limit] option determines what is done. + The default (@scheme['block]) causes the new request to block until an old request is finished being handled. + If @scheme[over-limit] is @scheme['kill-new], then the new request handler is killed---a form of load-shedding. + If @scheme[over-limit] is @scheme['kill-old], then the oldest request handler is killed---prioritizing new connections over old. + (This setting is a little dangerous because requests might never finish if there is constant load.) }} @(require (for-label @@ -434,7 +441,8 @@ Consider this example: (list (format "hello world ~a" (sort (build-list 100000 (λ x (random 1000))) <)))) - (request-method req))))) + (request-method req))) + #:over-limit 'block)) (lambda (conn req) (output-response/method conn