# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
# This code was written with funding by http://prometheusresearch.com
"""
WSGI HTTP Server

This is a minimalistic WSGI server using Python's built-in BaseHTTPServer;
if pyOpenSSL is installed, it also provides SSL capabilities.
"""

# @@: add in protection against HTTP/1.0 clients who claim to
#     be 1.1 but do not send a Content-Length

# @@: add support for chunked encoding; this is not a 1.1 server
#     until this is completed.

import atexit
import traceback
import socket, sys, threading, urlparse, Queue, urllib
import posixpath
import time
import thread
import os
from itertools import count
from StringIO import StringIO  # needed by ThreadPool.kill_hung_threads below
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from paste.util import converters
import logging
try:
    from paste.util import killthread
except ImportError:
    # Not available, probably no ctypes
    killthread = None

__all__ = ['WSGIHandlerMixin', 'WSGIServer', 'WSGIHandler', 'serve']
__version__ = "0.5"

class ContinueHook(object):
    """
    When a client request includes an 'Expect: 100-continue' header, it
    is the responsibility of the server to send 100 Continue when it
    is ready for the content body.  This allows authentication, access
    levels, and other exceptions to be detected *before* bandwidth is
    spent on the request body.

    This is an rfile wrapper that implements this functionality by
    sending 100 Continue to the client immediately after the user
    requests the content via a read() operation on the rfile stream.
    After this response is sent, it becomes a pass-through object.
    """

    def __init__(self, rfile, write):
        self._ContinueFile_rfile = rfile
        self._ContinueFile_write = write
        for attr in ('close', 'closed', 'fileno', 'flush',
                     'mode', 'bufsize', 'softspace'):
            if hasattr(rfile, attr):
                setattr(self, attr, getattr(rfile, attr))
        for attr in ('read', 'readline', 'readlines'):
            if hasattr(rfile, attr):
                setattr(self, attr, getattr(self, '_ContinueFile_' + attr))

    def _ContinueFile_send(self):
        self._ContinueFile_write("HTTP/1.1 100 Continue\r\n\r\n")
        rfile = self._ContinueFile_rfile
        for attr in ('read', 'readline', 'readlines'):
            if hasattr(rfile, attr):
                setattr(self, attr, getattr(rfile, attr))

    def _ContinueFile_read(self, size=-1):
        self._ContinueFile_send()
        return self._ContinueFile_rfile.read(size)

    def _ContinueFile_readline(self, size=-1):
        self._ContinueFile_send()
        return self._ContinueFile_rfile.readline(size)

    def _ContinueFile_readlines(self, sizehint=0):
        self._ContinueFile_send()
        return self._ContinueFile_rfile.readlines(sizehint)

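# Illustrative sketch (not part of the original module): ContinueHook defers
# the interim "100 Continue" response until the application first reads the
# request body.  StringIO stands in for the socket's rfile here.
def _continue_hook_example():
    sent = []
    hook = ContinueHook(StringIO("request body"), sent.append)
    assert sent == []  # nothing is written until the body is requested
    body = hook.read()
    assert sent == ["HTTP/1.1 100 Continue\r\n\r\n"]
    return body
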
class WSGIHandlerMixin:
    """
    WSGI mix-in for HTTPRequestHandler

    This class is a mix-in to provide WSGI functionality to any
    HTTPRequestHandler derivative (as provided in Python's BaseHTTPServer).
    This assumes a ``wsgi_application`` handler on ``self.server``.
    """
    lookup_addresses = True

    def log_request(self, *args, **kwargs):
        """ disable success request logging

        Logging transactions should not be part of a WSGI server;
        if you want logging, look at paste.translogger
        """
        pass

    def log_message(self, *args, **kwargs):
        """ disable error message logging

        Logging transactions should not be part of a WSGI server;
        if you want logging, look at paste.translogger
        """
        pass

    def version_string(self):
        """ behavior that BaseHTTPServer should have had """
        if not self.sys_version:
            return self.server_version
        else:
            return self.server_version + ' ' + self.sys_version

    def wsgi_write_chunk(self, chunk):
        """
        Write a chunk of the output stream; send headers if they
        have not already been sent.
        """
        if not self.wsgi_headers_sent and not self.wsgi_curr_headers:
            raise RuntimeError(
                "Content returned before start_response called")
        if not self.wsgi_headers_sent:
            self.wsgi_headers_sent = True
            (status, headers) = self.wsgi_curr_headers
            code, message = status.split(" ", 1)
            self.send_response(int(code), message)
            #
            # HTTP/1.1 compliance; either send Content-Length or
            # signal that the connection is being closed.
            #
            send_close = True
            for (k, v) in headers:
                lk = k.lower()
                if 'content-length' == lk:
                    send_close = False
                if 'connection' == lk:
                    if 'close' == v.lower():
                        self.close_connection = 1
                        send_close = False
                self.send_header(k, v)
            if send_close:
                self.close_connection = 1
                self.send_header('Connection', 'close')

            self.end_headers()
        self.wfile.write(chunk)

    def wsgi_start_response(self, status, response_headers, exc_info=None):
        if exc_info:
            try:
                if self.wsgi_headers_sent:
                    raise exc_info[0], exc_info[1], exc_info[2]
                else:
                    # In this case, we're going to assume that the
                    # higher-level code is currently handling the
                    # issue and returning a reasonable response.
                    # self.log_error(repr(exc_info))
                    pass
            finally:
                exc_info = None
        elif self.wsgi_curr_headers:
            assert 0, "Attempt to set headers a second time w/o an exc_info"
        self.wsgi_curr_headers = (status, response_headers)
        return self.wsgi_write_chunk

    def wsgi_setup(self, environ=None):
        """
        Setup the member variables used by this WSGI mixin, including
        the ``environ`` and status member variables.

        After the basic environment is created, the optional ``environ``
        argument can be used to override any settings.
        """

        (scheme, netloc, path, query, fragment) = urlparse.urlsplit(self.path)
        path = urllib.unquote(path)
        endslash = path.endswith('/')
        path = posixpath.normpath(path)
        if endslash and path != '/':
            # Put the slash back...
            path += '/'
        (server_name, server_port) = self.server.server_address[:2]

        rfile = self.rfile
        if 'HTTP/1.1' == self.protocol_version and \
                '100-continue' == self.headers.get('Expect','').lower():
            rfile = ContinueHook(rfile, self.wfile.write)
        else:
            # We can put in the protection to keep from over-reading the
            # file
            try:
                content_length = int(self.headers.get('Content-Length', '0'))
            except ValueError:
                content_length = 0
            if not hasattr(self.connection, 'get_context'):
                # @@: LimitedLengthFile is currently broken in connection
                # with SSL (sporadic errors that are difficult to trace, but
                # ones that go away when you don't use LimitedLengthFile)
                rfile = LimitedLengthFile(rfile, content_length)

        remote_address = self.client_address[0]
        self.wsgi_environ = {
                'wsgi.version': (1,0)
               ,'wsgi.url_scheme': 'http'
               ,'wsgi.input': rfile
               ,'wsgi.errors': sys.stderr
               ,'wsgi.multithread': True
               ,'wsgi.multiprocess': False
               ,'wsgi.run_once': False
               # CGI variables required by PEP-333
               ,'REQUEST_METHOD': self.command
               ,'SCRIPT_NAME': '' # application is root of server
               ,'PATH_INFO': path
               ,'QUERY_STRING': query
               ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
               ,'CONTENT_LENGTH': self.headers.get('Content-Length', '0')
               ,'SERVER_NAME': server_name
               ,'SERVER_PORT': str(server_port)
               ,'SERVER_PROTOCOL': self.request_version
               # CGI variables not required by PEP-333
               ,'REMOTE_ADDR': remote_address
               }
        if scheme:
            self.wsgi_environ['paste.httpserver.proxy.scheme'] = scheme
        if netloc:
            self.wsgi_environ['paste.httpserver.proxy.host'] = netloc

        if self.lookup_addresses:
            # @@: make lookup_addresses actually work; at this point
            #     address_string() is overridden further down in this
            #     file and hence this is a no-op
            if remote_address.startswith("192.168.") \
            or remote_address.startswith("10.") \
            or remote_address.startswith("172.16."):
                pass
            else:
                address_string = None # self.address_string()
                if address_string:
                    self.wsgi_environ['REMOTE_HOST'] = address_string

        if hasattr(self.server, 'thread_pool'):
            # Now that we know what the request was for, we should
            # tell the thread pool what its worker is working on
            self.server.thread_pool.worker_tracker[thread.get_ident()][1] = self.wsgi_environ
            self.wsgi_environ['paste.httpserver.thread_pool'] = self.server.thread_pool

        for k, v in self.headers.items():
            key = 'HTTP_' + k.replace("-","_").upper()
            if key in ('HTTP_CONTENT_TYPE','HTTP_CONTENT_LENGTH'):
                continue
            self.wsgi_environ[key] = ','.join(self.headers.getheaders(k))

        if hasattr(self.connection,'get_context'):
            self.wsgi_environ['wsgi.url_scheme'] = 'https'
            # @@: extract other SSL parameters from pyOpenSSL at...
            # http://www.modssl.org/docs/2.8/ssl_reference.html#ToC25

        if environ:
            assert isinstance(environ, dict)
            self.wsgi_environ.update(environ)
            if 'on' == environ.get('HTTPS'):
                self.wsgi_environ['wsgi.url_scheme'] = 'https'

        self.wsgi_curr_headers = None
        self.wsgi_headers_sent = False

    def wsgi_connection_drop(self, exce, environ=None):
        """
        Override this if you're interested in socket exceptions, such
        as when the user clicks 'Cancel' during a file download.
        """
        pass

    def wsgi_execute(self, environ=None):
        """
        Invoke the server's ``wsgi_application``.
        """

        self.wsgi_setup(environ)

        try:
            result = self.server.wsgi_application(self.wsgi_environ,
                                                  self.wsgi_start_response)
            try:
                for chunk in result:
                    self.wsgi_write_chunk(chunk)
                if not self.wsgi_headers_sent:
                    self.wsgi_write_chunk('')
            finally:
                if hasattr(result, 'close'):
                    result.close()
                result = None
        except socket.error, exce:
            self.wsgi_connection_drop(exce, environ)
            return
        except:
            if not self.wsgi_headers_sent:
                error_msg = "Internal Server Error\n"
                self.wsgi_curr_headers = (
                    '500 Internal Server Error',
                    [('Content-type', 'text/plain'),
                     ('Content-length', str(len(error_msg)))])
                self.wsgi_write_chunk("Internal Server Error\n")
            raise

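# Hedged sketch (added; names here are hypothetical, not part of the original
# module): how a WSGI application interacts with the mixin above.
# wsgi_execute() passes wsgi_start_response as the start_response callable,
# and the returned write function buffers status and headers until the first
# body write.
def _example_wsgi_app(environ, start_response):
    body = 'Hello from %s\n' % environ.get('PATH_INFO', '/')
    start_response('200 OK', [('Content-Type', 'text/plain'),
                              ('Content-Length', str(len(body)))])
    return [body]
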
#
# SSL Functionality
#
# This implementation was motivated by Sebastien Martini's SSL example
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
#
try:
    from OpenSSL import SSL, tsafe
    SocketErrors = (socket.error, SSL.ZeroReturnError, SSL.SysCallError)
except ImportError:
    # Do not require pyOpenSSL to be installed, but disable SSL
    # functionality in that case.
    SSL = None
    SocketErrors = (socket.error,)
    class SecureHTTPServer(HTTPServer):
        def __init__(self, server_address, RequestHandlerClass,
                     ssl_context=None, request_queue_size=None):
            assert not ssl_context, "pyOpenSSL not installed"
            HTTPServer.__init__(self, server_address, RequestHandlerClass)
            if request_queue_size:
                self.socket.listen(request_queue_size)
else:

    class _ConnFixer(object):
        """ wraps a socket connection so it implements makefile """
        def __init__(self, conn):
            self.__conn = conn
        def makefile(self, mode, bufsize):
            return socket._fileobject(self.__conn, mode, bufsize)
        def __getattr__(self, attrib):
            return getattr(self.__conn, attrib)

    class SecureHTTPServer(HTTPServer):
        """
        Provides SSL server functionality on top of the BaseHTTPServer
        by overriding _private_ members of Python's standard
        distribution. The interface for this instance only changes by
        adding an optional ssl_context attribute to the constructor:

              cntx = SSL.Context(SSL.SSLv23_METHOD)
              cntx.use_privatekey_file("host.pem")
              cntx.use_certificate_file("host.pem")

        """

        def __init__(self, server_address, RequestHandlerClass,
                     ssl_context=None, request_queue_size=None):
            # This overrides the implementation of __init__ in python's
            # SocketServer.TCPServer (which BaseHTTPServer.HTTPServer
            # does not override, thankfully).
            HTTPServer.__init__(self, server_address, RequestHandlerClass)
            self.socket = socket.socket(self.address_family,
                                        self.socket_type)
            self.ssl_context = ssl_context
            if ssl_context:
                class TSafeConnection(tsafe.Connection):
                    def settimeout(self, *args):
                        self._lock.acquire()
                        try:
                            return self._ssl_conn.settimeout(*args)
                        finally:
                            self._lock.release()
                    def gettimeout(self):
                        self._lock.acquire()
                        try:
                            return self._ssl_conn.gettimeout()
                        finally:
                            self._lock.release()
                self.socket = TSafeConnection(ssl_context, self.socket)
            self.server_bind()
            if request_queue_size:
                self.socket.listen(request_queue_size)
            self.server_activate()

        def get_request(self):
            # The default SSL request object does not seem to have a
            # ``makefile(mode, bufsize)`` method as expected by
            # SocketServer.StreamRequestHandler.
            (conn, info) = self.socket.accept()
            if self.ssl_context:
                conn = _ConnFixer(conn)
            return (conn, info)

    def _auto_ssl_context():
        import OpenSSL, time, random
        pkey = OpenSSL.crypto.PKey()
        pkey.generate_key(OpenSSL.crypto.TYPE_RSA, 768)

        cert = OpenSSL.crypto.X509()

        cert.set_serial_number(random.randint(0, sys.maxint))
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
        cert.get_subject().CN = '*'
        cert.get_subject().O = 'Dummy Certificate'
        cert.get_issuer().CN = 'Untrusted Authority'
        cert.get_issuer().O = 'Self-Signed'
        cert.set_pubkey(pkey)
        cert.sign(pkey, 'md5')

        ctx = SSL.Context(SSL.SSLv23_METHOD)
        ctx.use_privatekey(pkey)
        ctx.use_certificate(cert)

        return ctx

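# Note (added): the context from _auto_ssl_context() is development-only
# (768-bit RSA key, md5 signature, wildcard CN).  For a real deployment,
# build a context from your own certificate, as serve() does below:
#
#     ctx = SSL.Context(SSL.SSLv23_METHOD)
#     ctx.use_privatekey_file('host.pem')
#     ctx.use_certificate_chain_file('host.pem')
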
class WSGIHandler(WSGIHandlerMixin, BaseHTTPRequestHandler):
    """
    A WSGI handler that overrides POST, GET and HEAD to delegate
    requests to the server's ``wsgi_application``.
    """
    server_version = 'PasteWSGIServer/' + __version__

    def handle_one_request(self):
        """Handle a single HTTP request.

        You normally don't need to override this method; see the class
        __doc__ string for information on how to handle specific HTTP
        commands such as GET and POST.

        """
        self.raw_requestline = self.rfile.readline()
        if not self.raw_requestline:
            self.close_connection = 1
            return
        if not self.parse_request(): # An error code has been sent, just exit
            return
        self.wsgi_execute()

    def handle(self):
        # don't bother logging disconnects while handling a request
        try:
            BaseHTTPRequestHandler.handle(self)
        except SocketErrors, exce:
            self.wsgi_connection_drop(exce)

    def address_string(self):
        """Return the client address formatted for logging.

        This is overridden so that no hostname lookup is done.
        """
        return ''

class LimitedLengthFile(object):
    def __init__(self, file, length):
        self.file = file
        self.length = length
        self._consumed = 0
        if hasattr(self.file, 'seek'):
            self.seek = self._seek

    def __repr__(self):
        base_repr = repr(self.file)
        return base_repr[:-1] + ' length=%s>' % self.length

    def read(self, length=None):
        left = self.length - self._consumed
        if length is None:
            length = left
        else:
            length = min(length, left)
        # next two lines are necessary only if read(0) blocks
        if not left:
            return ''
        data = self.file.read(length)
        self._consumed += len(data)
        return data

    def readline(self, *args):
        max_read = self.length - self._consumed
        if len(args):
            max_read = min(args[0], max_read)
        data = self.file.readline(max_read)
        self._consumed += len(data)
        return data

    def readlines(self, hint=None):
        data = self.file.readlines(hint)
        for chunk in data:
            self._consumed += len(chunk)
        return data

    def __iter__(self):
        return self

    def next(self):
        if self.length - self._consumed <= 0:
            raise StopIteration
        return self.readline()

    ## Optional methods ##

    def _seek(self, place):
        self.file.seek(place)
        self._consumed = place

    def tell(self):
        if hasattr(self.file, 'tell'):
            return self.file.tell()
        else:
            return self._consumed

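# Illustrative sketch (not part of the original module): LimitedLengthFile
# caps reads at the declared Content-Length so a keep-alive handler never
# reads past the current request's body into the next request on the wire.
def _limited_length_example():
    f = LimitedLengthFile(StringIO("12345678"), 5)
    assert f.read() == "12345"  # capped at the declared length
    assert f.read() == ""       # EOF afterwards, even though more bytes exist
    return f.tell()
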
class ThreadPool(object):
    """
    Generic thread pool with a queue of callables to consume.

    Keeps a notion of the status of its worker threads:

    idle: worker thread with nothing to do

    busy: worker thread doing its job

    hung: worker thread that's been doing a job for too long

    dying: a hung thread that has been killed, but hasn't died quite
    yet.

    zombie: what was a worker thread that we've tried to kill but
    isn't dead yet.

    At any time you can call track_threads, to get a dictionary with
    these keys and lists of thread_ids that fall in that status.  All
    keys will be present, even if they point to empty lists.

    Hung threads are threads that have been busy more than
    hung_thread_limit seconds.  Hung threads are killed when they live
    longer than kill_thread_limit seconds.  A thread is then
    considered dying for dying_limit seconds; if it is still alive
    after that it is considered a zombie.

    When there are no idle workers and a request comes in, another
    worker *may* be spawned.  If there are fewer than spawn_if_under
    threads in the busy state, another thread will be spawned.  So if
    the limit is 5, and there are 4 hung threads and 6 busy threads,
    no thread will be spawned.

    When there are more than max_zombie_threads_before_die zombie
    threads, a SystemExit exception will be raised, stopping the
    server.  Use 0 or None to never raise this exception.  Zombie
    threads *should* get cleaned up, but killing threads is not
    necessarily reliable.  This is turned off by default, since it is
    only a good idea if you've deployed the server with some process
    watching from above (something similar to daemontools or zdaemon).

    Each worker thread only processes ``max_requests`` tasks before it
    dies and replaces itself with a new worker thread.
    """

    SHUTDOWN = object()

    def __init__(
        self, nworkers, name="ThreadPool", daemon=False,
        max_requests=100, # threads are killed after this many requests
        hung_thread_limit=30, # when a thread is marked "hung"
        kill_thread_limit=1800, # when you kill that hung thread
        dying_limit=300, # seconds that a kill should take to go into effect (longer than this and the thread is a "zombie")
        spawn_if_under=5, # spawn if there are too many hung threads
        max_zombie_threads_before_die=0, # when to give up on the process
        hung_check_period=100, # every 100 requests check for hung workers
        logger=None, # Place to log messages to
        error_email=None, # Person(s) to notify if serious problem occurs
        ):
        """
        Create thread pool with `nworkers` worker threads.
        """
        self.nworkers = nworkers
        self.max_requests = max_requests
        self.name = name
        self.queue = Queue.Queue()
        self.workers = []
        self.daemon = daemon
        if logger is None:
            logger = logging.getLogger('paste.httpserver.ThreadPool')
        if isinstance(logger, basestring):
            logger = logging.getLogger(logger)
        self.logger = logger
        self.error_email = error_email
        self._worker_count = count()

        assert (not kill_thread_limit
                or kill_thread_limit >= hung_thread_limit), (
            "kill_thread_limit (%s) should be higher than hung_thread_limit (%s)"
            % (kill_thread_limit, hung_thread_limit))
        if not killthread:
            kill_thread_limit = 0
            self.logger.info(
                "Cannot use kill_thread_limit as ctypes/killthread is not available")
        self.kill_thread_limit = kill_thread_limit
        self.dying_limit = dying_limit
        self.hung_thread_limit = hung_thread_limit
        assert spawn_if_under <= nworkers, (
            "spawn_if_under (%s) should be less than nworkers (%s)"
            % (spawn_if_under, nworkers))
        self.spawn_if_under = spawn_if_under
        self.max_zombie_threads_before_die = max_zombie_threads_before_die
        self.hung_check_period = hung_check_period
        self.requests_since_last_hung_check = 0
        # Used to keep track of what worker is doing what:
        self.worker_tracker = {}
        # Used to keep track of the workers not doing anything:
        self.idle_workers = []
        # Used to keep track of threads that have been killed, but maybe aren't dead yet:
        self.dying_threads = {}
        # This is used to track when we last had to add idle workers;
        # we shouldn't cull extra workers until some time has passed
        # (hung_thread_limit) since workers were added:
        self._last_added_new_idle_workers = 0
        if not daemon:
            atexit.register(self.shutdown)
        for i in range(self.nworkers):
            self.add_worker_thread(message='Initial worker pool')

    def add_task(self, task):
        """
        Add a task to the queue
        """
        self.logger.debug('Added task (%i tasks queued)', self.queue.qsize())
        if self.hung_check_period:
            self.requests_since_last_hung_check += 1
            if self.requests_since_last_hung_check > self.hung_check_period:
                self.requests_since_last_hung_check = 0
                self.kill_hung_threads()
        if not self.idle_workers and self.spawn_if_under:
            # spawn_if_under can come into effect...
            busy = 0
            now = time.time()
            self.logger.debug('No idle workers for task; checking if we need to make more workers')
            for worker in self.workers:
                if not hasattr(worker, 'thread_id'):
                    # Not initialized
                    continue
                time_started, info = self.worker_tracker.get(worker.thread_id,
                                                             (None, None))
                if time_started is not None:
                    if now - time_started < self.hung_thread_limit:
                        busy += 1
            if busy < self.spawn_if_under:
                self.logger.info(
                    'No idle workers, and only %s busy workers; adding %s more '
                    'workers', busy, self.spawn_if_under-busy)
                self._last_added_new_idle_workers = time.time()
                for i in range(self.spawn_if_under - busy):
                    self.add_worker_thread(message='Response to lack of idle workers')
            else:
                self.logger.debug(
                    'No extra workers needed (%s busy workers)',
                    busy)
        if (len(self.workers) > self.nworkers
            and len(self.idle_workers) > 3
            and time.time()-self._last_added_new_idle_workers > self.hung_thread_limit):
            # We've spawned workers in the past, but they aren't needed
            # anymore; kill off some
            self.logger.info(
                'Culling %s extra workers (%s idle workers present)',
                len(self.workers)-self.nworkers, len(self.idle_workers))
            self.logger.debug(
                'Idle workers: %s', self.idle_workers)
            for i in range(len(self.workers) - self.nworkers):
                self.queue.put(self.SHUTDOWN)
        self.queue.put(task)

    def track_threads(self):
        """
        Return a dict summarizing the threads in the pool (as
        described in the ThreadPool docstring).
        """
        result = dict(idle=[], busy=[], hung=[], dying=[], zombie=[])
        now = time.time()
        for worker in self.workers:
            if not hasattr(worker, 'thread_id'):
                # The worker hasn't fully started up, we should just
                # ignore it
                continue
            time_started, info = self.worker_tracker.get(worker.thread_id,
                                                         (None, None))
            if time_started is not None:
                if now - time_started > self.hung_thread_limit:
                    result['hung'].append(worker)
                else:
                    result['busy'].append(worker)
            else:
                result['idle'].append(worker)
        for thread_id, (time_killed, worker) in self.dying_threads.items():
            if not self.thread_exists(thread_id):
                # Cull dying threads that are actually dead and gone
                self.logger.info('Killed thread %s no longer around',
                                 thread_id)
                try:
                    del self.dying_threads[thread_id]
                except KeyError:
                    pass
                continue
            if now - time_killed > self.dying_limit:
                result['zombie'].append(worker)
            else:
                result['dying'].append(worker)
        return result

    def kill_worker(self, thread_id):
        """
        Removes the worker with the given thread_id from the pool, and
        replaces it with a new worker thread.

        This should only be done for mis-behaving workers.
        """
        if killthread is None:
            raise RuntimeError(
                "Cannot kill worker; killthread/ctypes not available")
        thread_obj = threading._active.get(thread_id)
        killthread.async_raise(thread_id, SystemExit)
        try:
            del self.worker_tracker[thread_id]
        except KeyError:
            pass
        self.logger.info('Killing thread %s', thread_id)
        if thread_obj in self.workers:
            self.workers.remove(thread_obj)
        self.dying_threads[thread_id] = (time.time(), thread_obj)
        self.add_worker_thread(message='Replacement for killed thread %s' % thread_id)

    def thread_exists(self, thread_id):
        """
        Returns true if a thread with this id is still running
        """
        return thread_id in threading._active

    def add_worker_thread(self, *args, **kwargs):
        index = self._worker_count.next()
        worker = threading.Thread(target=self.worker_thread_callback,
                                  args=args, kwargs=kwargs,
                                  name=("worker %d" % index))
        worker.setDaemon(self.daemon)
        worker.start()

    def kill_hung_threads(self):
        """
        Tries to kill any hung threads
        """
        if not self.kill_thread_limit:
            # No killing should occur
            return
        now = time.time()
        max_time = 0
        total_time = 0
        idle_workers = 0
        starting_workers = 0
        working_workers = 0
        killed_workers = 0
        for worker in self.workers:
            if not hasattr(worker, 'thread_id'):
                # Not setup yet
                starting_workers += 1
                continue
            time_started, info = self.worker_tracker.get(worker.thread_id,
                                                         (None, None))
            if time_started is None:
                # Must be idle
                idle_workers += 1
                continue
            working_workers += 1
            max_time = max(max_time, now-time_started)
            total_time += now-time_started
            if now - time_started > self.kill_thread_limit:
                self.logger.warning(
                    'Thread %s hung (working on task for %i seconds)',
                    worker.thread_id, now - time_started)
                try:
                    import pprint
                    info_desc = pprint.pformat(info)
                except:
                    out = StringIO()
                    traceback.print_exc(file=out)
                    info_desc = 'Error:\n%s' % out.getvalue()
                self.notify_problem(
                    "Killing worker thread (id=%(thread_id)s) because it has been \n"
                    "working on task for %(time)s seconds (limit is %(limit)s)\n"
                    "Info on task:\n"
                    "%(info)s"
                    % dict(thread_id=worker.thread_id,
                           time=now - time_started,
                           limit=self.kill_thread_limit,
                           info=info_desc))
                self.kill_worker(worker.thread_id)
                killed_workers += 1
        if working_workers:
            ave_time = float(total_time) / working_workers
            ave_time = '%.2fsec' % ave_time
        else:
            ave_time = 'N/A'
        self.logger.info(
            "kill_hung_threads status: %s threads (%s working, %s idle, %s starting) "
            "ave time %s, max time %.2fsec, killed %s workers"
            % (idle_workers + starting_workers + working_workers,
               working_workers, idle_workers, starting_workers,
               ave_time, max_time, killed_workers))
        self.check_max_zombies()

    def check_max_zombies(self):
        """
        Check if we've reached max_zombie_threads_before_die; if so
        then kill the entire process.
        """
        if not self.max_zombie_threads_before_die:
            return
        found = []
        now = time.time()
        for thread_id, (time_killed, worker) in self.dying_threads.items():
            if not self.thread_exists(thread_id):
                # Cull dying threads that are actually dead and gone
                try:
                    del self.dying_threads[thread_id]
                except KeyError:
                    pass
                continue
            if now - time_killed > self.dying_limit:
                found.append(thread_id)
        if found:
            self.logger.info('Found %s zombie threads', found)
        if len(found) > self.max_zombie_threads_before_die:
            self.logger.fatal(
                'Exiting process because %s zombie threads is more than %s limit',
                len(found), self.max_zombie_threads_before_die)
            self.notify_problem(
                "Exiting process because %(found)s zombie threads "
                "(more than limit of %(limit)s)\n"
                "Bad threads (ids):\n"
                "  %(ids)s\n"
                % dict(found=len(found),
                       limit=self.max_zombie_threads_before_die,
                       ids="\n  ".join(map(str, found))),
                subject="Process restart (too many zombie threads)")
            self.shutdown(10)
            print 'Shutting down', threading.currentThread()
            raise ServerExit(3)

    def worker_thread_callback(self, message=None):
        """
        Worker thread should call this method to get and process queued
        callables.
        """
        thread_obj = threading.currentThread()
        thread_id = thread_obj.thread_id = thread.get_ident()
        self.workers.append(thread_obj)
        self.idle_workers.append(thread_id)
        requests_processed = 0
        add_replacement_worker = False
        self.logger.debug('Started new worker %s: %s', thread_id, message)
        try:
            while True:
                if self.max_requests and self.max_requests < requests_processed:
                    # Replace this thread then die
                    self.logger.debug('Thread %s processed %i requests (limit %s); stopping thread'
                                      % (thread_id, requests_processed, self.max_requests))
                    add_replacement_worker = True
                    break
                runnable = self.queue.get()
                if runnable is ThreadPool.SHUTDOWN:
                    self.logger.debug('Worker %s asked to SHUTDOWN', thread_id)
                    break
                try:
                    self.idle_workers.remove(thread_id)
                except ValueError:
                    pass
                self.worker_tracker[thread_id] = [time.time(), None]
                requests_processed += 1
                try:
                    try:
                        runnable()
                    except:
                        # We are later going to call sys.exc_clear(),
                        # removing all remnants of any exception, so
                        # we should log it now.  But ideally no
                        # exception should reach this level
                        print >> sys.stderr, (
                            'Unexpected exception in worker %r' % runnable)
                        traceback.print_exc()
                    if thread_id in self.dying_threads:
                        # That last exception was intended to kill me
                        break
                finally:
                    try:
                        del self.worker_tracker[thread_id]
                    except KeyError:
                        pass
                    sys.exc_clear()
                self.idle_workers.append(thread_id)
        finally:
            try:
                del self.worker_tracker[thread_id]
            except KeyError:
                pass
            try:
                self.idle_workers.remove(thread_id)
            except ValueError:
                pass
            try:
                self.workers.remove(thread_obj)
            except ValueError:
                pass
            try:
                del self.dying_threads[thread_id]
            except KeyError:
                pass
            if add_replacement_worker:
                self.add_worker_thread(message='Voluntary replacement for thread %s' % thread_id)

    def shutdown(self, force_quit_timeout=0):
        """
        Shutdown the queue (after finishing any pending requests).
        """
        self.logger.info('Shutting down threadpool')
        # Add a shutdown request for every worker
        for i in range(len(self.workers)):
            self.queue.put(ThreadPool.SHUTDOWN)
        # Wait for each thread to terminate
        hung_workers = []
        for worker in self.workers:
            worker.join(0.5)
            if worker.isAlive():
                hung_workers.append(worker)
        zombies = []
        for thread_id in self.dying_threads:
            if self.thread_exists(thread_id):
                zombies.append(thread_id)
        if hung_workers or zombies:
            self.logger.info("%s workers didn't stop properly, and %s zombies",
                             len(hung_workers), len(zombies))
            if hung_workers:
                for worker in hung_workers:
                    self.kill_worker(worker.thread_id)
                self.logger.info('Workers killed forcefully')
            if force_quit_timeout:
                timed_out = False
                need_force_quit = bool(zombies)
                for worker in self.workers:
                    if not timed_out and worker.isAlive():
                        timed_out = True
                        worker.join(force_quit_timeout)
                    if worker.isAlive():
                        print "Worker %s won't die" % worker
                        need_force_quit = True
                if need_force_quit:
                    import atexit
                    # Remove the threading atexit callback
                    for callback in list(atexit._exithandlers):
                        func = getattr(callback[0], 'im_func', None)
                        if not func:
                            continue
                        globs = getattr(func, 'func_globals', {})
                        mod = globs.get('__name__')
                        if mod == 'threading':
                            atexit._exithandlers.remove(callback)
                    atexit._run_exitfuncs()
                    print 'Forcefully exiting process'
                    os._exit(3)
                else:
                    self.logger.info('All workers eventually killed')
        else:
            self.logger.info('All workers stopped')

    def notify_problem(self, msg, subject=None, spawn_thread=True):
        """
        Called when there's a substantial problem.  msg contains the
        body of the notification, subject the summary.

        If spawn_thread is true, then the email will be sent in
        another thread (so this doesn't block).
        """
        if not self.error_email:
            return
        if spawn_thread:
            t = threading.Thread(
                target=self.notify_problem,
                args=(msg, subject, False))
            t.start()
            return
        from_address = 'errors@localhost'
        if not subject:
            subject = msg.strip().splitlines()[0]
            subject = subject[:50]
            subject = '[http threadpool] %s' % subject
        headers = [
            "To: %s" % self.error_email,
            "From: %s" % from_address,
            "Subject: %s" % subject,
            ]
        try:
            system = ' '.join(os.uname())
        except:
            system = '(unknown)'
        body = (
            "An error has occurred in the paste.httpserver.ThreadPool\n"
            "Error:\n"
            "  %(msg)s\n"
            "Occurred at: %(time)s\n"
            "PID: %(pid)s\n"
            "System: %(system)s\n"
            "Server .py file: %(file)s\n"
            % dict(msg=msg,
                   time=time.strftime("%c"),
                   pid=os.getpid(),
                   system=system,
                   file=os.path.abspath(__file__),
                   ))
        message = '\n'.join(headers) + "\n\n" + body
        import smtplib
        server = smtplib.SMTP('localhost')
        error_emails = [
            e.strip() for e in self.error_email.split(",")
            if e.strip()]
        server.sendmail(from_address, error_emails, message)
        server.quit()
        print 'email sent to', error_emails, message

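# Illustrative sketch (not part of the original module): the pool can be
# exercised on its own, without the HTTP machinery.  The sleep is only to
# give a worker a moment to pick the task up.
def _thread_pool_example():
    results = []
    pool = ThreadPool(2, name='example pool', daemon=True)
    pool.add_task(lambda: results.append(42))
    time.sleep(0.5)
    pool.shutdown()
    return results
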
class ThreadPoolMixIn(object):
    """
    Mix-in class to process requests from a thread pool
    """
    def __init__(self, nworkers, daemon=False, **threadpool_options):
        # Create and start the workers
        self.running = True
        assert nworkers > 0, "ThreadPoolMixIn servers must have at least one worker"
        self.thread_pool = ThreadPool(
            nworkers,
            "ThreadPoolMixIn HTTP server on %s:%d"
            % (self.server_name, self.server_port),
            daemon,
            **threadpool_options)

    def process_request(self, request, client_address):
        """
        Queue the request to be processed by one of the thread pool threads
        """
        # This sets the socket to blocking mode (and no timeout) since it
        # may take the thread pool a little while to get back to it. (This
        # is the default, but since we set a timeout on the parent socket
        # so that we can trap interrupts, we need to restore it here.)
        request.setblocking(1)
        # Queue processing of the request
        self.thread_pool.add_task(
             lambda: self.process_request_in_thread(request, client_address))

    def handle_error(self, request, client_address):
        exc_class, exc, tb = sys.exc_info()
        if exc_class is ServerExit:
            # This is actually a request to stop the server
            raise
        return super(ThreadPoolMixIn, self).handle_error(request, client_address)

    def process_request_in_thread(self, request, client_address):
        """
        The worker thread should call back here to do the rest of the
        request processing. Error handling normally done in 'handle_request'
        must be done here.
        """
        try:
            self.finish_request(request, client_address)
            self.close_request(request)
        except:
            self.handle_error(request, client_address)
            self.close_request(request)
            exc = sys.exc_info()[1]
            if isinstance(exc, (MemoryError, KeyboardInterrupt)):
                raise

    def serve_forever(self):
        """
        Overrides `serve_forever` to shut the threadpool down cleanly.
        """
        try:
            while self.running:
                try:
                    self.handle_request()
                except socket.timeout:
                    # Timeout is expected; it gives interrupts a chance to
                    # propagate, so just keep handling
                    pass
        finally:
            self.thread_pool.shutdown()

    def server_activate(self):
        """
        Overrides server_activate to set timeout on our listener socket.
        """
        # We set the timeout here so that we can trap interrupts on windows
        self.socket.settimeout(1)

    def server_close(self):
        """
        Finish pending requests and shutdown the server.
        """
        self.running = False
        self.socket.close()
        self.thread_pool.shutdown(60)

class WSGIServerBase(SecureHTTPServer):
    def __init__(self, wsgi_application, server_address,
                 RequestHandlerClass=None, ssl_context=None,
                 request_queue_size=None):
        SecureHTTPServer.__init__(self, server_address,
                                  RequestHandlerClass, ssl_context,
                                  request_queue_size=request_queue_size)
        self.wsgi_application = wsgi_application
        self.wsgi_socket_timeout = None

    def get_request(self):
        # If there is a socket_timeout, set it on the accepted connection
        (conn, info) = SecureHTTPServer.get_request(self)
        if self.wsgi_socket_timeout:
            conn.settimeout(self.wsgi_socket_timeout)
        return (conn, info)

class WSGIServer(ThreadingMixIn, WSGIServerBase):
    daemon_threads = False

class WSGIThreadPoolServer(ThreadPoolMixIn, WSGIServerBase):
    def __init__(self, wsgi_application, server_address,
                 RequestHandlerClass=None, ssl_context=None,
                 nworkers=10, daemon_threads=False,
                 threadpool_options=None, request_queue_size=None):
        WSGIServerBase.__init__(self, wsgi_application, server_address,
                                RequestHandlerClass, ssl_context,
                                request_queue_size=request_queue_size)
        if threadpool_options is None:
            threadpool_options = {}
        ThreadPoolMixIn.__init__(self, nworkers, daemon_threads,
                                 **threadpool_options)

class ServerExit(SystemExit):
    """
    Raised to tell the server to really exit (SystemExit is normally
    caught)
    """

def serve(application, host=None, port=None, handler=None, ssl_pem=None,
          ssl_context=None, server_version=None, protocol_version=None,
          start_loop=True, daemon_threads=None, socket_timeout=None,
          use_threadpool=None, threadpool_workers=10,
          threadpool_options=None, request_queue_size=5):
    """
    Serves your ``application`` over HTTP(S) via WSGI interface

    ``host``

        This is the IP address to bind to (or a hostname if your
        nameserver is properly configured).  This defaults to
        127.0.0.1, which is not a public interface.

    ``port``

        The port to run on, defaults to 8080 for HTTP, or 4443 for
        HTTPS. This can be a string or an integer value.

    ``handler``

        This is the HTTP request handler to use; it defaults to
        ``WSGIHandler`` in this module.

    ``ssl_pem``

        This is an optional SSL certificate file (via OpenSSL). You can
        supply ``*`` and a development-only certificate will be
        created for you, or you can generate a self-signed test PEM
        certificate file as follows::

            $ openssl genrsa 1024 > host.key
            $ chmod 400 host.key
            $ openssl req -new -x509 -nodes -sha1 -days 365  \\
                          -key host.key > host.cert
            $ cat host.cert host.key > host.pem
            $ chmod 400 host.pem

    ``ssl_context``

        This is an optional SSL context object for the server.  An SSL
        context will be automatically constructed for you if you supply
        ``ssl_pem``.  Supply this to use a context of your own
        construction.

    ``server_version``

        The version of the server as reported in the HTTP response line.
        This defaults to something like "PasteWSGIServer/0.5".  Many
        servers hide their code-base identity with a name like
        'Amnesiac/1.0'.

    ``protocol_version``

        This sets the protocol used by the server, by default
        ``HTTP/1.0``. There is some support for ``HTTP/1.1``, which
        defaults to nicer keep-alive connections.  This server supports
        ``100 Continue``, but does not yet support HTTP/1.1 Chunked
        Encoding. Hence, if you use HTTP/1.1, you're somewhat in error
        since chunked coding is a mandatory requirement of an HTTP/1.1
        server.  If you specify HTTP/1.1, every response *must* have a
        ``Content-Length`` and you must be careful not to read past the
        end of the socket.

    ``start_loop``

        This specifies if the server loop (aka ``server.serve_forever()``)
        should be called; it defaults to ``True``.

    ``daemon_threads``

        This flag specifies whether, when your webserver terminates, all
        in-progress client connections should be dropped.  It defaults
        to ``False``.  You might want to set this to ``True`` if you
        are using ``HTTP/1.1`` and don't set a ``socket_timeout``.

    ``socket_timeout``

        This specifies the maximum amount of time that a connection to a
        given client will be kept open.  At this time, it is a rude
        disconnect, but at a later time it might follow the RFC a bit
        more closely.

    ``use_threadpool``

        Serve requests from a pool of worker threads (``threadpool_workers``)
        rather than creating a new thread for each request. This can
        substantially reduce latency since there is a high cost associated
        with thread creation.

    ``threadpool_workers``

        Number of worker threads to create when ``use_threadpool`` is true. This
        can be a string or an integer value.

    ``threadpool_options``

        A dictionary of options to be used when instantiating the
        threadpool.  See paste.httpserver.ThreadPool for specific
        options (``threadpool_workers`` is a specific option that can
        also go here).

    ``request_queue_size``

        The 'backlog' argument to socket.listen(); specifies the
        maximum number of queued connections.

    """
    is_ssl = False
    if ssl_pem or ssl_context:
        assert SSL, "pyOpenSSL is not installed"
        is_ssl = True
        port = int(port or 4443)
        if not ssl_context:
            if ssl_pem == '*':
                ssl_context = _auto_ssl_context()
            else:
                ssl_context = SSL.Context(SSL.SSLv23_METHOD)
                ssl_context.use_privatekey_file(ssl_pem)
                ssl_context.use_certificate_chain_file(ssl_pem)

    host = host or '127.0.0.1'
    if port is None:
        if ':' in host:
            host, port = host.split(':', 1)
        else:
            port = 8080
    server_address = (host, int(port))

    if not handler:
        handler = WSGIHandler
    if server_version:
        handler.server_version = server_version
        handler.sys_version = None
    if protocol_version:
        assert protocol_version in ('HTTP/0.9', 'HTTP/1.0', 'HTTP/1.1')
        handler.protocol_version = protocol_version

    if use_threadpool is None:
        use_threadpool = True

    if converters.asbool(use_threadpool):
        server = WSGIThreadPoolServer(application, server_address, handler,
                                      ssl_context, int(threadpool_workers),
                                      daemon_threads,
                                      threadpool_options=threadpool_options,
                                      request_queue_size=request_queue_size)
    else:
        server = WSGIServer(application, server_address, handler, ssl_context,
                            request_queue_size=request_queue_size)
        if daemon_threads:
            server.daemon_threads = daemon_threads

    if socket_timeout:
        server.wsgi_socket_timeout = int(socket_timeout)

    if converters.asbool(start_loop):
        protocol = is_ssl and 'https' or 'http'
        host, port = server.server_address[:2]
        if host == '0.0.0.0':
            print 'serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' % \
                (port, protocol, port)
        else:
            print "serving on %s://%s:%s" % (protocol, host, port)
        try:
            server.serve_forever()
        except KeyboardInterrupt:
            # allow CTRL+C to shutdown
            pass
    return server

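# Hedged usage sketch (not part of the original module): start the server
# without entering the loop (start_loop=False), which returns the server
# object so the caller can drive or shut it down itself.
def _serve_example():
    def hello_app(environ, start_response):
        body = 'Hello, world!\n'
        start_response('200 OK', [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
        return [body]
    server = serve(hello_app, host='127.0.0.1', port='8080', start_loop=False)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        server.server_close()
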
# For paste.deploy server instantiation (egg:Paste#http)
# Note: this gets a separate function because it has to expect string
# arguments (though that's not much of an issue yet, ever?)
def server_runner(wsgi_app, global_conf, **kwargs):
    from paste.deploy.converters import asbool
    for name in ['port', 'socket_timeout', 'threadpool_workers',
                 'threadpool_hung_thread_limit',
                 'threadpool_kill_thread_limit',
                 'threadpool_dying_limit', 'threadpool_spawn_if_under',
                 'threadpool_max_zombie_threads_before_die',
                 'threadpool_hung_check_period',
                 'threadpool_max_requests', 'request_queue_size']:
        if name in kwargs:
            kwargs[name] = int(kwargs[name])
    for name in ['use_threadpool', 'daemon_threads']:
        if name in kwargs:
            kwargs[name] = asbool(kwargs[name])
    threadpool_options = {}
    for name, value in kwargs.items():
        if name.startswith('threadpool_') and name != 'threadpool_workers':
            threadpool_options[name[len('threadpool_'):]] = value
            del kwargs[name]
    if ('error_email' not in threadpool_options
        and 'error_email' in global_conf):
        threadpool_options['error_email'] = global_conf['error_email']
    kwargs['threadpool_options'] = threadpool_options
    serve(wsgi_app, **kwargs)

server_runner.__doc__ = (serve.__doc__ or '') + """

    You can also set these threadpool options:

    ``threadpool_max_requests``:

        The maximum number of requests a worker thread will process
        before dying (and replacing itself with a new worker thread).
        Default 100.

    ``threadpool_hung_thread_limit``:

        The number of seconds a thread can work on a task before it is
        considered hung (stuck).  Default 30 seconds.

    ``threadpool_kill_thread_limit``:

        The number of seconds a thread can work before you should kill it
        (assuming it will never finish).  Default 1800 seconds (30 minutes).

    ``threadpool_dying_limit``:

        The length of time after killing a thread that it should actually
        disappear.  If it lives longer than this, it is considered a
        "zombie".  Note that even in easy situations killing a thread can
        be very slow.  Default 300 seconds (5 minutes).

    ``threadpool_spawn_if_under``:

        If there are no idle threads and a request comes in, and there are
        fewer than this number of *busy* threads, then add workers to the
        pool.  Busy threads are threads that have taken less than
        ``threadpool_hung_thread_limit`` seconds so far.  So if you get
        *lots* of requests but they complete in a reasonable amount of time,
        the requests will simply queue up (adding more threads probably
        wouldn't speed them up).  But if you have lots of hung threads and
        one more request comes in, this will add workers to handle it.
        Default 5.

    ``threadpool_max_zombie_threads_before_die``:

        If there are more zombies than this, just kill the process.  This is
        only good if you have a monitor that will automatically restart
        the server.  This can clean up the mess.  Default 0 (disabled).

    ``threadpool_hung_check_period``:

        Every X requests, check for hung threads that need to be killed,
        or for zombie threads that should cause a restart.  Default 100
        requests.

    ``threadpool_logger``:

        Logging messages will go to the logger named here.

    ``threadpool_error_email`` (or global ``error_email`` setting):

        When threads are killed or the process restarted, this email
        address will be contacted (using an SMTP server on localhost).

"""

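# Hedged example (added; values are illustrative): a paste.deploy INI section
# wiring this server up via the egg:Paste#http entry point noted above
# server_runner:
#
#   [server:main]
#   use = egg:Paste#http
#   host = 0.0.0.0
#   port = 8080
#   use_threadpool = true
#   threadpool_workers = 10
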
if __name__ == '__main__':
    from paste.wsgilib import dump_environ
    #serve(dump_environ, ssl_pem="test.pem")
    serve(dump_environ, server_version="Wombles/1.0",
          protocol_version="HTTP/1.1", port="8888")