瀏覽代碼

removed boilerplate web2py code in lieu of downloading from git in install.sh

Keith Newman 10 年之前
父節點
當前提交
39ae32082f
共有 100 個文件被更改,包括 6 次插入和 27201 次刪除
  1. 0 0
      frameworks/Python/web2py/app/app/ABOUT
  2. 0 0
      frameworks/Python/web2py/app/app/LICENSE
  3. 0 0
      frameworks/Python/web2py/app/app/__init__.py
  4. 0 0
      frameworks/Python/web2py/app/app/controllers/appadmin.py
  5. 0 0
      frameworks/Python/web2py/app/app/controllers/default.py
  6. 0 0
      frameworks/Python/web2py/app/app/databases/7858106637800f617850eed16452cf10_fortune.table
  7. 0 0
      frameworks/Python/web2py/app/app/databases/7858106637800f617850eed16452cf10_world.table
  8. 0 0
      frameworks/Python/web2py/app/app/models/db.py
  9. 0 0
      frameworks/Python/web2py/app/app/models/menu.py
  10. 0 0
      frameworks/Python/web2py/app/app/private/appconfig.ini
  11. 0 0
      frameworks/Python/web2py/app/app/routes.example.py
  12. 0 0
      frameworks/Python/web2py/app/app/views/__init__.py
  13. 0 0
      frameworks/Python/web2py/app/app/views/appadmin.html
  14. 0 0
      frameworks/Python/web2py/app/app/views/default/fortune.html
  15. 0 0
      frameworks/Python/web2py/app/app/views/default/index.html
  16. 0 0
      frameworks/Python/web2py/app/app/views/default/user.html
  17. 0 0
      frameworks/Python/web2py/app/app/views/generic.html
  18. 0 0
      frameworks/Python/web2py/app/app/views/generic.ics
  19. 0 0
      frameworks/Python/web2py/app/app/views/generic.json
  20. 0 0
      frameworks/Python/web2py/app/app/views/generic.jsonp
  21. 0 0
      frameworks/Python/web2py/app/app/views/generic.load
  22. 0 0
      frameworks/Python/web2py/app/app/views/generic.map
  23. 0 0
      frameworks/Python/web2py/app/app/views/generic.pdf
  24. 0 0
      frameworks/Python/web2py/app/app/views/generic.rss
  25. 0 0
      frameworks/Python/web2py/app/app/views/generic.xml
  26. 0 0
      frameworks/Python/web2py/app/app/views/layout.html
  27. 0 0
      frameworks/Python/web2py/app/app/views/web2py_ajax.html
  28. 0 0
      frameworks/Python/web2py/app/routes.py
  29. 6 0
      frameworks/Python/web2py/install.sh
  30. 0 1
      frameworks/Python/web2py/requirements.txt
  31. 0 1
      frameworks/Python/web2py/web2py/VERSION
  32. 0 360
      frameworks/Python/web2py/web2py/anyserver.py
  33. 0 1
      frameworks/Python/web2py/web2py/applications/app/views/__init__.py
  34. 0 61
      frameworks/Python/web2py/web2py/gluon/__init__.py
  35. 0 461
      frameworks/Python/web2py/web2py/gluon/admin.py
  36. 0 756
      frameworks/Python/web2py/web2py/gluon/cache.py
  37. 0 54
      frameworks/Python/web2py/web2py/gluon/cfs.py
  38. 0 759
      frameworks/Python/web2py/web2py/gluon/compileapp.py
  39. 0 854
      frameworks/Python/web2py/web2py/gluon/contenttype.py
  40. 0 270
      frameworks/Python/web2py/web2py/gluon/contrib/AuthorizeNet.py
  41. 0 244
      frameworks/Python/web2py/web2py/gluon/contrib/DowCommerce.py
  42. 0 0
      frameworks/Python/web2py/web2py/gluon/contrib/__init__.py
  43. 0 502
      frameworks/Python/web2py/web2py/gluon/contrib/aes.py
  44. 0 123
      frameworks/Python/web2py/web2py/gluon/contrib/appconfig.py
  45. 0 220
      frameworks/Python/web2py/web2py/gluon/contrib/autolinks.py
  46. 0 4013
      frameworks/Python/web2py/web2py/gluon/contrib/feedparser.py
  47. 0 16
      frameworks/Python/web2py/web2py/gluon/contrib/fpdf/__init__.py
  48. 0 156
      frameworks/Python/web2py/web2py/gluon/contrib/fpdf/fonts.py
  49. 0 1920
      frameworks/Python/web2py/web2py/gluon/contrib/fpdf/fpdf.py
  50. 0 398
      frameworks/Python/web2py/web2py/gluon/contrib/fpdf/html.py
  51. 0 49
      frameworks/Python/web2py/web2py/gluon/contrib/fpdf/php.py
  52. 0 301
      frameworks/Python/web2py/web2py/gluon/contrib/fpdf/template.py
  53. 0 1083
      frameworks/Python/web2py/web2py/gluon/contrib/fpdf/ttfonts.py
  54. 0 74
      frameworks/Python/web2py/web2py/gluon/contrib/gae_memcache.py
  55. 0 89
      frameworks/Python/web2py/web2py/gluon/contrib/gae_retry.py
  56. 0 2
      frameworks/Python/web2py/web2py/gluon/contrib/gateways/__init__.py
  57. 0 1332
      frameworks/Python/web2py/web2py/gluon/contrib/gateways/fcgi.py
  58. 0 76
      frameworks/Python/web2py/web2py/gluon/contrib/generics.py
  59. 0 15
      frameworks/Python/web2py/web2py/gluon/contrib/google_wallet.py
  60. 0 30
      frameworks/Python/web2py/web2py/gluon/contrib/heroku.py
  61. 0 342
      frameworks/Python/web2py/web2py/gluon/contrib/hypermedia.py
  62. 0 61
      frameworks/Python/web2py/web2py/gluon/contrib/imageutils.py
  63. 0 1865
      frameworks/Python/web2py/web2py/gluon/contrib/ipaddr.py
  64. 0 1
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/__init__.py
  65. 0 24
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/basic_auth.py
  66. 0 95
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/browserid_account.py
  67. 0 140
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/cas_auth.py
  68. 0 130
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/dropbox_account.py
  69. 0 46
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/email_auth.py
  70. 0 105
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/extended_login_form.py
  71. 0 41
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/gae_google_account.py
  72. 0 138
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/janrain_account.py
  73. 0 688
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/ldap_auth.py
  74. 0 51
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/linkedin_account.py
  75. 0 97
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/loginradius_account.py
  76. 0 115
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/loginza.py
  77. 0 111
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/motp_auth.py
  78. 0 184
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/oauth10a_account.py
  79. 0 291
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/oauth20_account.py
  80. 0 107
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/oneall_account.py
  81. 0 653
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/openid_auth.py
  82. 0 22
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/pam_auth.py
  83. 0 134
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/rpx_account.py
  84. 0 129
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/saml2_auth.py
  85. 0 98
      frameworks/Python/web2py/web2py/gluon/contrib/login_methods/x509_auth.py
  86. 0 1
      frameworks/Python/web2py/web2py/gluon/contrib/markdown/LICENSE
  87. 0 17
      frameworks/Python/web2py/web2py/gluon/contrib/markdown/__init__.py
  88. 0 2365
      frameworks/Python/web2py/web2py/gluon/contrib/markdown/markdown2.py
  89. 0 2
      frameworks/Python/web2py/web2py/gluon/contrib/markmin/__init__.py
  90. 0 27
      frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin.html
  91. 二進制
      frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin.pdf
  92. 0 1505
      frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin2html.py
  93. 0 291
      frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin2latex.py
  94. 0 130
      frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin2pdf.py
  95. 0 362
      frameworks/Python/web2py/web2py/gluon/contrib/memcache/ChangeLog
  96. 0 10
      frameworks/Python/web2py/web2py/gluon/contrib/memcache/PKG-INFO
  97. 0 8
      frameworks/Python/web2py/web2py/gluon/contrib/memcache/README
  98. 0 111
      frameworks/Python/web2py/web2py/gluon/contrib/memcache/__init__.py
  99. 0 1578
      frameworks/Python/web2py/web2py/gluon/contrib/memcache/memcache.py
  100. 0 905
      frameworks/Python/web2py/web2py/gluon/contrib/memdb.py

+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/ABOUT → frameworks/Python/web2py/app/app/ABOUT


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/LICENSE → frameworks/Python/web2py/app/app/LICENSE


+ 0 - 0
frameworks/Python/web2py/web2py/applications/__init__.py → frameworks/Python/web2py/app/app/__init__.py


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/controllers/appadmin.py → frameworks/Python/web2py/app/app/controllers/appadmin.py


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/controllers/default.py → frameworks/Python/web2py/app/app/controllers/default.py


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/databases/7858106637800f617850eed16452cf10_fortune.table → frameworks/Python/web2py/app/app/databases/7858106637800f617850eed16452cf10_fortune.table


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/databases/7858106637800f617850eed16452cf10_world.table → frameworks/Python/web2py/app/app/databases/7858106637800f617850eed16452cf10_world.table


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/models/db.py → frameworks/Python/web2py/app/app/models/db.py


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/models/menu.py → frameworks/Python/web2py/app/app/models/menu.py


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/private/appconfig.ini → frameworks/Python/web2py/app/app/private/appconfig.ini


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/routes.example.py → frameworks/Python/web2py/app/app/routes.example.py


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/__init__.py → frameworks/Python/web2py/app/app/views/__init__.py


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/appadmin.html → frameworks/Python/web2py/app/app/views/appadmin.html


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/default/fortune.html → frameworks/Python/web2py/app/app/views/default/fortune.html


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/default/index.html → frameworks/Python/web2py/app/app/views/default/index.html


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/default/user.html → frameworks/Python/web2py/app/app/views/default/user.html


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.html → frameworks/Python/web2py/app/app/views/generic.html


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.ics → frameworks/Python/web2py/app/app/views/generic.ics


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.json → frameworks/Python/web2py/app/app/views/generic.json


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.jsonp → frameworks/Python/web2py/app/app/views/generic.jsonp


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.load → frameworks/Python/web2py/app/app/views/generic.load


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.map → frameworks/Python/web2py/app/app/views/generic.map


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.pdf → frameworks/Python/web2py/app/app/views/generic.pdf


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.rss → frameworks/Python/web2py/app/app/views/generic.rss


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/generic.xml → frameworks/Python/web2py/app/app/views/generic.xml


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/layout.html → frameworks/Python/web2py/app/app/views/layout.html


+ 0 - 0
frameworks/Python/web2py/web2py/applications/app/views/web2py_ajax.html → frameworks/Python/web2py/app/app/views/web2py_ajax.html


+ 0 - 0
frameworks/Python/web2py/web2py/routes.py → frameworks/Python/web2py/app/routes.py


+ 6 - 0
frameworks/Python/web2py/install.sh

@@ -10,3 +10,9 @@ export PY2_ROOT=$IROOT/py2
 export PY2_PIP=$PY2_ROOT/bin/pip
 
 $PY2_PIP install --install-option="--prefix=${PY2_ROOT}" -r $TROOT/requirements.txt
+
+cd $TROOT
+rm -fr web2py
+git clone --recursive https://github.com/web2py/web2py.git 
+cp -r app/app/ web2py/applications/
+cp app/routes.py web2py/

+ 0 - 1
frameworks/Python/web2py/requirements.txt

@@ -1,2 +1 @@
-pydal
 mysqlclient==1.3.6

+ 0 - 1
frameworks/Python/web2py/web2py/VERSION

@@ -1 +0,0 @@
-Version 2.10.3-stable+timestamp.2015.04.02.21.42.07

+ 0 - 360
frameworks/Python/web2py/web2py/anyserver.py

@@ -1,360 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-This file is part of the web2py Web Framework
-Copyrighted by Massimo Di Pierro <[email protected]>
-License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-This file is based, although a rewrite, on MIT-licensed code from the Bottle web framework.
-"""
-
-import os
-import sys
-import optparse
-import urllib
-
-path = os.path.dirname(os.path.abspath(__file__))
-os.chdir(path)
-sys.path = [path] + [p for p in sys.path if not p == path]
-
-
-class Servers:
-    @staticmethod
-    def cgi(app, address=None, **options):
-        from wsgiref.handlers import CGIHandler
-        CGIHandler().run(app)  # Just ignore host and port here
-
-    @staticmethod
-    def flup(app, address, **options):
-        import flup.server.fcgi
-        flup.server.fcgi.WSGIServer(app, bindAddress=address).run()
-
-    @staticmethod
-    def wsgiref(app, address, **options):  # pragma: no cover
-        from wsgiref.simple_server import make_server, WSGIRequestHandler
-        options = {}
-        class QuietHandler(WSGIRequestHandler):
-            def log_request(*args, **kw):
-                pass
-        options['handler_class'] = QuietHandler
-        srv = make_server(address[0], address[1], app, **options)
-        srv.serve_forever()
-
-    @staticmethod
-    def cherrypy(app, address, **options):
-        from cherrypy import wsgiserver
-        server = wsgiserver.CherryPyWSGIServer(address, app)
-        server.start()
-
-    @staticmethod
-    def rocket(app, address, **options):
-        from gluon.rocket import CherryPyWSGIServer
-        server = CherryPyWSGIServer(address, app)
-        server.start()
-
-    @staticmethod
-    def rocket_with_repoze_profiler(app, address, **options):
-        from gluon.rocket import CherryPyWSGIServer
-        from repoze.profile.profiler import AccumulatingProfileMiddleware
-        from gluon.settings import global_settings
-        global_settings.web2py_crontype = 'none'
-        wrapped = AccumulatingProfileMiddleware(
-            app,
-            log_filename='wsgi.prof',
-            discard_first_request=True,
-            flush_at_shutdown=True,
-            path='/__profile__'
-        )
-        server = CherryPyWSGIServer(address, wrapped)
-        server.start()
-
-    @staticmethod
-    def paste(app, address, **options):
-        options = {}
-        from paste import httpserver
-        from paste.translogger import TransLogger
-        httpserver.serve(app, host=address[0], port=address[1], **options)
-
-    @staticmethod
-    def fapws(app, address, **options):
-        import fapws._evwsgi as evwsgi
-        from fapws import base
-        evwsgi.start(address[0], str(address[1]))
-        evwsgi.set_base_module(base)
-
-        def app(environ, start_response):
-            environ['wsgi.multiprocess'] = False
-            return app(environ, start_response)
-        evwsgi.wsgi_cb(('', app))
-        evwsgi.run()
-
-    @staticmethod
-    def gevent(app, address, **options):
-        options = options['options']
-        workers = options.workers
-        from gevent import pywsgi
-        from gevent.pool import Pool
-        pywsgi.WSGIServer(address, app, spawn=workers and Pool(
-            int(options.workers)) or 'default', log=None).serve_forever()
-
-    @staticmethod
-    def bjoern(app, address, **options):
-        import bjoern
-        bjoern.run(app, *address)
-
-    @staticmethod
-    def tornado(app, address, **options):
-        import tornado.wsgi
-        import tornado.httpserver
-        import tornado.ioloop
-        container = tornado.wsgi.WSGIContainer(app)
-        server = tornado.httpserver.HTTPServer(container)
-        server.listen(address=address[0], port=address[1])
-        tornado.ioloop.IOLoop.instance().start()
-
-    @staticmethod
-    def twisted(app, address, **options):
-        from twisted.web import server, wsgi
-        from twisted.python.threadpool import ThreadPool
-        from twisted.internet import reactor
-        thread_pool = ThreadPool()
-        thread_pool.start()
-        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
-        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
-        reactor.listenTCP(address[1], factory, interface=address[0])
-        reactor.run()
-
-    @staticmethod
-    def diesel(app, address, **options):
-        from diesel.protocols.wsgi import WSGIApplication
-        app = WSGIApplication(app, port=address[1])
-        app.run()
-
-    @staticmethod
-    def gunicorn(app, address, **options):
-        options = {}
-        from gunicorn.app.base import Application
-        config = {'bind': "%s:%d" % address}
-        config.update(options)
-        sys.argv = ['anyserver.py']
-
-        class GunicornApplication(Application):
-            def init(self, parser, opts, args):
-                return config
-
-            def load(self):
-                return app
-        g = GunicornApplication()
-        g.run()
-
-    @staticmethod
-    def eventlet(app, address, **options):
-        from eventlet import wsgi, listen
-        wsgi.server(listen(address), app)
-
-    @staticmethod
-    def mongrel2(app, address, **options):
-        import uuid
-        sys.path.append(os.path.abspath(os.path.dirname(__file__)))
-        from mongrel2 import handler
-        conn = handler.Connection(str(uuid.uuid4()),
-                                  "tcp://127.0.0.1:9997",
-                                  "tcp://127.0.0.1:9996")
-        mongrel2_handler(app, conn, debug=False)
-
-    @staticmethod
-    def motor(app, address, **options):
-        #https://github.com/rpedroso/motor
-        import motor
-        app = motor.WSGIContainer(app)
-        http_server = motor.HTTPServer(app)
-        http_server.listen(address=address[0], port=address[1])
-        #http_server.start(2)
-        motor.IOLoop.instance().start()
-
-    @staticmethod
-    def pulsar(app, address, **options):
-        from pulsar.apps import wsgi
-        sys.argv = ['anyserver.py']
-        s = wsgi.WSGIServer(callable=app, bind="%s:%d" % address)
-        s.start()
-
-
-def mongrel2_handler(application, conn, debug=False):
-    """
-    Based on :
-    https://github.com/berry/Mongrel2-WSGI-Handler/blob/master/wsgi-handler.py
-
-    WSGI handler based on the Python wsgiref SimpleHandler.
-    A WSGI application should return a iterable op StringTypes.
-    Any encoding must be handled by the WSGI application itself.
-    """
-    from wsgiref.handlers import SimpleHandler
-    try:
-        import cStringIO as StringIO
-    except:
-        import StringIO
-
-    # TODO - this wsgi handler executes the application and renders a page
-    # in memory completely before returning it as a response to the client.
-    # Thus, it does not "stream" the result back to the client. It should be
-    # possible though. The SimpleHandler accepts file-like stream objects. So,
-    # it should be just a matter of connecting 0MQ requests/response streams to
-    # the SimpleHandler requests and response streams. However, the Python API
-    # for Mongrel2 doesn't seem to support file-like stream objects for requests
-    # and responses. Unless I have missed something.
-
-    while True:
-        if debug:
-            print "WAITING FOR REQUEST"
-
-        # receive a request
-        req = conn.recv()
-        if debug:
-            print "REQUEST BODY: %r\n" % req.body
-
-        if req.is_disconnect():
-            if debug:
-                print "DISCONNECT"
-            continue  # effectively ignore the disconnect from the client
-
-        # Set a couple of environment attributes a.k.a. header attributes
-        # that are a must according to PEP 333
-        environ = req.headers
-        environ['SERVER_PROTOCOL'] = 'HTTP/1.1'  # SimpleHandler expects a server_protocol, lets assume it is HTTP 1.1
-        environ['REQUEST_METHOD'] = environ['METHOD']
-        if ':' in environ['Host']:
-            environ['SERVER_NAME'] = environ['Host'].split(':')[0]
-            environ['SERVER_PORT'] = environ['Host'].split(':')[1]
-        else:
-            environ['SERVER_NAME'] = environ['Host']
-            environ['SERVER_PORT'] = ''
-        environ['SCRIPT_NAME'] = ''  # empty for now
-        environ['PATH_INFO'] = urllib.unquote(environ['PATH'])
-        if '?' in environ['URI']:
-            environ['QUERY_STRING'] = environ['URI'].split('?')[1]
-        else:
-            environ['QUERY_STRING'] = ''
-        if 'Content-Length' in environ:
-            environ['CONTENT_LENGTH'] = environ[
-                'Content-Length']  # necessary for POST to work with Django
-        environ['wsgi.input'] = req.body
-
-        if debug:
-            print "ENVIRON: %r\n" % environ
-
-        # SimpleHandler needs file-like stream objects for
-        # requests, errors and responses
-        reqIO = StringIO.StringIO(req.body)
-        errIO = StringIO.StringIO()
-        respIO = StringIO.StringIO()
-
-        # execute the application
-        handler = SimpleHandler(reqIO, respIO, errIO, environ,
-                                multithread=False, multiprocess=False)
-        handler.run(application)
-
-        # Get the response and filter out the response (=data) itself,
-        # the response headers,
-        # the response status code and the response status description
-        response = respIO.getvalue()
-        response = response.split("\r\n")
-        data = response[-1]
-        headers = dict([r.split(": ") for r in response[1:-2]])
-        code = response[0][9:12]
-        status = response[0][13:]
-
-        # strip BOM's from response data
-        # Especially the WSGI handler from Django seems to generate them (2 actually, huh?)
-        # a BOM isn't really necessary and cause HTML parsing errors in Chrome and Safari
-        # See also: http://www.xs4all.nl/~mechiel/projects/bomstrip/
-        # Although I still find this a ugly hack, it does work.
-        data = data.replace('\xef\xbb\xbf', '')
-
-        # Get the generated errors
-        errors = errIO.getvalue()
-
-        # return the response
-        if debug:
-            print "RESPONSE: %r\n" % response
-        if errors:
-            if debug:
-                print "ERRORS: %r" % errors
-            data = "%s\r\n\r\n%s" % (data, errors)
-        conn.reply_http(
-            req, data, code=code, status=status, headers=headers)
-
-
-def run(servername, ip, port, softcron=True, logging=False, profiler=None,
-        options=None):
-    if servername == 'gevent':
-        from gevent import monkey
-        monkey.patch_all()
-    elif servername == 'eventlet':
-        import eventlet
-        eventlet.monkey_patch()
-
-    import gluon.main
-
-    if logging:
-        application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
-                                            logfilename='httpserver.log',
-                                            profiler_dir=profiler)
-    else:
-        application = gluon.main.wsgibase
-    if softcron:
-        from gluon.settings import global_settings
-        global_settings.web2py_crontype = 'soft'
-    getattr(Servers, servername)(application, (ip, int(port)), options=options)
-
-
-
-def main():
-    usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P"
-    try:
-        version = open('VERSION','r')
-    except IOError:
-        version = ''
-    parser = optparse.OptionParser(usage, None, optparse.Option, version)
-    parser.add_option('-l',
-                      '--logging',
-                      action='store_true',
-                      default=False,
-                      dest='logging',
-                      help='log into httpserver.log')
-    parser.add_option('-P',
-                      '--profiler',
-                      default=False,
-                      dest='profiler_dir',
-                      help='profiler dir')
-    servers = ', '.join(x for x in dir(Servers) if not x[0] == '_')
-    parser.add_option('-s',
-                      '--server',
-                      default='rocket',
-                      dest='server',
-                      help='server name (%s)' % servers)
-    parser.add_option('-i',
-                      '--ip',
-                      default='127.0.0.1',
-                      dest='ip',
-                      help='ip address')
-    parser.add_option('-p',
-                      '--port',
-                      default='8000',
-                      dest='port',
-                      help='port number')
-    parser.add_option('-w',
-                      '--workers',
-                      default=None,
-                      dest='workers',
-                      help='number of workers number')
-    (options, args) = parser.parse_args()
-    print 'starting %s on %s:%s...' % (
-        options.server, options.ip, options.port)
-    run(options.server, options.ip, options.port,
-        logging=options.logging, profiler=options.profiler_dir,
-        options=options)
-
-if __name__ == '__main__':
-    main()

+ 0 - 1
frameworks/Python/web2py/web2py/applications/app/views/__init__.py

@@ -1 +0,0 @@
-

+ 0 - 61
frameworks/Python/web2py/web2py/gluon/__init__.py

@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-This file is part of the web2py Web Framework
-Copyrighted by Massimo Di Pierro <[email protected]>
-License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-
-Web2Py framework modules
-========================
-"""
-
-__all__ = ['A', 'B', 'BEAUTIFY', 'BODY', 'BR', 'CAT', 'CENTER', 'CLEANUP', 'CODE', 'CRYPT', 'DAL', 'DIV', 'EM', 'EMBED', 'FIELDSET', 'FORM', 'Field', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'HEAD', 'HR', 'HTML', 'HTTP', 'I', 'IFRAME', 'IMG', 'INPUT', 'IS_ALPHANUMERIC', 'IS_DATE', 'IS_DATETIME', 'IS_DATETIME_IN_RANGE', 'IS_DATE_IN_RANGE', 'IS_DECIMAL_IN_RANGE', 'IS_EMAIL', 'IS_LIST_OF_EMAILS', 'IS_EMPTY_OR', 'IS_EQUAL_TO', 'IS_EXPR', 'IS_FLOAT_IN_RANGE', 'IS_IMAGE', 'IS_JSON', 'IS_INT_IN_RANGE', 'IS_IN_DB', 'IS_IN_SET', 'IS_IPV4', 'IS_LENGTH', 'IS_LIST_OF', 'IS_LOWER', 'IS_MATCH', 'IS_NOT_EMPTY', 'IS_NOT_IN_DB', 'IS_NULL_OR', 'IS_SLUG', 'IS_STRONG', 'IS_TIME', 'IS_UPLOAD_FILENAME', 'IS_UPPER', 'IS_URL', 'LABEL', 'LEGEND', 'LI', 'LINK', 'LOAD', 'MARKMIN', 'MENU', 'META', 'OBJECT', 'OL', 'ON', 'OPTGROUP', 'OPTION', 'P', 'PRE', 'SCRIPT', 'SELECT', 'SPAN', 'SQLFORM', 'SQLTABLE', 'STRONG', 'STYLE', 'TABLE', 'TAG', 'TBODY', 'TD', 'TEXTAREA', 'TFOOT', 'TH', 'THEAD', 'TITLE', 'TR', 'TT', 'UL', 'URL', 'XHTML', 'XML', 'redirect', 'current', 'embed64']
-
-#: add pydal to sys.modules
-import os
-import sys
-try:
-    sys.path.append(os.path.join(
-        os.path.dirname(os.path.abspath(__file__)), "packages", "dal"))
-    import pydal
-    sys.modules['pydal'] = pydal
-except ImportError:
-    raise RuntimeError(
-        "web2py depends on pydal, which apparently you have not installed.\n" +
-        "Probably you cloned the repository using git without '--recursive'" +
-        "\nTo fix this, please run (from inside your web2py folder):\n\n" +
-        "     git submodule update --init --recursive\n\n" +
-        "You can also download a complete copy from http://www.web2py.com."
-    )
-
-from globals import current
-from html import *
-from validators import *
-from http import redirect, HTTP
-from dal import DAL, Field
-from sqlhtml import SQLFORM, SQLTABLE
-from compileapp import LOAD
-
-# Dummy code to enable code completion in IDE's.
-if 0:
-    from globals import Request, Response, Session
-    from cache import Cache
-    from languages import translator
-    from tools import Auth, Crud, Mail, Service, PluginManager
-
-    # API objects
-    request = Request()
-    response = Response()
-    session = Session()
-    cache = Cache(request)
-    T = translator(request)
-
-    # Objects commonly defined in application model files
-    # (names are conventions only -- not part of API)
-    db = DAL()
-    auth = Auth(db)
-    crud = Crud(db)
-    mail = Mail()
-    service = Service()
-    plugins = PluginManager()

+ 0 - 461
frameworks/Python/web2py/web2py/gluon/admin.py

@@ -1,461 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-| This file is part of the web2py Web Framework
-| Copyrighted by Massimo Di Pierro <[email protected]>
-| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-Utility functions for the Admin application
--------------------------------------------
-"""
-import os
-import sys
-import traceback
-import zipfile
-import urllib
-from shutil import rmtree
-from gluon.utils import web2py_uuid
-from gluon.fileutils import w2p_pack, w2p_unpack, w2p_pack_plugin, w2p_unpack_plugin
-from gluon.fileutils import up, fix_newlines, abspath, recursive_unlink
-from gluon.fileutils import read_file, write_file, parse_version
-from gluon.restricted import RestrictedError
-from gluon.settings import global_settings
-from gluon.cache import CacheOnDisk
-
-
-if not global_settings.web2py_runtime_gae:
-    import site
-
-
-def apath(path='', r=None):
-    """Builds a path inside an application folder
-
-    Args:
-        path(str): path within the application folder
-        r: the global request object
-
-    """
-
-    opath = up(r.folder)
-    while path[:3] == '../':
-        (opath, path) = (up(opath), path[3:])
-    return os.path.join(opath, path).replace('\\', '/')
-
-
-def app_pack(app, request, raise_ex=False, filenames=None):
-    """Builds a w2p package for the application
-
-    Args:
-        app(str): application name
-        request: the global request object
-    Returns:
-        filename of the w2p file or None on error
-
-    """
-    try:
-        if filenames is None: app_cleanup(app, request)
-        filename = apath('../deposit/web2py.app.%s.w2p' % app, request)
-        w2p_pack(filename, apath(app, request), filenames=filenames)
-        return filename
-    except Exception, e:
-        if raise_ex:
-            raise
-        return False
-
-
-def app_pack_compiled(app, request, raise_ex=False):
-    """Builds a w2p bytecode-compiled package for the application
-
-    Args:
-        app(str): application name
-        request: the global request object
-
-    Returns:
-        filename of the w2p file or None on error
-
-    """
-
-    try:
-        filename = apath('../deposit/%s.w2p' % app, request)
-        w2p_pack(filename, apath(app, request), compiled=True)
-        return filename
-    except Exception, e:
-        if raise_ex:
-            raise
-        return None
-
-
-def app_cleanup(app, request):
-    """Removes session, cache and error files
-
-    Args:
-        app(str): application name
-        request: the global request object
-
-    Returns:
-        True if everything went ok, False otherwise
-
-    """
-    r = True
-
-    # Remove error files
-    path = apath('%s/errors/' % app, request)
-    if os.path.exists(path):
-        for f in os.listdir(path):
-            try:
-                if f[:1] != '.': os.unlink(os.path.join(path, f))
-            except IOError:
-                r = False
-
-    # Remove session files
-    path = apath('%s/sessions/' % app, request)
-    if os.path.exists(path):
-        for f in os.listdir(path):
-            try:
-                if f[:1] != '.': recursive_unlink(os.path.join(path, f))
-            except (OSError, IOError):
-                r = False
-
-    # Remove cache files
-    path = apath('%s/cache/' % app, request)
-    CacheOnDisk(folder=path).clear()
-    
-    if os.path.exists(path):
-        for f in os.listdir(path):
-            try:
-                if f[:1] != '.': recursive_unlink(os.path.join(path, f))
-            except (OSError, IOError):
-                r = False
-    return r
-
-
-def app_compile(app, request):
-    """Compiles the application
-
-    Args:
-        app(str): application name
-        request: the global request object
-
-    Returns:
-        None if everything went ok, traceback text if errors are found
-
-    """
-    from compileapp import compile_application, remove_compiled_application
-    folder = apath(app, request)
-    try:
-        compile_application(folder)
-        return None
-    except (Exception, RestrictedError):
-        tb = traceback.format_exc(sys.exc_info)
-        remove_compiled_application(folder)
-        return tb
-
-
-def app_create(app, request, force=False, key=None, info=False):
-    """Create a copy of welcome.w2p (scaffolding) app
-
-    Args:
-        app(str): application name
-        request: the global request object
-
-    """
-    path = apath(app, request)
-    if not os.path.exists(path):
-        try:
-            os.mkdir(path)
-        except:
-            if info:
-                return False, traceback.format_exc(sys.exc_info)
-            else:
-                return False
-    elif not force:
-        if info:
-            return False, "Application exists"
-        else:
-            return False
-    try:
-        w2p_unpack('welcome.w2p', path)
-        for subfolder in [
-            'models', 'views', 'controllers', 'databases',
-            'modules', 'cron', 'errors', 'sessions', 'cache',
-            'languages', 'static', 'private', 'uploads']:
-            subpath = os.path.join(path, subfolder)
-            if not os.path.exists(subpath):
-                os.mkdir(subpath)
-        db = os.path.join(path, 'models', 'db.py')
-        if os.path.exists(db):
-            data = read_file(db)
-            data = data.replace('<your secret key>',
-                                'sha512:' + (key or web2py_uuid()))
-            write_file(db, data)
-        if info:
-            return True, None
-        else:
-            return True
-    except:
-        rmtree(path)
-        if info:
-            return False, traceback.format_exc(sys.exc_info)
-        else:
-            return False
-
-
-def app_install(app, fobj, request, filename, overwrite=None):
-    """Installs an application:
-
-    - Identifies file type by filename
-    - Writes `fobj` contents to the `../deposit/` folder
-    - Calls `w2p_unpack()` to do the job.
-
-    Args:
-        app(str): new application name
-        fobj(obj): file object containing the application to be installed
-        request: the global request object
-        filename(str): original filename of the `fobj`,
-            required to determine extension
-        overwrite(bool): force overwrite of existing application
-
-    Returns:
-        name of the file where app is temporarily stored or `None` on failure
-
-    """
-    did_mkdir = False
-    if filename[-4:] == '.w2p':
-        extension = 'w2p'
-    elif filename[-7:] == '.tar.gz':
-        extension = 'tar.gz'
-    else:
-        extension = 'tar'
-    upname = apath('../deposit/%s.%s' % (app, extension), request)
-
-    try:
-        write_file(upname, fobj.read(), 'wb')
-        path = apath(app, request)
-        if not overwrite:
-            os.mkdir(path)
-            did_mkdir = True
-        w2p_unpack(upname, path)
-        if extension != 'tar':
-            os.unlink(upname)
-        fix_newlines(path)
-        return upname
-    except Exception:
-        if did_mkdir:
-            rmtree(path)
-        return False
-
-
-def app_uninstall(app, request):
-    """Uninstalls the application.
-
-    Args:
-        app(str): application name
-        request: the global request object
-
-    Returns:
-        `True` on success, `False` on failure
-
-    """
-    try:
-        # Hey App, this is your end...
-        path = apath(app, request)
-        rmtree(path)
-        return True
-    except Exception:
-        return False
-
-
-def plugin_pack(app, plugin_name, request):
-    """Builds a w2p package for the plugin
-
-    Args:
-        app(str): application name
-        plugin_name(str): the name of the plugin without `plugin_` prefix
-        request: the current request app
-
-    Returns:
-        filename of the w2p file or False on error
-
-    """
-    try:
-        filename = apath(
-            '../deposit/web2py.plugin.%s.w2p' % plugin_name, request)
-        w2p_pack_plugin(filename, apath(app, request), plugin_name)
-        return filename
-    except Exception:
-        return False
-
-
-def plugin_install(app, fobj, request, filename):
-    """Installs a plugin:
-
-    - Identifies file type by filename
-    - Writes `fobj` contents to the `../deposit/` folder
-    - Calls `w2p_unpack_plugin()` to do the job.
-
-    Args:
-        app(str): new application name
-        fobj: file object containing the application to be installed
-        request: the global request object
-        filename: original filename of the `fobj`,
-            required to determine extension
-
-    Returns:
-        name of the file where plugin is temporarily stored
-        or `False` on failure
-
-    """
-    upname = apath('../deposit/%s' % filename, request)
-
-    try:
-        write_file(upname, fobj.read(), 'wb')
-        path = apath(app, request)
-        w2p_unpack_plugin(upname, path)
-        fix_newlines(path)
-        return upname
-    except Exception:
-        os.unlink(upname)
-        return False
-
-
-def check_new_version(myversion, version_url):
-    """Compares current web2py's version with the latest stable web2py version.
-
-    Args:
-        myversion: the current version as stored in file `web2py/VERSION`
-        version_URL: the URL that contains the version
-                     of the latest stable release
-
-    Returns:
-        tuple: state, version
-
-        - state : `True` if upgrade available, `False` if current
-          version is up-to-date, -1 on error
-        - version : the most up-to-version available
-
-    """
-    try:
-        from urllib import urlopen
-        version = urlopen(version_url).read()
-        pversion = parse_version(version)
-        pmyversion = parse_version(myversion)
-    except IOError:
-        import traceback
-        print traceback.format_exc()
-        return -1, myversion
-
-    if pversion[:3]+pversion[-6:] > pmyversion[:3]+pmyversion[-6:]:
-        return True, version
-    else:
-        return False, version
-
-
-def unzip(filename, dir, subfolder=''):
-    """Unzips filename into dir (.zip only, no .gz etc)
-
-    Args:
-        filename(str): archive
-        dir(str): destination
-        subfolder(str): if != '' unzips only files in subfolder
-
-    """
-    filename = abspath(filename)
-    if not zipfile.is_zipfile(filename):
-        raise RuntimeError('Not a valid zipfile')
-    zf = zipfile.ZipFile(filename)
-    if not subfolder.endswith('/'):
-        subfolder += '/'
-    n = len(subfolder)
-    for name in sorted(zf.namelist()):
-        if not name.startswith(subfolder):
-            continue
-        #print name[n:]
-        if name.endswith('/'):
-            folder = os.path.join(dir, name[n:])
-            if not os.path.exists(folder):
-                os.mkdir(folder)
-        else:
-            write_file(os.path.join(dir, name[n:]), zf.read(name), 'wb')
-
-
-def upgrade(request, url='http://web2py.com'):
-    """Upgrades web2py (src, osx, win) if a new version is posted.
-    It detects whether src, osx or win is running and downloads the right one
-
-    Args:
-        request: the current request object
-            (required to determine version and path)
-        url: the incomplete url where to locate the latest web2py
-             (actual url is url+'/examples/static/web2py_(src|osx|win).zip')
-
-    Returns
-        tuple: completed, traceback
-
-        - completed: True on success, False on failure
-          (network problem or old version)
-        - traceback: None on success, raised exception details on failure
-
-    """
-    web2py_version = request.env.web2py_version
-    gluon_parent = request.env.gluon_parent
-    if not gluon_parent.endswith('/'):
-        gluon_parent += '/'
-    (check, version) = check_new_version(web2py_version,
-                                         url + '/examples/default/version')
-    if not check:
-        return False, 'Already latest version'
-    if os.path.exists(os.path.join(gluon_parent, 'web2py.exe')):
-        version_type = 'win'
-        destination = gluon_parent
-        subfolder = 'web2py/'
-    elif gluon_parent.endswith('/Contents/Resources/'):
-        version_type = 'osx'
-        destination = gluon_parent[:-len('/Contents/Resources/')]
-        subfolder = 'web2py/web2py.app/'
-    else:
-        version_type = 'src'
-        destination = gluon_parent
-        subfolder = 'web2py/'
-
-    full_url = url + '/examples/static/web2py_%s.zip' % version_type
-    filename = abspath('web2py_%s_downloaded.zip' % version_type)
-    try:
-        write_file(filename, urllib.urlopen(full_url).read(), 'wb')
-    except Exception, e:
-        return False, e
-    try:
-        unzip(filename, destination, subfolder)
-        return True, None
-    except Exception, e:
-        return False, e
-
-
-def add_path_first(path):
-    sys.path = [path] + [p for p in sys.path if (
-        not p == path and not p == (path + '/'))]
-    if not global_settings.web2py_runtime_gae:
-        site.addsitedir(path)
-
-
-def create_missing_folders():
-    if not global_settings.web2py_runtime_gae:
-        for path in ('applications', 'deposit', 'site-packages', 'logs'):
-            path = abspath(path, gluon=True)
-            if not os.path.exists(path):
-                os.mkdir(path)
-    paths = (global_settings.gluon_parent, abspath(
-        'site-packages', gluon=True), abspath('gluon', gluon=True), '')
-    [add_path_first(path) for path in paths]
-
-
-def create_missing_app_folders(request):
-    if not global_settings.web2py_runtime_gae:
-        if request.folder not in global_settings.app_folders:
-            for subfolder in ('models', 'views', 'controllers', 'databases',
-                              'modules', 'cron', 'errors', 'sessions',
-                              'languages', 'static', 'private', 'uploads'):
-                path = os.path.join(request.folder, subfolder)
-                if not os.path.exists(path):
-                    os.mkdir(path)
-            global_settings.app_folders.add(request.folder)

+ 0 - 756
frameworks/Python/web2py/web2py/gluon/cache.py

@@ -1,756 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-| This file is part of the web2py Web Framework
-| Copyrighted by Massimo Di Pierro <[email protected]>
-| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-Basic caching classes and methods
----------------------------------
-
-- Cache - The generic caching object interfacing with the others
-- CacheInRam - providing caching in ram
-- CacheOnDisk - provides caches on disk
-
-Memcache is also available via a different module (see gluon.contrib.memcache)
-
-When web2py is running on Google App Engine,
-caching will be provided by the GAE memcache
-(see gluon.contrib.gae_memcache)
-"""
-import time
-import thread
-import os
-import gc
-import sys
-import logging
-import re
-import random
-import hashlib
-import datetime
-import tempfile
-from gluon import recfile
-from gluon import portalocker
-from collections import defaultdict
-try:
-    from collections import OrderedDict
-except ImportError:
-    from gluon.contrib.ordereddict import OrderedDict
-try:
-    from gluon import settings
-    have_settings = True
-except ImportError:
-    have_settings = False
-
-try:
-   import cPickle as pickle
-except:
-   import pickle
-
-try:
-    import psutil
-    HAVE_PSUTIL = True
-except ImportError:
-    HAVE_PSUTIL = False
-
-def remove_oldest_entries(storage, percentage=90):
-    # compute current memory usage (%)
-    old_mem = psutil.virtual_memory().percent
-    # if we have data in storage and utilization exceeds 90%
-    while storage and old_mem > percentage:    
-        # removed oldest entry
-        storage.popitem(last=False)
-        # garbage collect
-        gc.collect(1)
-        # comute used memory again
-        new_mem = psutil.virtual_memory().percent
-        # if the used memory did not decrease stop
-        if new_mem >= old_mem: break
-        # net new measurement for memory usage and loop
-        old_mem = new_mem
-
-
-logger = logging.getLogger("web2py.cache")
-
-__all__ = ['Cache', 'lazy_cache']
-
-
-DEFAULT_TIME_EXPIRE = 300
-
-class CacheAbstract(object):
-    """
-    Abstract class for cache implementations.
-    Main function just provides referenced api documentation.
-
-    Use CacheInRam or CacheOnDisk instead which are derived from this class.
-
-    Note:
-        Michele says: there are signatures inside gdbm files that are used
-        directly by the python gdbm adapter that often are lagging behind in the
-        detection code in python part.
-        On every occasion that a gdbm store is probed by the python adapter,
-        the probe fails, because gdbm file version is newer.
-        Using gdbm directly from C would work, because there is backward
-        compatibility, but not from python!
-        The .shelve file is discarded and a new one created (with new
-        signature) and it works until it is probed again...
-        The possible consequences are memory leaks and broken sessions.
-    """
-
-    cache_stats_name = 'web2py_cache_statistics'
-    max_ram_utilization = 90 # percent
-
-    def __init__(self, request=None):
-        """Initializes the object
-
-        Args:
-            request: the global request object
-        """
-        raise NotImplementedError
-
-    def __call__(self, key, f,
-                 time_expire=DEFAULT_TIME_EXPIRE):
-        """
-        Tries to retrieve the value corresponding to `key` from the cache if the
-        object exists and if it did not expire, else it calls the function `f`
-        and stores the output in the cache corresponding to `key`. It always
-        returns the function that is returned.
-
-        Args:
-            key(str): the key of the object to be stored or retrieved
-            f(function): the function whose output is to be cached.
-
-                If `f` is `None` the cache is cleared.
-            time_expire(int): expiration of the cache in seconds.
-
-                It's used to compare the current time with the time
-                when the requested object was last saved in cache. It does not
-                affect future requests. Setting `time_expire` to 0 or negative
-                value forces the cache to refresh.
-        """
-        raise NotImplementedError
-
-    def clear(self, regex=None):
-        """
-        Clears the cache of all keys that match the provided regular expression.
-        If no regular expression is provided, it clears all entries in cache.
-
-        Args:
-            regex: if provided, only keys matching the regex will be cleared,
-                otherwise all keys are cleared.
-        """
-
-        raise NotImplementedError
-
-    def increment(self, key, value=1):
-        """
-        Increments the cached value for the given key by the amount in value
-
-        Args:
-            key(str): key for the cached object to be incremeneted
-            value(int): amount of the increment (defaults to 1, can be negative)
-        """
-        raise NotImplementedError
-
-    def _clear(self, storage, regex):
-        """
-        Auxiliary function called by `clear` to search and clear cache entries
-        """
-        r = re.compile(regex)
-        for key in storage.keys():
-            if r.match(str(key)):
-                del storage[key]
-        return
-
-
-class CacheInRam(CacheAbstract):
-    """
-    Ram based caching
-
-    This is implemented as global (per process, shared by all threads)
-    dictionary.
-    A mutex-lock mechanism avoid conflicts.
-    """
-
-    locker = thread.allocate_lock()
-    meta_storage = {}
-    stats = {}
-
-    def __init__(self, request=None):
-        self.initialized = False
-        self.request = request
-        self.storage = OrderedDict() if HAVE_PSUTIL else {}
-        self.app = request.application if request else ''
-    def initialize(self):
-        if self.initialized:
-            return
-        else:
-            self.initialized = True
-        self.locker.acquire()
-        if not self.app in self.meta_storage:
-            self.storage = self.meta_storage[self.app] = \
-                OrderedDict() if HAVE_PSUTIL else {}
-            self.stats[self.app] = {'hit_total': 0, 'misses': 0}
-        else:
-            self.storage = self.meta_storage[self.app]
-        self.locker.release()
-
-    def clear(self, regex=None):
-        self.initialize()
-        self.locker.acquire()
-        storage = self.storage
-        if regex is None:
-            storage.clear()
-        else:
-            self._clear(storage, regex)
-
-        if not self.app in self.stats:
-            self.stats[self.app] = {'hit_total': 0, 'misses': 0}
-
-        self.locker.release()
-
-    def __call__(self, key, f,
-                 time_expire=DEFAULT_TIME_EXPIRE,
-                 destroyer=None):
-        """
-        Attention! cache.ram does not copy the cached object.
-        It just stores a reference to it. Turns out the deepcopying the object
-        has some problems:
-
-        - would break backward compatibility
-        - would be limiting because people may want to cache live objects
-        - would work unless we deepcopy no storage and retrival which would make
-          things slow.
-
-        Anyway. You can deepcopy explicitly in the function generating the value
-        to be cached.
-        """
-        self.initialize()
-
-        dt = time_expire
-        now = time.time()
-
-        self.locker.acquire()
-        item = self.storage.get(key, None)
-        if item and f is None:
-            del self.storage[key]
-            if destroyer:
-                destroyer(item[1])
-        self.stats[self.app]['hit_total'] += 1
-        self.locker.release()
-
-        if f is None:
-            return None
-        if item and (dt is None or item[0] > now - dt):
-            return item[1]
-        elif item and (item[0] < now - dt) and destroyer:
-            destroyer(item[1])
-        value = f()
-
-        self.locker.acquire()
-        self.storage[key] = (now, value)
-        self.stats[self.app]['misses'] += 1
-        if HAVE_PSUTIL and self.max_ram_utilization!=None and random.random()<0.10:
-            remove_oldest_entries(self.storage, percentage = self.max_ram_utilization)
-        self.locker.release()
-        return value
-
-    def increment(self, key, value=1):
-        self.initialize()
-        self.locker.acquire()
-        try:
-            if key in self.storage:
-                value = self.storage[key][1] + value
-            self.storage[key] = (time.time(), value)
-        except BaseException, e:
-            self.locker.release()
-            raise e
-        self.locker.release()
-        return value
-
-
-class CacheOnDisk(CacheAbstract):
-    """
-    Disk based cache
-
-    This is implemented as a key value store where each key corresponds to a
-    single file in disk which is replaced when the value changes.
-
-    Disk cache provides persistance when web2py is started/stopped but it is
-    slower than `CacheInRam`
-
-    Values stored in disk cache must be pickable.
-    """
-
-    class PersistentStorage(object):
-        """
-        Implements a key based thread/process-safe safe storage in disk.
-        """
-
-        def __init__(self, folder, file_lock_time_wait=0.1):
-            self.folder = folder
-            self.key_filter_in = lambda key: key
-            self.key_filter_out = lambda key: key
-            self.file_lock_time_wait = file_lock_time_wait # How long we should wait before retrying to lock a file held by another process
-            # We still need a mutex for each file as portalocker only blocks other processes
-            self.file_locks = defaultdict(thread.allocate_lock)
-
-
-            # Make sure we use valid filenames.
-            if sys.platform == "win32":
-                import base64
-                def key_filter_in_windows(key):
-                    """
-                    Windows doesn't allow \ / : * ? "< > | in filenames.
-                    To go around this encode the keys with base32.
-                    """
-                    return base64.b32encode(key)
-
-                def key_filter_out_windows(key):
-                    """
-                    We need to decode the keys so regex based removal works.
-                    """
-                    return base64.b32decode(key)
-
-                self.key_filter_in = key_filter_in_windows
-                self.key_filter_out = key_filter_out_windows
-
-
-        def wait_portalock(self, val_file):
-            """
-            Wait for the process file lock.
-            """
-            while True:
-                try:
-                    portalocker.lock(val_file, portalocker.LOCK_EX)
-                    break
-                except:
-                    time.sleep(self.file_lock_time_wait)
-
-
-        def acquire(self, key):
-            self.file_locks[key].acquire()
-
-
-        def release(self, key):
-            self.file_locks[key].release()
-
-
-        def __setitem__(self, key, value):
-            key = self.key_filter_in(key)
-            val_file = recfile.open(key, mode='wb', path=self.folder)
-            self.wait_portalock(val_file)
-            pickle.dump(value, val_file, pickle.HIGHEST_PROTOCOL)
-            val_file.close()
-
-
-        def __getitem__(self, key):
-            key = self.key_filter_in(key)
-            try:
-                val_file = recfile.open(key, mode='rb', path=self.folder)
-            except IOError:
-                raise KeyError
-
-            self.wait_portalock(val_file)
-            value = pickle.load(recfile.open(key, 'rb', path=self.folder))
-            val_file.close()
-            return value
-
-
-        def __contains__(self, key):
-            key = self.key_filter_in(key)
-            return (key in self.file_locks) or recfile.exists(key, path=self.folder)
-
-
-        def __delitem__(self, key):
-            key = self.key_filter_in(key)
-            try:
-                recfile.remove(key, path=self.folder)
-            except IOError:
-                raise KeyError
-
-
-        def __iter__(self):
-            for dirpath, dirnames, filenames in os.walk(self.folder):
-                for filename in filenames:
-                    yield self.key_filter_out(filename)
-
-
-        def safe_apply(self, key, function, default_value=None):
-            """ 
-            Safely apply a function to the value of a key in storage and set
-            the return value of the function to it.
-
-            Return the result of applying the function.
-            """
-            key = self.key_filter_in(key)
-            exists = True
-            try:
-                val_file = recfile.open(key, mode='r+b', path=self.folder)
-            except IOError:
-                exists = False
-                val_file = recfile.open(key, mode='wb', path=self.folder)
-            self.wait_portalock(val_file)
-            if exists:
-                timestamp, value = pickle.load(val_file)
-            else:
-                value = default_value
-            new_value = function(value)
-            val_file.seek(0)
-            pickle.dump((time.time(), new_value), val_file, pickle.HIGHEST_PROTOCOL)
-            val_file.truncate()
-            val_file.close()
-            return new_value
-
-
-        def keys(self):
-            return list(self.__iter__())
-
-
-        def get(self, key, default=None):
-            try:
-                return self[key]
-            except KeyError:
-                return default
-
-
-    def __init__(self, request=None, folder=None):
-        self.initialized = False
-        self.request = request
-        self.folder = folder
-        self.storage = None
-
-
-    def initialize(self):
-        if self.initialized:
-            return
-        else:
-            self.initialized = True
-
-        folder = self.folder
-        request = self.request
-
-        # Lets test if the cache folder exists, if not
-        # we are going to create it
-        folder = os.path.join(folder or request.folder, 'cache')
-
-        if not os.path.exists(folder):
-            os.mkdir(folder)
-
-        self.storage = CacheOnDisk.PersistentStorage(folder)
-
-
-    def __call__(self, key, f,
-                 time_expire=DEFAULT_TIME_EXPIRE):
-        self.initialize()
-
-        def inc_hit_total(v):
-            v['hit_total'] += 1
-            return v
-
-        def inc_misses(v):
-            v['misses'] += 1
-            return v
-
-        dt = time_expire
-        self.storage.acquire(key)
-        self.storage.acquire(CacheAbstract.cache_stats_name)
-        item = self.storage.get(key)
-        self.storage.safe_apply(CacheAbstract.cache_stats_name, inc_hit_total,
-                                default_value={'hit_total': 0, 'misses': 0})
-
-        if item and f is None:
-            del self.storage[key]
-
-        if f is None:
-            self.storage.release(CacheAbstract.cache_stats_name)
-            self.storage.release(key)
-            return None
-
-        now = time.time()
-
-        if item and ((dt is None) or (item[0] > now - dt)):
-            value = item[1]
-        else:
-            value = f()
-            self.storage[key] = (now, value)
-            self.storage.safe_apply(CacheAbstract.cache_stats_name, inc_misses, 
-                                    default_value={'hit_total': 0, 'misses': 0})
-
-        self.storage.release(CacheAbstract.cache_stats_name)
-        self.storage.release(key)
-        return value
-
-
-    def clear(self, regex=None):
-        self.initialize()
-        storage = self.storage
-        if regex is None:
-            keys = storage
-        else:
-            r = re.compile(regex)
-            keys = (key for key in storage if r.match(key))
-        for key in keys:
-            storage.acquire(key)
-            try:
-                del storage[key]
-            except KeyError:
-                pass
-            storage.release(key)
-
-
-    def increment(self, key, value=1):
-        self.initialize()
-        self.storage.acquire(key)
-        value = self.storage.safe_apply(key, lambda x: x + value, default_value=0)
-        self.storage.release(key)
-        return value
-
-
-
-class CacheAction(object):
-    def __init__(self, func, key, time_expire, cache, cache_model):
-        self.__name__ = func.__name__
-        self.__doc__ = func.__doc__
-        self.func = func
-        self.key = key
-        self.time_expire = time_expire
-        self.cache = cache
-        self.cache_model = cache_model
-
-    def __call__(self, *a, **b):
-        if not self.key:
-            key2 = self.__name__ + ':' + repr(a) + ':' + repr(b)
-        else:
-            key2 = self.key.replace('%(name)s', self.__name__)\
-                .replace('%(args)s', str(a)).replace('%(vars)s', str(b))
-        cache_model = self.cache_model
-        if not cache_model or isinstance(cache_model, str):
-            cache_model = getattr(self.cache, cache_model or 'ram')
-        return cache_model(key2,
-                           lambda a=a, b=b: self.func(*a, **b),
-                           self.time_expire)
-
-
-class Cache(object):
-    """
-    Sets up generic caching, creating an instance of both CacheInRam and
-    CacheOnDisk.
-    In case of GAE will make use of gluon.contrib.gae_memcache.
-
-    - self.ram is an instance of CacheInRam
-    - self.disk is an instance of CacheOnDisk
-    """
-
-    autokey = ':%(name)s:%(args)s:%(vars)s'
-
-    def __init__(self, request):
-        """
-        Args:
-            request: the global request object
-        """
-        # GAE will have a special caching
-        if have_settings and settings.global_settings.web2py_runtime_gae:
-            from gluon.contrib.gae_memcache import MemcacheClient
-            self.ram = self.disk = MemcacheClient(request)
-        else:
-            # Otherwise use ram (and try also disk)
-            self.ram = CacheInRam(request)
-            try:
-                self.disk = CacheOnDisk(request)
-            except IOError:
-                logger.warning('no cache.disk (IOError)')
-            except AttributeError:
-                # normally not expected anymore, as GAE has already
-                # been accounted for
-                logger.warning('no cache.disk (AttributeError)')
-
-    def action(self, time_expire=DEFAULT_TIME_EXPIRE, cache_model=None,
-             prefix=None, session=False, vars=True, lang=True,
-             user_agent=False, public=True, valid_statuses=None,
-             quick=None):
-        """Better fit for caching an action
-
-        Warning:
-            Experimental!
-
-        Currently only HTTP 1.1 compliant
-        reference : http://code.google.com/p/doctype-mirror/wiki/ArticleHttpCaching
-
-        Args:
-            time_expire(int): same as @cache
-            cache_model(str): same as @cache
-            prefix(str): add a prefix to the calculated key
-            session(bool): adds response.session_id to the key
-            vars(bool): adds request.env.query_string
-            lang(bool): adds T.accepted_language
-            user_agent(bool or dict): if True, adds is_mobile and is_tablet to the key.
-                Pass a dict to use all the needed values (uses str(.items()))
-                (e.g. user_agent=request.user_agent()). Used only if session is
-                not True
-            public(bool): if False forces the Cache-Control to be 'private'
-            valid_statuses: by default only status codes starting with 1,2,3 will be cached.
-                pass an explicit list of statuses on which turn the cache on
-            quick: Session,Vars,Lang,User-agent,Public:
-                fast overrides with initials, e.g. 'SVLP' or 'VLP', or 'VLP'
-        """
-        from gluon import current
-        from gluon.http import HTTP
-        def wrap(func):
-            def wrapped_f():
-                if current.request.env.request_method != 'GET':
-                    return func()
-                if time_expire:
-                    cache_control = 'max-age=%(time_expire)s, s-maxage=%(time_expire)s' % dict(time_expire=time_expire)
-                    if quick:
-                        session_ = True if 'S' in quick else False
-                        vars_ = True if 'V' in quick else False
-                        lang_ = True if 'L' in quick else False
-                        user_agent_ = True if 'U' in quick else False
-                        public_ = True if 'P' in quick else False
-                    else:
-                        session_, vars_, lang_, user_agent_, public_ = session, vars, lang, user_agent, public
-                    if not session_ and public_:
-                        cache_control += ', public'
-                        expires = (current.request.utcnow + datetime.timedelta(seconds=time_expire)).strftime('%a, %d %b %Y %H:%M:%S GMT')
-                    else:
-                        cache_control += ', private'
-                        expires = 'Fri, 01 Jan 1990 00:00:00 GMT'
-                if cache_model:
-                    #figure out the correct cache key
-                    cache_key = [current.request.env.path_info, current.response.view]
-                    if session_:
-                        cache_key.append(current.response.session_id)
-                    elif user_agent_:
-                        if user_agent_ is True:
-                            cache_key.append("%(is_mobile)s_%(is_tablet)s" % current.request.user_agent())
-                        else:
-                            cache_key.append(str(user_agent_.items()))
-                    if vars_:
-                        cache_key.append(current.request.env.query_string)
-                    if lang_:
-                        cache_key.append(current.T.accepted_language)
-                    cache_key = hashlib.md5('__'.join(cache_key)).hexdigest()
-                    if prefix:
-                        cache_key = prefix + cache_key
-                    try:
-                        #action returns something
-                        rtn = cache_model(cache_key, lambda : func(), time_expire=time_expire)
-                        http, status = None, current.response.status
-                    except HTTP, e:
-                        #action raises HTTP (can still be valid)
-                        rtn = cache_model(cache_key, lambda : e.body, time_expire=time_expire)
-                        http, status = HTTP(e.status, rtn, **e.headers), e.status
-                    else:
-                        #action raised a generic exception
-                        http = None
-                else:
-                    #no server-cache side involved
-                    try:
-                        #action returns something
-                        rtn = func()
-                        http, status = None, current.response.status
-                    except HTTP, e:
-                        #action raises HTTP (can still be valid)
-                        status = e.status
-                        http = HTTP(e.status, e.body, **e.headers)
-                    else:
-                        #action raised a generic exception
-                        http = None
-                send_headers = False
-                if http and isinstance(valid_statuses, list):
-                    if status in valid_statuses:
-                        send_headers = True
-                elif valid_statuses is None:
-                    if str(status)[0] in '123':
-                        send_headers = True
-                if send_headers:
-                    headers = {
-                        'Pragma' : None,
-                        'Expires' : expires,
-                        'Cache-Control' : cache_control
-                        }
-                    current.response.headers.update(headers)
-                if cache_model and not send_headers:
-                    #we cached already the value, but the status is not valid
-                    #so we need to delete the cached value
-                    cache_model(cache_key, None)
-                if http:
-                    if send_headers:
-                        http.headers.update(current.response.headers)
-                    raise http
-                return rtn
-            wrapped_f.__name__ = func.__name__
-            wrapped_f.__doc__ = func.__doc__
-            return wrapped_f
-        return wrap
-
-    def __call__(self,
-                 key=None,
-                 time_expire=DEFAULT_TIME_EXPIRE,
-                 cache_model=None):
-        """
-        Decorator function that can be used to cache any function/method.
-
-        Args:
-            key(str) : the key of the object to be store or retrieved
-            time_expire(int) : expiration of the cache in seconds
-                `time_expire` is used to compare the current time with the time
-                when the requested object was last saved in cache.
-                It does not affect future requests.
-                Setting `time_expire` to 0 or negative value forces the cache to
-                refresh.
-            cache_model(str): can be "ram", "disk" or other (like "memcache").
-                Defaults to "ram"
-
-        When the function `f` is called, web2py tries to retrieve
-        the value corresponding to `key` from the cache if the
-        object exists and if it did not expire, else it calles the function `f`
-        and stores the output in the cache corresponding to `key`. In the case
-        the output of the function is returned.
-
-        Example: ::
-
-          @cache('key', 5000, cache.ram)
-          def f():
-              return time.ctime()
-
-        Note:
-            If the function `f` is an action, we suggest using
-            @cache.action instead
-        """
-
-        def tmp(func, cache=self, cache_model=cache_model):
-            return CacheAction(func, key, time_expire, self, cache_model)
-        return tmp
-
-    @staticmethod
-    def with_prefix(cache_model, prefix):
-        """
-        allow replacing cache.ram with cache.with_prefix(cache.ram,'prefix')
-        it will add prefix to all the cache keys used.
-        """
-        return lambda key, f, time_expire=DEFAULT_TIME_EXPIRE, prefix=prefix:\
-            cache_model(prefix + key, f, time_expire)
-
-
-def lazy_cache(key=None, time_expire=None, cache_model='ram'):
-    """
-    Can be used to cache any function including ones in modules,
-    as long as the cached function is only called within a web2py request
-
-    If a key is not provided, one is generated from the function name
-    `time_expire` defaults to None (no cache expiration)
-
-    If cache_model is "ram" then the model is current.cache.ram, etc.
-    """
-    def decorator(f, key=key, time_expire=time_expire, cache_model=cache_model):
-        key = key or repr(f)
-
-        def g(*c, **d):
-            from gluon import current
-            return current.cache(key, time_expire, cache_model)(f)(*c, **d)
-        g.__name__ = f.__name__
-        return g
-    return decorator

+ 0 - 54
frameworks/Python/web2py/web2py/gluon/cfs.py

@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-| This file is part of the web2py Web Framework
-| Copyrighted by Massimo Di Pierro <[email protected]>
-| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-Functions required to execute app components
---------------------------------------------
-
-Note:
-    FOR INTERNAL USE ONLY
-"""
-
-from os import stat
-import thread
-from gluon.fileutils import read_file
-
-cfs = {}  # for speed-up
-cfs_lock = thread.allocate_lock()  # and thread safety
-
-
-def getcfs(key, filename, filter=None):
-    """
-    Caches the *filtered* file `filename` with `key` until the file is
-    modified.
-
-    Args:
-        key(str): the cache key
-        filename: the file to cache
-        filter: is the function used for filtering. Normally `filename` is a
-            .py file and `filter` is a function that bytecode compiles the file.
-            In this way the bytecode compiled file is cached. (Default = None)
-
-    This is used on Google App Engine since pyc files cannot be saved.
-    """
-    try:
-        t = stat(filename).st_mtime
-    except OSError:
-        return filter() if callable(filter) else ''
-    cfs_lock.acquire()
-    item = cfs.get(key, None)
-    cfs_lock.release()
-    if item and item[0] == t:
-        return item[1]
-    if not callable(filter):
-        data = read_file(filename)
-    else:
-        data = filter()
-    cfs_lock.acquire()
-    cfs[key] = (t, data)
-    cfs_lock.release()
-    return data

+ 0 - 759
frameworks/Python/web2py/web2py/gluon/compileapp.py

@@ -1,759 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-| This file is part of the web2py Web Framework
-| Copyrighted by Massimo Di Pierro <[email protected]>
-| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-Functions required to execute app components
----------------------------------------------
-
-Note:
-    FOR INTERNAL USE ONLY
-"""
-
-import re
-import fnmatch
-import os
-import copy
-import random
-import __builtin__
-from gluon.storage import Storage, List
-from gluon.template import parse_template
-from gluon.restricted import restricted, compile2
-from gluon.fileutils import mktree, listdir, read_file, write_file
-from gluon.myregex import regex_expose, regex_longcomments
-from gluon.languages import translator
-from gluon.dal import DAL, Field
-from pydal.base import BaseAdapter
-from gluon.sqlhtml import SQLFORM, SQLTABLE
-from gluon.cache import Cache
-from gluon.globals import current, Response
-from gluon import settings
-from gluon.cfs import getcfs
-from gluon import html
-from gluon import validators
-from gluon.http import HTTP, redirect
-import marshal
-import shutil
-import imp
-import logging
-import types
-logger = logging.getLogger("web2py")
-from gluon import rewrite
-from custom_import import custom_import_install
-
-try:
-    import py_compile
-except:
-    logger.warning('unable to import py_compile')
-
-is_pypy = settings.global_settings.is_pypy
-is_gae = settings.global_settings.web2py_runtime_gae
-is_jython = settings.global_settings.is_jython
-
-pjoin = os.path.join
-
-TEST_CODE = \
-    r"""
-def _TEST():
-    import doctest, sys, cStringIO, types, cgi, gluon.fileutils
-    if not gluon.fileutils.check_credentials(request):
-        raise HTTP(401, web2py_error='invalid credentials')
-    stdout = sys.stdout
-    html = '<h2>Testing controller "%s.py" ... done.</h2><br/>\n' \
-        % request.controller
-    for key in sorted([key for key in globals() if not key in __symbols__+['_TEST']]):
-        eval_key = eval(key)
-        if type(eval_key) == types.FunctionType:
-            number_doctests = sum([len(ds.examples) for ds in doctest.DocTestFinder().find(eval_key)])
-            if number_doctests>0:
-                sys.stdout = cStringIO.StringIO()
-                name = '%s/controllers/%s.py in %s.__doc__' \
-                    % (request.folder, request.controller, key)
-                doctest.run_docstring_examples(eval_key,
-                    globals(), False, name=name)
-                report = sys.stdout.getvalue().strip()
-                if report:
-                    pf = 'failed'
-                else:
-                    pf = 'passed'
-                html += '<h3 class="%s">Function %s [%s]</h3>\n' \
-                    % (pf, key, pf)
-                if report:
-                    html += CODE(report, language='web2py', \
-                        link='/examples/global/vars/').xml()
-                html += '<br/>\n'
-            else:
-                html += \
-                    '<h3 class="nodoctests">Function %s [no doctests]</h3><br/>\n' \
-                    % (key)
-    response._vars = html
-    sys.stdout = stdout
-_TEST()
-"""
-
-CACHED_REGEXES = {}
-CACHED_REGEXES_MAX_SIZE = 1000
-
-
-def re_compile(regex):
-    try:
-        return CACHED_REGEXES[regex]
-    except KeyError:
-        if len(CACHED_REGEXES) >= CACHED_REGEXES_MAX_SIZE:
-            CACHED_REGEXES.clear()
-        compiled_regex = CACHED_REGEXES[regex] = re.compile(regex)
-        return compiled_regex
-
-
-class mybuiltin(object):
-    """
-    NOTE could simple use a dict and populate it,
-    NOTE not sure if this changes things though if monkey patching import.....
-    """
-    #__builtins__
-    def __getitem__(self, key):
-        try:
-            return getattr(__builtin__, key)
-        except AttributeError:
-            raise KeyError(key)
-
-    def __setitem__(self, key, value):
-        setattr(self, key, value)
-
-
-def LOAD(c=None, f='index', args=None, vars=None,
-         extension=None, target=None, ajax=False, ajax_trap=False,
-         url=None, user_signature=False, timeout=None, times=1,
-         content='loading...', post_vars=Storage(), **attr):
-    """  LOADs a component into the action's document
-
-    Args:
-        c(str): controller
-        f(str): function
-        args(tuple or list): arguments
-        vars(dict): vars
-        extension(str): extension
-        target(str): id of the target
-        ajax(bool): True to enable AJAX bahaviour
-        ajax_trap(bool): True if `ajax` is set to `True`, traps
-            both links and forms "inside" the target
-        url(str): overrides `c`,`f`,`args` and `vars`
-        user_signature(bool): adds hmac signature to all links
-            with a key that is different for every user
-        timeout(int): in milliseconds, specifies the time to wait before
-            starting the request or the frequency if times is greater than
-            1 or "infinity"
-        times(integer or str): how many times the component will be requested
-            "infinity" or "continuous" are accepted to reload indefinitely the
-            component
-    """
-    from html import TAG, DIV, URL, SCRIPT, XML
-    if args is None:
-        args = []
-    vars = Storage(vars or {})
-    target = target or 'c' + str(random.random())[2:]
-    attr['_id'] = target
-    request = current.request
-    if '.' in f:
-        f, extension = f.rsplit('.', 1)
-    if url or ajax:
-        url = url or URL(request.application, c, f, r=request,
-                         args=args, vars=vars, extension=extension,
-                         user_signature=user_signature)
-        # timing options
-        if isinstance(times, basestring):
-            if times.upper() in ("INFINITY", "CONTINUOUS"):
-                times = "Infinity"
-            else:
-                raise TypeError("Unsupported times argument %s" % times)
-        elif isinstance(times, int):
-            if times <= 0:
-                raise ValueError("Times argument must be greater than zero, 'Infinity' or None")
-        else:
-            raise TypeError("Unsupported times argument type %s" % type(times))
-        if timeout is not None:
-            if not isinstance(timeout, (int, long)):
-                raise ValueError("Timeout argument must be an integer or None")
-            elif timeout <= 0:
-                raise ValueError(
-                    "Timeout argument must be greater than zero or None")
-            statement = "$.web2py.component('%s','%s', %s, %s);" \
-                % (url, target, timeout, times)
-            attr['_data-w2p_timeout'] = timeout
-            attr['_data-w2p_times'] = times
-        else:
-            statement = "$.web2py.component('%s','%s');" % (url, target)
-        attr['_data-w2p_remote'] = url
-        if not target is None:
-            return DIV(content, **attr)
-
-    else:
-        if not isinstance(args, (list, tuple)):
-            args = [args]
-        c = c or request.controller
-        other_request = Storage(request)
-        other_request['env'] = Storage(request.env)
-        other_request.controller = c
-        other_request.function = f
-        other_request.extension = extension or request.extension
-        other_request.args = List(args)
-        other_request.vars = vars
-        other_request.get_vars = vars
-        other_request.post_vars = post_vars
-        other_response = Response()
-        other_request.env.path_info = '/' + \
-            '/'.join([request.application, c, f] +
-                     map(str, other_request.args))
-        other_request.env.query_string = \
-            vars and URL(vars=vars).split('?')[1] or ''
-        other_request.env.http_web2py_component_location = \
-            request.env.path_info
-        other_request.cid = target
-        other_request.env.http_web2py_component_element = target
-        other_request.restful = types.MethodType(request.restful.im_func, other_request) # A bit nasty but needed to use LOAD on action decorates with @request.restful()
-        other_response.view = '%s/%s.%s' % (c, f, other_request.extension)
-
-        other_environment = copy.copy(current.globalenv)  # NASTY
-
-        other_response._view_environment = other_environment
-        other_response.generic_patterns = \
-            copy.copy(current.response.generic_patterns)
-        other_environment['request'] = other_request
-        other_environment['response'] = other_response
-
-        ## some magic here because current are thread-locals
-
-        original_request, current.request = current.request, other_request
-        original_response, current.response = current.response, other_response
-        page = run_controller_in(c, f, other_environment)
-        if isinstance(page, dict):
-            other_response._vars = page
-            other_response._view_environment.update(page)
-            run_view_in(other_response._view_environment)
-            page = other_response.body.getvalue()
-        current.request, current.response = original_request, original_response
-        js = None
-        if ajax_trap:
-            link = URL(request.application, c, f, r=request,
-                       args=args, vars=vars, extension=extension,
-                       user_signature=user_signature)
-            js = "$.web2py.trap_form('%s','%s');" % (link, target)
-        script = js and SCRIPT(js, _type="text/javascript") or ''
-        return TAG[''](DIV(XML(page), **attr), script)
-
-
-class LoadFactory(object):
-    """
-    Attention: this helper is new and experimental
-    """
-    def __init__(self, environment):
-        self.environment = environment
-
-    def __call__(self, c=None, f='index', args=None, vars=None,
-                 extension=None, target=None, ajax=False, ajax_trap=False,
-                 url=None, user_signature=False, content='loading...', **attr):
-        if args is None:
-            args = []
-        vars = Storage(vars or {})
-        import globals
-        target = target or 'c' + str(random.random())[2:]
-        attr['_id'] = target
-        request = self.environment['request']
-        if '.' in f:
-            f, extension = f.rsplit('.', 1)
-        if url or ajax:
-            url = url or html.URL(request.application, c, f, r=request,
-                                  args=args, vars=vars, extension=extension,
-                                  user_signature=user_signature)
-            script = html.SCRIPT('$.web2py.component("%s","%s")' % (url, target),
-                                 _type="text/javascript")
-            return html.TAG[''](script, html.DIV(content, **attr))
-        else:
-            if not isinstance(args, (list, tuple)):
-                args = [args]
-            c = c or request.controller
-
-            other_request = Storage(request)
-            other_request['env'] = Storage(request.env)
-            other_request.controller = c
-            other_request.function = f
-            other_request.extension = extension or request.extension
-            other_request.args = List(args)
-            other_request.vars = vars
-            other_request.get_vars = vars
-            other_request.post_vars = Storage()
-            other_response = globals.Response()
-            other_request.env.path_info = '/' + \
-                '/'.join([request.application, c, f] +
-                         map(str, other_request.args))
-            other_request.env.query_string = \
-                vars and html.URL(vars=vars).split('?')[1] or ''
-            other_request.env.http_web2py_component_location = \
-                request.env.path_info
-            other_request.cid = target
-            other_request.env.http_web2py_component_element = target
-            other_response.view = '%s/%s.%s' % (c, f, other_request.extension)
-            other_environment = copy.copy(self.environment)
-            other_response._view_environment = other_environment
-            other_response.generic_patterns = \
-                copy.copy(current.response.generic_patterns)
-            other_environment['request'] = other_request
-            other_environment['response'] = other_response
-
-            ## some magic here because current are thread-locals
-
-            original_request, current.request = current.request, other_request
-            original_response, current.response = current.response, other_response
-            page = run_controller_in(c, f, other_environment)
-            if isinstance(page, dict):
-                other_response._vars = page
-                other_response._view_environment.update(page)
-                run_view_in(other_response._view_environment)
-                page = other_response.body.getvalue()
-            current.request, current.response = original_request, original_response
-            js = None
-            if ajax_trap:
-                link = html.URL(request.application, c, f, r=request,
-                                args=args, vars=vars, extension=extension,
-                                user_signature=user_signature)
-                js = "$.web2py.trap_form('%s','%s');" % (link, target)
-            script = js and html.SCRIPT(js, _type="text/javascript") or ''
-            return html.TAG[''](html.DIV(html.XML(page), **attr), script)
-
-
-def local_import_aux(name, reload_force=False, app='welcome'):
-    """
-    In apps, instead of importing a local module
-    (in applications/app/modules) with::
-
-       import a.b.c as d
-
-    you should do::
-
-       d = local_import('a.b.c')
-
-    or (to force a reload):
-
-       d = local_import('a.b.c', reload=True)
-
-    This prevents conflict between applications and un-necessary execs.
-    It can be used to import any module, including regular Python modules.
-    """
-    items = name.replace('/', '.')
-    name = "applications.%s.modules.%s" % (app, items)
-    module = __import__(name)
-    for item in name.split(".")[1:]:
-        module = getattr(module, item)
-    if reload_force:
-        reload(module)
-    return module
-
-
-"""
-OLD IMPLEMENTATION:
-    items = name.replace('/','.').split('.')
-    filename, modulepath = items[-1], pjoin(apath,'modules',*items[:-1])
-    imp.acquire_lock()
-    try:
-        file=None
-        (file,path,desc) = imp.find_module(filename,[modulepath]+sys.path)
-        if not path in sys.modules or reload:
-            if is_gae:
-                module={}
-                execfile(path,{},module)
-                module=Storage(module)
-            else:
-                module = imp.load_module(path,file,path,desc)
-            sys.modules[path] = module
-        else:
-            module = sys.modules[path]
-    except Exception, e:
-        module = None
-    if file:
-        file.close()
-    imp.release_lock()
-    if not module:
-        raise ImportError, "cannot find module %s in %s" % (
-            filename, modulepath)
-    return module
-"""
-
-_base_environment_ = dict((k, getattr(html, k)) for k in html.__all__)
-_base_environment_.update(
-    (k, getattr(validators, k)) for k in validators.__all__)
-_base_environment_['__builtins__'] = __builtins__
-_base_environment_['HTTP'] = HTTP
-_base_environment_['redirect'] = redirect
-_base_environment_['DAL'] = DAL
-_base_environment_['Field'] = Field
-_base_environment_['SQLDB'] = DAL        # for backward compatibility
-_base_environment_['SQLField'] = Field  # for backward compatibility
-_base_environment_['SQLFORM'] = SQLFORM
-_base_environment_['SQLTABLE'] = SQLTABLE
-_base_environment_['LOAD'] = LOAD
-
-def build_environment(request, response, session, store_current=True):
-    """
-    Build the environment dictionary into which web2py files are executed.
-    """
-    #h,v = html,validators
-    environment = dict(_base_environment_)
-
-    if not request.env:
-        request.env = Storage()
-    # Enable standard conditional models (i.e., /*.py, /[controller]/*.py, and
-    # /[controller]/[function]/*.py)
-    response.models_to_run = [
-        r'^\w+\.py$',
-        r'^%s/\w+\.py$' % request.controller,
-        r'^%s/%s/\w+\.py$' % (request.controller, request.function)
-        ]
-
-    t = environment['T'] = translator(os.path.join(request.folder,'languages'),
-                                      request.env.http_accept_language)
-    c = environment['cache'] = Cache(request)
-
-    if store_current:
-        current.globalenv = environment
-        current.request = request
-        current.response = response
-        current.session = session
-        current.T = t
-        current.cache = c
-
-    global __builtins__
-    if is_jython:  # jython hack
-        __builtins__ = mybuiltin()
-    elif is_pypy:  # apply the same hack to pypy too
-        __builtins__ = mybuiltin()
-    else:
-        __builtins__['__import__'] = __builtin__.__import__  # WHY?
-    environment['request'] = request
-    environment['response'] = response
-    environment['session'] = session
-    environment['local_import'] = \
-        lambda name, reload=False, app=request.application:\
-        local_import_aux(name, reload, app)
-    BaseAdapter.set_folder(pjoin(request.folder, 'databases'))
-    response._view_environment = copy.copy(environment)
-    custom_import_install()
-    return environment
-
-
-def save_pyc(filename):
-    """
-    Bytecode compiles the file `filename`
-    """
-    py_compile.compile(filename)
-
-
-def read_pyc(filename):
-    """
-    Read the code inside a bytecode compiled file if the MAGIC number is
-    compatible
-
-    Returns:
-        a code object
-    """
-    data = read_file(filename, 'rb')
-    if not is_gae and data[:4] != imp.get_magic():
-        raise SystemError('compiled code is incompatible')
-    return marshal.loads(data[8:])
-
-
-def compile_views(folder):
-    """
-    Compiles all the views in the application specified by `folder`
-    """
-
-    path = pjoin(folder, 'views')
-    for fname in listdir(path, '^[\w/\-]+(\.\w+)*$'):
-        try:
-            data = parse_template(fname, path)
-        except Exception, e:
-            raise Exception("%s in %s" % (e, fname))
-        filename = 'views.%s.py' % fname.replace(os.path.sep, '.')
-        filename = pjoin(folder, 'compiled', filename)
-        write_file(filename, data)
-        save_pyc(filename)
-        os.unlink(filename)
-
-
-def compile_models(folder):
-    """
-    Compiles all the models in the application specified by `folder`
-    """
-
-    path = pjoin(folder, 'models')
-    for fname in listdir(path, '.+\.py$'):
-        data = read_file(pjoin(path, fname))
-        modelfile = 'models.'+fname.replace(os.path.sep,'.')
-        filename = pjoin(folder, 'compiled', modelfile)
-        mktree(filename)
-        write_file(filename, data)
-        save_pyc(filename)
-        os.unlink(filename)
-
-def find_exposed_functions(data):
-    data = regex_longcomments.sub('',data)
-    return regex_expose.findall(data)
-
-def compile_controllers(folder):
-    """
-    Compiles all the controllers in the application specified by `folder`
-    """
-
-    path = pjoin(folder, 'controllers')
-    for fname in listdir(path, '.+\.py$'):
-        ### why is this here? save_pyc(pjoin(path, file))
-        data = read_file(pjoin(path, fname))
-        exposed = find_exposed_functions(data)
-        for function in exposed:
-            command = data + "\nresponse._vars=response._caller(%s)\n" % \
-                function
-            filename = pjoin(folder, 'compiled',
-                             'controllers.%s.%s.py' % (fname[:-3],function))
-            write_file(filename, command)
-            save_pyc(filename)
-            os.unlink(filename)
-
-def model_cmp(a, b, sep='.'):
-    return cmp(a.count(sep), b.count(sep)) or cmp(a, b)
-
-def model_cmp_sep(a, b, sep=os.path.sep):
-    return model_cmp(a,b,sep)
-
-def run_models_in(environment):
-    """
-    Runs all models (in the app specified by the current folder)
-    It tries pre-compiled models first before compiling them.
-    """
-
-    folder = environment['request'].folder
-    c = environment['request'].controller
-    #f = environment['request'].function
-    response = environment['response']
-
-    path = pjoin(folder, 'models')
-    cpath = pjoin(folder, 'compiled')
-    compiled = os.path.exists(cpath)
-    if compiled:
-        models = sorted(listdir(cpath, '^models[_.][\w.]+\.pyc$', 0), model_cmp)
-    else:
-        models = sorted(listdir(path, '^\w+\.py$', 0, sort=False), model_cmp_sep)
-    models_to_run = None
-    for model in models:
-        if response.models_to_run != models_to_run:
-            regex = models_to_run = response.models_to_run[:]
-            if isinstance(regex, list):
-                regex = re_compile('|'.join(regex))
-        if models_to_run:
-            if compiled:
-                n = len(cpath)+8
-                fname = model[n:-4].replace('.','/')+'.py'
-            else:
-                n = len(path)+1
-                fname = model[n:].replace(os.path.sep,'/')
-            if not regex.search(fname) and c != 'appadmin':
-                continue
-            elif compiled:
-                code = read_pyc(model)
-            elif is_gae:
-                code = getcfs(model, model,
-                              lambda: compile2(read_file(model), model))
-            else:
-                code = getcfs(model, model, None)
-            restricted(code, environment, layer=model)
-
-
-def run_controller_in(controller, function, environment):
-    """
-    Runs the controller.function() (for the app specified by
-    the current folder).
-    It tries pre-compiled controller_function.pyc first before compiling it.
-    """
-
-    # if compiled should run compiled!
-    folder = environment['request'].folder
-    path = pjoin(folder, 'compiled')
-    badc = 'invalid controller (%s/%s)' % (controller, function)
-    badf = 'invalid function (%s/%s)' % (controller, function)
-    if os.path.exists(path):
-        filename = pjoin(path, 'controllers.%s.%s.pyc'
-                         % (controller, function))
-        if not os.path.exists(filename):
-            ### for backward compatibility
-            filename = pjoin(path, 'controllers_%s_%s.pyc'
-                             % (controller, function))
-            ### end for backward compatibility
-            if not os.path.exists(filename):
-                raise HTTP(404,
-                           rewrite.THREAD_LOCAL.routes.error_message % badf,
-                           web2py_error=badf)
-        restricted(read_pyc(filename), environment, layer=filename)
-    elif function == '_TEST':
-        # TESTING: adjust the path to include site packages
-        from settings import global_settings
-        from admin import abspath, add_path_first
-        paths = (global_settings.gluon_parent, abspath(
-            'site-packages', gluon=True), abspath('gluon', gluon=True), '')
-        [add_path_first(path) for path in paths]
-        # TESTING END
-
-        filename = pjoin(folder, 'controllers/%s.py'
-                                 % controller)
-        if not os.path.exists(filename):
-            raise HTTP(404,
-                       rewrite.THREAD_LOCAL.routes.error_message % badc,
-                       web2py_error=badc)
-        environment['__symbols__'] = environment.keys()
-        code = read_file(filename)
-        code += TEST_CODE
-        restricted(code, environment, layer=filename)
-    else:
-        filename = pjoin(folder, 'controllers/%s.py'
-                                 % controller)
-        if not os.path.exists(filename):
-            raise HTTP(404,
-                       rewrite.THREAD_LOCAL.routes.error_message % badc,
-                       web2py_error=badc)
-        code = read_file(filename)
-        exposed = find_exposed_functions(code)
-        if not function in exposed:
-            raise HTTP(404,
-                       rewrite.THREAD_LOCAL.routes.error_message % badf,
-                       web2py_error=badf)
-        code = "%s\nresponse._vars=response._caller(%s)\n" % (code, function)
-        if is_gae:
-            layer = filename + ':' + function
-            code = getcfs(layer, filename, lambda: compile2(code, layer))
-        restricted(code, environment, filename)
-    response = environment['response']
-    vars = response._vars
-    if response.postprocessing:
-        vars = reduce(lambda vars, p: p(vars), response.postprocessing, vars)
-    if isinstance(vars, unicode):
-        vars = vars.encode('utf8')
-    elif hasattr(vars, 'xml') and callable(vars.xml):
-        vars = vars.xml()
-    return vars
-
-
-def run_view_in(environment):
-    """
-    Executes the view for the requested action.
-    The view is the one specified in `response.view` or determined by the url
-    or `view/generic.extension`
-    It tries the pre-compiled views_controller_function.pyc before compiling it.
-    """
-    request = environment['request']
-    response = environment['response']
-    view = response.view
-    folder = request.folder
-    path = pjoin(folder, 'compiled')
-    badv = 'invalid view (%s)' % view
-    patterns = response.get('generic_patterns')
-    if patterns:
-        regex = re_compile('|'.join(map(fnmatch.translate, patterns)))
-        short_action = '%(controller)s/%(function)s.%(extension)s' % request
-        allow_generic = regex.search(short_action)
-    else:
-        allow_generic = False
-    if not isinstance(view, str):
-        ccode = parse_template(view, pjoin(folder, 'views'),
-                               context=environment)
-        restricted(ccode, environment, 'file stream')
-    elif os.path.exists(path):
-        x = view.replace('/', '.')
-        files = ['views.%s.pyc' % x]
-        if allow_generic:
-            files.append('views.generic.%s.pyc' % request.extension)
-        # for backward compatibility
-        x = view.replace('/', '_')
-        files.append('views_%s.pyc' % x)
-        if allow_generic:
-            files.append('views_generic.%s.pyc' % request.extension)
-        if request.extension == 'html':
-            files.append('views_%s.pyc' % x[:-5])
-            if allow_generic:
-                files.append('views_generic.pyc')
-        # end backward compatibility code
-        for f in files:
-            filename = pjoin(path, f)
-            if os.path.exists(filename):
-                code = read_pyc(filename)
-                restricted(code, environment, layer=filename)
-                return
-        raise HTTP(404,
-                   rewrite.THREAD_LOCAL.routes.error_message % badv,
-                   web2py_error=badv)
-    else:
-        filename = pjoin(folder, 'views', view)
-        if not os.path.exists(filename) and allow_generic:
-            view = 'generic.' + request.extension
-            filename = pjoin(folder, 'views', view)
-        if not os.path.exists(filename):
-            raise HTTP(404,
-                       rewrite.THREAD_LOCAL.routes.error_message % badv,
-                       web2py_error=badv)
-        layer = filename
-        if is_gae:
-            ccode = getcfs(layer, filename,
-                           lambda: compile2(parse_template(view,
-                                            pjoin(folder, 'views'),
-                                            context=environment), layer))
-        else:
-            ccode = parse_template(view,
-                                   pjoin(folder, 'views'),
-                                   context=environment)
-        restricted(ccode, environment, layer)
-
-
-def remove_compiled_application(folder):
-    """
-    Deletes the folder `compiled` containing the compiled application.
-    """
-    try:
-        shutil.rmtree(pjoin(folder, 'compiled'))
-        path = pjoin(folder, 'controllers')
-        for file in listdir(path, '.*\.pyc$', drop=False):
-            os.unlink(file)
-    except OSError:
-        pass
-
-
-def compile_application(folder):
-    """
-    Compiles all models, views, controller for the application in `folder`.
-    """
-    remove_compiled_application(folder)
-    os.mkdir(pjoin(folder, 'compiled'))
-    compile_models(folder)
-    compile_controllers(folder)
-    compile_views(folder)
-
-
-def test():
-    """
-    Example::
-
-        >>> import traceback, types
-        >>> environment={'x':1}
-        >>> open('a.py', 'w').write('print 1/x')
-        >>> save_pyc('a.py')
-        >>> os.unlink('a.py')
-        >>> if type(read_pyc('a.pyc'))==types.CodeType: print 'code'
-        code
-        >>> exec read_pyc('a.pyc') in environment
-        1
-    """
-
-    return
-
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()

+ 0 - 854
frameworks/Python/web2py/web2py/gluon/contenttype.py

@@ -1,854 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-| This file is part of the web2py Web Framework
-| Copyrighted by Massimo Di Pierro <[email protected]>
-| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
-
-CONTENT_TYPE dictionary created against freedesktop.org's shared mime info
-database version 1.1.
-
-Deviations from official standards:
- - .md: application/x-genesis-rom --> text/x-markdown
- - .png: image/x-apple-ios-png --> image/png
-Additions:
- - .load: text/html
- - .json: application/json
- - .jsonp: application/jsonp
- - .pickle: application/python-pickle
- - .w2p': application/w2p
-"""
-
-__all__ = ['contenttype']
-
-CONTENT_TYPE = {
-    '.123': 'application/vnd.lotus-1-2-3',
-    '.3ds': 'image/x-3ds',
-    '.3g2': 'video/3gpp2',
-    '.3ga': 'video/3gpp',
-    '.3gp': 'video/3gpp',
-    '.3gp2': 'video/3gpp2',
-    '.3gpp': 'video/3gpp',
-    '.3gpp2': 'video/3gpp2',
-    '.602': 'application/x-t602',
-    '.669': 'audio/x-mod',
-    '.7z': 'application/x-7z-compressed',
-    '.a': 'application/x-archive',
-    '.aac': 'audio/aac',
-    '.abw': 'application/x-abiword',
-    '.abw.crashed': 'application/x-abiword',
-    '.abw.gz': 'application/x-abiword',
-    '.ac3': 'audio/ac3',
-    '.ace': 'application/x-ace',
-    '.adb': 'text/x-adasrc',
-    '.ads': 'text/x-adasrc',
-    '.afm': 'application/x-font-afm',
-    '.ag': 'image/x-applix-graphics',
-    '.ai': 'application/illustrator',
-    '.aif': 'audio/x-aiff',
-    '.aifc': 'audio/x-aifc',
-    '.aiff': 'audio/x-aiff',
-    '.aiffc': 'audio/x-aifc',
-    '.al': 'application/x-perl',
-    '.alz': 'application/x-alz',
-    '.amr': 'audio/amr',
-    '.amz': 'audio/x-amzxml',
-    '.ani': 'application/x-navi-animation',
-    '.anim[1-9j]': 'video/x-anim',
-    '.anx': 'application/annodex',
-    '.ape': 'audio/x-ape',
-    '.apk': 'application/vnd.android.package-archive',
-    '.ar': 'application/x-archive',
-    '.arj': 'application/x-arj',
-    '.arw': 'image/x-sony-arw',
-    '.as': 'application/x-applix-spreadsheet',
-    '.asc': 'text/plain',
-    '.asf': 'video/x-ms-asf',
-    '.asp': 'application/x-asp',
-    '.ass': 'text/x-ssa',
-    '.asx': 'audio/x-ms-asx',
-    '.atom': 'application/atom+xml',
-    '.au': 'audio/basic',
-    '.avf': 'video/x-msvideo',
-    '.avi': 'video/x-msvideo',
-    '.aw': 'application/x-applix-word',
-    '.awb': 'audio/amr-wb',
-    '.awk': 'application/x-awk',
-    '.axa': 'audio/annodex',
-    '.axv': 'video/annodex',
-    '.bak': 'application/x-trash',
-    '.bcpio': 'application/x-bcpio',
-    '.bdf': 'application/x-font-bdf',
-    '.bdm': 'video/mp2t',
-    '.bdmv': 'video/mp2t',
-    '.bib': 'text/x-bibtex',
-    '.bin': 'application/octet-stream',
-    '.blend': 'application/x-blender',
-    '.blender': 'application/x-blender',
-    '.bmp': 'image/bmp',
-    '.bz': 'application/x-bzip',
-    '.bz2': 'application/x-bzip',
-    '.c': 'text/x-csrc',
-    '.c++': 'text/x-c++src',
-    '.cab': 'application/vnd.ms-cab-compressed',
-    '.cap': 'application/vnd.tcpdump.pcap',
-    '.cb7': 'application/x-cb7',
-    '.cbl': 'text/x-cobol',
-    '.cbr': 'application/x-cbr',
-    '.cbt': 'application/x-cbt',
-    '.cbz': 'application/x-cbz',
-    '.cc': 'text/x-c++src',
-    '.ccmx': 'application/x-ccmx',
-    '.cdf': 'application/x-netcdf',
-    '.cdr': 'application/vnd.corel-draw',
-    '.cer': 'application/pkix-cert',
-    '.cert': 'application/x-x509-ca-cert',
-    '.cgm': 'image/cgm',
-    '.chm': 'application/vnd.ms-htmlhelp',
-    '.chrt': 'application/x-kchart',
-    '.class': 'application/x-java',
-    '.clpi': 'video/mp2t',
-    '.cls': 'text/x-tex',
-    '.cmake': 'text/x-cmake',
-    '.cob': 'text/x-cobol',
-    '.cpi': 'video/mp2t',
-    '.cpio': 'application/x-cpio',
-    '.cpio.gz': 'application/x-cpio-compressed',
-    '.cpp': 'text/x-c++src',
-    '.cr2': 'image/x-canon-cr2',
-    '.crl': 'application/pkix-crl',
-    '.crt': 'application/x-x509-ca-cert',
-    '.crw': 'image/x-canon-crw',
-    '.cs': 'text/x-csharp',
-    '.csh': 'application/x-csh',
-    '.css': 'text/css',
-    '.cssl': 'text/css',
-    '.csv': 'text/csv',
-    '.cue': 'application/x-cue',
-    '.cur': 'image/x-win-bitmap',
-    '.cxx': 'text/x-c++src',
-    '.d': 'text/x-dsrc',
-    '.dar': 'application/x-dar',
-    '.dbf': 'application/x-dbf',
-    '.dc': 'application/x-dc-rom',
-    '.dcl': 'text/x-dcl',
-    '.dcm': 'application/dicom',
-    '.dcr': 'image/x-kodak-dcr',
-    '.dds': 'image/x-dds',
-    '.deb': 'application/x-deb',
-    '.der': 'application/x-x509-ca-cert',
-    '.desktop': 'application/x-desktop',
-    '.di': 'text/x-dsrc',
-    '.dia': 'application/x-dia-diagram',
-    '.diff': 'text/x-patch',
-    '.divx': 'video/x-msvideo',
-    '.djv': 'image/vnd.djvu',
-    '.djvu': 'image/vnd.djvu',
-    '.dmg': 'application/x-apple-diskimage',
-    '.dmp': 'application/vnd.tcpdump.pcap',
-    '.dng': 'image/x-adobe-dng',
-    '.doc': 'application/msword',
-    '.docbook': 'application/x-docbook+xml',
-    '.docm': 'application/vnd.ms-word.document.macroenabled.12',
-    '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
-    '.dot': 'text/vnd.graphviz',
-    '.dotm': 'application/vnd.ms-word.template.macroenabled.12',
-    '.dotx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
-    '.dsl': 'text/x-dsl',
-    '.dtd': 'application/xml-dtd',
-    '.dts': 'audio/vnd.dts',
-    '.dtshd': 'audio/vnd.dts.hd',
-    '.dtx': 'text/x-tex',
-    '.dv': 'video/dv',
-    '.dvi': 'application/x-dvi',
-    '.dvi.bz2': 'application/x-bzdvi',
-    '.dvi.gz': 'application/x-gzdvi',
-    '.dwg': 'image/vnd.dwg',
-    '.dxf': 'image/vnd.dxf',
-    '.e': 'text/x-eiffel',
-    '.egon': 'application/x-egon',
-    '.eif': 'text/x-eiffel',
-    '.el': 'text/x-emacs-lisp',
-    '.emf': 'image/x-emf',
-    '.eml': 'message/rfc822',
-    '.emp': 'application/vnd.emusic-emusic_package',
-    '.ent': 'application/xml-external-parsed-entity',
-    '.eps': 'image/x-eps',
-    '.eps.bz2': 'image/x-bzeps',
-    '.eps.gz': 'image/x-gzeps',
-    '.epsf': 'image/x-eps',
-    '.epsf.bz2': 'image/x-bzeps',
-    '.epsf.gz': 'image/x-gzeps',
-    '.epsi': 'image/x-eps',
-    '.epsi.bz2': 'image/x-bzeps',
-    '.epsi.gz': 'image/x-gzeps',
-    '.epub': 'application/epub+zip',
-    '.erl': 'text/x-erlang',
-    '.es': 'application/ecmascript',
-    '.etheme': 'application/x-e-theme',
-    '.etx': 'text/x-setext',
-    '.exe': 'application/x-ms-dos-executable',
-    '.exr': 'image/x-exr',
-    '.ez': 'application/andrew-inset',
-    '.f': 'text/x-fortran',
-    '.f4a': 'audio/mp4',
-    '.f4b': 'audio/x-m4b',
-    '.f4v': 'video/mp4',
-    '.f90': 'text/x-fortran',
-    '.f95': 'text/x-fortran',
-    '.fb2': 'application/x-fictionbook+xml',
-    '.fig': 'image/x-xfig',
-    '.fits': 'image/fits',
-    '.fl': 'application/x-fluid',
-    '.flac': 'audio/flac',
-    '.flc': 'video/x-flic',
-    '.fli': 'video/x-flic',
-    '.flv': 'video/x-flv',
-    '.flw': 'application/x-kivio',
-    '.fo': 'text/x-xslfo',
-    '.fodg': 'application/vnd.oasis.opendocument.graphics-flat-xml',
-    '.fodp': 'application/vnd.oasis.opendocument.presentation-flat-xml',
-    '.fods': 'application/vnd.oasis.opendocument.spreadsheet-flat-xml',
-    '.fodt': 'application/vnd.oasis.opendocument.text-flat-xml',
-    '.for': 'text/x-fortran',
-    '.fxm': 'video/x-javafx',
-    '.g3': 'image/fax-g3',
-    '.gb': 'application/x-gameboy-rom',
-    '.gba': 'application/x-gba-rom',
-    '.gcrd': 'text/vcard',
-    '.ged': 'application/x-gedcom',
-    '.gedcom': 'application/x-gedcom',
-    '.gem': 'application/x-tar',
-    '.gen': 'application/x-genesis-rom',
-    '.gf': 'application/x-tex-gf',
-    '.gg': 'application/x-sms-rom',
-    '.gif': 'image/gif',
-    '.glade': 'application/x-glade',
-    '.gml': 'application/gml+xml',
-    '.gmo': 'application/x-gettext-translation',
-    '.gnc': 'application/x-gnucash',
-    '.gnd': 'application/gnunet-directory',
-    '.gnucash': 'application/x-gnucash',
-    '.gnumeric': 'application/x-gnumeric',
-    '.gnuplot': 'application/x-gnuplot',
-    '.go': 'text/x-go',
-    '.gp': 'application/x-gnuplot',
-    '.gpg': 'application/pgp-encrypted',
-    '.gplt': 'application/x-gnuplot',
-    '.gra': 'application/x-graphite',
-    '.gsf': 'application/x-font-type1',
-    '.gsm': 'audio/x-gsm',
-    '.gtar': 'application/x-tar',
-    '.gv': 'text/vnd.graphviz',
-    '.gvp': 'text/x-google-video-pointer',
-    '.gz': 'application/gzip',
-    '.h': 'text/x-chdr',
-    '.h++': 'text/x-c++hdr',
-    '.h4': 'application/x-hdf',
-    '.h5': 'application/x-hdf',
-    '.hdf': 'application/x-hdf',
-    '.hdf4': 'application/x-hdf',
-    '.hdf5': 'application/x-hdf',
-    '.hh': 'text/x-c++hdr',
-    '.hlp': 'application/winhlp',
-    '.hp': 'text/x-c++hdr',
-    '.hpgl': 'application/vnd.hp-hpgl',
-    '.hpp': 'text/x-c++hdr',
-    '.hs': 'text/x-haskell',
-    '.htm': 'text/html',
-    '.html': 'text/html',
-    '.hwp': 'application/x-hwp',
-    '.hwt': 'application/x-hwt',
-    '.hxx': 'text/x-c++hdr',
-    '.ica': 'application/x-ica',
-    '.icb': 'image/x-tga',
-    '.icc': 'application/vnd.iccprofile',
-    '.icm': 'application/vnd.iccprofile',
-    '.icns': 'image/x-icns',
-    '.ico': 'image/vnd.microsoft.icon',
-    '.ics': 'text/calendar',
-    '.idl': 'text/x-idl',
-    '.ief': 'image/ief',
-    '.iff': 'image/x-ilbm',
-    '.ilbm': 'image/x-ilbm',
-    '.ime': 'text/x-imelody',
-    '.imy': 'text/x-imelody',
-    '.ins': 'text/x-tex',
-    '.iptables': 'text/x-iptables',
-    '.iso': 'application/x-cd-image',
-    '.iso9660': 'application/x-cd-image',
-    '.it': 'audio/x-it',
-    '.it87': 'application/x-it87',
-    '.j2k': 'image/jp2',
-    '.jad': 'text/vnd.sun.j2me.app-descriptor',
-    '.jar': 'application/x-java-archive',
-    '.java': 'text/x-java',
-    '.jceks': 'application/x-java-jce-keystore',
-    '.jks': 'application/x-java-keystore',
-    '.jng': 'image/x-jng',
-    '.jnlp': 'application/x-java-jnlp-file',
-    '.jp2': 'image/jp2',
-    '.jpc': 'image/jp2',
-    '.jpe': 'image/jpeg',
-    '.jpeg': 'image/jpeg',
-    '.jpf': 'image/jp2',
-    '.jpg': 'image/jpeg',
-    '.jpr': 'application/x-jbuilder-project',
-    '.jpx': 'image/jp2',
-    '.js': 'application/javascript',
-    '.json': 'application/json',
-    '.jsonp': 'application/jsonp',
-    '.k25': 'image/x-kodak-k25',
-    '.kar': 'audio/midi',
-    '.karbon': 'application/x-karbon',
-    '.kdc': 'image/x-kodak-kdc',
-    '.kdelnk': 'application/x-desktop',
-    '.kexi': 'application/x-kexiproject-sqlite3',
-    '.kexic': 'application/x-kexi-connectiondata',
-    '.kexis': 'application/x-kexiproject-shortcut',
-    '.kfo': 'application/x-kformula',
-    '.kil': 'application/x-killustrator',
-    '.kino': 'application/smil',
-    '.kml': 'application/vnd.google-earth.kml+xml',
-    '.kmz': 'application/vnd.google-earth.kmz',
-    '.kon': 'application/x-kontour',
-    '.kpm': 'application/x-kpovmodeler',
-    '.kpr': 'application/x-kpresenter',
-    '.kpt': 'application/x-kpresenter',
-    '.kra': 'application/x-krita',
-    '.ks': 'application/x-java-keystore',
-    '.ksp': 'application/x-kspread',
-    '.kud': 'application/x-kugar',
-    '.kwd': 'application/x-kword',
-    '.kwt': 'application/x-kword',
-    '.la': 'application/x-shared-library-la',
-    '.latex': 'text/x-tex',
-    '.lbm': 'image/x-ilbm',
-    '.ldif': 'text/x-ldif',
-    '.lha': 'application/x-lha',
-    '.lhs': 'text/x-literate-haskell',
-    '.lhz': 'application/x-lhz',
-    '.load' : 'text/html',
-    '.log': 'text/x-log',
-    '.lrz': 'application/x-lrzip',
-    '.ltx': 'text/x-tex',
-    '.lua': 'text/x-lua',
-    '.lwo': 'image/x-lwo',
-    '.lwob': 'image/x-lwo',
-    '.lwp': 'application/vnd.lotus-wordpro',
-    '.lws': 'image/x-lws',
-    '.ly': 'text/x-lilypond',
-    '.lyx': 'application/x-lyx',
-    '.lz': 'application/x-lzip',
-    '.lzh': 'application/x-lha',
-    '.lzma': 'application/x-lzma',
-    '.lzo': 'application/x-lzop',
-    '.m': 'text/x-matlab',
-    '.m15': 'audio/x-mod',
-    '.m1u': 'video/vnd.mpegurl',
-    '.m2t': 'video/mp2t',
-    '.m2ts': 'video/mp2t',
-    '.m3u': 'application/vnd.apple.mpegurl',
-    '.m3u8': 'application/vnd.apple.mpegurl',
-    '.m4': 'application/x-m4',
-    '.m4a': 'audio/mp4',
-    '.m4b': 'audio/x-m4b',
-    '.m4u': 'video/vnd.mpegurl',
-    '.m4v': 'video/mp4',
-    '.mab': 'application/x-markaby',
-    '.mak': 'text/x-makefile',
-    '.man': 'application/x-troff-man',
-    '.manifest': 'text/cache-manifest',
-    '.markdown': 'text/x-markdown',
-    '.mbox': 'application/mbox',
-    '.md': 'text/x-markdown',
-    '.mdb': 'application/vnd.ms-access',
-    '.mdi': 'image/vnd.ms-modi',
-    '.me': 'text/x-troff-me',
-    '.med': 'audio/x-mod',
-    '.meta4': 'application/metalink4+xml',
-    '.metalink': 'application/metalink+xml',
-    '.mgp': 'application/x-magicpoint',
-    '.mht': 'application/x-mimearchive',
-    '.mhtml': 'application/x-mimearchive',
-    '.mid': 'audio/midi',
-    '.midi': 'audio/midi',
-    '.mif': 'application/x-mif',
-    '.minipsf': 'audio/x-minipsf',
-    '.mk': 'text/x-makefile',
-    '.mka': 'audio/x-matroska',
-    '.mkd': 'text/x-markdown',
-    '.mkv': 'video/x-matroska',
-    '.ml': 'text/x-ocaml',
-    '.mli': 'text/x-ocaml',
-    '.mm': 'text/x-troff-mm',
-    '.mmf': 'application/x-smaf',
-    '.mml': 'application/mathml+xml',
-    '.mng': 'video/x-mng',
-    '.mo': 'text/x-modelica',
-    '.mo3': 'audio/x-mo3',
-    '.mobi': 'application/x-mobipocket-ebook',
-    '.moc': 'text/x-moc',
-    '.mod': 'audio/x-mod',
-    '.mof': 'text/x-mof',
-    '.moov': 'video/quicktime',
-    '.mov': 'video/quicktime',
-    '.movie': 'video/x-sgi-movie',
-    '.mp+': 'audio/x-musepack',
-    '.mp2': 'video/mpeg',
-    '.mp3': 'audio/mpeg',
-    '.mp4': 'video/mp4',
-    '.mpc': 'audio/x-musepack',
-    '.mpe': 'video/mpeg',
-    '.mpeg': 'video/mpeg',
-    '.mpg': 'video/mpeg',
-    '.mpga': 'audio/mpeg',
-    '.mpl': 'video/mp2t',
-    '.mpls': 'video/mp2t',
-    '.mpp': 'audio/x-musepack',
-    '.mrl': 'text/x-mrml',
-    '.mrml': 'text/x-mrml',
-    '.mrw': 'image/x-minolta-mrw',
-    '.ms': 'text/x-troff-ms',
-    '.msi': 'application/x-msi',
-    '.msod': 'image/x-msod',
-    '.msx': 'application/x-msx-rom',
-    '.mtm': 'audio/x-mod',
-    '.mts': 'video/mp2t',
-    '.mup': 'text/x-mup',
-    '.mxf': 'application/mxf',
-    '.mxu': 'video/vnd.mpegurl',
-    '.n64': 'application/x-n64-rom',
-    '.nb': 'application/mathematica',
-    '.nc': 'application/x-netcdf',
-    '.nds': 'application/x-nintendo-ds-rom',
-    '.nef': 'image/x-nikon-nef',
-    '.nes': 'application/x-nes-rom',
-    '.nfo': 'text/x-nfo',
-    '.not': 'text/x-mup',
-    '.nsc': 'application/x-netshow-channel',
-    '.nsv': 'video/x-nsv',
-    '.nzb': 'application/x-nzb',
-    '.o': 'application/x-object',
-    '.obj': 'application/x-tgif',
-    '.ocl': 'text/x-ocl',
-    '.oda': 'application/oda',
-    '.odb': 'application/vnd.oasis.opendocument.database',
-    '.odc': 'application/vnd.oasis.opendocument.chart',
-    '.odf': 'application/vnd.oasis.opendocument.formula',
-    '.odg': 'application/vnd.oasis.opendocument.graphics',
-    '.odi': 'application/vnd.oasis.opendocument.image',
-    '.odm': 'application/vnd.oasis.opendocument.text-master',
-    '.odp': 'application/vnd.oasis.opendocument.presentation',
-    '.ods': 'application/vnd.oasis.opendocument.spreadsheet',
-    '.odt': 'application/vnd.oasis.opendocument.text',
-    '.oga': 'audio/ogg',
-    '.ogg': 'application/ogg',
-    '.ogm': 'video/x-ogm+ogg',
-    '.ogv': 'video/ogg',
-    '.ogx': 'application/ogg',
-    '.old': 'application/x-trash',
-    '.oleo': 'application/x-oleo',
-    '.ooc': 'text/x-ooc',
-    '.opml': 'text/x-opml+xml',
-    '.oprc': 'application/vnd.palm',
-    '.ora': 'image/openraster',
-    '.orf': 'image/x-olympus-orf',
-    '.otc': 'application/vnd.oasis.opendocument.chart-template',
-    '.otf': 'application/x-font-otf',
-    '.otg': 'application/vnd.oasis.opendocument.graphics-template',
-    '.oth': 'application/vnd.oasis.opendocument.text-web',
-    '.otp': 'application/vnd.oasis.opendocument.presentation-template',
-    '.ots': 'application/vnd.oasis.opendocument.spreadsheet-template',
-    '.ott': 'application/vnd.oasis.opendocument.text-template',
-    '.owl': 'application/rdf+xml',
-    '.oxps': 'application/oxps',
-    '.oxt': 'application/vnd.openofficeorg.extension',
-    '.p': 'text/x-pascal',
-    '.p10': 'application/pkcs10',
-    '.p12': 'application/x-pkcs12',
-    '.p7b': 'application/x-pkcs7-certificates',
-    '.p7c': 'application/pkcs7-mime',
-    '.p7m': 'application/pkcs7-mime',
-    '.p7s': 'application/pkcs7-signature',
-    '.p8': 'application/pkcs8',
-    '.pack': 'application/x-java-pack200',
-    '.pak': 'application/x-pak',
-    '.par2': 'application/x-par2',
-    '.pas': 'text/x-pascal',
-    '.patch': 'text/x-patch',
-    '.pbm': 'image/x-portable-bitmap',
-    '.pcap': 'application/vnd.tcpdump.pcap',
-    '.pcd': 'image/x-photo-cd',
-    '.pcf': 'application/x-cisco-vpn-settings',
-    '.pcf.gz': 'application/x-font-pcf',
-    '.pcf.z': 'application/x-font-pcf',
-    '.pcl': 'application/vnd.hp-pcl',
-    '.pct': 'image/x-pict',
-    '.pcx': 'image/x-pcx',
-    '.pdb': 'chemical/x-pdb',
-    '.pdc': 'application/x-aportisdoc',
-    '.pdf': 'application/pdf',
-    '.pdf.bz2': 'application/x-bzpdf',
-    '.pdf.gz': 'application/x-gzpdf',
-    '.pdf.xz': 'application/x-xzpdf',
-    '.pef': 'image/x-pentax-pef',
-    '.pem': 'application/x-x509-ca-cert',
-    '.perl': 'application/x-perl',
-    '.pfa': 'application/x-font-type1',
-    '.pfb': 'application/x-font-type1',
-    '.pfx': 'application/x-pkcs12',
-    '.pgm': 'image/x-portable-graymap',
-    '.pgn': 'application/x-chess-pgn',
-    '.pgp': 'application/pgp-encrypted',
-    '.php': 'application/x-php',
-    '.php3': 'application/x-php',
-    '.php4': 'application/x-php',
-    '.php5': 'application/x-php',
-    '.phps': 'application/x-php',
-    '.pict': 'image/x-pict',
-    '.pict1': 'image/x-pict',
-    '.pict2': 'image/x-pict',
-    '.pk': 'application/x-tex-pk',
-    '.pkipath': 'application/pkix-pkipath',
-    '.pkr': 'application/pgp-keys',
-    '.pl': 'application/x-perl',
-    '.pla': 'audio/x-iriver-pla',
-    '.pln': 'application/x-planperfect',
-    '.pls': 'audio/x-scpls',
-    '.pm': 'application/x-perl',
-    '.png': 'image/png',
-    '.pnm': 'image/x-portable-anymap',
-    '.pntg': 'image/x-macpaint',
-    '.po': 'text/x-gettext-translation',
-    '.por': 'application/x-spss-por',
-    '.pot': 'text/x-gettext-translation-template',
-    '.potm': 'application/vnd.ms-powerpoint.template.macroenabled.12',
-    '.potx': 'application/vnd.openxmlformats-officedocument.presentationml.template',
-    '.ppam': 'application/vnd.ms-powerpoint.addin.macroenabled.12',
-    '.ppm': 'image/x-portable-pixmap',
-    '.pps': 'application/vnd.ms-powerpoint',
-    '.ppsm': 'application/vnd.ms-powerpoint.slideshow.macroenabled.12',
-    '.ppsx': 'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
-    '.ppt': 'application/vnd.ms-powerpoint',
-    '.pptm': 'application/vnd.ms-powerpoint.presentation.macroenabled.12',
-    '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
-    '.ppz': 'application/vnd.ms-powerpoint',
-    '.pqa': 'application/vnd.palm',
-    '.prc': 'application/vnd.palm',
-    '.ps': 'application/postscript',
-    '.ps.bz2': 'application/x-bzpostscript',
-    '.ps.gz': 'application/x-gzpostscript',
-    '.psd': 'image/vnd.adobe.photoshop',
-    '.psf': 'audio/x-psf',
-    '.psf.gz': 'application/x-gz-font-linux-psf',
-    '.psflib': 'audio/x-psflib',
-    '.psid': 'audio/prs.sid',
-    '.psw': 'application/x-pocket-word',
-    '.pw': 'application/x-pw',
-    '.py': 'text/x-python',
-    '.pyc': 'application/x-python-bytecode',
-    '.pickle': 'application/python-pickle',
-    '.pyo': 'application/x-python-bytecode',
-    '.qif': 'image/x-quicktime',
-    '.qml': 'text/x-qml',
-    '.qt': 'video/quicktime',
-    '.qti': 'application/x-qtiplot',
-    '.qti.gz': 'application/x-qtiplot',
-    '.qtif': 'image/x-quicktime',
-    '.qtl': 'application/x-quicktime-media-link',
-    '.qtvr': 'video/quicktime',
-    '.ra': 'audio/vnd.rn-realaudio',
-    '.raf': 'image/x-fuji-raf',
-    '.ram': 'application/ram',
-    '.rar': 'application/x-rar',
-    '.ras': 'image/x-cmu-raster',
-    '.raw': 'image/x-panasonic-raw',
-    '.rax': 'audio/vnd.rn-realaudio',
-    '.rb': 'application/x-ruby',
-    '.rdf': 'application/rdf+xml',
-    '.rdfs': 'application/rdf+xml',
-    '.reg': 'text/x-ms-regedit',
-    '.rej': 'text/x-reject',
-    '.rgb': 'image/x-rgb',
-    '.rle': 'image/rle',
-    '.rm': 'application/vnd.rn-realmedia',
-    '.rmj': 'application/vnd.rn-realmedia',
-    '.rmm': 'application/vnd.rn-realmedia',
-    '.rms': 'application/vnd.rn-realmedia',
-    '.rmvb': 'application/vnd.rn-realmedia',
-    '.rmx': 'application/vnd.rn-realmedia',
-    '.rnc': 'application/relax-ng-compact-syntax',
-    '.rng': 'application/xml',
-    '.roff': 'text/troff',
-    '.rp': 'image/vnd.rn-realpix',
-    '.rpm': 'application/x-rpm',
-    '.rss': 'application/rss+xml',
-    '.rt': 'text/vnd.rn-realtext',
-    '.rtf': 'application/rtf',
-    '.rtx': 'text/richtext',
-    '.rv': 'video/vnd.rn-realvideo',
-    '.rvx': 'video/vnd.rn-realvideo',
-    '.rw2': 'image/x-panasonic-raw2',
-    '.s3m': 'audio/x-s3m',
-    '.sam': 'application/x-amipro',
-    '.sami': 'application/x-sami',
-    '.sav': 'application/x-spss-sav',
-    '.scala': 'text/x-scala',
-    '.scm': 'text/x-scheme',
-    '.sda': 'application/vnd.stardivision.draw',
-    '.sdc': 'application/vnd.stardivision.calc',
-    '.sdd': 'application/vnd.stardivision.impress',
-    '.sdp': 'application/sdp',
-    '.sds': 'application/vnd.stardivision.chart',
-    '.sdw': 'application/vnd.stardivision.writer',
-    '.sgf': 'application/x-go-sgf',
-    '.sgi': 'image/x-sgi',
-    '.sgl': 'application/vnd.stardivision.writer',
-    '.sgm': 'text/sgml',
-    '.sgml': 'text/sgml',
-    '.sh': 'application/x-shellscript',
-    '.shape': 'application/x-dia-shape',
-    '.shar': 'application/x-shar',
-    '.shn': 'application/x-shorten',
-    '.siag': 'application/x-siag',
-    '.sid': 'audio/prs.sid',
-    '.sik': 'application/x-trash',
-    '.sis': 'application/vnd.symbian.install',
-    '.sisx': 'x-epoc/x-sisx-app',
-    '.sit': 'application/x-stuffit',
-    '.siv': 'application/sieve',
-    '.sk': 'image/x-skencil',
-    '.sk1': 'image/x-skencil',
-    '.skr': 'application/pgp-keys',
-    '.sldm': 'application/vnd.ms-powerpoint.slide.macroenabled.12',
-    '.sldx': 'application/vnd.openxmlformats-officedocument.presentationml.slide',
-    '.slk': 'text/spreadsheet',
-    '.smaf': 'application/x-smaf',
-    '.smc': 'application/x-snes-rom',
-    '.smd': 'application/vnd.stardivision.mail',
-    '.smf': 'application/vnd.stardivision.math',
-    '.smi': 'application/x-sami',
-    '.smil': 'application/smil',
-    '.sml': 'application/smil',
-    '.sms': 'application/x-sms-rom',
-    '.snd': 'audio/basic',
-    '.so': 'application/x-sharedlib',
-    '.spc': 'application/x-pkcs7-certificates',
-    '.spd': 'application/x-font-speedo',
-    '.spec': 'text/x-rpm-spec',
-    '.spl': 'application/x-shockwave-flash',
-    '.spm': 'application/x-source-rpm',
-    '.spx': 'audio/x-speex',
-    '.sql': 'text/x-sql',
-    '.sr2': 'image/x-sony-sr2',
-    '.src': 'application/x-wais-source',
-    '.src.rpm': 'application/x-source-rpm',
-    '.srf': 'image/x-sony-srf',
-    '.srt': 'application/x-subrip',
-    '.ss': 'text/x-scheme',
-    '.ssa': 'text/x-ssa',
-    '.stc': 'application/vnd.sun.xml.calc.template',
-    '.std': 'application/vnd.sun.xml.draw.template',
-    '.sti': 'application/vnd.sun.xml.impress.template',
-    '.stm': 'audio/x-stm',
-    '.stw': 'application/vnd.sun.xml.writer.template',
-    '.sty': 'text/x-tex',
-    '.sub': 'text/x-subviewer',
-    '.sun': 'image/x-sun-raster',
-    '.sv': 'text/x-svsrc',
-    '.sv4cpio': 'application/x-sv4cpio',
-    '.sv4crc': 'application/x-sv4crc',
-    '.svg': 'image/svg+xml',
-    '.svgz': 'image/svg+xml-compressed',
-    '.svh': 'text/x-svhdr',
-    '.swf': 'application/x-shockwave-flash',
-    '.swm': 'application/x-ms-wim',
-    '.sxc': 'application/vnd.sun.xml.calc',
-    '.sxd': 'application/vnd.sun.xml.draw',
-    '.sxg': 'application/vnd.sun.xml.writer.global',
-    '.sxi': 'application/vnd.sun.xml.impress',
-    '.sxm': 'application/vnd.sun.xml.math',
-    '.sxw': 'application/vnd.sun.xml.writer',
-    '.sylk': 'text/spreadsheet',
-    '.t': 'text/troff',
-    '.t2t': 'text/x-txt2tags',
-    '.tar': 'application/x-tar',
-    '.tar.bz': 'application/x-bzip-compressed-tar',
-    '.tar.bz2': 'application/x-bzip-compressed-tar',
-    '.tar.gz': 'application/x-compressed-tar',
-    '.tar.lrz': 'application/x-lrzip-compressed-tar',
-    '.tar.lzma': 'application/x-lzma-compressed-tar',
-    '.tar.lzo': 'application/x-tzo',
-    '.tar.xz': 'application/x-xz-compressed-tar',
-    '.tar.z': 'application/x-tarz',
-    '.taz': 'application/x-tarz',
-    '.tb2': 'application/x-bzip-compressed-tar',
-    '.tbz': 'application/x-bzip-compressed-tar',
-    '.tbz2': 'application/x-bzip-compressed-tar',
-    '.tcl': 'text/x-tcl',
-    '.tex': 'text/x-tex',
-    '.texi': 'text/x-texinfo',
-    '.texinfo': 'text/x-texinfo',
-    '.tga': 'image/x-tga',
-    '.tgz': 'application/x-compressed-tar',
-    '.theme': 'application/x-theme',
-    '.themepack': 'application/x-windows-themepack',
-    '.tif': 'image/tiff',
-    '.tiff': 'image/tiff',
-    '.tk': 'text/x-tcl',
-    '.tlrz': 'application/x-lrzip-compressed-tar',
-    '.tlz': 'application/x-lzma-compressed-tar',
-    '.tnef': 'application/vnd.ms-tnef',
-    '.tnf': 'application/vnd.ms-tnef',
-    '.toc': 'application/x-cdrdao-toc',
-    '.torrent': 'application/x-bittorrent',
-    '.tpic': 'image/x-tga',
-    '.tr': 'text/troff',
-    '.ts': 'video/mp2t',
-    '.tsv': 'text/tab-separated-values',
-    '.tta': 'audio/x-tta',
-    '.ttc': 'application/x-font-ttf',
-    '.ttf': 'application/x-font-ttf',
-    '.ttx': 'application/x-font-ttx',
-    '.txt': 'text/plain',
-    '.txz': 'application/x-xz-compressed-tar',
-    '.tzo': 'application/x-tzo',
-    '.ufraw': 'application/x-ufraw',
-    '.ui': 'application/x-gtk-builder',
-    '.uil': 'text/x-uil',
-    '.ult': 'audio/x-mod',
-    '.uni': 'audio/x-mod',
-    '.url': 'application/x-mswinurl',
-    '.ustar': 'application/x-ustar',
-    '.uue': 'text/x-uuencode',
-    '.v': 'text/x-verilog',
-    '.vala': 'text/x-vala',
-    '.vapi': 'text/x-vala',
-    '.vcard': 'text/vcard',
-    '.vcf': 'text/vcard',
-    '.vcs': 'text/calendar',
-    '.vct': 'text/vcard',
-    '.vda': 'image/x-tga',
-    '.vhd': 'text/x-vhdl',
-    '.vhdl': 'text/x-vhdl',
-    '.viv': 'video/vivo',
-    '.vivo': 'video/vivo',
-    '.vlc': 'audio/x-mpegurl',
-    '.vob': 'video/mpeg',
-    '.voc': 'audio/x-voc',
-    '.vor': 'application/vnd.stardivision.writer',
-    '.vrm': 'model/vrml',
-    '.vrml': 'model/vrml',
-    '.vsd': 'application/vnd.visio',
-    '.vss': 'application/vnd.visio',
-    '.vst': 'image/x-tga',
-    '.vsw': 'application/vnd.visio',
-    '.vtt': 'text/vtt',
-    '.w2p': 'application/w2p',
-    '.wav': 'audio/x-wav',
-    '.wax': 'audio/x-ms-asx',
-    '.wb1': 'application/x-quattropro',
-    '.wb2': 'application/x-quattropro',
-    '.wb3': 'application/x-quattropro',
-    '.wbmp': 'image/vnd.wap.wbmp',
-    '.wcm': 'application/vnd.ms-works',
-    '.wdb': 'application/vnd.ms-works',
-    '.webm': 'video/webm',
-    '.wim': 'application/x-ms-wim',
-    '.wk1': 'application/vnd.lotus-1-2-3',
-    '.wk3': 'application/vnd.lotus-1-2-3',
-    '.wk4': 'application/vnd.lotus-1-2-3',
-    '.wks': 'application/vnd.ms-works',
-    '.wma': 'audio/x-ms-wma',
-    '.wmf': 'image/x-wmf',
-    '.wml': 'text/vnd.wap.wml',
-    '.wmls': 'text/vnd.wap.wmlscript',
-    '.wmv': 'video/x-ms-wmv',
-    '.wmx': 'audio/x-ms-asx',
-    '.woff': 'application/font-woff',
-    '.wp': 'application/vnd.wordperfect',
-    '.wp4': 'application/vnd.wordperfect',
-    '.wp5': 'application/vnd.wordperfect',
-    '.wp6': 'application/vnd.wordperfect',
-    '.wpd': 'application/vnd.wordperfect',
-    '.wpg': 'application/x-wpg',
-    '.wpl': 'application/vnd.ms-wpl',
-    '.wpp': 'application/vnd.wordperfect',
-    '.wps': 'application/vnd.ms-works',
-    '.wri': 'application/x-mswrite',
-    '.wrl': 'model/vrml',
-    '.wsgi': 'text/x-python',
-    '.wv': 'audio/x-wavpack',
-    '.wvc': 'audio/x-wavpack-correction',
-    '.wvp': 'audio/x-wavpack',
-    '.wvx': 'audio/x-ms-asx',
-    '.wwf': 'application/x-wwf',
-    '.x3f': 'image/x-sigma-x3f',
-    '.xac': 'application/x-gnucash',
-    '.xbel': 'application/x-xbel',
-    '.xbl': 'application/xml',
-    '.xbm': 'image/x-xbitmap',
-    '.xcf': 'image/x-xcf',
-    '.xcf.bz2': 'image/x-compressed-xcf',
-    '.xcf.gz': 'image/x-compressed-xcf',
-    '.xhtml': 'application/xhtml+xml',
-    '.xi': 'audio/x-xi',
-    '.xla': 'application/vnd.ms-excel',
-    '.xlam': 'application/vnd.ms-excel.addin.macroenabled.12',
-    '.xlc': 'application/vnd.ms-excel',
-    '.xld': 'application/vnd.ms-excel',
-    '.xlf': 'application/x-xliff',
-    '.xliff': 'application/x-xliff',
-    '.xll': 'application/vnd.ms-excel',
-    '.xlm': 'application/vnd.ms-excel',
-    '.xlr': 'application/vnd.ms-works',
-    '.xls': 'application/vnd.ms-excel',
-    '.xlsb': 'application/vnd.ms-excel.sheet.binary.macroenabled.12',
-    '.xlsm': 'application/vnd.ms-excel.sheet.macroenabled.12',
-    '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
-    '.xlt': 'application/vnd.ms-excel',
-    '.xltm': 'application/vnd.ms-excel.template.macroenabled.12',
-    '.xltx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
-    '.xlw': 'application/vnd.ms-excel',
-    '.xm': 'audio/x-xm',
-    '.xmf': 'audio/x-xmf',
-    '.xmi': 'text/x-xmi',
-    '.xml': 'application/xml',
-    '.xpi': 'application/x-xpinstall',
-    '.xpm': 'image/x-xpixmap',
-    '.xps': 'application/oxps',
-    '.xsd': 'application/xml',
-    '.xsl': 'application/xslt+xml',
-    '.xslfo': 'text/x-xslfo',
-    '.xslm' : 'application/vnd.ms-excel.sheet.macroEnabled.12',
-    '.xslt': 'application/xslt+xml',
-    '.xspf': 'application/xspf+xml',
-    '.xul': 'application/vnd.mozilla.xul+xml',
-    '.xwd': 'image/x-xwindowdump',
-    '.xyz': 'chemical/x-pdb',
-    '.xz': 'application/x-xz',
-    '.yaml': 'application/x-yaml',
-    '.yml': 'application/x-yaml',
-    '.z': 'application/x-compress',
-    '.zabw': 'application/x-abiword',
-    '.zip': 'application/zip',
-    '.zoo': 'application/x-zoo',
-}
-
-
-def contenttype(filename, default='text/plain'):
-    """
-    Returns the Content-Type string matching extension of the given filename.
-    """
-
-    i = filename.rfind('.')
-    if i >= 0:
-        default = CONTENT_TYPE.get(filename[i:].lower(), default)
-        j = filename.rfind('.', 0, i)
-        if j >= 0:
-            default = CONTENT_TYPE.get(filename[j:].lower(), default)
-    if default.startswith('text/'):
-        default += '; charset=utf-8'
-    return default

+ 0 - 270
frameworks/Python/web2py/web2py/gluon/contrib/AuthorizeNet.py

@@ -1,270 +0,0 @@
-"""
-AIM class to credit card payment with authorize.net
-
-Fork of authnet code written by John Conde
-http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/
-BSDv3 License
-
-Modifed by Massimo Di Pierro
-
-- ported from Python 3.x run on Python 2.4+
-- fixed a couple of bugs
-- merged with test so single file
-- namedtuple from http://code.activestate.com/recipes/500261/
-
-"""
-
-__all__ = ['AIM']
-
-from operator import itemgetter
-import urllib
-
-_known_tuple_types = {}
-
-
-class NamedTupleBase(tuple):
-    """Base class for named tuples with the __new__ operator set, named tuples
-       yielded by the namedtuple() function will subclass this and add
-       properties."""
-    def __new__(cls, *args, **kws):
-        """Create a new instance of this fielded tuple"""
-        # May need to unpack named field values here
-        if kws:
-            values = list(args) + [None] * (len(cls._fields) - len(args))
-            fields = dict((val, idx) for idx, val in enumerate(cls._fields))
-            for kw, val in kws.iteritems():
-                assert kw in kws, "%r not in field list" % kw
-                values[fields[kw]] = val
-            args = tuple(values)
-        return tuple.__new__(cls, args)
-
-
-def namedtuple(typename, fieldnames):
-    """
-    >>> import namedtuples
-    >>> tpl = namedtuples.namedtuple(['a', 'b', 'c'])
-    >>> tpl(1, 2, 3)
-    (1, 2, 3)
-    >>> tpl(1, 2, 3).b
-    2
-    >>> tpl(c=1, a=2, b=3)
-    (2, 3, 1)
-    >>> tpl(c=1, a=2, b=3).b
-    3
-    >>> tpl(c='pads with nones')
-    (None, None, 'pads with nones')
-    >>> tpl(b='pads with nones')
-    (None, 'pads with nones', None)
-    >>>
-    """
-    # Split up a string, some people do this
-    if isinstance(fieldnames, basestring):
-        fieldnames = fieldnames.replace(',', ' ').split()
-    # Convert anything iterable that enumerates fields to a tuple now
-    fieldname_tuple = tuple(str(field) for field in fieldnames)
-    # See if we've cached this
-    if fieldname_tuple in _known_tuple_types:
-        return _known_tuple_types[fieldname_tuple]
-    # Make the type
-    new_tuple_type = type(typename, (NamedTupleBase,), {})
-    # Set the hidden field
-    new_tuple_type._fields = fieldname_tuple
-    # Add the getters
-    for i, field in enumerate(fieldname_tuple):
-        setattr(new_tuple_type, field, property(itemgetter(i)))
-    # Cache
-    _known_tuple_types[fieldname_tuple] = new_tuple_type
-    # Done
-    return new_tuple_type
-
-
-class AIM:
-
-    class AIMError(Exception):
-        def __init__(self, value):
-            self.parameter = value
-
-        def __str__(self):
-            return str(self.parameter)
-
-    def __init__(self, login, transkey, testmode=False):
-        if str(login).strip() == '' or login is None:
-            raise AIM.AIMError('No login name provided')
-        if str(transkey).strip() == '' or transkey is None:
-            raise AIM.AIMError('No transaction key provided')
-        if testmode != True and testmode != False:
-            raise AIM.AIMError('Invalid value for testmode. Must be True or False. "{0}" given.'.format(testmode))
-
-        self.testmode = testmode
-        self.proxy = None
-        self.delimiter = '|'
-        self.results = []
-        self.error = True
-        self.success = False
-        self.declined = False
-
-        self.parameters = {}
-        self.setParameter('x_delim_data', 'true')
-        self.setParameter('x_delim_char', self.delimiter)
-        self.setParameter('x_relay_response', 'FALSE')
-        self.setParameter('x_url', 'FALSE')
-        self.setParameter('x_version', '3.1')
-        self.setParameter('x_method', 'CC')
-        self.setParameter('x_type', 'AUTH_CAPTURE')
-        self.setParameter('x_login', login)
-        self.setParameter('x_tran_key', transkey)
-
-    def process(self):
-        encoded_args = urllib.urlencode(self.parameters)
-        if self.testmode == True:
-            url = 'https://test.authorize.net/gateway/transact.dll'
-        else:
-            url = 'https://secure.authorize.net/gateway/transact.dll'
-
-        if self.proxy is None:
-            self.results += str(urllib.urlopen(
-                url, encoded_args).read()).split(self.delimiter)
-        else:
-            opener = urllib.FancyURLopener(self.proxy)
-            opened = opener.open(url, encoded_args)
-            try:
-                self.results += str(opened.read()).split(self.delimiter)
-            finally:
-                opened.close()
-        Results = namedtuple('Results', 'ResultResponse ResponseSubcode ResponseCode ResponseText AuthCode \
-                                          AVSResponse TransactionID InvoiceNumber Description Amount PaymentMethod \
-                                          TransactionType CustomerID CHFirstName CHLastName Company BillingAddress \
-                                          BillingCity BillingState BillingZip BillingCountry Phone Fax Email ShippingFirstName \
-                                          ShippingLastName ShippingCompany ShippingAddress ShippingCity ShippingState \
-                                          ShippingZip ShippingCountry TaxAmount DutyAmount FreightAmount TaxExemptFlag \
-                                          PONumber MD5Hash CVVResponse CAVVResponse')
-        self.response = Results(*tuple(r for r in self.results)[0:40])
-
-        if self.getResultResponseFull() == 'Approved':
-            self.error = False
-            self.success = True
-            self.declined = False
-        elif self.getResultResponseFull() == 'Declined':
-            self.error = False
-            self.success = False
-            self.declined = True
-        else:
-            raise AIM.AIMError(self.response.ResponseText)
-
-    def setTransaction(self, creditcard, expiration, total, cvv=None, tax=None, invoice=None):
-        if str(creditcard).strip() == '' or creditcard is None:
-            raise AIM.AIMError('No credit card number passed to setTransaction(): {0}'.format(creditcard))
-        if str(expiration).strip() == '' or expiration is None:
-            raise AIM.AIMError('No expiration number to setTransaction(): {0}'.format(expiration))
-        if str(total).strip() == '' or total is None:
-            raise AIM.AIMError('No total amount passed to setTransaction(): {0}'.format(total))
-
-        self.setParameter('x_card_num', creditcard)
-        self.setParameter('x_exp_date', expiration)
-        self.setParameter('x_amount', total)
-        if cvv is not None:
-            self.setParameter('x_card_code', cvv)
-        if tax is not None:
-            self.setParameter('x_tax', tax)
-        if invoice is not None:
-            self.setParameter('x_invoice_num', invoice)
-
-    def setTransactionType(self, transtype=None):
-        types = ['AUTH_CAPTURE', 'AUTH_ONLY', 'PRIOR_AUTH_CAPTURE',
-                 'CREDIT', 'CAPTURE_ONLY', 'VOID']
-        if transtype.upper() not in types:
-            raise AIM.AIMError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype))
-        self.setParameter('x_type', transtype.upper())
-
-    def setProxy(self, proxy=None):
-        if str(proxy).strip() == '' or proxy is None:
-            raise AIM.AIMError('No proxy passed to setProxy()')
-        self.proxy = {'http': str(proxy).strip()}
-
-    def setParameter(self, key=None, value=None):
-        if key is not None and value is not None and str(key).strip() != '' and str(value).strip() != '':
-            self.parameters[key] = str(value).strip()
-        else:
-            raise AIM.AIMError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value))
-
-    def isApproved(self):
-        return self.success
-
-    def isDeclined(self):
-        return self.declined
-
-    def isError(self):
-        return self.error
-
-    def getResultResponseFull(self):
-        responses = ['', 'Approved', 'Declined', 'Error']
-        return responses[int(self.results[0])]
-
-
-def process(creditcard, expiration, total, cvv=None, tax=None, invoice=None,
-            login='cnpdev4289', transkey='SR2P8g4jdEn7vFLQ', testmode=True):
-    payment = AIM(login, transkey, testmode)
-    expiration = expiration.replace('/', '')
-    payment.setTransaction(creditcard, expiration, total, cvv, tax, invoice)
-    try:
-        payment.process()
-        return payment.isApproved()
-    except AIM.AIMError:
-        return False
-
-
-def test():
-    import socket
-    import sys
-    from time import time
-
-    creditcard = '4427802641004797'
-    expiration = '122012'
-    total = '1.00'
-    cvv = '123'
-    tax = '0.00'
-    invoice = str(time())[4:10]  # get a random invoice number
-
-    try:
-        payment = AIM('cnpdev4289', 'SR2P8g4jdEn7vFLQ', True)
-        payment.setTransaction(
-            creditcard, expiration, total, cvv, tax, invoice)
-        payment.setParameter(
-            'x_duplicate_window', 180)  # three minutes duplicate windows
-        payment.setParameter('x_cust_id', '1324')       # customer ID
-        payment.setParameter('x_first_name', 'John')
-        payment.setParameter('x_last_name', 'Conde')
-        payment.setParameter('x_company', 'Test Company')
-        payment.setParameter('x_address', '1234 Main Street')
-        payment.setParameter('x_city', 'Townsville')
-        payment.setParameter('x_state', 'NJ')
-        payment.setParameter('x_zip', '12345')
-        payment.setParameter('x_country', 'US')
-        payment.setParameter('x_phone', '800-555-1234')
-        payment.setParameter('x_description', 'Test Transaction')
-        payment.setParameter(
-            'x_customer_ip', socket.gethostbyname(socket.gethostname()))
-        payment.setParameter('x_email', '[email protected]')
-        payment.setParameter('x_email_customer', False)
-        payment.process()
-        if payment.isApproved():
-            print 'Response Code: ', payment.response.ResponseCode
-            print 'Response Text: ', payment.response.ResponseText
-            print 'Response: ', payment.getResultResponseFull()
-            print 'Transaction ID: ', payment.response.TransactionID
-            print 'CVV Result: ', payment.response.CVVResponse
-            print 'Approval Code: ', payment.response.AuthCode
-            print 'AVS Result: ', payment.response.AVSResponse
-        elif payment.isDeclined():
-            print 'Your credit card was declined by your bank'
-        elif payment.isError():
-            raise AIM.AIMError('An uncaught error occurred')
-    except AIM.AIMError, e:
-        print "Exception thrown:", e
-        print 'An error occured'
-    print 'approved', payment.isApproved()
-    print 'declined', payment.isDeclined()
-    print 'error', payment.isError()
-
-if __name__ == '__main__':
-    test()

+ 0 - 244
frameworks/Python/web2py/web2py/gluon/contrib/DowCommerce.py

@@ -1,244 +0,0 @@
-"""
-DowCommerce class to process credit card payments with DowCommerce.com
-
-Modifications to support Dow Commerce API from code originally written by John Conde
-http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/
-BSDv3 License
-
-Modifed by Dave Stoll [email protected]
-
-- modifed to support Dow Commerce API
-"""
-
-__all__ = ['DowCommerce']
-
-from operator import itemgetter
-import urllib
-
-
-class DowCommerce:
-
-    class DowCommerceError(Exception):
-        def __init__(self, value):
-            self.parameter = value
-
-        def __str__(self):
-            return str(self.parameter)
-
-    def __init__(self, username=None, password=None, demomode=False):
-        if not demomode:
-            if str(username).strip() == '' or username is None:
-                raise DowCommerce.DowCommerceError('No username provided')
-            if str(password).strip() == '' or password is None:
-                raise DowCommerce.DowCommerceError('No password provided')
-        else:
-            username = 'demo'
-            password = 'password'
-
-        self.proxy = None
-        self.delimiter = '&'
-        self.results = {}
-        self.error = True
-        self.success = False
-        self.declined = False
-        self.url = 'https://secure.dowcommerce.net/api/transact.php'
-
-        self.parameters = {}
-        self.setParameter('username', username)
-        self.setParameter('password', password)
-
-    def process(self):
-        encoded_args = urllib.urlencode(self.parameters)
-        if self.proxy is None:
-            results = str(urllib.urlopen(
-                self.url, encoded_args).read()).split(self.delimiter)
-        else:
-            opener = urllib.FancyURLopener(self.proxy)
-            opened = opener.open(self.url, encoded_args)
-            try:
-                results = str(opened.read()).split(self.delimiter)
-            finally:
-                opened.close()
-
-        for result in results:
-            (key, val) = result.split('=')
-            self.results[key] = val
-
-        if self.results['response'] == '1':
-            self.error = False
-            self.success = True
-            self.declined = False
-        elif self.results['response'] == '2':
-            self.error = False
-            self.success = False
-            self.declined = True
-        elif self.results['response'] == '3':
-            self.error = True
-            self.success = False
-            self.declined = False
-        else:
-            self.error = True
-            self.success = False
-            self.declined = False
-            raise DowCommerce.DowCommerceError(self.results)
-
-    def setTransaction(
-        self, creditcard, expiration, total, cvv=None, orderid=None, orderdescription=None,
-        ipaddress=None, tax=None, shipping=None,
-        firstname=None, lastname=None, company=None, address1=None, address2=None, city=None, state=None, zipcode=None,
-        country=None, phone=None, fax=None, emailaddress=None, website=None,
-        shipping_firstname=None, shipping_lastname=None, shipping_company=None, shipping_address1=None, shipping_address2=None,
-            shipping_city=None, shipping_state=None, shipping_zipcode=None, shipping_country=None, shipping_emailaddress=None):
-        if str(creditcard).strip() == '' or creditcard is None:
-            raise DowCommerce.DowCommerceError('No credit card number passed to setTransaction(): {0}'.format(creditcard))
-        if str(expiration).strip() == '' or expiration is None:
-            raise DowCommerce.DowCommerceError('No expiration number passed to setTransaction(): {0}'.format(expiration))
-        if str(total).strip() == '' or total is None:
-            raise DowCommerce.DowCommerceError('No total amount passed to setTransaction(): {0}'.format(total))
-
-        self.setParameter('ccnumber', creditcard)
-        self.setParameter('ccexp', expiration)
-        self.setParameter('amount', total)
-
-        if cvv:
-            self.setParameter('cvv', cvv)
-        if orderid:
-            self.setParameter('orderid', orderid)
-        if orderdescription:
-            self.setParameter('orderdescription', orderdescription)
-        if ipaddress:
-            self.setParameter('ipaddress', ipaddress)
-        if tax:
-            self.setParameter('tax', tax)
-        if shipping:
-            self.setParameter('shipping', shipping)
-
-        ## billing info
-        if firstname:
-            self.setParameter('firstname', firstname)
-        if lastname:
-            self.setParameter('lastname', lastname)
-        if company:
-            self.setParameter('company', company)
-        if address1:
-            self.setParameter('address1', address1)
-        if address2:
-            self.setParameter('address2', address2)
-        if city:
-            self.setParameter('city', city)
-        if state:
-            self.setParameter('state', state)
-        if zipcode:
-            self.setParameter('zip', zipcode)
-        if country:
-            self.setParameter('country', country)
-        if phone:
-            self.setParameter('phone', phone)
-        if fax:
-            self.setParameter('fax', fax)
-        if emailaddress:
-            self.setParameter('email', emailaddress)
-        if website:
-            self.setParameter('website', website)
-
-        ## shipping info
-        if shipping_firstname:
-            self.setParameter('shipping_firstname', shipping_firstname)
-        if shipping_lastname:
-            self.setParameter('shipping_lastname', shipping_lastname)
-        if shipping_company:
-            self.setParameter('shipping_company', shipping_company)
-        if shipping_address1:
-            self.setParameter('shipping_address1', shipping_address1)
-        if shipping_address2:
-            self.setParameter('shipping_address2', shipping_address2)
-        if shipping_city:
-            self.setParameter('shipping_city', shipping_city)
-        if shipping_state:
-            self.setParameter('shipping_state', shipping_state)
-        if shipping_zipcode:
-            self.setParameter('shipping_zip', shipping_zipcode)
-        if shipping_country:
-            self.setParameter('shipping_country', shipping_country)
-
-    def setTransactionType(self, transtype=None):
-        types = ['sale', 'auth', 'credit']
-        if transtype.lower() not in types:
-            raise DowCommerce.DowCommerceError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype))
-        self.setParameter('type', transtype.lower())
-
-    def setProxy(self, proxy=None):
-        if str(proxy).strip() == '' or proxy is None:
-            raise DowCommerce.DowCommerceError('No proxy passed to setProxy()')
-        self.proxy = {'http': str(proxy).strip()}
-
-    def setParameter(self, key=None, value=None):
-        if key is not None and value is not None and str(key).strip() != '' and str(value).strip() != '':
-            self.parameters[key] = str(value).strip()
-        else:
-            raise DowCommerce.DowCommerceError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value))
-
-    def isApproved(self):
-        return self.success
-
-    def isDeclined(self):
-        return self.declined
-
-    def isError(self):
-        return self.error
-
-    def getResultResponseShort(self):
-        responses = ['', 'Approved', 'Declined', 'Error']
-        return responses[int(self.results['response'])]
-
-    def getFullResponse(self):
-        return self.results
-
-    def getResponseText(self):
-        return self.results['responsetext']
-
-
-def test():
-    import socket
-    import sys
-    from time import time
-
-    ## TEST VALUES FROM API DOC:
-    # Visa:             4111111111111111
-    # MasterCard        5431111111111111
-    # DiscoverCard:     6011601160116611
-    # American Express: 341111111111111
-    # Expiration:       10/10
-    # Amount:           > 1.00  (( passing less than $1.00 will cause it to be declined ))
-    # CVV:              999
-    creditcard = '4111111111111111'
-    expiration = '1010'
-    total = '1.00'
-    cvv = '999'
-    tax = '0.00'
-    orderid = str(time())[4:10]  # get a random invoice number
-
-    try:
-        payment = DowCommerce(demomode=True)
-        payment.setTransaction(
-            creditcard, expiration, total, cvv=cvv, tax=tax, orderid=orderid, orderdescription='Test Transaction',
-            firstname='John', lastname='Doe', company='Acme', address1='123 Min Street', city='Hometown', state='VA',
-            zipcode='12345', country='US', phone='888-555-1212', emailaddress='[email protected]', ipaddress='192.168.1.1')
-
-        payment.process()
-        if payment.isApproved():
-            print 'Payment approved!'
-            print payment.getFullResponse()
-        elif payment.isDeclined():
-            print 'Your credit card was declined by your bank'
-        elif payment.isError():
-            raise DowCommerce.DowCommerceError('An uncaught error occurred')
-    except DowCommerce.DowCommerceError, e:
-        print "Exception thrown:", e
-        print 'An error occured'
-    print 'approved', payment.isApproved()
-    print 'declined', payment.isDeclined()
-    print 'error', payment.isError()
-
-if __name__ == '__main__':
-    test()

+ 0 - 0
frameworks/Python/web2py/web2py/gluon/contrib/__init__.py


+ 0 - 502
frameworks/Python/web2py/web2py/gluon/contrib/aes.py

@@ -1,502 +0,0 @@
-"""Simple AES cipher implementation in pure Python following PEP-272 API
-
-Homepage: https://bitbucket.org/intgr/pyaes/
-
-The goal of this module is to be as fast as reasonable in Python while still
-being Pythonic and readable/understandable. It is licensed under the permissive
-MIT license.
-
-Hopefully the code is readable and commented enough that it can serve as an
-introduction to the AES cipher for Python coders. In fact, it should go along
-well with the Stick Figure Guide to AES:
-http://www.moserware.com/2009/09/stick-figure-guide-to-advanced.html
-
-Contrary to intuition, this implementation numbers the 4x4 matrices from top to
-bottom for efficiency reasons::
-
-  0  4  8 12
-  1  5  9 13
-  2  6 10 14
-  3  7 11 15
-
-Effectively it's the transposition of what you'd expect. This actually makes
-the code simpler -- except the ShiftRows step, but hopefully the explanation
-there clears it up.
-
-"""
-
-####
-# Copyright (c) 2010 Marti Raudsepp <[email protected]>
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-# THE SOFTWARE.
-####
-
-
-from array import array
-
-# Globals mandated by PEP 272:
-# http://www.python.org/dev/peps/pep-0272/
-MODE_ECB = 1
-MODE_CBC = 2
-#MODE_CTR = 6
-
-block_size = 16
-key_size = None
-
-def new(key, mode=MODE_CBC, IV=None):
-    if mode == MODE_ECB:
-        return ECBMode(AES(key))
-    elif mode == MODE_CBC:
-        if IV is None:
-            raise ValueError("CBC mode needs an IV value!")
-
-        return CBCMode(AES(key), IV)
-    else:
-        raise NotImplementedError
-
-#### AES cipher implementation
-
-class AES(object):
-    block_size = 16
-
-    def __init__(self, key):
-        self.setkey(key)
-
-    def setkey(self, key):
-        """Sets the key and performs key expansion."""
-
-        self.key = key
-        self.key_size = len(key)
-
-        if self.key_size == 16:
-            self.rounds = 10
-        elif self.key_size == 24:
-            self.rounds = 12
-        elif self.key_size == 32:
-            self.rounds = 14
-        else:
-            raise ValueError("Key length must be 16, 24 or 32 bytes")
-
-        self.expand_key()
-
-    def expand_key(self):
-        """Performs AES key expansion on self.key and stores in self.exkey"""
-
-        # The key schedule specifies how parts of the key are fed into the
-        # cipher's round functions. "Key expansion" means performing this
-        # schedule in advance. Almost all implementations do this.
-        #
-        # Here's a description of AES key schedule:
-        # http://en.wikipedia.org/wiki/Rijndael_key_schedule
-
-        # The expanded key starts with the actual key itself
-        exkey = array('B', self.key)
-
-        # extra key expansion steps
-        if self.key_size == 16:
-            extra_cnt = 0
-        elif self.key_size == 24:
-            extra_cnt = 2
-        else:
-            extra_cnt = 3
-
-        # 4-byte temporary variable for key expansion
-        word = exkey[-4:]
-        # Each expansion cycle uses 'i' once for Rcon table lookup
-        for i in xrange(1, 11):
-
-            #### key schedule core:
-            # left-rotate by 1 byte
-            word = word[1:4] + word[0:1]
-
-            # apply S-box to all bytes
-            for j in xrange(4):
-                word[j] = aes_sbox[word[j]]
-
-            # apply the Rcon table to the leftmost byte
-            word[0] = word[0] ^ aes_Rcon[i]
-            #### end key schedule core
-
-            for z in xrange(4):
-                for j in xrange(4):
-                    # mix in bytes from the last subkey
-                    word[j] ^= exkey[-self.key_size + j]
-                exkey.extend(word)
-
-            # Last key expansion cycle always finishes here
-            if len(exkey) >= (self.rounds+1) * self.block_size:
-                break
-
-            # Special substitution step for 256-bit key
-            if self.key_size == 32:
-                for j in xrange(4):
-                    # mix in bytes from the last subkey XORed with S-box of
-                    # current word bytes
-                    word[j] = aes_sbox[word[j]] ^ exkey[-self.key_size + j]
-                exkey.extend(word)
-
-            # Twice for 192-bit key, thrice for 256-bit key
-            for z in xrange(extra_cnt):
-                for j in xrange(4):
-                    # mix in bytes from the last subkey
-                    word[j] ^= exkey[-self.key_size + j]
-                exkey.extend(word)
-
-        self.exkey = exkey
-
-    def add_round_key(self, block, round):
-        """AddRoundKey step in AES. This is where the key is mixed into plaintext"""
-
-        offset = round * 16
-        exkey = self.exkey
-
-        for i in xrange(16):
-            block[i] ^= exkey[offset + i]
-
-        #print 'AddRoundKey:', block
-
-    def sub_bytes(self, block, sbox):
-        """SubBytes step, apply S-box to all bytes
-
-        Depending on whether encrypting or decrypting, a different sbox array
-        is passed in.
-        """
-
-        for i in xrange(16):
-            block[i] = sbox[block[i]]
-
-        #print 'SubBytes   :', block
-
-    def shift_rows(self, b):
-        """ShiftRows step. Shifts 2nd row to left by 1, 3rd row by 2, 4th row by 3
-
-        Since we're performing this on a transposed matrix, cells are numbered
-        from top to bottom::
-
-          0  4  8 12   ->    0  4  8 12    -- 1st row doesn't change
-          1  5  9 13   ->    5  9 13  1    -- row shifted to left by 1 (wraps around)
-          2  6 10 14   ->   10 14  2  6    -- shifted by 2
-          3  7 11 15   ->   15  3  7 11    -- shifted by 3
-        """
-
-        b[1], b[5], b[ 9], b[13] = b[ 5], b[ 9], b[13], b[ 1]
-        b[2], b[6], b[10], b[14] = b[10], b[14], b[ 2], b[ 6]
-        b[3], b[7], b[11], b[15] = b[15], b[ 3], b[ 7], b[11]
-
-        #print 'ShiftRows  :', b
-
-    def shift_rows_inv(self, b):
-        """Similar to shift_rows above, but performed in inverse for decryption."""
-
-        b[ 5], b[ 9], b[13], b[ 1] = b[1], b[5], b[ 9], b[13]
-        b[10], b[14], b[ 2], b[ 6] = b[2], b[6], b[10], b[14]
-        b[15], b[ 3], b[ 7], b[11] = b[3], b[7], b[11], b[15]
-
-        #print 'ShiftRows  :', b
-
-    def mix_columns(self, block):
-        """MixColumns step. Mixes the values in each column"""
-
-        # Cache global multiplication tables (see below)
-        mul_by_2 = gf_mul_by_2
-        mul_by_3 = gf_mul_by_3
-
-        # Since we're dealing with a transposed matrix, columns are already
-        # sequential
-        for i in xrange(4):
-            col = i * 4
-
-            #v0, v1, v2, v3 = block[col : col+4]
-            v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
-                              block[col + 3])
-
-            block[col  ] = mul_by_2[v0] ^ v3 ^ v2 ^ mul_by_3[v1]
-            block[col+1] = mul_by_2[v1] ^ v0 ^ v3 ^ mul_by_3[v2]
-            block[col+2] = mul_by_2[v2] ^ v1 ^ v0 ^ mul_by_3[v3]
-            block[col+3] = mul_by_2[v3] ^ v2 ^ v1 ^ mul_by_3[v0]
-
-        #print 'MixColumns :', block
-
-    def mix_columns_inv(self, block):
-        """Similar to mix_columns above, but performed in inverse for decryption."""
-
-        # Cache global multiplication tables (see below)
-        mul_9  = gf_mul_by_9
-        mul_11 = gf_mul_by_11
-        mul_13 = gf_mul_by_13
-        mul_14 = gf_mul_by_14
-
-        # Since we're dealing with a transposed matrix, columns are already
-        # sequential
-        for i in xrange(4):
-            col = i * 4
-
-            v0, v1, v2, v3 = (block[col], block[col + 1], block[col + 2],
-                              block[col + 3])
-            #v0, v1, v2, v3 = block[col:col+4]
-
-            block[col  ] = mul_14[v0] ^ mul_9[v3] ^ mul_13[v2] ^ mul_11[v1]
-            block[col+1] = mul_14[v1] ^ mul_9[v0] ^ mul_13[v3] ^ mul_11[v2]
-            block[col+2] = mul_14[v2] ^ mul_9[v1] ^ mul_13[v0] ^ mul_11[v3]
-            block[col+3] = mul_14[v3] ^ mul_9[v2] ^ mul_13[v1] ^ mul_11[v0]
-
-        #print 'MixColumns :', block
-
-    def encrypt_block(self, block):
-        """Encrypts a single block. This is the main AES function"""
-
-        # For efficiency reasons, the state between steps is transmitted via a
-        # mutable array, not returned.
-        self.add_round_key(block, 0)
-
-        for round in xrange(1, self.rounds):
-            self.sub_bytes(block, aes_sbox)
-            self.shift_rows(block)
-            self.mix_columns(block)
-            self.add_round_key(block, round)
-
-        self.sub_bytes(block, aes_sbox)
-        self.shift_rows(block)
-        # no mix_columns step in the last round
-        self.add_round_key(block, self.rounds)
-
-    def decrypt_block(self, block):
-        """Decrypts a single block. This is the main AES decryption function"""
-
-        # For efficiency reasons, the state between steps is transmitted via a
-        # mutable array, not returned.
-        self.add_round_key(block, self.rounds)
-
-        # count rounds down from 15 ... 1
-        for round in xrange(self.rounds-1, 0, -1):
-            self.shift_rows_inv(block)
-            self.sub_bytes(block, aes_inv_sbox)
-            self.add_round_key(block, round)
-            self.mix_columns_inv(block)
-
-        self.shift_rows_inv(block)
-        self.sub_bytes(block, aes_inv_sbox)
-        self.add_round_key(block, 0)
-        # no mix_columns step in the last round
-
-
-#### ECB mode implementation
-
-class ECBMode(object):
-    """Electronic CodeBook (ECB) mode encryption.
-
-    Basically this mode applies the cipher function to each block individually;
-    no feedback is done. NB! This is insecure for almost all purposes
-    """
-
-    def __init__(self, cipher):
-        self.cipher = cipher
-        self.block_size = cipher.block_size
-
-    def ecb(self, data, block_func):
-        """Perform ECB mode with the given function"""
-
-        if len(data) % self.block_size != 0:
-            raise ValueError("Plaintext length must be multiple of 16")
-
-        block_size = self.block_size
-        data = array('B', data)
-
-        for offset in xrange(0, len(data), block_size):
-            block = data[offset : offset+block_size]
-            block_func(block)
-            data[offset : offset+block_size] = block
-
-        return data.tostring()
-
-    def encrypt(self, data):
-        """Encrypt data in ECB mode"""
-
-        return self.ecb(data, self.cipher.encrypt_block)
-
-    def decrypt(self, data):
-        """Decrypt data in ECB mode"""
-
-        return self.ecb(data, self.cipher.decrypt_block)
-
-#### CBC mode
-
-class CBCMode(object):
-    """Cipher Block Chaining (CBC) mode encryption. This mode avoids content leaks.
-
-    In CBC encryption, each plaintext block is XORed with the ciphertext block
-    preceding it; decryption is simply the inverse.
-    """
-
-    # A better explanation of CBC can be found here:
-    # http://en.wikipedia.org/wiki/Block_cipher_modes_of_operation#Cipher-block_chaining_.28CBC.29
-
-    def __init__(self, cipher, IV):
-        self.cipher = cipher
-        self.block_size = cipher.block_size
-        self.IV = array('B', IV)
-
-    def encrypt(self, data):
-        """Encrypt data in CBC mode"""
-
-        block_size = self.block_size
-        if len(data) % block_size != 0:
-            raise ValueError("Plaintext length must be multiple of 16")
-
-        data = array('B', data)
-        IV = self.IV
-
-        for offset in xrange(0, len(data), block_size):
-            block = data[offset : offset+block_size]
-
-            # Perform CBC chaining
-            for i in xrange(block_size):
-                block[i] ^= IV[i]
-
-            self.cipher.encrypt_block(block)
-            data[offset : offset+block_size] = block
-            IV = block
-
-        self.IV = IV
-        return data.tostring()
-
-    def decrypt(self, data):
-        """Decrypt data in CBC mode"""
-
-        block_size = self.block_size
-        if len(data) % block_size != 0:
-            raise ValueError("Ciphertext length must be multiple of 16")
-
-        data = array('B', data)
-        IV = self.IV
-
-        for offset in xrange(0, len(data), block_size):
-            ctext = data[offset : offset+block_size]
-            block = ctext[:]
-            self.cipher.decrypt_block(block)
-
-            # Perform CBC chaining
-            #for i in xrange(block_size):
-            #    data[offset + i] ^= IV[i]
-            for i in xrange(block_size):
-                block[i] ^= IV[i]
-            data[offset : offset+block_size] = block
-
-            IV = ctext
-            #data[offset : offset+block_size] = block
-
-        self.IV = IV
-        return data.tostring()
-
-####
-
-def galois_multiply(a, b):
-    """Galois Field multiplicaiton for AES"""
-    p = 0
-    while b:
-        if b & 1:
-            p ^= a
-        a <<= 1
-        if a & 0x100:
-            a ^= 0x1b
-        b >>= 1
-
-    return p & 0xff
-
-# Precompute the multiplication tables for encryption
-gf_mul_by_2  = array('B', [galois_multiply(x,  2) for x in range(256)])
-gf_mul_by_3  = array('B', [galois_multiply(x,  3) for x in range(256)])
-# ... for decryption
-gf_mul_by_9  = array('B', [galois_multiply(x,  9) for x in range(256)])
-gf_mul_by_11 = array('B', [galois_multiply(x, 11) for x in range(256)])
-gf_mul_by_13 = array('B', [galois_multiply(x, 13) for x in range(256)])
-gf_mul_by_14 = array('B', [galois_multiply(x, 14) for x in range(256)])
-
-####
-
-# The S-box is a 256-element array, that maps a single byte value to another
-# byte value. Since it's designed to be reversible, each value occurs only once
-# in the S-box
-#
-# More information: http://en.wikipedia.org/wiki/Rijndael_S-box
-
-aes_sbox = array('B',
-    '637c777bf26b6fc53001672bfed7ab76'
-    'ca82c97dfa5947f0add4a2af9ca472c0'
-    'b7fd9326363ff7cc34a5e5f171d83115'
-    '04c723c31896059a071280e2eb27b275'
-    '09832c1a1b6e5aa0523bd6b329e32f84'
-    '53d100ed20fcb15b6acbbe394a4c58cf'
-    'd0efaafb434d338545f9027f503c9fa8'
-    '51a3408f929d38f5bcb6da2110fff3d2'
-    'cd0c13ec5f974417c4a77e3d645d1973'
-    '60814fdc222a908846eeb814de5e0bdb'
-    'e0323a0a4906245cc2d3ac629195e479'
-    'e7c8376d8dd54ea96c56f4ea657aae08'
-    'ba78252e1ca6b4c6e8dd741f4bbd8b8a'
-    '703eb5664803f60e613557b986c11d9e'
-    'e1f8981169d98e949b1e87e9ce5528df'
-    '8ca1890dbfe6426841992d0fb054bb16'.decode('hex')
-)
-
-# This is the inverse of the above. In other words:
-# aes_inv_sbox[aes_sbox[val]] == val
-
-aes_inv_sbox = array('B',
-    '52096ad53036a538bf40a39e81f3d7fb'
-    '7ce339829b2fff87348e4344c4dee9cb'
-    '547b9432a6c2233dee4c950b42fac34e'
-    '082ea16628d924b2765ba2496d8bd125'
-    '72f8f66486689816d4a45ccc5d65b692'
-    '6c704850fdedb9da5e154657a78d9d84'
-    '90d8ab008cbcd30af7e45805b8b34506'
-    'd02c1e8fca3f0f02c1afbd0301138a6b'
-    '3a9111414f67dcea97f2cfcef0b4e673'
-    '96ac7422e7ad3585e2f937e81c75df6e'
-    '47f11a711d29c5896fb7620eaa18be1b'
-    'fc563e4bc6d279209adbc0fe78cd5af4'
-    '1fdda8338807c731b11210592780ec5f'
-    '60517fa919b54a0d2de57a9f93c99cef'
-    'a0e03b4dae2af5b0c8ebbb3c83539961'
-    '172b047eba77d626e169146355210c7d'.decode('hex')
-)
-
-# The Rcon table is used in AES's key schedule (key expansion)
-# It's a pre-computed table of exponentation of 2 in AES's finite field
-#
-# More information: http://en.wikipedia.org/wiki/Rijndael_key_schedule
-
-aes_Rcon = array('B',
-    '8d01020408102040801b366cd8ab4d9a'
-    '2f5ebc63c697356ad4b37dfaefc59139'
-    '72e4d3bd61c29f254a943366cc831d3a'
-    '74e8cb8d01020408102040801b366cd8'
-    'ab4d9a2f5ebc63c697356ad4b37dfaef'
-    'c5913972e4d3bd61c29f254a943366cc'
-    '831d3a74e8cb8d01020408102040801b'
-    '366cd8ab4d9a2f5ebc63c697356ad4b3'
-    '7dfaefc5913972e4d3bd61c29f254a94'
-    '3366cc831d3a74e8cb8d010204081020'
-    '40801b366cd8ab4d9a2f5ebc63c69735'
-    '6ad4b37dfaefc5913972e4d3bd61c29f'
-    '254a943366cc831d3a74e8cb8d010204'
-    '08102040801b366cd8ab4d9a2f5ebc63'
-    'c697356ad4b37dfaefc5913972e4d3bd'
-    '61c29f254a943366cc831d3a74e8cb'.decode('hex')
-)

+ 0 - 123
frameworks/Python/web2py/web2py/gluon/contrib/appconfig.py

@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-Read from configuration files easily without hurting performances
-
-USAGE:
-During development you can load a config file either in .ini or .json
-format (by default app/private/appconfig.ini or app/private/appconfig.json)
-The result is a dict holding the configured values. Passing reload=True
-is meant only for development: in production, leave reload to False and all
-values will be cached
-
-from gluon.contrib.appconfig import AppConfig
-myconfig = AppConfig(path_to_configfile, reload=False)
-
-print myconfig['db']['uri']
-
-The returned dict can walk with "dot notation" an arbitrarely nested dict
-
-print myconfig.take('db.uri')
-
-You can even pass a cast function, i.e.
-
-print myconfig.take('auth.expiration', cast=int)
-
-Once the value has been fetched (and casted) it won't change until the process
-is restarted (or reload=True is passed).
-
-"""
-import thread
-import os
-from ConfigParser import SafeConfigParser
-from gluon import current
-from gluon.serializers import json_parser
-
-locker = thread.allocate_lock()
-
-
-def AppConfig(*args, **vars):
-
-    locker.acquire()
-    reload_ = vars.pop('reload', False)
-    try:
-        instance_name = 'AppConfig_' + current.request.application
-        if reload_ or not hasattr(AppConfig, instance_name):
-            setattr(AppConfig, instance_name, AppConfigLoader(*args, **vars))
-        return getattr(AppConfig, instance_name).settings
-    finally:
-        locker.release()
-
-
-class AppConfigDict(dict):
-    """
-    dict that has a .take() method to fetch nested values and puts
-    them into cache
-    """
-
-    def __init__(self, *args, **kwargs):
-        dict.__init__(self, *args, **kwargs)
-        self.int_cache = {}
-
-    def take(self, path, cast=None):
-        parts = path.split('.')
-        if path in self.int_cache:
-            return self.int_cache[path]
-        value = self
-        walking = []
-        for part in parts:
-            if part not in value:
-                raise BaseException("%s not in config [%s]" %
-                    (part, '-->'.join(walking)))
-            value = value[part]
-            walking.append(part)
-        if cast is None:
-            self.int_cache[path] = value
-        else:
-            try:
-                value = cast(value)
-                self.int_cache[path] = value
-            except (ValueError, TypeError):
-                raise BaseException("%s can't be converted to %s" %
-                 (value, cast))
-        return value
-
-
-class AppConfigLoader(object):
-
-    def __init__(self, configfile=None):
-        if not configfile:
-            priv_folder = os.path.join(current.request.folder, 'private')
-            configfile = os.path.join(priv_folder, 'appconfig.ini')
-            if not os.path.isfile(configfile):
-                configfile = os.path.join(priv_folder, 'appconfig.json')
-                if not os.path.isfile(configfile):
-                    configfile = None
-        if not configfile or not os.path.isfile(configfile):
-            raise BaseException("Config file not found")
-        self.file = configfile
-        self.ctype = os.path.splitext(configfile)[1][1:]
-        self.settings = None
-        self.read_config()
-
-    def read_config_ini(self):
-        config = SafeConfigParser()
-        config.read(self.file)
-        settings = {}
-        for section in config.sections():
-            settings[section] = {}
-            for option in config.options(section):
-                settings[section][option] = config.get(section, option)
-        self.settings = AppConfigDict(settings)
-
-    def read_config_json(self):
-        with open(self.file, 'r') as c:
-            self.settings = AppConfigDict(json_parser.load(c))
-
-    def read_config(self):
-        if self.settings is None:
-            try:
-                getattr(self, 'read_config_' + self.ctype)()
-            except AttributeError:
-                raise BaseException("Unsupported config file format")
-        return self.settings

+ 0 - 220
frameworks/Python/web2py/web2py/gluon/contrib/autolinks.py

@@ -1,220 +0,0 @@
-"""
-Developed by Massimo Di Pierro
-Released under the web2py license (LGPL)
-
-What does it do?
-
-if html is a variable containing HTML text and urls in the text, when you call
-
-    html = expend_html(html)
-
-it automatically converts the url to links but when possible it embeds the object being linked.
-In particular it can embed images, videos, audio files, documents (it uses the google code player),
-as well as pages to a oembed service.
-
-
-Google Doc Support
-==================
-Microsoft Word (.DOC, .DOCX)
-Microsoft Excel (.XLS and .XLSX)
-Microsoft PowerPoint 2007 / 2010 (.PPTX)
-Apple Pages (.PAGES)
-Adobe PDF (.PDF)
-Adobe Illustrator (.AI)
-Adobe Photoshop (.PSD)
-Autodesk AutoCad (.DXF)
-Scalable Vector Graphics (.SVG)
-PostScript (.EPS, .PS)
-TrueType (.TTF)
-XML Paper Specification (.XPS)
-
-Oembed Support
-==============
-flickr.com
-youtube.com
-hulu.com
-vimeo.com
-slideshare.net
-qik.com
-polleverywhere.com
-wordpress.com
-revision3.com
-viddler.com
-"""
-
-import re
-import cgi
-import sys
-from simplejson import loads
-import urllib
-import uuid
-try:
-    from BeautifulSoup import BeautifulSoup, Comment
-    have_soup = True
-except ImportError:
-    have_soup = False
-
-regex_link = re.compile('https?://\S+')
-
-EMBED_MAPS = [
-    (re.compile('http://\S*?flickr.com/\S*'),
-     'http://www.flickr.com/services/oembed/'),
-    (re.compile('http://\S*.youtu(\.be|be\.com)/watch\S*'),
-     'http://www.youtube.com/oembed'),
-    (re.compile('http://www.hulu.com/watch/\S*'),
-     'http://www.hulu.com/api/oembed.json'),
-    (re.compile('http://vimeo.com/\S*'),
-     'http://vimeo.com/api/oembed.json'),
-    (re.compile('http://www.slideshare.net/[^\/]+/\S*'),
-     'http://www.slideshare.net/api/oembed/2'),
-    (re.compile('http://qik.com/\S*'),
-     'http://qik.com/api/oembed.json'),
-    (re.compile('http://www.polleverywhere.com/\w+/\S+'),
-     'http://www.polleverywhere.com/services/oembed/'),
-    (re.compile('http://\S+.wordpress.com/\S+'),
-     'http://public-api.wordpress.com/oembed/'),
-    (re.compile('http://*.revision3.com/\S+'),
-     'http://revision3.com/api/oembed/'),
-    (re.compile('http://\S+.viddler.com/\S+'),
-     'http://lab.viddler.com/services/oembed/'),
-]
-
-
-def image(url):
-    return '<img src="%s" style="max-width:100%%"/>' % url
-
-
-def audio(url):
-    return '<audio controls="controls" style="max-width:100%%"><source src="%s" /></audio>' % url
-
-
-def video(url):
-    return '<video controls="controls" style="max-width:100%%"><source src="%s" /></video>' % url
-
-
-def googledoc_viewer(url):
-    return '<iframe src="http://docs.google.com/viewer?url=%s&embedded=true" style="max-width:100%%"></iframe>' % urllib.quote(url)
-
-
-def web2py_component(url):
-    code = str(uuid.uuid4())
-    return '<div id="%s"></div><script>\nweb2py_component("%s","%s");\n</script>' % (code, url, code)
-
-EXTENSION_MAPS = {
-    'png': image,
-    'gif': image,
-    'jpg': image,
-    'jpeg': image,
-    'wav': audio,
-    'ogg': audio,
-    'mp3': audio,
-    'mov': video,
-    'mpe': video,
-    'mp4': video,
-    'mpg': video,
-    'mpg2': video,
-    'mpeg': video,
-    'mpeg4': video,
-    'movie': video,
-    'wmv': video,
-    'load': web2py_component,
-    'pdf': googledoc_viewer,
-    'doc': googledoc_viewer,
-    'docx': googledoc_viewer,
-    'ppt': googledoc_viewer,
-    'pptx': googledoc_viewer,
-    'xls': googledoc_viewer,
-    'xlsx': googledoc_viewer,
-    'pages': googledoc_viewer,
-    'ai': googledoc_viewer,
-    'psd': googledoc_viewer,
-    'xdf': googledoc_viewer,
-    'svg': googledoc_viewer,
-    'ttf': googledoc_viewer,
-    'xps': googledoc_viewer,
-}
-
-
-class VimeoURLOpener(urllib.FancyURLopener):
-    "Vimeo blocks the urllib user agent for some reason"
-    version = "Mozilla/4.0"
-urllib._urlopener = VimeoURLOpener()
-
-
-def oembed(url):
-    for k, v in EMBED_MAPS:
-        if k.match(url):
-            oembed = v + '?format=json&url=' + cgi.escape(url)
-            try:
-                data = urllib.urlopen(oembed).read()
-                return loads(data)  # json!
-            except:
-                pass
-    return {}
-
-
-def extension(url):
-    return url.split('?')[0].split('.')[-1].lower()
-
-
-def expand_one(url, cdict):
-    # try ombed but first check in cache
-    if '@' in url and not '://'in url:
-        return '<a href="mailto:%s">%s</a>' % (url, url)
-    if cdict and url in cdict:
-        r = cdict[url]
-    else:
-        r = oembed(url)
-        if isinstance(cdict, dict):
-            cdict[url] = r
-    # if oembed service
-    if 'html' in r:
-        html = r['html'].encode('utf8')
-        if html.startswith('<object'):
-            return '<embed style="max-width:100%%">%s</embed>' % html
-        else:
-            return html
-    elif 'url' in r:
-        url = r['url'].encode('utf8')
-    # embed images, video, audio files
-    ext = extension(url)
-    if ext in EXTENSION_MAPS:
-        return EXTENSION_MAPS[ext](url)
-    # else regular link
-    return '<a href="%(u)s">%(u)s</a>' % dict(u=url)
-
-
-def expand_html(html, cdict=None):
-    if not have_soup:
-        raise RuntimeError("Missing BeautifulSoup")
-    soup = BeautifulSoup(html)
-    comments = soup.findAll(text=lambda text: isinstance(text, Comment))
-    [comment.extract() for comment in comments]
-    for txt in soup.findAll(text=True):
-        if not txt.parent.name in ('a', 'script', 'pre', 'code', 'embed', 'object', 'audio', 'video'):
-            ntxt = regex_link.sub(
-                lambda match: expand_one(match.group(0), cdict), txt)
-            txt.replaceWith(BeautifulSoup(ntxt))
-    return str(soup)
-
-
-def test():
-    example = """
-<h3>Fringilla nisi parturient nullam</h3>
-<p>http://www.youtube.com/watch?v=IWBFiI5RrA0</p>
-<p>http://www.web2py.com/examples/static/images/logo_bw.png</p>
-<p>http://www.web2py.com/examples/default/index.load</p>
-<p>http://www.web2py.com/examples/static/web2py_manual_cutl.pdf</p>
-<p>Elementum sodales est varius magna leo sociis erat. Nascetur pretium non
-ultricies gravida. Condimentum at nascetur tempus. Porttitor viverra ipsum
-accumsan neque aliquet. Ultrices vestibulum tempor quisque eget sem eget.
-Ornare malesuada tempus dolor dolor magna consectetur. Nisl dui non curabitur
-laoreet tortor.</p>
-"""
-    return expand_html(example)
-
-if __name__ == "__main__":
-    if len(sys.argv) > 1:
-        print expand_html(open(sys.argv[1]).read())
-    else:
-        print test()

+ 0 - 4013
frameworks/Python/web2py/web2py/gluon/contrib/feedparser.py

@@ -1,4013 +0,0 @@
-"""Universal feed parser
-
-Handles RSS 0.9x, RSS 1.0, RSS 2.0, CDF, Atom 0.3, and Atom 1.0 feeds
-
-Visit https://code.google.com/p/feedparser/ for the latest version
-Visit http://packages.python.org/feedparser/ for the latest documentation
-
-Required: Python 2.4 or later
-Recommended: iconv_codec <http://cjkpython.i18n.org/>
-"""
-
-__version__ = "5.1.3"
-__license__ = """
-Copyright (c) 2010-2012 Kurt McKee <[email protected]>
-Copyright (c) 2002-2008 Mark Pilgrim
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice,
-  this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE."""
-__author__ = "Mark Pilgrim <http://diveintomark.org/>"
-__contributors__ = ["Jason Diamond <http://injektilo.org/>",
-                    "John Beimler <http://john.beimler.org/>",
-                    "Fazal Majid <http://www.majid.info/mylos/weblog/>",
-                    "Aaron Swartz <http://aaronsw.com/>",
-                    "Kevin Marks <http://epeus.blogspot.com/>",
-                    "Sam Ruby <http://intertwingly.net/>",
-                    "Ade Oshineye <http://blog.oshineye.com/>",
-                    "Martin Pool <http://sourcefrog.net/>",
-                    "Kurt McKee <http://kurtmckee.org/>",
-                    "Bernd Schlapsi <https://github.com/brot>",]
-
-# HTTP "User-Agent" header to send to servers when downloading feeds.
-# If you are embedding feedparser in a larger application, you should
-# change this to your application name and URL.
-USER_AGENT = "UniversalFeedParser/%s +https://code.google.com/p/feedparser/" % __version__
-
-# HTTP "Accept" header to send to servers when downloading feeds.  If you don't
-# want to send an Accept header, set this to None.
-ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
-
-# List of preferred XML parsers, by SAX driver name.  These will be tried first,
-# but if they're not installed, Python will keep searching through its own list
-# of pre-installed parsers until it finds one that supports everything we need.
-PREFERRED_XML_PARSERS = ["drv_libxml2"]
-
-# If you want feedparser to automatically run HTML markup through HTML Tidy, set
-# this to 1.  Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
-# or utidylib <http://utidylib.berlios.de/>.
-TIDY_MARKUP = 0
-
-# List of Python interfaces for HTML Tidy, in order of preference.  Only useful
-# if TIDY_MARKUP = 1
-PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
-
-# If you want feedparser to automatically resolve all relative URIs, set this
-# to 1.
-RESOLVE_RELATIVE_URIS = 1
-
-# If you want feedparser to automatically sanitize all potentially unsafe
-# HTML content, set this to 1.
-SANITIZE_HTML = 1
-
-# If you want feedparser to automatically parse microformat content embedded
-# in entry contents, set this to 1
-PARSE_MICROFORMATS = 1
-
-# ---------- Python 3 modules (make it work if possible) ----------
-try:
-    import rfc822
-except ImportError:
-    from email import _parseaddr as rfc822
-
-try:
-    # Python 3.1 introduces bytes.maketrans and simultaneously
-    # deprecates string.maketrans; use bytes.maketrans if possible
-    _maketrans = bytes.maketrans
-except (NameError, AttributeError):
-    import string
-    _maketrans = string.maketrans
-
-# base64 support for Atom feeds that contain embedded binary data
-try:
-    import base64, binascii
-except ImportError:
-    base64 = binascii = None
-else:
-    # Python 3.1 deprecates decodestring in favor of decodebytes
-    _base64decode = getattr(base64, 'decodebytes', base64.decodestring)
-
-# _s2bytes: convert a UTF-8 str to bytes if the interpreter is Python 3
-# _l2bytes: convert a list of ints to bytes if the interpreter is Python 3
-try:
-    if bytes is str:
-        # In Python 2.5 and below, bytes doesn't exist (NameError)
-        # In Python 2.6 and above, bytes and str are the same type
-        raise NameError
-except NameError:
-    # Python 2
-    def _s2bytes(s):
-        return s
-    def _l2bytes(l):
-        return ''.join(map(chr, l))
-else:
-    # Python 3
-    def _s2bytes(s):
-        return bytes(s, 'utf8')
-    def _l2bytes(l):
-        return bytes(l)
-
-# If you want feedparser to allow all URL schemes, set this to ()
-# List culled from Python's urlparse documentation at:
-#   http://docs.python.org/library/urlparse.html
-# as well as from "URI scheme" at Wikipedia:
-#   https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme
-# Many more will likely need to be added!
-ACCEPTABLE_URI_SCHEMES = (
-    'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'magnet',
-    'mailto', 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu',
-    'sftp', 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet',
-    'wais',
-    # Additional common-but-unofficial schemes
-    'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs',
-    'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg',
-)
-#ACCEPTABLE_URI_SCHEMES = ()
-
-# ---------- required modules (should come with any Python distribution) ----------
-import cgi
-import codecs
-import copy
-import datetime
-import re
-import struct
-import time
-import types
-import urllib
-import urllib2
-import urlparse
-import warnings
-
-from htmlentitydefs import name2codepoint, codepoint2name, entitydefs
-
-try:
-    from io import BytesIO as _StringIO
-except ImportError:
-    try:
-        from cStringIO import StringIO as _StringIO
-    except ImportError:
-        from StringIO import StringIO as _StringIO
-
-# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
-
-# gzip is included with most Python distributions, but may not be available if you compiled your own
-try:
-    import gzip
-except ImportError:
-    gzip = None
-try:
-    import zlib
-except ImportError:
-    zlib = None
-
-# If a real XML parser is available, feedparser will attempt to use it.  feedparser has
-# been tested with the built-in SAX parser and libxml2.  On platforms where the
-# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
-# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
-try:
-    import xml.sax
-    from xml.sax.saxutils import escape as _xmlescape
-except ImportError:
-    _XML_AVAILABLE = 0
-    def _xmlescape(data,entities={}):
-        data = data.replace('&', '&amp;')
-        data = data.replace('>', '&gt;')
-        data = data.replace('<', '&lt;')
-        for char, entity in entities:
-            data = data.replace(char, entity)
-        return data
-else:
-    try:
-        xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
-    except xml.sax.SAXReaderNotAvailable:
-        _XML_AVAILABLE = 0
-    else:
-        _XML_AVAILABLE = 1
-
-# sgmllib is not available by default in Python 3; if the end user doesn't have
-# it available then we'll lose illformed XML parsing, content santizing, and
-# microformat support (at least while feedparser depends on BeautifulSoup).
-try:
-    import sgmllib
-except ImportError:
-    # This is probably Python 3, which doesn't include sgmllib anymore
-    _SGML_AVAILABLE = 0
-
-    # Mock sgmllib enough to allow subclassing later on
-    class sgmllib(object):
-        class SGMLParser(object):
-            def goahead(self, i):
-                pass
-            def parse_starttag(self, i):
-                pass
-else:
-    _SGML_AVAILABLE = 1
-
-    # sgmllib defines a number of module-level regular expressions that are
-    # insufficient for the XML parsing feedparser needs. Rather than modify
-    # the variables directly in sgmllib, they're defined here using the same
-    # names, and the compiled code objects of several sgmllib.SGMLParser
-    # methods are copied into _BaseHTMLProcessor so that they execute in
-    # feedparser's scope instead of sgmllib's scope.
-    charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);')
-    tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
-    attrfind = re.compile(
-        r'\s*([a-zA-Z_][-:.a-zA-Z_0-9]*)[$]?(\s*=\s*'
-        r'(\'[^\']*\'|"[^"]*"|[][\-a-zA-Z0-9./,:;+*%?!&$\(\)_#=~\'"@]*))?'
-    )
-
-    # Unfortunately, these must be copied over to prevent NameError exceptions
-    entityref = sgmllib.entityref
-    incomplete = sgmllib.incomplete
-    interesting = sgmllib.interesting
-    shorttag = sgmllib.shorttag
-    shorttagopen = sgmllib.shorttagopen
-    starttagopen = sgmllib.starttagopen
-
-    class _EndBracketRegEx:
-        def __init__(self):
-            # Overriding the built-in sgmllib.endbracket regex allows the
-            # parser to find angle brackets embedded in element attributes.
-            self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
-        def search(self, target, index=0):
-            match = self.endbracket.match(target, index)
-            if match is not None:
-                # Returning a new object in the calling thread's context
-                # resolves a thread-safety.
-                return EndBracketMatch(match)
-            return None
-    class EndBracketMatch:
-        def __init__(self, match):
-            self.match = match
-        def start(self, n):
-            return self.match.end(n)
-    endbracket = _EndBracketRegEx()
-
-
-# iconv_codec provides support for more character encodings.
-# It's available from http://cjkpython.i18n.org/
-try:
-    import iconv_codec
-except ImportError:
-    pass
-
-# chardet library auto-detects character encodings
-# Download from http://chardet.feedparser.org/
-try:
-    import chardet
-except ImportError:
-    chardet = None
-
-# BeautifulSoup is used to extract microformat content from HTML
-# feedparser is tested using BeautifulSoup 3.2.0
-# http://www.crummy.com/software/BeautifulSoup/
-try:
-    import BeautifulSoup
-except ImportError:
-    BeautifulSoup = None
-    PARSE_MICROFORMATS = False
-
-# ---------- don't touch these ----------
-class ThingsNobodyCaresAboutButMe(Exception): pass
-class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
-class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
-class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
-class UndeclaredNamespace(Exception): pass
-
-SUPPORTED_VERSIONS = {'': u'unknown',
-                      'rss090': u'RSS 0.90',
-                      'rss091n': u'RSS 0.91 (Netscape)',
-                      'rss091u': u'RSS 0.91 (Userland)',
-                      'rss092': u'RSS 0.92',
-                      'rss093': u'RSS 0.93',
-                      'rss094': u'RSS 0.94',
-                      'rss20': u'RSS 2.0',
-                      'rss10': u'RSS 1.0',
-                      'rss': u'RSS (unknown version)',
-                      'atom01': u'Atom 0.1',
-                      'atom02': u'Atom 0.2',
-                      'atom03': u'Atom 0.3',
-                      'atom10': u'Atom 1.0',
-                      'atom': u'Atom (unknown version)',
-                      'cdf': u'CDF',
-                      }
-
-class FeedParserDict(dict):
-    keymap = {'channel': 'feed',
-              'items': 'entries',
-              'guid': 'id',
-              'date': 'updated',
-              'date_parsed': 'updated_parsed',
-              'description': ['summary', 'subtitle'],
-              'description_detail': ['summary_detail', 'subtitle_detail'],
-              'url': ['href'],
-              'modified': 'updated',
-              'modified_parsed': 'updated_parsed',
-              'issued': 'published',
-              'issued_parsed': 'published_parsed',
-              'copyright': 'rights',
-              'copyright_detail': 'rights_detail',
-              'tagline': 'subtitle',
-              'tagline_detail': 'subtitle_detail'}
-    def __getitem__(self, key):
-        if key == 'category':
-            try:
-                return dict.__getitem__(self, 'tags')[0]['term']
-            except IndexError:
-                raise KeyError, "object doesn't have key 'category'"
-        elif key == 'enclosures':
-            norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
-            return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure']
-        elif key == 'license':
-            for link in dict.__getitem__(self, 'links'):
-                if link['rel']==u'license' and 'href' in link:
-                    return link['href']
-        elif key == 'updated':
-            # Temporarily help developers out by keeping the old
-            # broken behavior that was reported in issue 310.
-            # This fix was proposed in issue 328.
-            if not dict.__contains__(self, 'updated') and \
-                dict.__contains__(self, 'published'):
-                warnings.warn("To avoid breaking existing software while "
-                    "fixing issue 310, a temporary mapping has been created "
-                    "from `updated` to `published` if `updated` doesn't "
-                    "exist. This fallback will be removed in a future version "
-                    "of feedparser.", DeprecationWarning)
-                return dict.__getitem__(self, 'published')
-            return dict.__getitem__(self, 'updated')
-        elif key == 'updated_parsed':
-            if not dict.__contains__(self, 'updated_parsed') and \
-                dict.__contains__(self, 'published_parsed'):
-                warnings.warn("To avoid breaking existing software while "
-                    "fixing issue 310, a temporary mapping has been created "
-                    "from `updated_parsed` to `published_parsed` if "
-                    "`updated_parsed` doesn't exist. This fallback will be "
-                    "removed in a future version of feedparser.",
-                    DeprecationWarning)
-                return dict.__getitem__(self, 'published_parsed')
-            return dict.__getitem__(self, 'updated_parsed')
-        else:
-            realkey = self.keymap.get(key, key)
-            if isinstance(realkey, list):
-                for k in realkey:
-                    if dict.__contains__(self, k):
-                        return dict.__getitem__(self, k)
-            elif dict.__contains__(self, realkey):
-                return dict.__getitem__(self, realkey)
-        return dict.__getitem__(self, key)
-
-    def __contains__(self, key):
-        if key in ('updated', 'updated_parsed'):
-            # Temporarily help developers out by keeping the old
-            # broken behavior that was reported in issue 310.
-            # This fix was proposed in issue 328.
-            return dict.__contains__(self, key)
-        try:
-            self.__getitem__(key)
-        except KeyError:
-            return False
-        else:
-            return True
-
-    has_key = __contains__
-
-    def get(self, key, default=None):
-        try:
-            return self.__getitem__(key)
-        except KeyError:
-            return default
-
-    def __setitem__(self, key, value):
-        key = self.keymap.get(key, key)
-        if isinstance(key, list):
-            key = key[0]
-        return dict.__setitem__(self, key, value)
-
-    def setdefault(self, key, value):
-        if key not in self:
-            self[key] = value
-            return value
-        return self[key]
-
-    def __getattr__(self, key):
-        # __getattribute__() is called first; this will be called
-        # only if an attribute was not already found
-        try:
-            return self.__getitem__(key)
-        except KeyError:
-            raise AttributeError, "object has no attribute '%s'" % key
-
-    def __hash__(self):
-        return id(self)
-
-_cp1252 = {
-    128: unichr(8364), # euro sign
-    130: unichr(8218), # single low-9 quotation mark
-    131: unichr( 402), # latin small letter f with hook
-    132: unichr(8222), # double low-9 quotation mark
-    133: unichr(8230), # horizontal ellipsis
-    134: unichr(8224), # dagger
-    135: unichr(8225), # double dagger
-    136: unichr( 710), # modifier letter circumflex accent
-    137: unichr(8240), # per mille sign
-    138: unichr( 352), # latin capital letter s with caron
-    139: unichr(8249), # single left-pointing angle quotation mark
-    140: unichr( 338), # latin capital ligature oe
-    142: unichr( 381), # latin capital letter z with caron
-    145: unichr(8216), # left single quotation mark
-    146: unichr(8217), # right single quotation mark
-    147: unichr(8220), # left double quotation mark
-    148: unichr(8221), # right double quotation mark
-    149: unichr(8226), # bullet
-    150: unichr(8211), # en dash
-    151: unichr(8212), # em dash
-    152: unichr( 732), # small tilde
-    153: unichr(8482), # trade mark sign
-    154: unichr( 353), # latin small letter s with caron
-    155: unichr(8250), # single right-pointing angle quotation mark
-    156: unichr( 339), # latin small ligature oe
-    158: unichr( 382), # latin small letter z with caron
-    159: unichr( 376), # latin capital letter y with diaeresis
-}
-
-_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
-def _urljoin(base, uri):
-    uri = _urifixer.sub(r'\1\3', uri)
-    #try:
-    if not isinstance(uri, unicode):
-        uri = uri.decode('utf-8', 'ignore')
-    uri = urlparse.urljoin(base, uri)
-    if not isinstance(uri, unicode):
-        return uri.decode('utf-8', 'ignore')
-    return uri
-    #except:
-    #    uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
-    #    return urlparse.urljoin(base, uri)
-
-class _FeedParserMixin:
-    namespaces = {
-        '': '',
-        'http://backend.userland.com/rss': '',
-        'http://blogs.law.harvard.edu/tech/rss': '',
-        'http://purl.org/rss/1.0/': '',
-        'http://my.netscape.com/rdf/simple/0.9/': '',
-        'http://example.com/newformat#': '',
-        'http://example.com/necho': '',
-        'http://purl.org/echo/': '',
-        'uri/of/echo/namespace#': '',
-        'http://purl.org/pie/': '',
-        'http://purl.org/atom/ns#': '',
-        'http://www.w3.org/2005/Atom': '',
-        'http://purl.org/rss/1.0/modules/rss091#': '',
-
-        'http://webns.net/mvcb/':                                'admin',
-        'http://purl.org/rss/1.0/modules/aggregation/':          'ag',
-        'http://purl.org/rss/1.0/modules/annotate/':             'annotate',
-        'http://media.tangent.org/rss/1.0/':                     'audio',
-        'http://backend.userland.com/blogChannelModule':         'blogChannel',
-        'http://web.resource.org/cc/':                           'cc',
-        'http://backend.userland.com/creativeCommonsRssModule':  'creativeCommons',
-        'http://purl.org/rss/1.0/modules/company':               'co',
-        'http://purl.org/rss/1.0/modules/content/':              'content',
-        'http://my.theinfo.org/changed/1.0/rss/':                'cp',
-        'http://purl.org/dc/elements/1.1/':                      'dc',
-        'http://purl.org/dc/terms/':                             'dcterms',
-        'http://purl.org/rss/1.0/modules/email/':                'email',
-        'http://purl.org/rss/1.0/modules/event/':                'ev',
-        'http://rssnamespace.org/feedburner/ext/1.0':            'feedburner',
-        'http://freshmeat.net/rss/fm/':                          'fm',
-        'http://xmlns.com/foaf/0.1/':                            'foaf',
-        'http://www.w3.org/2003/01/geo/wgs84_pos#':              'geo',
-        'http://postneo.com/icbm/':                              'icbm',
-        'http://purl.org/rss/1.0/modules/image/':                'image',
-        'http://www.itunes.com/DTDs/PodCast-1.0.dtd':            'itunes',
-        'http://example.com/DTDs/PodCast-1.0.dtd':               'itunes',
-        'http://purl.org/rss/1.0/modules/link/':                 'l',
-        'http://search.yahoo.com/mrss':                          'media',
-        # Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
-        'http://search.yahoo.com/mrss/':                         'media',
-        'http://madskills.com/public/xml/rss/module/pingback/':  'pingback',
-        'http://prismstandard.org/namespaces/1.2/basic/':        'prism',
-        'http://www.w3.org/1999/02/22-rdf-syntax-ns#':           'rdf',
-        'http://www.w3.org/2000/01/rdf-schema#':                 'rdfs',
-        'http://purl.org/rss/1.0/modules/reference/':            'ref',
-        'http://purl.org/rss/1.0/modules/richequiv/':            'reqv',
-        'http://purl.org/rss/1.0/modules/search/':               'search',
-        'http://purl.org/rss/1.0/modules/slash/':                'slash',
-        'http://schemas.xmlsoap.org/soap/envelope/':             'soap',
-        'http://purl.org/rss/1.0/modules/servicestatus/':        'ss',
-        'http://hacks.benhammersley.com/rss/streaming/':         'str',
-        'http://purl.org/rss/1.0/modules/subscription/':         'sub',
-        'http://purl.org/rss/1.0/modules/syndication/':          'sy',
-        'http://schemas.pocketsoap.com/rss/myDescModule/':       'szf',
-        'http://purl.org/rss/1.0/modules/taxonomy/':             'taxo',
-        'http://purl.org/rss/1.0/modules/threading/':            'thr',
-        'http://purl.org/rss/1.0/modules/textinput/':            'ti',
-        'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
-        'http://wellformedweb.org/commentAPI/':                  'wfw',
-        'http://purl.org/rss/1.0/modules/wiki/':                 'wiki',
-        'http://www.w3.org/1999/xhtml':                          'xhtml',
-        'http://www.w3.org/1999/xlink':                          'xlink',
-        'http://www.w3.org/XML/1998/namespace':                  'xml',
-    }
-    _matchnamespaces = {}
-
-    can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
-    can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
-    can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
-    html_types = [u'text/html', u'application/xhtml+xml']
-
-    def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'):
-        if not self._matchnamespaces:
-            for k, v in self.namespaces.items():
-                self._matchnamespaces[k.lower()] = v
-        self.feeddata = FeedParserDict() # feed-level data
-        self.encoding = encoding # character encoding
-        self.entries = [] # list of entry-level data
-        self.version = u'' # feed type/version, see SUPPORTED_VERSIONS
-        self.namespacesInUse = {} # dictionary of namespaces defined by the feed
-
-        # the following are used internally to track state;
-        # this is really out of control and should be refactored
-        self.infeed = 0
-        self.inentry = 0
-        self.incontent = 0
-        self.intextinput = 0
-        self.inimage = 0
-        self.inauthor = 0
-        self.incontributor = 0
-        self.inpublisher = 0
-        self.insource = 0
-        self.sourcedata = FeedParserDict()
-        self.contentparams = FeedParserDict()
-        self._summaryKey = None
-        self.namespacemap = {}
-        self.elementstack = []
-        self.basestack = []
-        self.langstack = []
-        self.baseuri = baseuri or u''
-        self.lang = baselang or None
-        self.svgOK = 0
-        self.title_depth = -1
-        self.depth = 0
-        if baselang:
-            self.feeddata['language'] = baselang.replace('_','-')
-
-        # A map of the following form:
-        #     {
-        #         object_that_value_is_set_on: {
-        #             property_name: depth_of_node_property_was_extracted_from,
-        #             other_property: depth_of_node_property_was_extracted_from,
-        #         },
-        #     }
-        self.property_depth_map = {}
-
-    def _normalize_attributes(self, kv):
-        k = kv[0].lower()
-        v = k in ('rel', 'type') and kv[1].lower() or kv[1]
-        # the sgml parser doesn't handle entities in attributes, nor
-        # does it pass the attribute values through as unicode, while
-        # strict xml parsers do -- account for this difference
-        if isinstance(self, _LooseFeedParser):
-            v = v.replace('&amp;', '&')
-            if not isinstance(v, unicode):
-                v = v.decode('utf-8')
-        return (k, v)
-
-    def unknown_starttag(self, tag, attrs):
-        # increment depth counter
-        self.depth += 1
-
-        # normalize attrs
-        attrs = map(self._normalize_attributes, attrs)
-
-        # track xml:base and xml:lang
-        attrsD = dict(attrs)
-        baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
-        if not isinstance(baseuri, unicode):
-            baseuri = baseuri.decode(self.encoding, 'ignore')
-        # ensure that self.baseuri is always an absolute URI that
-        # uses a whitelisted URI scheme (e.g. not `javscript:`)
-        if self.baseuri:
-            self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
-        else:
-            self.baseuri = _urljoin(self.baseuri, baseuri)
-        lang = attrsD.get('xml:lang', attrsD.get('lang'))
-        if lang == '':
-            # xml:lang could be explicitly set to '', we need to capture that
-            lang = None
-        elif lang is None:
-            # if no xml:lang is specified, use parent lang
-            lang = self.lang
-        if lang:
-            if tag in ('feed', 'rss', 'rdf:RDF'):
-                self.feeddata['language'] = lang.replace('_','-')
-        self.lang = lang
-        self.basestack.append(self.baseuri)
-        self.langstack.append(lang)
-
-        # track namespaces
-        for prefix, uri in attrs:
-            if prefix.startswith('xmlns:'):
-                self.trackNamespace(prefix[6:], uri)
-            elif prefix == 'xmlns':
-                self.trackNamespace(None, uri)
-
-        # track inline content
-        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
-            if tag in ('xhtml:div', 'div'):
-                return # typepad does this 10/2007
-            # element declared itself as escaped markup, but it isn't really
-            self.contentparams['type'] = u'application/xhtml+xml'
-        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
-            if tag.find(':') <> -1:
-                prefix, tag = tag.split(':', 1)
-                namespace = self.namespacesInUse.get(prefix, '')
-                if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
-                    attrs.append(('xmlns',namespace))
-                if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
-                    attrs.append(('xmlns',namespace))
-            if tag == 'svg':
-                self.svgOK += 1
-            return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
-
-        # match namespaces
-        if tag.find(':') <> -1:
-            prefix, suffix = tag.split(':', 1)
-        else:
-            prefix, suffix = '', tag
-        prefix = self.namespacemap.get(prefix, prefix)
-        if prefix:
-            prefix = prefix + '_'
-
-        # special hack for better tracking of empty textinput/image elements in illformed feeds
-        if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
-            self.intextinput = 0
-        if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
-            self.inimage = 0
-
-        # call special handler (if defined) or default handler
-        methodname = '_start_' + prefix + suffix
-        try:
-            method = getattr(self, methodname)
-            return method(attrsD)
-        except AttributeError:
-            # Since there's no handler or something has gone wrong we explicitly add the element and its attributes
-            unknown_tag = prefix + suffix
-            if len(attrsD) == 0:
-                # No attributes so merge it into the encosing dictionary
-                return self.push(unknown_tag, 1)
-            else:
-                # Has attributes so create it in its own dictionary
-                context = self._getContext()
-                context[unknown_tag] = attrsD
-
-    def unknown_endtag(self, tag):
-        # match namespaces
-        if tag.find(':') <> -1:
-            prefix, suffix = tag.split(':', 1)
-        else:
-            prefix, suffix = '', tag
-        prefix = self.namespacemap.get(prefix, prefix)
-        if prefix:
-            prefix = prefix + '_'
-        if suffix == 'svg' and self.svgOK:
-            self.svgOK -= 1
-
-        # call special handler (if defined) or default handler
-        methodname = '_end_' + prefix + suffix
-        try:
-            if self.svgOK:
-                raise AttributeError()
-            method = getattr(self, methodname)
-            method()
-        except AttributeError:
-            self.pop(prefix + suffix)
-
-        # track inline content
-        if self.incontent and not self.contentparams.get('type', u'xml').endswith(u'xml'):
-            # element declared itself as escaped markup, but it isn't really
-            if tag in ('xhtml:div', 'div'):
-                return # typepad does this 10/2007
-            self.contentparams['type'] = u'application/xhtml+xml'
-        if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml':
-            tag = tag.split(':')[-1]
-            self.handle_data('</%s>' % tag, escape=0)
-
-        # track xml:base and xml:lang going out of scope
-        if self.basestack:
-            self.basestack.pop()
-            if self.basestack and self.basestack[-1]:
-                self.baseuri = self.basestack[-1]
-        if self.langstack:
-            self.langstack.pop()
-            if self.langstack: # and (self.langstack[-1] is not None):
-                self.lang = self.langstack[-1]
-
-        self.depth -= 1
-
-    def handle_charref(self, ref):
-        # called for each character reference, e.g. for '&#160;', ref will be '160'
-        if not self.elementstack:
-            return
-        ref = ref.lower()
-        if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
-            text = '&#%s;' % ref
-        else:
-            if ref[0] == 'x':
-                c = int(ref[1:], 16)
-            else:
-                c = int(ref)
-            text = unichr(c).encode('utf-8')
-        self.elementstack[-1][2].append(text)
-
-    def handle_entityref(self, ref):
-        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
-        if not self.elementstack:
-            return
-        if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
-            text = '&%s;' % ref
-        elif ref in self.entities:
-            text = self.entities[ref]
-            if text.startswith('&#') and text.endswith(';'):
-                return self.handle_entityref(text)
-        else:
-            try:
-                name2codepoint[ref]
-            except KeyError:
-                text = '&%s;' % ref
-            else:
-                text = unichr(name2codepoint[ref]).encode('utf-8')
-        self.elementstack[-1][2].append(text)
-
-    def handle_data(self, text, escape=1):
-        # called for each block of plain text, i.e. outside of any tag and
-        # not containing any character or entity references
-        if not self.elementstack:
-            return
-        if escape and self.contentparams.get('type') == u'application/xhtml+xml':
-            text = _xmlescape(text)
-        self.elementstack[-1][2].append(text)
-
-    def handle_comment(self, text):
-        # called for each comment, e.g. <!-- insert message here -->
-        pass
-
-    def handle_pi(self, text):
-        # called for each processing instruction, e.g. <?instruction>
-        pass
-
-    def handle_decl(self, text):
-        pass
-
-    def parse_declaration(self, i):
-        # override internal declaration handler to handle CDATA blocks
-        if self.rawdata[i:i+9] == '<![CDATA[':
-            k = self.rawdata.find(']]>', i)
-            if k == -1:
-                # CDATA block began but didn't finish
-                k = len(self.rawdata)
-                return k
-            self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
-            return k+3
-        else:
-            k = self.rawdata.find('>', i)
-            if k >= 0:
-                return k+1
-            else:
-                # We have an incomplete CDATA block.
-                return k
-
-    def mapContentType(self, contentType):
-        contentType = contentType.lower()
-        if contentType == 'text' or contentType == 'plain':
-            contentType = u'text/plain'
-        elif contentType == 'html':
-            contentType = u'text/html'
-        elif contentType == 'xhtml':
-            contentType = u'application/xhtml+xml'
-        return contentType
-
-    def trackNamespace(self, prefix, uri):
-        loweruri = uri.lower()
-        if not self.version:
-            if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
-                self.version = u'rss090'
-            elif loweruri == 'http://purl.org/rss/1.0/':
-                self.version = u'rss10'
-            elif loweruri == 'http://www.w3.org/2005/atom':
-                self.version = u'atom10'
-        if loweruri.find(u'backend.userland.com/rss') <> -1:
-            # match any backend.userland.com namespace
-            uri = u'http://backend.userland.com/rss'
-            loweruri = uri
-        if loweruri in self._matchnamespaces:
-            self.namespacemap[prefix] = self._matchnamespaces[loweruri]
-            self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
-        else:
-            self.namespacesInUse[prefix or ''] = uri
-
-    def resolveURI(self, uri):
-        return _urljoin(self.baseuri or u'', uri)
-
-    def decodeEntities(self, element, data):
-        return data
-
-    def strattrs(self, attrs):
-        return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
-
-    def push(self, element, expectingText):
-        self.elementstack.append([element, expectingText, []])
-
-    def pop(self, element, stripWhitespace=1):
-        if not self.elementstack:
-            return
-        if self.elementstack[-1][0] != element:
-            return
-
-        element, expectingText, pieces = self.elementstack.pop()
-
-        if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml':
-            # remove enclosing child element, but only if it is a <div> and
-            # only if all the remaining content is nested underneath it.
-            # This means that the divs would be retained in the following:
-            #    <div>foo</div><div>bar</div>
-            while pieces and len(pieces)>1 and not pieces[-1].strip():
-                del pieces[-1]
-            while pieces and len(pieces)>1 and not pieces[0].strip():
-                del pieces[0]
-            if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
-                depth = 0
-                for piece in pieces[:-1]:
-                    if piece.startswith('</'):
-                        depth -= 1
-                        if depth == 0:
-                            break
-                    elif piece.startswith('<') and not piece.endswith('/>'):
-                        depth += 1
-                else:
-                    pieces = pieces[1:-1]
-
-        # Ensure each piece is a str for Python 3
-        for (i, v) in enumerate(pieces):
-            if not isinstance(v, unicode):
-                pieces[i] = v.decode('utf-8')
-
-        output = u''.join(pieces)
-        if stripWhitespace:
-            output = output.strip()
-        if not expectingText:
-            return output
-
-        # decode base64 content
-        if base64 and self.contentparams.get('base64', 0):
-            try:
-                output = _base64decode(output)
-            except binascii.Error:
-                pass
-            except binascii.Incomplete:
-                pass
-            except TypeError:
-                # In Python 3, base64 takes and outputs bytes, not str
-                # This may not be the most correct way to accomplish this
-                output = _base64decode(output.encode('utf-8')).decode('utf-8')
-
-        # resolve relative URIs
-        if (element in self.can_be_relative_uri) and output:
-            output = self.resolveURI(output)
-
-        # decode entities within embedded markup
-        if not self.contentparams.get('base64', 0):
-            output = self.decodeEntities(element, output)
-
-        # some feed formats require consumers to guess
-        # whether the content is html or plain text
-        if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain':
-            if self.lookslikehtml(output):
-                self.contentparams['type'] = u'text/html'
-
-        # remove temporary cruft from contentparams
-        try:
-            del self.contentparams['mode']
-        except KeyError:
-            pass
-        try:
-            del self.contentparams['base64']
-        except KeyError:
-            pass
-
-        is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types
-        # resolve relative URIs within embedded markup
-        if is_htmlish and RESOLVE_RELATIVE_URIS:
-            if element in self.can_contain_relative_uris:
-                output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html'))
-
-        # parse microformats
-        # (must do this before sanitizing because some microformats
-        # rely on elements that we sanitize)
-        if PARSE_MICROFORMATS and is_htmlish and element in ['content', 'description', 'summary']:
-            mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
-            if mfresults:
-                for tag in mfresults.get('tags', []):
-                    self._addTag(tag['term'], tag['scheme'], tag['label'])
-                for enclosure in mfresults.get('enclosures', []):
-                    self._start_enclosure(enclosure)
-                for xfn in mfresults.get('xfn', []):
-                    self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
-                vcard = mfresults.get('vcard')
-                if vcard:
-                    self._getContext()['vcard'] = vcard
-
-        # sanitize embedded markup
-        if is_htmlish and SANITIZE_HTML:
-            if element in self.can_contain_dangerous_markup:
-                output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html'))
-
-        if self.encoding and not isinstance(output, unicode):
-            output = output.decode(self.encoding, 'ignore')
-
-        # address common error where people take data that is already
-        # utf-8, presume that it is iso-8859-1, and re-encode it.
-        if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode):
-            try:
-                output = output.encode('iso-8859-1').decode('utf-8')
-            except (UnicodeEncodeError, UnicodeDecodeError):
-                pass
-
-        # map win-1252 extensions to the proper code points
-        if isinstance(output, unicode):
-            output = output.translate(_cp1252)
-
-        # categories/tags/keywords/whatever are handled in _end_category
-        if element == 'category':
-            return output
-
-        if element == 'title' and -1 < self.title_depth <= self.depth:
-            return output
-
-        # store output in appropriate place(s)
-        if self.inentry and not self.insource:
-            if element == 'content':
-                self.entries[-1].setdefault(element, [])
-                contentparams = copy.deepcopy(self.contentparams)
-                contentparams['value'] = output
-                self.entries[-1][element].append(contentparams)
-            elif element == 'link':
-                if not self.inimage:
-                    # query variables in urls in link elements are improperly
-                    # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
-                    # unhandled character references. fix this special case.
-                    output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
-                    self.entries[-1][element] = output
-                    if output:
-                        self.entries[-1]['links'][-1]['href'] = output
-            else:
-                if element == 'description':
-                    element = 'summary'
-                old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
-                if old_value_depth is None or self.depth <= old_value_depth:
-                    self.property_depth_map[self.entries[-1]][element] = self.depth
-                    self.entries[-1][element] = output
-                if self.incontent:
-                    contentparams = copy.deepcopy(self.contentparams)
-                    contentparams['value'] = output
-                    self.entries[-1][element + '_detail'] = contentparams
-        elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
-            context = self._getContext()
-            if element == 'description':
-                element = 'subtitle'
-            context[element] = output
-            if element == 'link':
-                # fix query variables; see above for the explanation
-                output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
-                context[element] = output
-                context['links'][-1]['href'] = output
-            elif self.incontent:
-                contentparams = copy.deepcopy(self.contentparams)
-                contentparams['value'] = output
-                context[element + '_detail'] = contentparams
-        return output
-
-    def pushContent(self, tag, attrsD, defaultContentType, expectingText):
-        self.incontent += 1
-        if self.lang:
-            self.lang=self.lang.replace('_','-')
-        self.contentparams = FeedParserDict({
-            'type': self.mapContentType(attrsD.get('type', defaultContentType)),
-            'language': self.lang,
-            'base': self.baseuri})
-        self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
-        self.push(tag, expectingText)
-
-    def popContent(self, tag):
-        value = self.pop(tag)
-        self.incontent -= 1
-        self.contentparams.clear()
-        return value
-
-    # a number of elements in a number of RSS variants are nominally plain
-    # text, but this is routinely ignored.  This is an attempt to detect
-    # the most common cases.  As false positives often result in silent
-    # data loss, this function errs on the conservative side.
-    @staticmethod
-    def lookslikehtml(s):
-        # must have a close tag or an entity reference to qualify
-        if not (re.search(r'</(\w+)>',s) or re.search("&#?\w+;",s)):
-            return
-
-        # all tags must be in a restricted subset of valid HTML tags
-        if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
-            re.findall(r'</?(\w+)',s)):
-            return
-
-        # all entities must have been defined as valid HTML entities
-        if filter(lambda e: e not in entitydefs.keys(), re.findall(r'&(\w+);', s)):
-            return
-
-        return 1
-
-    def _mapToStandardPrefix(self, name):
-        colonpos = name.find(':')
-        if colonpos <> -1:
-            prefix = name[:colonpos]
-            suffix = name[colonpos+1:]
-            prefix = self.namespacemap.get(prefix, prefix)
-            name = prefix + ':' + suffix
-        return name
-
-    def _getAttribute(self, attrsD, name):
-        return attrsD.get(self._mapToStandardPrefix(name))
-
-    def _isBase64(self, attrsD, contentparams):
-        if attrsD.get('mode', '') == 'base64':
-            return 1
-        if self.contentparams['type'].startswith(u'text/'):
-            return 0
-        if self.contentparams['type'].endswith(u'+xml'):
-            return 0
-        if self.contentparams['type'].endswith(u'/xml'):
-            return 0
-        return 1
-
-    def _itsAnHrefDamnIt(self, attrsD):
-        href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
-        if href:
-            try:
-                del attrsD['url']
-            except KeyError:
-                pass
-            try:
-                del attrsD['uri']
-            except KeyError:
-                pass
-            attrsD['href'] = href
-        return attrsD
-
-    def _save(self, key, value, overwrite=False):
-        context = self._getContext()
-        if overwrite:
-            context[key] = value
-        else:
-            context.setdefault(key, value)
-
-    def _start_rss(self, attrsD):
-        versionmap = {'0.91': u'rss091u',
-                      '0.92': u'rss092',
-                      '0.93': u'rss093',
-                      '0.94': u'rss094'}
-        #If we're here then this is an RSS feed.
-        #If we don't have a version or have a version that starts with something
-        #other than RSS then there's been a mistake. Correct it.
-        if not self.version or not self.version.startswith(u'rss'):
-            attr_version = attrsD.get('version', '')
-            version = versionmap.get(attr_version)
-            if version:
-                self.version = version
-            elif attr_version.startswith('2.'):
-                self.version = u'rss20'
-            else:
-                self.version = u'rss'
-
-    def _start_channel(self, attrsD):
-        self.infeed = 1
-        self._cdf_common(attrsD)
-
-    def _cdf_common(self, attrsD):
-        if 'lastmod' in attrsD:
-            self._start_modified({})
-            self.elementstack[-1][-1] = attrsD['lastmod']
-            self._end_modified()
-        if 'href' in attrsD:
-            self._start_link({})
-            self.elementstack[-1][-1] = attrsD['href']
-            self._end_link()
-
-    def _start_feed(self, attrsD):
-        self.infeed = 1
-        versionmap = {'0.1': u'atom01',
-                      '0.2': u'atom02',
-                      '0.3': u'atom03'}
-        if not self.version:
-            attr_version = attrsD.get('version')
-            version = versionmap.get(attr_version)
-            if version:
-                self.version = version
-            else:
-                self.version = u'atom'
-
-    def _end_channel(self):
-        self.infeed = 0
-    _end_feed = _end_channel
-
-    def _start_image(self, attrsD):
-        context = self._getContext()
-        if not self.inentry:
-            context.setdefault('image', FeedParserDict())
-        self.inimage = 1
-        self.title_depth = -1
-        self.push('image', 0)
-
-    def _end_image(self):
-        self.pop('image')
-        self.inimage = 0
-
-    def _start_textinput(self, attrsD):
-        context = self._getContext()
-        context.setdefault('textinput', FeedParserDict())
-        self.intextinput = 1
-        self.title_depth = -1
-        self.push('textinput', 0)
-    _start_textInput = _start_textinput
-
-    def _end_textinput(self):
-        self.pop('textinput')
-        self.intextinput = 0
-    _end_textInput = _end_textinput
-
-    def _start_author(self, attrsD):
-        self.inauthor = 1
-        self.push('author', 1)
-        # Append a new FeedParserDict when expecting an author
-        context = self._getContext()
-        context.setdefault('authors', [])
-        context['authors'].append(FeedParserDict())
-    _start_managingeditor = _start_author
-    _start_dc_author = _start_author
-    _start_dc_creator = _start_author
-    _start_itunes_author = _start_author
-
-    def _end_author(self):
-        self.pop('author')
-        self.inauthor = 0
-        self._sync_author_detail()
-    _end_managingeditor = _end_author
-    _end_dc_author = _end_author
-    _end_dc_creator = _end_author
-    _end_itunes_author = _end_author
-
-    def _start_itunes_owner(self, attrsD):
-        self.inpublisher = 1
-        self.push('publisher', 0)
-
-    def _end_itunes_owner(self):
-        self.pop('publisher')
-        self.inpublisher = 0
-        self._sync_author_detail('publisher')
-
-    def _start_contributor(self, attrsD):
-        self.incontributor = 1
-        context = self._getContext()
-        context.setdefault('contributors', [])
-        context['contributors'].append(FeedParserDict())
-        self.push('contributor', 0)
-
-    def _end_contributor(self):
-        self.pop('contributor')
-        self.incontributor = 0
-
-    def _start_dc_contributor(self, attrsD):
-        self.incontributor = 1
-        context = self._getContext()
-        context.setdefault('contributors', [])
-        context['contributors'].append(FeedParserDict())
-        self.push('name', 0)
-
-    def _end_dc_contributor(self):
-        self._end_name()
-        self.incontributor = 0
-
-    def _start_name(self, attrsD):
-        self.push('name', 0)
-    _start_itunes_name = _start_name
-
-    def _end_name(self):
-        value = self.pop('name')
-        if self.inpublisher:
-            self._save_author('name', value, 'publisher')
-        elif self.inauthor:
-            self._save_author('name', value)
-        elif self.incontributor:
-            self._save_contributor('name', value)
-        elif self.intextinput:
-            context = self._getContext()
-            context['name'] = value
-    _end_itunes_name = _end_name
-
-    def _start_width(self, attrsD):
-        self.push('width', 0)
-
-    def _end_width(self):
-        value = self.pop('width')
-        try:
-            value = int(value)
-        except ValueError:
-            value = 0
-        if self.inimage:
-            context = self._getContext()
-            context['width'] = value
-
-    def _start_height(self, attrsD):
-        self.push('height', 0)
-
-    def _end_height(self):
-        value = self.pop('height')
-        try:
-            value = int(value)
-        except ValueError:
-            value = 0
-        if self.inimage:
-            context = self._getContext()
-            context['height'] = value
-
-    def _start_url(self, attrsD):
-        self.push('href', 1)
-    _start_homepage = _start_url
-    _start_uri = _start_url
-
-    def _end_url(self):
-        value = self.pop('href')
-        if self.inauthor:
-            self._save_author('href', value)
-        elif self.incontributor:
-            self._save_contributor('href', value)
-    _end_homepage = _end_url
-    _end_uri = _end_url
-
-    def _start_email(self, attrsD):
-        self.push('email', 0)
-    _start_itunes_email = _start_email
-
-    def _end_email(self):
-        value = self.pop('email')
-        if self.inpublisher:
-            self._save_author('email', value, 'publisher')
-        elif self.inauthor:
-            self._save_author('email', value)
-        elif self.incontributor:
-            self._save_contributor('email', value)
-    _end_itunes_email = _end_email
-
-    def _getContext(self):
-        if self.insource:
-            context = self.sourcedata
-        elif self.inimage and 'image' in self.feeddata:
-            context = self.feeddata['image']
-        elif self.intextinput:
-            context = self.feeddata['textinput']
-        elif self.inentry:
-            context = self.entries[-1]
-        else:
-            context = self.feeddata
-        return context
-
-    def _save_author(self, key, value, prefix='author'):
-        context = self._getContext()
-        context.setdefault(prefix + '_detail', FeedParserDict())
-        context[prefix + '_detail'][key] = value
-        self._sync_author_detail()
-        context.setdefault('authors', [FeedParserDict()])
-        context['authors'][-1][key] = value
-
-    def _save_contributor(self, key, value):
-        context = self._getContext()
-        context.setdefault('contributors', [FeedParserDict()])
-        context['contributors'][-1][key] = value
-
-    def _sync_author_detail(self, key='author'):
-        context = self._getContext()
-        detail = context.get('%s_detail' % key)
-        if detail:
-            name = detail.get('name')
-            email = detail.get('email')
-            if name and email:
-                context[key] = u'%s (%s)' % (name, email)
-            elif name:
-                context[key] = name
-            elif email:
-                context[key] = email
-        else:
-            author, email = context.get(key), None
-            if not author:
-                return
-            emailmatch = re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
-            if emailmatch:
-                email = emailmatch.group(0)
-                # probably a better way to do the following, but it passes all the tests
-                author = author.replace(email, u'')
-                author = author.replace(u'()', u'')
-                author = author.replace(u'<>', u'')
-                author = author.replace(u'&lt;&gt;', u'')
-                author = author.strip()
-                if author and (author[0] == u'('):
-                    author = author[1:]
-                if author and (author[-1] == u')'):
-                    author = author[:-1]
-                author = author.strip()
-            if author or email:
-                context.setdefault('%s_detail' % key, FeedParserDict())
-            if author:
-                context['%s_detail' % key]['name'] = author
-            if email:
-                context['%s_detail' % key]['email'] = email
-
-    def _start_subtitle(self, attrsD):
-        self.pushContent('subtitle', attrsD, u'text/plain', 1)
-    _start_tagline = _start_subtitle
-    _start_itunes_subtitle = _start_subtitle
-
-    def _end_subtitle(self):
-        self.popContent('subtitle')
-    _end_tagline = _end_subtitle
-    _end_itunes_subtitle = _end_subtitle
-
-    def _start_rights(self, attrsD):
-        self.pushContent('rights', attrsD, u'text/plain', 1)
-    _start_dc_rights = _start_rights
-    _start_copyright = _start_rights
-
-    def _end_rights(self):
-        self.popContent('rights')
-    _end_dc_rights = _end_rights
-    _end_copyright = _end_rights
-
-    def _start_item(self, attrsD):
-        self.entries.append(FeedParserDict())
-        self.push('item', 0)
-        self.inentry = 1
-        self.guidislink = 0
-        self.title_depth = -1
-        id = self._getAttribute(attrsD, 'rdf:about')
-        if id:
-            context = self._getContext()
-            context['id'] = id
-        self._cdf_common(attrsD)
-    _start_entry = _start_item
-
-    def _end_item(self):
-        self.pop('item')
-        self.inentry = 0
-    _end_entry = _end_item
-
-    def _start_dc_language(self, attrsD):
-        self.push('language', 1)
-    _start_language = _start_dc_language
-
-    def _end_dc_language(self):
-        self.lang = self.pop('language')
-    _end_language = _end_dc_language
-
-    def _start_dc_publisher(self, attrsD):
-        self.push('publisher', 1)
-    _start_webmaster = _start_dc_publisher
-
-    def _end_dc_publisher(self):
-        self.pop('publisher')
-        self._sync_author_detail('publisher')
-    _end_webmaster = _end_dc_publisher
-
-    def _start_published(self, attrsD):
-        self.push('published', 1)
-    _start_dcterms_issued = _start_published
-    _start_issued = _start_published
-    _start_pubdate = _start_published
-
-    def _end_published(self):
-        value = self.pop('published')
-        self._save('published_parsed', _parse_date(value), overwrite=True)
-    _end_dcterms_issued = _end_published
-    _end_issued = _end_published
-    _end_pubdate = _end_published
-
-    def _start_updated(self, attrsD):
-        self.push('updated', 1)
-    _start_modified = _start_updated
-    _start_dcterms_modified = _start_updated
-    _start_dc_date = _start_updated
-    _start_lastbuilddate = _start_updated
-
-    def _end_updated(self):
-        value = self.pop('updated')
-        parsed_value = _parse_date(value)
-        self._save('updated_parsed', parsed_value, overwrite=True)
-    _end_modified = _end_updated
-    _end_dcterms_modified = _end_updated
-    _end_dc_date = _end_updated
-    _end_lastbuilddate = _end_updated
-
-    def _start_created(self, attrsD):
-        self.push('created', 1)
-    _start_dcterms_created = _start_created
-
-    def _end_created(self):
-        value = self.pop('created')
-        self._save('created_parsed', _parse_date(value), overwrite=True)
-    _end_dcterms_created = _end_created
-
-    def _start_expirationdate(self, attrsD):
-        self.push('expired', 1)
-
-    def _end_expirationdate(self):
-        self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True)
-
-    def _start_cc_license(self, attrsD):
-        context = self._getContext()
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        attrsD = FeedParserDict()
-        attrsD['rel'] = u'license'
-        if value:
-            attrsD['href']=value
-        context.setdefault('links', []).append(attrsD)
-
-    def _start_creativecommons_license(self, attrsD):
-        self.push('license', 1)
-    _start_creativeCommons_license = _start_creativecommons_license
-
-    def _end_creativecommons_license(self):
-        value = self.pop('license')
-        context = self._getContext()
-        attrsD = FeedParserDict()
-        attrsD['rel'] = u'license'
-        if value:
-            attrsD['href'] = value
-        context.setdefault('links', []).append(attrsD)
-        del context['license']
-    _end_creativeCommons_license = _end_creativecommons_license
-
-    def _addXFN(self, relationships, href, name):
-        context = self._getContext()
-        xfn = context.setdefault('xfn', [])
-        value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
-        if value not in xfn:
-            xfn.append(value)
-
-    def _addTag(self, term, scheme, label):
-        context = self._getContext()
-        tags = context.setdefault('tags', [])
-        if (not term) and (not scheme) and (not label):
-            return
-        value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
-        if value not in tags:
-            tags.append(value)
-
-    def _start_category(self, attrsD):
-        term = attrsD.get('term')
-        scheme = attrsD.get('scheme', attrsD.get('domain'))
-        label = attrsD.get('label')
-        self._addTag(term, scheme, label)
-        self.push('category', 1)
-    _start_dc_subject = _start_category
-    _start_keywords = _start_category
-
-    def _start_media_category(self, attrsD):
-        attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema')
-        self._start_category(attrsD)
-
-    def _end_itunes_keywords(self):
-        for term in self.pop('itunes_keywords').split(','):
-            if term.strip():
-                self._addTag(term.strip(), u'http://www.itunes.com/', None)
-
-    def _start_itunes_category(self, attrsD):
-        self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None)
-        self.push('category', 1)
-
-    def _end_category(self):
-        value = self.pop('category')
-        if not value:
-            return
-        context = self._getContext()
-        tags = context['tags']
-        if value and len(tags) and not tags[-1]['term']:
-            tags[-1]['term'] = value
-        else:
-            self._addTag(value, None, None)
-    _end_dc_subject = _end_category
-    _end_keywords = _end_category
-    _end_itunes_category = _end_category
-    _end_media_category = _end_category
-
-    def _start_cloud(self, attrsD):
-        self._getContext()['cloud'] = FeedParserDict(attrsD)
-
-    def _start_link(self, attrsD):
-        attrsD.setdefault('rel', u'alternate')
-        if attrsD['rel'] == u'self':
-            attrsD.setdefault('type', u'application/atom+xml')
-        else:
-            attrsD.setdefault('type', u'text/html')
-        context = self._getContext()
-        attrsD = self._itsAnHrefDamnIt(attrsD)
-        if 'href' in attrsD:
-            attrsD['href'] = self.resolveURI(attrsD['href'])
-        expectingText = self.infeed or self.inentry or self.insource
-        context.setdefault('links', [])
-        if not (self.inentry and self.inimage):
-            context['links'].append(FeedParserDict(attrsD))
-        if 'href' in attrsD:
-            expectingText = 0
-            if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
-                context['link'] = attrsD['href']
-        else:
-            self.push('link', expectingText)
-
-    def _end_link(self):
-        value = self.pop('link')
-
-    def _start_guid(self, attrsD):
-        self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
-        self.push('id', 1)
-    _start_id = _start_guid
-
-    def _end_guid(self):
-        value = self.pop('id')
-        self._save('guidislink', self.guidislink and 'link' not in self._getContext())
-        if self.guidislink:
-            # guid acts as link, but only if 'ispermalink' is not present or is 'true',
-            # and only if the item doesn't already have a link element
-            self._save('link', value)
-    _end_id = _end_guid
-
-    def _start_title(self, attrsD):
-        if self.svgOK:
-            return self.unknown_starttag('title', attrsD.items())
-        self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
-    _start_dc_title = _start_title
-    _start_media_title = _start_title
-
-    def _end_title(self):
-        if self.svgOK:
-            return
-        value = self.popContent('title')
-        if not value:
-            return
-        self.title_depth = self.depth
-    _end_dc_title = _end_title
-
-    def _end_media_title(self):
-        title_depth = self.title_depth
-        self._end_title()
-        self.title_depth = title_depth
-
-    def _start_description(self, attrsD):
-        context = self._getContext()
-        if 'summary' in context:
-            self._summaryKey = 'content'
-            self._start_content(attrsD)
-        else:
-            self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource)
-    _start_dc_description = _start_description
-
-    def _start_abstract(self, attrsD):
-        self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource)
-
-    def _end_description(self):
-        if self._summaryKey == 'content':
-            self._end_content()
-        else:
-            value = self.popContent('description')
-        self._summaryKey = None
-    _end_abstract = _end_description
-    _end_dc_description = _end_description
-
-    def _start_info(self, attrsD):
-        self.pushContent('info', attrsD, u'text/plain', 1)
-    _start_feedburner_browserfriendly = _start_info
-
-    def _end_info(self):
-        self.popContent('info')
-    _end_feedburner_browserfriendly = _end_info
-
-    def _start_generator(self, attrsD):
-        if attrsD:
-            attrsD = self._itsAnHrefDamnIt(attrsD)
-            if 'href' in attrsD:
-                attrsD['href'] = self.resolveURI(attrsD['href'])
-        self._getContext()['generator_detail'] = FeedParserDict(attrsD)
-        self.push('generator', 1)
-
-    def _end_generator(self):
-        value = self.pop('generator')
-        context = self._getContext()
-        if 'generator_detail' in context:
-            context['generator_detail']['name'] = value
-
-    def _start_admin_generatoragent(self, attrsD):
-        self.push('generator', 1)
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        if value:
-            self.elementstack[-1][2].append(value)
-        self.pop('generator')
-        self._getContext()['generator_detail'] = FeedParserDict({'href': value})
-
-    def _start_admin_errorreportsto(self, attrsD):
-        self.push('errorreportsto', 1)
-        value = self._getAttribute(attrsD, 'rdf:resource')
-        if value:
-            self.elementstack[-1][2].append(value)
-        self.pop('errorreportsto')
-
-    def _start_summary(self, attrsD):
-        context = self._getContext()
-        if 'summary' in context:
-            self._summaryKey = 'content'
-            self._start_content(attrsD)
-        else:
-            self._summaryKey = 'summary'
-            self.pushContent(self._summaryKey, attrsD, u'text/plain', 1)
-    _start_itunes_summary = _start_summary
-
-    def _end_summary(self):
-        if self._summaryKey == 'content':
-            self._end_content()
-        else:
-            self.popContent(self._summaryKey or 'summary')
-        self._summaryKey = None
-    _end_itunes_summary = _end_summary
-
-    def _start_enclosure(self, attrsD):
-        attrsD = self._itsAnHrefDamnIt(attrsD)
-        context = self._getContext()
-        attrsD['rel'] = u'enclosure'
-        context.setdefault('links', []).append(FeedParserDict(attrsD))
-
-    def _start_source(self, attrsD):
-        if 'url' in attrsD:
-            # This means that we're processing a source element from an RSS 2.0 feed
-            self.sourcedata['href'] = attrsD[u'url']
-        self.push('source', 1)
-        self.insource = 1
-        self.title_depth = -1
-
-    def _end_source(self):
-        self.insource = 0
-        value = self.pop('source')
-        if value:
-            self.sourcedata['title'] = value
-        self._getContext()['source'] = copy.deepcopy(self.sourcedata)
-        self.sourcedata.clear()
-
-    def _start_content(self, attrsD):
-        self.pushContent('content', attrsD, u'text/plain', 1)
-        src = attrsD.get('src')
-        if src:
-            self.contentparams['src'] = src
-        self.push('content', 1)
-
-    def _start_body(self, attrsD):
-        self.pushContent('content', attrsD, u'application/xhtml+xml', 1)
-    _start_xhtml_body = _start_body
-
-    def _start_content_encoded(self, attrsD):
-        self.pushContent('content', attrsD, u'text/html', 1)
-    _start_fullitem = _start_content_encoded
-
-    def _end_content(self):
-        copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types)
-        value = self.popContent('content')
-        if copyToSummary:
-            self._save('summary', value)
-
-    _end_body = _end_content
-    _end_xhtml_body = _end_content
-    _end_content_encoded = _end_content
-    _end_fullitem = _end_content
-
-    def _start_itunes_image(self, attrsD):
-        self.push('itunes_image', 0)
-        if attrsD.get('href'):
-            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
-        elif attrsD.get('url'):
-            self._getContext()['image'] = FeedParserDict({'href': attrsD.get('url')})
-    _start_itunes_link = _start_itunes_image
-
-    def _end_itunes_block(self):
-        value = self.pop('itunes_block', 0)
-        self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
-
-    def _end_itunes_explicit(self):
-        value = self.pop('itunes_explicit', 0)
-        # Convert 'yes' -> True, 'clean' to False, and any other value to None
-        # False and None both evaluate as False, so the difference can be ignored
-        # by applications that only need to know if the content is explicit.
-        self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0]
-
-    def _start_media_content(self, attrsD):
-        context = self._getContext()
-        context.setdefault('media_content', [])
-        context['media_content'].append(attrsD)
-
-    def _start_media_thumbnail(self, attrsD):
-        context = self._getContext()
-        context.setdefault('media_thumbnail', [])
-        self.push('url', 1) # new
-        context['media_thumbnail'].append(attrsD)
-
-    def _end_media_thumbnail(self):
-        url = self.pop('url')
-        context = self._getContext()
-        if url != None and len(url.strip()) != 0:
-            if 'url' not in context['media_thumbnail'][-1]:
-                context['media_thumbnail'][-1]['url'] = url
-
-    def _start_media_player(self, attrsD):
-        self.push('media_player', 0)
-        self._getContext()['media_player'] = FeedParserDict(attrsD)
-
-    def _end_media_player(self):
-        value = self.pop('media_player')
-        context = self._getContext()
-        context['media_player']['content'] = value
-
-    def _start_newlocation(self, attrsD):
-        self.push('newlocation', 1)
-
-    def _end_newlocation(self):
-        url = self.pop('newlocation')
-        context = self._getContext()
-        # don't set newlocation if the context isn't right
-        if context is not self.feeddata:
-            return
-        context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip())
-
-if _XML_AVAILABLE:
-    class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
-        def __init__(self, baseuri, baselang, encoding):
-            xml.sax.handler.ContentHandler.__init__(self)
-            _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
-            self.bozo = 0
-            self.exc = None
-            self.decls = {}
-
-        def startPrefixMapping(self, prefix, uri):
-            if not uri:
-                return
-            # Jython uses '' instead of None; standardize on None
-            prefix = prefix or None
-            self.trackNamespace(prefix, uri)
-            if prefix and uri == 'http://www.w3.org/1999/xlink':
-                self.decls['xmlns:' + prefix] = uri
-
-        def startElementNS(self, name, qname, attrs):
-            namespace, localname = name
-            lowernamespace = str(namespace or '').lower()
-            if lowernamespace.find(u'backend.userland.com/rss') <> -1:
-                # match any backend.userland.com namespace
-                namespace = u'http://backend.userland.com/rss'
-                lowernamespace = namespace
-            if qname and qname.find(':') > 0:
-                givenprefix = qname.split(':')[0]
-            else:
-                givenprefix = None
-            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
-            if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and givenprefix not in self.namespacesInUse:
-                raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
-            localname = str(localname).lower()
-
-            # qname implementation is horribly broken in Python 2.1 (it
-            # doesn't report any), and slightly broken in Python 2.2 (it
-            # doesn't report the xml: namespace). So we match up namespaces
-            # with a known list first, and then possibly override them with
-            # the qnames the SAX parser gives us (if indeed it gives us any
-            # at all).  Thanks to MatejC for helping me test this and
-            # tirelessly telling me that it didn't work yet.
-            attrsD, self.decls = self.decls, {}
-            if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
-                attrsD['xmlns']=namespace
-            if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
-                attrsD['xmlns']=namespace
-
-            if prefix:
-                localname = prefix.lower() + ':' + localname
-            elif namespace and not qname: #Expat
-                for name,value in self.namespacesInUse.items():
-                    if name and value == namespace:
-                        localname = name + ':' + localname
-                        break
-
-            for (namespace, attrlocalname), attrvalue in attrs.items():
-                lowernamespace = (namespace or '').lower()
-                prefix = self._matchnamespaces.get(lowernamespace, '')
-                if prefix:
-                    attrlocalname = prefix + ':' + attrlocalname
-                attrsD[str(attrlocalname).lower()] = attrvalue
-            for qname in attrs.getQNames():
-                attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
-            self.unknown_starttag(localname, attrsD.items())
-
-        def characters(self, text):
-            self.handle_data(text)
-
-        def endElementNS(self, name, qname):
-            namespace, localname = name
-            lowernamespace = str(namespace or '').lower()
-            if qname and qname.find(':') > 0:
-                givenprefix = qname.split(':')[0]
-            else:
-                givenprefix = ''
-            prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
-            if prefix:
-                localname = prefix + ':' + localname
-            elif namespace and not qname: #Expat
-                for name,value in self.namespacesInUse.items():
-                    if name and value == namespace:
-                        localname = name + ':' + localname
-                        break
-            localname = str(localname).lower()
-            self.unknown_endtag(localname)
-
-        def error(self, exc):
-            self.bozo = 1
-            self.exc = exc
-
-        # drv_libxml2 calls warning() in some cases
-        warning = error
-
-        def fatalError(self, exc):
-            self.error(exc)
-            raise exc
-
-class _BaseHTMLProcessor(sgmllib.SGMLParser):
-    special = re.compile('''[<>'"]''')
-    bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
-    elements_no_end_tag = set([
-      'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
-      'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param',
-      'source', 'track', 'wbr'
-    ])
-
-    def __init__(self, encoding, _type):
-        self.encoding = encoding
-        self._type = _type
-        sgmllib.SGMLParser.__init__(self)
-
-    def reset(self):
-        self.pieces = []
-        sgmllib.SGMLParser.reset(self)
-
-    def _shorttag_replace(self, match):
-        tag = match.group(1)
-        if tag in self.elements_no_end_tag:
-            return '<' + tag + ' />'
-        else:
-            return '<' + tag + '></' + tag + '>'
-
-    # By declaring these methods and overriding their compiled code
-    # with the code from sgmllib, the original code will execute in
-    # feedparser's scope instead of sgmllib's. This means that the
-    # `tagfind` and `charref` regular expressions will be found as
-    # they're declared above, not as they're declared in sgmllib.
-    def goahead(self, i):
-        pass
-    goahead.func_code = sgmllib.SGMLParser.goahead.func_code
-
-    def __parse_starttag(self, i):
-        pass
-    __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code
-
-    def parse_starttag(self,i):
-        j = self.__parse_starttag(i)
-        if self._type == 'application/xhtml+xml':
-            if j>2 and self.rawdata[j-2:j]=='/>':
-                self.unknown_endtag(self.lasttag)
-        return j
-
-    def feed(self, data):
-        data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
-        data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data)
-        data = data.replace('&#39;', "'")
-        data = data.replace('&#34;', '"')
-        try:
-            bytes
-            if bytes is str:
-                raise NameError
-            self.encoding = self.encoding + u'_INVALID_PYTHON_3'
-        except NameError:
-            if self.encoding and isinstance(data, unicode):
-                data = data.encode(self.encoding)
-        sgmllib.SGMLParser.feed(self, data)
-        sgmllib.SGMLParser.close(self)
-
-    def normalize_attrs(self, attrs):
-        if not attrs:
-            return attrs
-        # utility method to be called by descendants
-        attrs = dict([(k.lower(), v) for k, v in attrs]).items()
-        attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
-        attrs.sort()
-        return attrs
-
-    def unknown_starttag(self, tag, attrs):
-        # called for each start tag
-        # attrs is a list of (attr, value) tuples
-        # e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
-        uattrs = []
-        strattrs=''
-        if attrs:
-            for key, value in attrs:
-                value=value.replace('>','&gt;').replace('<','&lt;').replace('"','&quot;')
-                value = self.bare_ampersand.sub("&amp;", value)
-                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
-                if not isinstance(value, unicode):
-                    value = value.decode(self.encoding, 'ignore')
-                try:
-                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
-                    uattrs.append((unicode(key, self.encoding), value))
-                except TypeError:
-                    uattrs.append((key, value))
-            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
-            if self.encoding:
-                try:
-                    strattrs = strattrs.encode(self.encoding)
-                except (UnicodeEncodeError, LookupError):
-                    pass
-        if tag in self.elements_no_end_tag:
-            self.pieces.append('<%s%s />' % (tag, strattrs))
-        else:
-            self.pieces.append('<%s%s>' % (tag, strattrs))
-
-    def unknown_endtag(self, tag):
-        # called for each end tag, e.g. for </pre>, tag will be 'pre'
-        # Reconstruct the original end tag.
-        if tag not in self.elements_no_end_tag:
-            self.pieces.append("</%s>" % tag)
-
-    def handle_charref(self, ref):
-        # called for each character reference, e.g. for '&#160;', ref will be '160'
-        # Reconstruct the original character reference.
-        ref = ref.lower()
-        if ref.startswith('x'):
-            value = int(ref[1:], 16)
-        else:
-            value = int(ref)
-
-        if value in _cp1252:
-            self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
-        else:
-            self.pieces.append('&#%s;' % ref)
-
-    def handle_entityref(self, ref):
-        # called for each entity reference, e.g. for '&copy;', ref will be 'copy'
-        # Reconstruct the original entity reference.
-        if ref in name2codepoint or ref == 'apos':
-            self.pieces.append('&%s;' % ref)
-        else:
-            self.pieces.append('&amp;%s' % ref)
-
-    def handle_data(self, text):
-        # called for each block of plain text, i.e. outside of any tag and
-        # not containing any character or entity references
-        # Store the original text verbatim.
-        self.pieces.append(text)
-
-    def handle_comment(self, text):
-        # called for each HTML comment, e.g. <!-- insert Javascript code here -->
-        # Reconstruct the original comment.
-        self.pieces.append('<!--%s-->' % text)
-
-    def handle_pi(self, text):
-        # called for each processing instruction, e.g. <?instruction>
-        # Reconstruct original processing instruction.
-        self.pieces.append('<?%s>' % text)
-
-    def handle_decl(self, text):
-        # called for the DOCTYPE, if present, e.g.
-        # <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-        #     "http://www.w3.org/TR/html4/loose.dtd">
-        # Reconstruct original DOCTYPE
-        self.pieces.append('<!%s>' % text)
-
-    _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
-    def _scan_name(self, i, declstartpos):
-        rawdata = self.rawdata
-        n = len(rawdata)
-        if i == n:
-            return None, -1
-        m = self._new_declname_match(rawdata, i)
-        if m:
-            s = m.group()
-            name = s.strip()
-            if (i + len(s)) == n:
-                return None, -1  # end of buffer
-            return name.lower(), m.end()
-        else:
-            self.handle_data(rawdata)
-#            self.updatepos(declstartpos, i)
-            return None, -1
-
-    def convert_charref(self, name):
-        return '&#%s;' % name
-
-    def convert_entityref(self, name):
-        return '&%s;' % name
-
-    def output(self):
-        '''Return processed HTML as a single string'''
-        return ''.join([str(p) for p in self.pieces])
-
-    def parse_declaration(self, i):
-        try:
-            return sgmllib.SGMLParser.parse_declaration(self, i)
-        except sgmllib.SGMLParseError:
-            # escape the doctype declaration and continue parsing
-            self.handle_data('&lt;')
-            return i+1
-
-class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
-    def __init__(self, baseuri, baselang, encoding, entities):
-        sgmllib.SGMLParser.__init__(self)
-        _FeedParserMixin.__init__(self, baseuri, baselang, encoding)
-        _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
-        self.entities=entities
-
-    def decodeEntities(self, element, data):
-        data = data.replace('&#60;', '&lt;')
-        data = data.replace('&#x3c;', '&lt;')
-        data = data.replace('&#x3C;', '&lt;')
-        data = data.replace('&#62;', '&gt;')
-        data = data.replace('&#x3e;', '&gt;')
-        data = data.replace('&#x3E;', '&gt;')
-        data = data.replace('&#38;', '&amp;')
-        data = data.replace('&#x26;', '&amp;')
-        data = data.replace('&#34;', '&quot;')
-        data = data.replace('&#x22;', '&quot;')
-        data = data.replace('&#39;', '&apos;')
-        data = data.replace('&#x27;', '&apos;')
-        if not self.contentparams.get('type', u'xml').endswith(u'xml'):
-            data = data.replace('&lt;', '<')
-            data = data.replace('&gt;', '>')
-            data = data.replace('&amp;', '&')
-            data = data.replace('&quot;', '"')
-            data = data.replace('&apos;', "'")
-        return data
-
-    def strattrs(self, attrs):
-        return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
-
-class _MicroformatsParser:
-    STRING = 1
-    DATE = 2
-    URI = 3
-    NODE = 4
-    EMAIL = 5
-
-    known_xfn_relationships = set(['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'])
-    known_binary_extensions =  set(['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'])
-
-    def __init__(self, data, baseuri, encoding):
-        self.document = BeautifulSoup.BeautifulSoup(data)
-        self.baseuri = baseuri
-        self.encoding = encoding
-        if isinstance(data, unicode):
-            data = data.encode(encoding)
-        self.tags = []
-        self.enclosures = []
-        self.xfn = []
-        self.vcard = None
-
-    def vcardEscape(self, s):
-        if isinstance(s, basestring):
-            s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
-        return s
-
-    def vcardFold(self, s):
-        s = re.sub(';+$', '', s)
-        sFolded = ''
-        iMax = 75
-        sPrefix = ''
-        while len(s) > iMax:
-            sFolded += sPrefix + s[:iMax] + '\n'
-            s = s[iMax:]
-            sPrefix = ' '
-            iMax = 74
-        sFolded += sPrefix + s
-        return sFolded
-
-    def normalize(self, s):
-        return re.sub(r'\s+', ' ', s).strip()
-
-    def unique(self, aList):
-        results = []
-        for element in aList:
-            if element not in results:
-                results.append(element)
-        return results
-
-    def toISO8601(self, dt):
-        return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
-
-    def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
-        all = lambda x: 1
-        sProperty = sProperty.lower()
-        bFound = 0
-        bNormalize = 1
-        propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)}
-        if bAllowMultiple and (iPropertyType != self.NODE):
-            snapResults = []
-            containers = elmRoot(['ul', 'ol'], propertyMatch)
-            for container in containers:
-                snapResults.extend(container('li'))
-            bFound = (len(snapResults) != 0)
-        if not bFound:
-            snapResults = elmRoot(all, propertyMatch)
-            bFound = (len(snapResults) != 0)
-        if (not bFound) and (sProperty == 'value'):
-            snapResults = elmRoot('pre')
-            bFound = (len(snapResults) != 0)
-            bNormalize = not bFound
-            if not bFound:
-                snapResults = [elmRoot]
-                bFound = (len(snapResults) != 0)
-        arFilter = []
-        if sProperty == 'vcard':
-            snapFilter = elmRoot(all, propertyMatch)
-            for node in snapFilter:
-                if node.findParent(all, propertyMatch):
-                    arFilter.append(node)
-        arResults = []
-        for node in snapResults:
-            if node not in arFilter:
-                arResults.append(node)
-        bFound = (len(arResults) != 0)
-        if not bFound:
-            if bAllowMultiple:
-                return []
-            elif iPropertyType == self.STRING:
-                return ''
-            elif iPropertyType == self.DATE:
-                return None
-            elif iPropertyType == self.URI:
-                return ''
-            elif iPropertyType == self.NODE:
-                return None
-            else:
-                return None
-        arValues = []
-        for elmResult in arResults:
-            sValue = None
-            if iPropertyType == self.NODE:
-                if bAllowMultiple:
-                    arValues.append(elmResult)
-                    continue
-                else:
-                    return elmResult
-            sNodeName = elmResult.name.lower()
-            if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
-                sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
-            if sValue:
-                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if (not sValue) and (sNodeName == 'abbr'):
-                sValue = elmResult.get('title')
-            if sValue:
-                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if (not sValue) and (iPropertyType == self.URI):
-                if sNodeName == 'a':
-                    sValue = elmResult.get('href')
-                elif sNodeName == 'img':
-                    sValue = elmResult.get('src')
-                elif sNodeName == 'object':
-                    sValue = elmResult.get('data')
-            if sValue:
-                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if (not sValue) and (sNodeName == 'img'):
-                sValue = elmResult.get('alt')
-            if sValue:
-                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if not sValue:
-                sValue = elmResult.renderContents()
-                sValue = re.sub(r'<\S[^>]*>', '', sValue)
-                sValue = sValue.replace('\r\n', '\n')
-                sValue = sValue.replace('\r', '\n')
-            if sValue:
-                sValue = bNormalize and self.normalize(sValue) or sValue.strip()
-            if not sValue:
-                continue
-            if iPropertyType == self.DATE:
-                sValue = _parse_date_iso8601(sValue)
-            if bAllowMultiple:
-                arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
-            else:
-                return bAutoEscape and self.vcardEscape(sValue) or sValue
-        return arValues
-
-    def findVCards(self, elmRoot, bAgentParsing=0):
-        sVCards = ''
-
-        if not bAgentParsing:
-            arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
-        else:
-            arCards = [elmRoot]
-
-        for elmCard in arCards:
-            arLines = []
-
-            def processSingleString(sProperty):
-                sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding)
-                if sValue:
-                    arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
-                return sValue or u''
-
-            def processSingleURI(sProperty):
-                sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
-                if sValue:
-                    sContentType = ''
-                    sEncoding = ''
-                    sValueKey = ''
-                    if sValue.startswith('data:'):
-                        sEncoding = ';ENCODING=b'
-                        sContentType = sValue.split(';')[0].split('/').pop()
-                        sValue = sValue.split(',', 1).pop()
-                    else:
-                        elmValue = self.getPropertyValue(elmCard, sProperty)
-                        if elmValue:
-                            if sProperty != 'url':
-                                sValueKey = ';VALUE=uri'
-                            sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
-                    sContentType = sContentType.upper()
-                    if sContentType == 'OCTET-STREAM':
-                        sContentType = ''
-                    if sContentType:
-                        sContentType = ';TYPE=' + sContentType.upper()
-                    arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
-
-            def processTypeValue(sProperty, arDefaultType, arForceType=None):
-                arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
-                for elmResult in arResults:
-                    arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
-                    if arForceType:
-                        arType = self.unique(arForceType + arType)
-                    if not arType:
-                        arType = arDefaultType
-                    sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
-                    if sValue:
-                        arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
-
-            # AGENT
-            # must do this before all other properties because it is destructive
-            # (removes nested class="vcard" nodes so they don't interfere with
-            # this vcard's other properties)
-            arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
-            for elmAgent in arAgent:
-                if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
-                    sAgentValue = self.findVCards(elmAgent, 1) + '\n'
-                    sAgentValue = sAgentValue.replace('\n', '\\n')
-                    sAgentValue = sAgentValue.replace(';', '\\;')
-                    if sAgentValue:
-                        arLines.append(self.vcardFold('AGENT:' + sAgentValue))
-                    # Completely remove the agent element from the parse tree
-                    elmAgent.extract()
-                else:
-                    sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1);
-                    if sAgentValue:
-                        arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
-
-            # FN (full name)
-            sFN = processSingleString('fn')
-
-            # N (name)
-            elmName = self.getPropertyValue(elmCard, 'n')
-            if elmName:
-                sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
-                sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
-                arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
-                arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
-                arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
-                arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
-                                         sGivenName + ';' +
-                                         ','.join(arAdditionalNames) + ';' +
-                                         ','.join(arHonorificPrefixes) + ';' +
-                                         ','.join(arHonorificSuffixes)))
-            elif sFN:
-                # implied "N" optimization
-                # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
-                arNames = self.normalize(sFN).split()
-                if len(arNames) == 2:
-                    bFamilyNameFirst = (arNames[0].endswith(',') or
-                                        len(arNames[1]) == 1 or
-                                        ((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
-                    if bFamilyNameFirst:
-                        arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
-                    else:
-                        arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
-
-            # SORT-STRING
-            sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
-            if sSortString:
-                arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
-
-            # NICKNAME
-            arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
-            if arNickname:
-                arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
-
-            # PHOTO
-            processSingleURI('photo')
-
-            # BDAY
-            dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
-            if dtBday:
-                arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
-
-            # ADR (address)
-            arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
-            for elmAdr in arAdr:
-                arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
-                if not arType:
-                    arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
-                sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
-                sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
-                sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
-                sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
-                sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
-                sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
-                sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
-                arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
-                                         sPostOfficeBox + ';' +
-                                         sExtendedAddress + ';' +
-                                         sStreetAddress + ';' +
-                                         sLocality + ';' +
-                                         sRegion + ';' +
-                                         sPostalCode + ';' +
-                                         sCountryName))
-
-            # LABEL
-            processTypeValue('label', ['intl','postal','parcel','work'])
-
-            # TEL (phone number)
-            processTypeValue('tel', ['voice'])
-
-            # EMAIL
-            processTypeValue('email', ['internet'], ['internet'])
-
-            # MAILER
-            processSingleString('mailer')
-
-            # TZ (timezone)
-            processSingleString('tz')
-
-            # GEO (geographical information)
-            elmGeo = self.getPropertyValue(elmCard, 'geo')
-            if elmGeo:
-                sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
-                sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
-                arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
-
-            # TITLE
-            processSingleString('title')
-
-            # ROLE
-            processSingleString('role')
-
-            # LOGO
-            processSingleURI('logo')
-
-            # ORG (organization)
-            elmOrg = self.getPropertyValue(elmCard, 'org')
-            if elmOrg:
-                sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
-                if not sOrganizationName:
-                    # implied "organization-name" optimization
-                    # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
-                    sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
-                    if sOrganizationName:
-                        arLines.append(self.vcardFold('ORG:' + sOrganizationName))
-                else:
-                    arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
-                    arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
-
-            # CATEGORY
-            arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
-            if arCategory:
-                arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
-
-            # NOTE
-            processSingleString('note')
-
-            # REV
-            processSingleString('rev')
-
-            # SOUND
-            processSingleURI('sound')
-
-            # UID
-            processSingleString('uid')
-
-            # URL
-            processSingleURI('url')
-
-            # CLASS
-            processSingleString('class')
-
-            # KEY
-            processSingleURI('key')
-
-            if arLines:
-                arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard']
-                # XXX - this is super ugly; properly fix this with issue 148
-                for i, s in enumerate(arLines):
-                    if not isinstance(s, unicode):
-                        arLines[i] = s.decode('utf-8', 'ignore')
-                sVCards += u'\n'.join(arLines) + u'\n'
-
-        return sVCards.strip()
-
-    def isProbablyDownloadable(self, elm):
-        attrsD = elm.attrMap
-        if 'href' not in attrsD:
-            return 0
-        linktype = attrsD.get('type', '').strip()
-        if linktype.startswith('audio/') or \
-           linktype.startswith('video/') or \
-           (linktype.startswith('application/') and not linktype.endswith('xml')):
-            return 1
-        try:
-            path = urlparse.urlparse(attrsD['href'])[2]
-        except ValueError:
-            return 0
-        if path.find('.') == -1:
-            return 0
-        fileext = path.split('.').pop().lower()
-        return fileext in self.known_binary_extensions
-
-    def findTags(self):
-        all = lambda x: 1
-        for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
-            href = elm.get('href')
-            if not href:
-                continue
-            urlscheme, domain, path, params, query, fragment = \
-                       urlparse.urlparse(_urljoin(self.baseuri, href))
-            segments = path.split('/')
-            tag = segments.pop()
-            if not tag:
-                if segments:
-                    tag = segments.pop()
-                else:
-                    # there are no tags
-                    continue
-            tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
-            if not tagscheme.endswith('/'):
-                tagscheme += '/'
-            self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
-
-    def findEnclosures(self):
-        all = lambda x: 1
-        enclosure_match = re.compile(r'\benclosure\b')
-        for elm in self.document(all, {'href': re.compile(r'.+')}):
-            if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm):
-                continue
-            if elm.attrMap not in self.enclosures:
-                self.enclosures.append(elm.attrMap)
-                if elm.string and not elm.get('title'):
-                    self.enclosures[-1]['title'] = elm.string
-
-    def findXFN(self):
-        all = lambda x: 1
-        for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
-            rels = elm.get('rel', u'').split()
-            xfn_rels = [r for r in rels if r in self.known_xfn_relationships]
-            if xfn_rels:
-                self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
-
-def _parseMicroformats(htmlSource, baseURI, encoding):
-    if not BeautifulSoup:
-        return
-    try:
-        p = _MicroformatsParser(htmlSource, baseURI, encoding)
-    except UnicodeEncodeError:
-        # sgmllib throws this exception when performing lookups of tags
-        # with non-ASCII characters in them.
-        return
-    p.vcard = p.findVCards(p.document)
-    p.findTags()
-    p.findEnclosures()
-    p.findXFN()
-    return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
-
-class _RelativeURIResolver(_BaseHTMLProcessor):
-    relative_uris = set([('a', 'href'),
-                     ('applet', 'codebase'),
-                     ('area', 'href'),
-                     ('blockquote', 'cite'),
-                     ('body', 'background'),
-                     ('del', 'cite'),
-                     ('form', 'action'),
-                     ('frame', 'longdesc'),
-                     ('frame', 'src'),
-                     ('iframe', 'longdesc'),
-                     ('iframe', 'src'),
-                     ('head', 'profile'),
-                     ('img', 'longdesc'),
-                     ('img', 'src'),
-                     ('img', 'usemap'),
-                     ('input', 'src'),
-                     ('input', 'usemap'),
-                     ('ins', 'cite'),
-                     ('link', 'href'),
-                     ('object', 'classid'),
-                     ('object', 'codebase'),
-                     ('object', 'data'),
-                     ('object', 'usemap'),
-                     ('q', 'cite'),
-                     ('script', 'src'),
-                     ('video', 'poster')])
-
-    def __init__(self, baseuri, encoding, _type):
-        _BaseHTMLProcessor.__init__(self, encoding, _type)
-        self.baseuri = baseuri
-
-    def resolveURI(self, uri):
-        return _makeSafeAbsoluteURI(self.baseuri, uri.strip())
-
-    def unknown_starttag(self, tag, attrs):
-        attrs = self.normalize_attrs(attrs)
-        attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
-        _BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
-
-def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type):
-    if not _SGML_AVAILABLE:
-        return htmlSource
-
-    p = _RelativeURIResolver(baseURI, encoding, _type)
-    p.feed(htmlSource)
-    return p.output()
-
-def _makeSafeAbsoluteURI(base, rel=None):
-    # bail if ACCEPTABLE_URI_SCHEMES is empty
-    if not ACCEPTABLE_URI_SCHEMES:
-        try:
-            return _urljoin(base, rel or u'')
-        except ValueError:
-            return u''
-    if not base:
-        return rel or u''
-    if not rel:
-        try:
-            scheme = urlparse.urlparse(base)[0]
-        except ValueError:
-            return u''
-        if not scheme or scheme in ACCEPTABLE_URI_SCHEMES:
-            return base
-        return u''
-    try:
-        uri = _urljoin(base, rel)
-    except ValueError:
-        return u''
-    if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES:
-        return u''
-    return uri
-
-class _HTMLSanitizer(_BaseHTMLProcessor):
-    acceptable_elements = set(['a', 'abbr', 'acronym', 'address', 'area',
-        'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
-        'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
-        'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
-        'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
-        'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
-        'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
-        'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
-        'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
-        'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
-        'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
-        'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
-        'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'])
-
-    acceptable_attributes = set(['abbr', 'accept', 'accept-charset', 'accesskey',
-      'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
-      'background', 'balance', 'bgcolor', 'bgproperties', 'border',
-      'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
-      'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
-      'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
-      'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data',
-      'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay',
-      'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
-      'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
-      'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
-      'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
-      'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
-      'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
-      'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
-      'poster', 'pqg', 'preload', 'prompt', 'radiogroup', 'readonly', 'rel',
-      'repeat-max', 'repeat-min', 'replace', 'required', 'rev', 'rightspacing',
-      'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span',
-      'src', 'start', 'step', 'summary', 'suppress', 'tabindex', 'target',
-      'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
-      'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
-      'width', 'wrap', 'xml:lang'])
-
-    unacceptable_elements_with_end_tag = set(['script', 'applet', 'style'])
-
-    acceptable_css_properties = set(['azimuth', 'background-color',
-      'border-bottom-color', 'border-collapse', 'border-color',
-      'border-left-color', 'border-right-color', 'border-top-color', 'clear',
-      'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
-      'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
-      'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
-      'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
-      'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
-      'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
-      'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
-      'white-space', 'width'])
-
-    # survey of common keywords found in feeds
-    acceptable_css_keywords = set(['auto', 'aqua', 'black', 'block', 'blue',
-      'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
-      'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
-      'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
-      'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
-      'transparent', 'underline', 'white', 'yellow'])
-
-    valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
-      '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
-
-    mathml_elements = set(['annotation', 'annotation-xml', 'maction', 'math',
-      'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
-      'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle',
-      'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
-      'munderover', 'none', 'semantics'])
-
-    mathml_attributes = set(['actiontype', 'align', 'columnalign', 'columnalign',
-      'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth',
-      'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows',
-      'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness',
-      'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant',
-      'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign',
-      'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
-      'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href',
-      'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'])
-
-    # svgtiny - foreignObject + linearGradient + radialGradient + stop
-    svg_elements = set(['a', 'animate', 'animateColor', 'animateMotion',
-      'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'foreignObject',
-      'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
-      'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
-      'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
-      'svg', 'switch', 'text', 'title', 'tspan', 'use'])
-
-    # svgtiny + class + opacity + offset + xmlns + xmlns:xlink
-    svg_attributes = set(['accent-height', 'accumulate', 'additive', 'alphabetic',
-       'arabic-form', 'ascent', 'attributeName', 'attributeType',
-       'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
-       'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
-       'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity',
-       'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style',
-       'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2',
-       'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x',
-       'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines',
-       'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid',
-       'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max',
-       'min', 'name', 'offset', 'opacity', 'orient', 'origin',
-       'overline-position', 'overline-thickness', 'panose-1', 'path',
-       'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY',
-       'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures',
-       'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv',
-       'stop-color', 'stop-opacity', 'strikethrough-position',
-       'strikethrough-thickness', 'stroke', 'stroke-dasharray',
-       'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin',
-       'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage',
-       'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2',
-       'underline-position', 'underline-thickness', 'unicode', 'unicode-range',
-       'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width',
-       'widths', 'x', 'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
-       'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
-       'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1',
-       'y2', 'zoomAndPan'])
-
-    svg_attr_map = None
-    svg_elem_map = None
-
-    acceptable_svg_properties = set([ 'fill', 'fill-opacity', 'fill-rule',
-      'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
-      'stroke-opacity'])
-
-    def reset(self):
-        _BaseHTMLProcessor.reset(self)
-        self.unacceptablestack = 0
-        self.mathmlOK = 0
-        self.svgOK = 0
-
-    def unknown_starttag(self, tag, attrs):
-        acceptable_attributes = self.acceptable_attributes
-        keymap = {}
-        if not tag in self.acceptable_elements or self.svgOK:
-            if tag in self.unacceptable_elements_with_end_tag:
-                self.unacceptablestack += 1
-
-            # add implicit namespaces to html5 inline svg/mathml
-            if self._type.endswith('html'):
-                if not dict(attrs).get('xmlns'):
-                    if tag=='svg':
-                        attrs.append( ('xmlns','http://www.w3.org/2000/svg') )
-                    if tag=='math':
-                        attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') )
-
-            # not otherwise acceptable, perhaps it is MathML or SVG?
-            if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
-                self.mathmlOK += 1
-            if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
-                self.svgOK += 1
-
-            # chose acceptable attributes based on tag class, else bail
-            if  self.mathmlOK and tag in self.mathml_elements:
-                acceptable_attributes = self.mathml_attributes
-            elif self.svgOK and tag in self.svg_elements:
-                # for most vocabularies, lowercasing is a good idea.  Many
-                # svg elements, however, are camel case
-                if not self.svg_attr_map:
-                    lower=[attr.lower() for attr in self.svg_attributes]
-                    mix=[a for a in self.svg_attributes if a not in lower]
-                    self.svg_attributes = lower
-                    self.svg_attr_map = dict([(a.lower(),a) for a in mix])
-
-                    lower=[attr.lower() for attr in self.svg_elements]
-                    mix=[a for a in self.svg_elements if a not in lower]
-                    self.svg_elements = lower
-                    self.svg_elem_map = dict([(a.lower(),a) for a in mix])
-                acceptable_attributes = self.svg_attributes
-                tag = self.svg_elem_map.get(tag,tag)
-                keymap = self.svg_attr_map
-            elif not tag in self.acceptable_elements:
-                return
-
-        # declare xlink namespace, if needed
-        if self.mathmlOK or self.svgOK:
-            if filter(lambda (n,v): n.startswith('xlink:'),attrs):
-                if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
-                    attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
-
-        clean_attrs = []
-        for key, value in self.normalize_attrs(attrs):
-            if key in acceptable_attributes:
-                key=keymap.get(key,key)
-                # make sure the uri uses an acceptable uri scheme
-                if key == u'href':
-                    value = _makeSafeAbsoluteURI(value)
-                clean_attrs.append((key,value))
-            elif key=='style':
-                clean_value = self.sanitize_style(value)
-                if clean_value:
-                    clean_attrs.append((key,clean_value))
-        _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
-
-    def unknown_endtag(self, tag):
-        if not tag in self.acceptable_elements:
-            if tag in self.unacceptable_elements_with_end_tag:
-                self.unacceptablestack -= 1
-            if self.mathmlOK and tag in self.mathml_elements:
-                if tag == 'math' and self.mathmlOK:
-                    self.mathmlOK -= 1
-            elif self.svgOK and tag in self.svg_elements:
-                tag = self.svg_elem_map.get(tag,tag)
-                if tag == 'svg' and self.svgOK:
-                    self.svgOK -= 1
-            else:
-                return
-        _BaseHTMLProcessor.unknown_endtag(self, tag)
-
-    def handle_pi(self, text):
-        pass
-
-    def handle_decl(self, text):
-        pass
-
-    def handle_data(self, text):
-        if not self.unacceptablestack:
-            _BaseHTMLProcessor.handle_data(self, text)
-
-    def sanitize_style(self, style):
-        # disallow urls
-        style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
-
-        # gauntlet
-        if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
-            return ''
-        # This replaced a regexp that used re.match and was prone to pathological back-tracking.
-        if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip():
-            return ''
-
-        clean = []
-        for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
-            if not value:
-                continue
-            if prop.lower() in self.acceptable_css_properties:
-                clean.append(prop + ': ' + value + ';')
-            elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
-                for keyword in value.split():
-                    if not keyword in self.acceptable_css_keywords and \
-                        not self.valid_css_values.match(keyword):
-                        break
-                else:
-                    clean.append(prop + ': ' + value + ';')
-            elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
-                clean.append(prop + ': ' + value + ';')
-
-        return ' '.join(clean)
-
-    def parse_comment(self, i, report=1):
-        ret = _BaseHTMLProcessor.parse_comment(self, i, report)
-        if ret >= 0:
-            return ret
-        # if ret == -1, this may be a malicious attempt to circumvent
-        # sanitization, or a page-destroying unclosed comment
-        match = re.compile(r'--[^>]*>').search(self.rawdata, i+4)
-        if match:
-            return match.end()
-        # unclosed comment; deliberately fail to handle_data()
-        return len(self.rawdata)
-
-
-def _sanitizeHTML(htmlSource, encoding, _type):
-    if not _SGML_AVAILABLE:
-        return htmlSource
-    p = _HTMLSanitizer(encoding, _type)
-    htmlSource = htmlSource.replace('<![CDATA[', '&lt;![CDATA[')
-    p.feed(htmlSource)
-    data = p.output()
-    if TIDY_MARKUP:
-        # loop through list of preferred Tidy interfaces looking for one that's installed,
-        # then set up a common _tidy function to wrap the interface-specific API.
-        _tidy = None
-        for tidy_interface in PREFERRED_TIDY_INTERFACES:
-            try:
-                if tidy_interface == "uTidy":
-                    from tidy import parseString as _utidy
-                    def _tidy(data, **kwargs):
-                        return str(_utidy(data, **kwargs))
-                    break
-                elif tidy_interface == "mxTidy":
-                    from mx.Tidy import Tidy as _mxtidy
-                    def _tidy(data, **kwargs):
-                        nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
-                        return data
-                    break
-            except:
-                pass
-        if _tidy:
-            utf8 = isinstance(data, unicode)
-            if utf8:
-                data = data.encode('utf-8')
-            data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
-            if utf8:
-                data = unicode(data, 'utf-8')
-            if data.count('<body'):
-                data = data.split('<body', 1)[1]
-                if data.count('>'):
-                    data = data.split('>', 1)[1]
-            if data.count('</body'):
-                data = data.split('</body', 1)[0]
-    data = data.strip().replace('\r\n', '\n')
-    return data
-
-class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
-    def http_error_default(self, req, fp, code, msg, headers):
-        # The default implementation just raises HTTPError.
-        # Forget that.
-        fp.status = code
-        return fp
-
-    def http_error_301(self, req, fp, code, msg, hdrs):
-        result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp,
-                                                            code, msg, hdrs)
-        result.status = code
-        result.newurl = result.geturl()
-        return result
-    # The default implementations in urllib2.HTTPRedirectHandler
-    # are identical, so hardcoding a http_error_301 call above
-    # won't affect anything
-    http_error_300 = http_error_301
-    http_error_302 = http_error_301
-    http_error_303 = http_error_301
-    http_error_307 = http_error_301
-
-    def http_error_401(self, req, fp, code, msg, headers):
-        # Check if
-        # - server requires digest auth, AND
-        # - we tried (unsuccessfully) with basic auth, AND
-        # If all conditions hold, parse authentication information
-        # out of the Authorization header we sent the first time
-        # (for the username and password) and the WWW-Authenticate
-        # header the server sent back (for the realm) and retry
-        # the request with the appropriate digest auth headers instead.
-        # This evil genius hack has been brought to you by Aaron Swartz.
-        host = urlparse.urlparse(req.get_full_url())[1]
-        if base64 is None or 'Authorization' not in req.headers \
-                          or 'WWW-Authenticate' not in headers:
-            return self.http_error_default(req, fp, code, msg, headers)
-        auth = _base64decode(req.headers['Authorization'].split(' ')[1])
-        user, passw = auth.split(':')
-        realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
-        self.add_password(realm, host, user, passw)
-        retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
-        self.reset_retry_count()
-        return retry
-
-def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers):
-    """URL, filename, or string --> stream
-
-    This function lets you define parsers that take any input source
-    (URL, pathname to local or network file, or actual data as a string)
-    and deal with it in a uniform manner.  Returned object is guaranteed
-    to have all the basic stdio read methods (read, readline, readlines).
-    Just .close() the object when you're done with it.
-
-    If the etag argument is supplied, it will be used as the value of an
-    If-None-Match request header.
-
-    If the modified argument is supplied, it can be a tuple of 9 integers
-    (as returned by gmtime() in the standard Python time module) or a date
-    string in any format supported by feedparser. Regardless, it MUST
-    be in GMT (Greenwich Mean Time). It will be reformatted into an
-    RFC 1123-compliant date and used as the value of an If-Modified-Since
-    request header.
-
-    If the agent argument is supplied, it will be used as the value of a
-    User-Agent request header.
-
-    If the referrer argument is supplied, it will be used as the value of a
-    Referer[sic] request header.
-
-    If handlers is supplied, it is a list of handlers used to build a
-    urllib2 opener.
-
-    if request_headers is supplied it is a dictionary of HTTP request headers
-    that will override the values generated by FeedParser.
-    """
-
-    if hasattr(url_file_stream_or_string, 'read'):
-        return url_file_stream_or_string
-
-    if isinstance(url_file_stream_or_string, basestring) \
-       and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'):
-        # Deal with the feed URI scheme
-        if url_file_stream_or_string.startswith('feed:http'):
-            url_file_stream_or_string = url_file_stream_or_string[5:]
-        elif url_file_stream_or_string.startswith('feed:'):
-            url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:]
-        if not agent:
-            agent = USER_AGENT
-        # Test for inline user:password credentials for HTTP basic auth
-        auth = None
-        if base64 and not url_file_stream_or_string.startswith('ftp:'):
-            urltype, rest = urllib.splittype(url_file_stream_or_string)
-            realhost, rest = urllib.splithost(rest)
-            if realhost:
-                user_passwd, realhost = urllib.splituser(realhost)
-                if user_passwd:
-                    url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
-                    auth = base64.standard_b64encode(user_passwd).strip()
-
-        # iri support
-        if isinstance(url_file_stream_or_string, unicode):
-            url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string)
-
-        # try to open with urllib2 (to use optional headers)
-        request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers)
-        opener = urllib2.build_opener(*tuple(handlers + [_FeedURLHandler()]))
-        opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
-        try:
-            return opener.open(request)
-        finally:
-            opener.close() # JohnD
-
-    # try to open with native open function (if url_file_stream_or_string is a filename)
-    try:
-        return open(url_file_stream_or_string, 'rb')
-    except (IOError, UnicodeEncodeError, TypeError):
-        # if url_file_stream_or_string is a unicode object that
-        # cannot be converted to the encoding returned by
-        # sys.getfilesystemencoding(), a UnicodeEncodeError
-        # will be thrown
-        # If url_file_stream_or_string is a string that contains NULL
-        # (such as an XML document encoded in UTF-32), TypeError will
-        # be thrown.
-        pass
-
-    # treat url_file_stream_or_string as string
-    if isinstance(url_file_stream_or_string, unicode):
-        return _StringIO(url_file_stream_or_string.encode('utf-8'))
-    return _StringIO(url_file_stream_or_string)
-
-def _convert_to_idn(url):
-    """Convert a URL to IDN notation"""
-    # this function should only be called with a unicode string
-    # strategy: if the host cannot be encoded in ascii, then
-    # it'll be necessary to encode it in idn form
-    parts = list(urlparse.urlsplit(url))
-    try:
-        parts[1].encode('ascii')
-    except UnicodeEncodeError:
-        # the url needs to be converted to idn notation
-        host = parts[1].rsplit(':', 1)
-        newhost = []
-        port = u''
-        if len(host) == 2:
-            port = host.pop()
-        for h in host[0].split('.'):
-            newhost.append(h.encode('idna').decode('utf-8'))
-        parts[1] = '.'.join(newhost)
-        if port:
-            parts[1] += ':' + port
-        return urlparse.urlunsplit(parts)
-    else:
-        return url
-
-def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers):
-    request = urllib2.Request(url)
-    request.add_header('User-Agent', agent)
-    if etag:
-        request.add_header('If-None-Match', etag)
-    if isinstance(modified, basestring):
-        modified = _parse_date(modified)
-    elif isinstance(modified, datetime.datetime):
-        modified = modified.utctimetuple()
-    if modified:
-        # format into an RFC 1123-compliant timestamp. We can't use
-        # time.strftime() since the %a and %b directives can be affected
-        # by the current locale, but RFC 2616 states that dates must be
-        # in English.
-        short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
-        months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-        request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
-    if referrer:
-        request.add_header('Referer', referrer)
-    if gzip and zlib:
-        request.add_header('Accept-encoding', 'gzip, deflate')
-    elif gzip:
-        request.add_header('Accept-encoding', 'gzip')
-    elif zlib:
-        request.add_header('Accept-encoding', 'deflate')
-    else:
-        request.add_header('Accept-encoding', '')
-    if auth:
-        request.add_header('Authorization', 'Basic %s' % auth)
-    if ACCEPT_HEADER:
-        request.add_header('Accept', ACCEPT_HEADER)
-    # use this for whatever -- cookies, special headers, etc
-    # [('Cookie','Something'),('x-special-header','Another Value')]
-    for header_name, header_value in request_headers.items():
-        request.add_header(header_name, header_value)
-    request.add_header('A-IM', 'feed') # RFC 3229 support
-    return request
-
-_date_handlers = []
-def registerDateHandler(func):
-    '''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
-    _date_handlers.insert(0, func)
-
-# ISO-8601 date parsing routines written by Fazal Majid.
-# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
-# parser is beyond the scope of feedparser and would be a worthwhile addition
-# to the Python library.
-# A single regular expression cannot parse ISO 8601 date formats into groups
-# as the standard is highly irregular (for instance is 030104 2003-01-04 or
-# 0301-04-01), so we use templates instead.
-# Please note the order in templates is significant because we need a
-# greedy match.
-_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
-                'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
-                '-YY-?MM', '-OOO', '-YY',
-                '--MM-?DD', '--MM',
-                '---DD',
-                'CC', '']
-_iso8601_re = [
-    tmpl.replace(
-    'YYYY', r'(?P<year>\d{4})').replace(
-    'YY', r'(?P<year>\d\d)').replace(
-    'MM', r'(?P<month>[01]\d)').replace(
-    'DD', r'(?P<day>[0123]\d)').replace(
-    'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
-    'CC', r'(?P<century>\d\d$)')
-    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
-    + r'(:(?P<second>\d{2}))?'
-    + r'(\.(?P<fracsecond>\d+))?'
-    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
-    for tmpl in _iso8601_tmpl]
-try:
-    del tmpl
-except NameError:
-    pass
-_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
-try:
-    del regex
-except NameError:
-    pass
-def _parse_date_iso8601(dateString):
-    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
-    m = None
-    for _iso8601_match in _iso8601_matches:
-        m = _iso8601_match(dateString)
-        if m:
-            break
-    if not m:
-        return
-    if m.span() == (0, 0):
-        return
-    params = m.groupdict()
-    ordinal = params.get('ordinal', 0)
-    if ordinal:
-        ordinal = int(ordinal)
-    else:
-        ordinal = 0
-    year = params.get('year', '--')
-    if not year or year == '--':
-        year = time.gmtime()[0]
-    elif len(year) == 2:
-        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
-        year = 100 * int(time.gmtime()[0] / 100) + int(year)
-    else:
-        year = int(year)
-    month = params.get('month', '-')
-    if not month or month == '-':
-        # ordinals are NOT normalized by mktime, we simulate them
-        # by setting month=1, day=ordinal
-        if ordinal:
-            month = 1
-        else:
-            month = time.gmtime()[1]
-    month = int(month)
-    day = params.get('day', 0)
-    if not day:
-        # see above
-        if ordinal:
-            day = ordinal
-        elif params.get('century', 0) or \
-                 params.get('year', 0) or params.get('month', 0):
-            day = 1
-        else:
-            day = time.gmtime()[2]
-    else:
-        day = int(day)
-    # special case of the century - is the first year of the 21st century
-    # 2000 or 2001 ? The debate goes on...
-    if 'century' in params:
-        year = (int(params['century']) - 1) * 100 + 1
-    # in ISO 8601 most fields are optional
-    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
-        if not params.get(field, None):
-            params[field] = 0
-    hour = int(params.get('hour', 0))
-    minute = int(params.get('minute', 0))
-    second = int(float(params.get('second', 0)))
-    # weekday is normalized by mktime(), we can ignore it
-    weekday = 0
-    daylight_savings_flag = -1
-    tm = [year, month, day, hour, minute, second, weekday,
-          ordinal, daylight_savings_flag]
-    # ISO 8601 time zone adjustments
-    tz = params.get('tz')
-    if tz and tz != 'Z':
-        if tz[0] == '-':
-            tm[3] += int(params.get('tzhour', 0))
-            tm[4] += int(params.get('tzmin', 0))
-        elif tz[0] == '+':
-            tm[3] -= int(params.get('tzhour', 0))
-            tm[4] -= int(params.get('tzmin', 0))
-        else:
-            return None
-    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
-    # which is guaranteed to normalize d/m/y/h/m/s.
-    # Many implementations have bugs, but we'll pretend they don't.
-    return time.localtime(time.mktime(tuple(tm)))
-registerDateHandler(_parse_date_iso8601)
-
-# 8-bit date handling routines written by ytrewq1.
-_korean_year  = u'\ub144' # b3e2 in euc-kr
-_korean_month = u'\uc6d4' # bff9 in euc-kr
-_korean_day   = u'\uc77c' # c0cf in euc-kr
-_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
-_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
-
-_korean_onblog_date_re = \
-    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
-               (_korean_year, _korean_month, _korean_day))
-_korean_nate_date_re = \
-    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
-               (_korean_am, _korean_pm))
-def _parse_date_onblog(dateString):
-    '''Parse a string according to the OnBlog 8-bit date format'''
-    m = _korean_onblog_date_re.match(dateString)
-    if not m:
-        return
-    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
-                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
-                 'zonediff': '+09:00'}
-    return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_onblog)
-
-def _parse_date_nate(dateString):
-    '''Parse a string according to the Nate 8-bit date format'''
-    m = _korean_nate_date_re.match(dateString)
-    if not m:
-        return
-    hour = int(m.group(5))
-    ampm = m.group(4)
-    if (ampm == _korean_pm):
-        hour += 12
-    hour = str(hour)
-    if len(hour) == 1:
-        hour = '0' + hour
-    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
-                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
-                 'zonediff': '+09:00'}
-    return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_nate)
-
-# Unicode strings for Greek date strings
-_greek_months = \
-  { \
-   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
-   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
-   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
-   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
-   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
-   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
-   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
-   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
-   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
-   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
-   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
-   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
-   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
-   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
-   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
-   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
-   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
-   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
-   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
-  }
-
-_greek_wdays = \
-  { \
-   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
-   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
-   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
-   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
-   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
-   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
-   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
-  }
-
-_greek_date_format_re = \
-    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
-
-def _parse_date_greek(dateString):
-    '''Parse a string according to a Greek 8-bit date format.'''
-    m = _greek_date_format_re.match(dateString)
-    if not m:
-        return
-    wday = _greek_wdays[m.group(1)]
-    month = _greek_months[m.group(3)]
-    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
-                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
-                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
-                  'zonediff': m.group(8)}
-    return _parse_date_rfc822(rfc822date)
-registerDateHandler(_parse_date_greek)
-
-# Unicode strings for Hungarian date strings
-_hungarian_months = \
-  { \
-    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
-    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
-    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
-    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
-    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
-    u'j\u00fanius':   u'06',  # fa in iso-8859-2
-    u'j\u00falius':   u'07',  # fa in iso-8859-2
-    u'augusztus':     u'08',
-    u'szeptember':    u'09',
-    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
-    u'november':      u'11',
-    u'december':      u'12',
-  }
-
-_hungarian_date_format_re = \
-  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
-
-def _parse_date_hungarian(dateString):
-    '''Parse a string according to a Hungarian 8-bit date format.'''
-    m = _hungarian_date_format_re.match(dateString)
-    if not m or m.group(2) not in _hungarian_months:
-        return None
-    month = _hungarian_months[m.group(2)]
-    day = m.group(3)
-    if len(day) == 1:
-        day = '0' + day
-    hour = m.group(4)
-    if len(hour) == 1:
-        hour = '0' + hour
-    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
-                {'year': m.group(1), 'month': month, 'day': day,\
-                 'hour': hour, 'minute': m.group(5),\
-                 'zonediff': m.group(6)}
-    return _parse_date_w3dtf(w3dtfdate)
-registerDateHandler(_parse_date_hungarian)
-
-# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
-# Drake and licensed under the Python license.  Removed all range checking
-# for month, day, hour, minute, and second, since mktime will normalize
-# these later
-# Modified to also support MSSQL-style datetimes as defined at:
-# http://msdn.microsoft.com/en-us/library/ms186724.aspx
-# (which basically means allowing a space as a date/time/timezone separator)
-def _parse_date_w3dtf(dateString):
-    def __extract_date(m):
-        year = int(m.group('year'))
-        if year < 100:
-            year = 100 * int(time.gmtime()[0] / 100) + int(year)
-        if year < 1000:
-            return 0, 0, 0
-        julian = m.group('julian')
-        if julian:
-            julian = int(julian)
-            month = julian / 30 + 1
-            day = julian % 30 + 1
-            jday = None
-            while jday != julian:
-                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
-                jday = time.gmtime(t)[-2]
-                diff = abs(jday - julian)
-                if jday > julian:
-                    if diff < day:
-                        day = day - diff
-                    else:
-                        month = month - 1
-                        day = 31
-                elif jday < julian:
-                    if day + diff < 28:
-                        day = day + diff
-                    else:
-                        month = month + 1
-            return year, month, day
-        month = m.group('month')
-        day = 1
-        if month is None:
-            month = 1
-        else:
-            month = int(month)
-            day = m.group('day')
-            if day:
-                day = int(day)
-            else:
-                day = 1
-        return year, month, day
-
-    def __extract_time(m):
-        if not m:
-            return 0, 0, 0
-        hours = m.group('hours')
-        if not hours:
-            return 0, 0, 0
-        hours = int(hours)
-        minutes = int(m.group('minutes'))
-        seconds = m.group('seconds')
-        if seconds:
-            seconds = int(seconds)
-        else:
-            seconds = 0
-        return hours, minutes, seconds
-
-    def __extract_tzd(m):
-        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
-        if not m:
-            return 0
-        tzd = m.group('tzd')
-        if not tzd:
-            return 0
-        if tzd == 'Z':
-            return 0
-        hours = int(m.group('tzdhours'))
-        minutes = m.group('tzdminutes')
-        if minutes:
-            minutes = int(minutes)
-        else:
-            minutes = 0
-        offset = (hours*60 + minutes) * 60
-        if tzd[0] == '+':
-            return -offset
-        return offset
-
-    __date_re = ('(?P<year>\d\d\d\d)'
-                 '(?:(?P<dsep>-|)'
-                 '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
-                 '|(?P<julian>\d\d\d)))?')
-    __tzd_re = ' ?(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)?'
-    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
-                 '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
-                 + __tzd_re)
-    __datetime_re = '%s(?:[T ]%s)?' % (__date_re, __time_re)
-    __datetime_rx = re.compile(__datetime_re)
-    m = __datetime_rx.match(dateString)
-    if (m is None) or (m.group() != dateString):
-        return
-    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
-    if gmt[0] == 0:
-        return
-    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
-registerDateHandler(_parse_date_w3dtf)
-
-# Define the strings used by the RFC822 datetime parser
-_rfc822_months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
-          'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
-_rfc822_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
-
-# Only the first three letters of the month name matter
-_rfc822_month = "(?P<month>%s)(?:[a-z]*,?)" % ('|'.join(_rfc822_months))
-# The year may be 2 or 4 digits; capture the century if it exists
-_rfc822_year = "(?P<year>(?:\d{2})?\d{2})"
-_rfc822_day = "(?P<day> *\d{1,2})"
-_rfc822_date = "%s %s %s" % (_rfc822_day, _rfc822_month, _rfc822_year)
-
-_rfc822_hour = "(?P<hour>\d{2}):(?P<minute>\d{2})(?::(?P<second>\d{2}))?"
-_rfc822_tz = "(?P<tz>ut|gmt(?:[+-]\d{2}:\d{2})?|[aecmp][sd]?t|[zamny]|[+-]\d{4})"
-_rfc822_tznames = {
-    'ut': 0, 'gmt': 0, 'z': 0,
-    'adt': -3, 'ast': -4, 'at': -4,
-    'edt': -4, 'est': -5, 'et': -5,
-    'cdt': -5, 'cst': -6, 'ct': -6,
-    'mdt': -6, 'mst': -7, 'mt': -7,
-    'pdt': -7, 'pst': -8, 'pt': -8,
-    'a': -1, 'n': 1,
-    'm': -12, 'y': 12,
- }
-# The timezone may be prefixed by 'Etc/'
-_rfc822_time = "%s (?:etc/)?%s" % (_rfc822_hour, _rfc822_tz)
-
-_rfc822_dayname = "(?P<dayname>%s)" % ('|'.join(_rfc822_daynames))
-_rfc822_match = re.compile(
-    "(?:%s, )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date, _rfc822_time)
-).match
-
-def _parse_date_group_rfc822(m):
-    # Calculate a date and timestamp
-    for k in ('year', 'day', 'hour', 'minute', 'second'):
-        m[k] = int(m[k])
-    m['month'] = _rfc822_months.index(m['month']) + 1
-    # If the year is 2 digits, assume everything in the 90's is the 1990's
-    if m['year'] < 100:
-        m['year'] += (1900, 2000)[m['year'] < 90]
-    stamp = datetime.datetime(*[m[i] for i in 
-                ('year', 'month', 'day', 'hour', 'minute', 'second')])
-
-    # Use the timezone information to calculate the difference between
-    # the given date and timestamp and Universal Coordinated Time
-    tzhour = 0
-    tzmin = 0
-    if m['tz'] and m['tz'].startswith('gmt'):
-        # Handle GMT and GMT+hh:mm timezone syntax (the trailing
-        # timezone info will be handled by the next `if` block)
-        m['tz'] = ''.join(m['tz'][3:].split(':')) or 'gmt'
-    if not m['tz']:
-        pass
-    elif m['tz'].startswith('+'):
-        tzhour = int(m['tz'][1:3])
-        tzmin = int(m['tz'][3:])
-    elif m['tz'].startswith('-'):
-        tzhour = int(m['tz'][1:3]) * -1
-        tzmin = int(m['tz'][3:]) * -1
-    else:
-        tzhour = _rfc822_tznames[m['tz']]
-    delta = datetime.timedelta(0, 0, 0, 0, tzmin, tzhour)
-
-    # Return the date and timestamp in UTC
-    return (stamp - delta).utctimetuple()
-
-def _parse_date_rfc822(dt):
-    """Parse RFC 822 dates and times, with one minor
-    difference: years may be 4DIGIT or 2DIGIT.
-    http://tools.ietf.org/html/rfc822#section-5"""
-    try:
-        m = _rfc822_match(dt.lower()).groupdict(0)
-    except AttributeError:
-        return None
-
-    return _parse_date_group_rfc822(m)
-registerDateHandler(_parse_date_rfc822)
-
-def _parse_date_rfc822_grubby(dt):
-    """Parse date format similar to RFC 822, but 
-    the comma after the dayname is optional and
-    month/day are inverted"""
-    _rfc822_date_grubby = "%s %s %s" % (_rfc822_month, _rfc822_day, _rfc822_year)
-    _rfc822_match_grubby = re.compile(
-        "(?:%s[,]? )?%s(?: %s)?" % (_rfc822_dayname, _rfc822_date_grubby, _rfc822_time)
-    ).match
-
-    try:
-        m = _rfc822_match_grubby(dt.lower()).groupdict(0)
-    except AttributeError:
-        return None
-
-    return _parse_date_group_rfc822(m)
-registerDateHandler(_parse_date_rfc822_grubby)
-
-def _parse_date_asctime(dt):
-    """Parse asctime-style dates"""
-    dayname, month, day, remainder = dt.split(None, 3)
-    # Convert month and day into zero-padded integers
-    month = '%02i ' % (_rfc822_months.index(month.lower()) + 1)
-    day = '%02i ' % (int(day),)
-    dt = month + day + remainder
-    return time.strptime(dt, '%m %d %H:%M:%S %Y')[:-1] + (0, )
-registerDateHandler(_parse_date_asctime)
-
-def _parse_date_perforce(aDateString):
-    """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
-    # Fri, 2006/09/15 08:19:53 EDT
-    _my_date_pattern = re.compile( \
-        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
-
-    m = _my_date_pattern.search(aDateString)
-    if m is None:
-        return None
-    dow, year, month, day, hour, minute, second, tz = m.groups()
-    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
-    dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
-    tm = rfc822.parsedate_tz(dateString)
-    if tm:
-        return time.gmtime(rfc822.mktime_tz(tm))
-registerDateHandler(_parse_date_perforce)
-
-def _parse_date(dateString):
-    '''Parses a variety of date formats into a 9-tuple in GMT'''
-    if not dateString:
-        return None
-    for handler in _date_handlers:
-        try:
-            date9tuple = handler(dateString)
-        except (KeyError, OverflowError, ValueError):
-            continue
-        if not date9tuple:
-            continue
-        if len(date9tuple) != 9:
-            continue
-        return date9tuple
-    return None
-
-# Each marker represents some of the characters of the opening XML
-# processing instruction ('<?xm') in the specified encoding.
-EBCDIC_MARKER = _l2bytes([0x4C, 0x6F, 0xA7, 0x94])
-UTF16BE_MARKER = _l2bytes([0x00, 0x3C, 0x00, 0x3F])
-UTF16LE_MARKER = _l2bytes([0x3C, 0x00, 0x3F, 0x00])
-UTF32BE_MARKER = _l2bytes([0x00, 0x00, 0x00, 0x3C])
-UTF32LE_MARKER = _l2bytes([0x3C, 0x00, 0x00, 0x00])
-
-ZERO_BYTES = _l2bytes([0x00, 0x00])
-
-# Match the opening XML declaration.
-# Example: <?xml version="1.0" encoding="utf-8"?>
-RE_XML_DECLARATION = re.compile('^<\?xml[^>]*?>')
-
-# Capture the value of the XML processing instruction's encoding attribute.
-# Example: <?xml version="1.0" encoding="utf-8"?>
-RE_XML_PI_ENCODING = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>'))
-
-def convert_to_utf8(http_headers, data):
-    '''Detect and convert the character encoding to UTF-8.
-
-    http_headers is a dictionary
-    data is a raw string (not Unicode)'''
-
-    # This is so much trickier than it sounds, it's not even funny.
-    # According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
-    # is application/xml, application/*+xml,
-    # application/xml-external-parsed-entity, or application/xml-dtd,
-    # the encoding given in the charset parameter of the HTTP Content-Type
-    # takes precedence over the encoding given in the XML prefix within the
-    # document, and defaults to 'utf-8' if neither are specified.  But, if
-    # the HTTP Content-Type is text/xml, text/*+xml, or
-    # text/xml-external-parsed-entity, the encoding given in the XML prefix
-    # within the document is ALWAYS IGNORED and only the encoding given in
-    # the charset parameter of the HTTP Content-Type header should be
-    # respected, and it defaults to 'us-ascii' if not specified.
-
-    # Furthermore, discussion on the atom-syntax mailing list with the
-    # author of RFC 3023 leads me to the conclusion that any document
-    # served with a Content-Type of text/* and no charset parameter
-    # must be treated as us-ascii.  (We now do this.)  And also that it
-    # must always be flagged as non-well-formed.  (We now do this too.)
-
-    # If Content-Type is unspecified (input was local file or non-HTTP source)
-    # or unrecognized (server just got it totally wrong), then go by the
-    # encoding given in the XML prefix of the document and default to
-    # 'iso-8859-1' as per the HTTP specification (RFC 2616).
-
-    # Then, assuming we didn't find a character encoding in the HTTP headers
-    # (and the HTTP Content-type allowed us to look in the body), we need
-    # to sniff the first few bytes of the XML data and try to determine
-    # whether the encoding is ASCII-compatible.  Section F of the XML
-    # specification shows the way here:
-    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
-
-    # If the sniffed encoding is not ASCII-compatible, we need to make it
-    # ASCII compatible so that we can sniff further into the XML declaration
-    # to find the encoding attribute, which will tell us the true encoding.
-
-    # Of course, none of this guarantees that we will be able to parse the
-    # feed in the declared character encoding (assuming it was declared
-    # correctly, which many are not).  iconv_codec can help a lot;
-    # you should definitely install it if you can.
-    # http://cjkpython.i18n.org/
-
-    bom_encoding = u''
-    xml_encoding = u''
-    rfc3023_encoding = u''
-
-    # Look at the first few bytes of the document to guess what
-    # its encoding may be. We only need to decode enough of the
-    # document that we can use an ASCII-compatible regular
-    # expression to search for an XML encoding declaration.
-    # The heuristic follows the XML specification, section F:
-    # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
-    # Check for BOMs first.
-    if data[:4] == codecs.BOM_UTF32_BE:
-        bom_encoding = u'utf-32be'
-        data = data[4:]
-    elif data[:4] == codecs.BOM_UTF32_LE:
-        bom_encoding = u'utf-32le'
-        data = data[4:]
-    elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
-        bom_encoding = u'utf-16be'
-        data = data[2:]
-    elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
-        bom_encoding = u'utf-16le'
-        data = data[2:]
-    elif data[:3] == codecs.BOM_UTF8:
-        bom_encoding = u'utf-8'
-        data = data[3:]
-    # Check for the characters '<?xm' in several encodings.
-    elif data[:4] == EBCDIC_MARKER:
-        bom_encoding = u'cp037'
-    elif data[:4] == UTF16BE_MARKER:
-        bom_encoding = u'utf-16be'
-    elif data[:4] == UTF16LE_MARKER:
-        bom_encoding = u'utf-16le'
-    elif data[:4] == UTF32BE_MARKER:
-        bom_encoding = u'utf-32be'
-    elif data[:4] == UTF32LE_MARKER:
-        bom_encoding = u'utf-32le'
-
-    tempdata = data
-    try:
-        if bom_encoding:
-            tempdata = data.decode(bom_encoding).encode('utf-8')
-    except (UnicodeDecodeError, LookupError):
-        # feedparser recognizes UTF-32 encodings that aren't
-        # available in Python 2.4 and 2.5, so it's possible to
-        # encounter a LookupError during decoding.
-        xml_encoding_match = None
-    else:
-        xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
-
-    if xml_encoding_match:
-        xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
-        # Normalize the xml_encoding if necessary.
-        if bom_encoding and (xml_encoding in (
-            u'u16', u'utf-16', u'utf16', u'utf_16',
-            u'u32', u'utf-32', u'utf32', u'utf_32',
-            u'iso-10646-ucs-2', u'iso-10646-ucs-4',
-            u'csucs4', u'csunicode', u'ucs-2', u'ucs-4'
-        )):
-            xml_encoding = bom_encoding
-
-    # Find the HTTP Content-Type and, hopefully, a character
-    # encoding provided by the server. The Content-Type is used
-    # to choose the "correct" encoding among the BOM encoding,
-    # XML declaration encoding, and HTTP encoding, following the
-    # heuristic defined in RFC 3023.
-    http_content_type = http_headers.get('content-type') or ''
-    http_content_type, params = cgi.parse_header(http_content_type)
-    http_encoding = params.get('charset', '').replace("'", "")
-    if not isinstance(http_encoding, unicode):
-        http_encoding = http_encoding.decode('utf-8', 'ignore')
-
-    acceptable_content_type = 0
-    application_content_types = (u'application/xml', u'application/xml-dtd',
-                                 u'application/xml-external-parsed-entity')
-    text_content_types = (u'text/xml', u'text/xml-external-parsed-entity')
-    if (http_content_type in application_content_types) or \
-       (http_content_type.startswith(u'application/') and 
-        http_content_type.endswith(u'+xml')):
-        acceptable_content_type = 1
-        rfc3023_encoding = http_encoding or xml_encoding or u'utf-8'
-    elif (http_content_type in text_content_types) or \
-         (http_content_type.startswith(u'text/') and
-          http_content_type.endswith(u'+xml')):
-        acceptable_content_type = 1
-        rfc3023_encoding = http_encoding or u'us-ascii'
-    elif http_content_type.startswith(u'text/'):
-        rfc3023_encoding = http_encoding or u'us-ascii'
-    elif http_headers and 'content-type' not in http_headers:
-        rfc3023_encoding = xml_encoding or u'iso-8859-1'
-    else:
-        rfc3023_encoding = xml_encoding or u'utf-8'
-    # gb18030 is a superset of gb2312, so always replace gb2312
-    # with gb18030 for greater compatibility.
-    if rfc3023_encoding.lower() == u'gb2312':
-        rfc3023_encoding = u'gb18030'
-    if xml_encoding.lower() == u'gb2312':
-        xml_encoding = u'gb18030'
-
-    # there are four encodings to keep track of:
-    # - http_encoding is the encoding declared in the Content-Type HTTP header
-    # - xml_encoding is the encoding declared in the <?xml declaration
-    # - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
-    # - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
-    error = None
-
-    if http_headers and (not acceptable_content_type):
-        if 'content-type' in http_headers:
-            msg = '%s is not an XML media type' % http_headers['content-type']
-        else:
-            msg = 'no Content-type specified'
-        error = NonXMLContentType(msg)
-
-    # determine character encoding
-    known_encoding = 0
-    chardet_encoding = None
-    tried_encodings = []
-    if chardet:
-        chardet_encoding = unicode(chardet.detect(data)['encoding'] or '', 'ascii', 'ignore')
-    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
-    for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
-                              chardet_encoding, u'utf-8', u'windows-1252', u'iso-8859-2'):
-        if not proposed_encoding:
-            continue
-        if proposed_encoding in tried_encodings:
-            continue
-        tried_encodings.append(proposed_encoding)
-        try:
-            data = data.decode(proposed_encoding)
-        except (UnicodeDecodeError, LookupError):
-            pass
-        else:
-            known_encoding = 1
-            # Update the encoding in the opening XML processing instruction.
-            new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
-            if RE_XML_DECLARATION.search(data):
-                data = RE_XML_DECLARATION.sub(new_declaration, data)
-            else:
-                data = new_declaration + u'\n' + data
-            data = data.encode('utf-8')
-            break
-    # if still no luck, give up
-    if not known_encoding:
-        error = CharacterEncodingUnknown(
-            'document encoding unknown, I tried ' +
-            '%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
-            (rfc3023_encoding, xml_encoding))
-        rfc3023_encoding = u''
-    elif proposed_encoding != rfc3023_encoding:
-        error = CharacterEncodingOverride(
-            'document declared as %s, but parsed as %s' %
-            (rfc3023_encoding, proposed_encoding))
-        rfc3023_encoding = proposed_encoding
-
-    return data, rfc3023_encoding, error
-
-# Match XML entity declarations.
-# Example: <!ENTITY copyright "(C)">
-RE_ENTITY_PATTERN = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
-
-# Match XML DOCTYPE declarations.
-# Example: <!DOCTYPE feed [ ]>
-RE_DOCTYPE_PATTERN = re.compile(_s2bytes(r'^\s*<!DOCTYPE([^>]*?)>'), re.MULTILINE)
-
-# Match safe entity declarations.
-# This will allow hexadecimal character references through,
-# as well as text, but not arbitrary nested entities.
-# Example: cubed "&#179;"
-# Example: copyright "(C)"
-# Forbidden: explode1 "&explode2;&explode2;"
-RE_SAFE_ENTITY_PATTERN = re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
-
-def replace_doctype(data):
-    '''Strips and replaces the DOCTYPE, returns (rss_version, stripped_data)
-
-    rss_version may be 'rss091n' or None
-    stripped_data is the same XML document with a replaced DOCTYPE
-    '''
-
-    # Divide the document into two groups by finding the location
-    # of the first element that doesn't begin with '<?' or '<!'.
-    start = re.search(_s2bytes('<\w'), data)
-    start = start and start.start() or -1
-    head, data = data[:start+1], data[start+1:]
-
-    # Save and then remove all of the ENTITY declarations.
-    entity_results = RE_ENTITY_PATTERN.findall(head)
-    head = RE_ENTITY_PATTERN.sub(_s2bytes(''), head)
-
-    # Find the DOCTYPE declaration and check the feed type.
-    doctype_results = RE_DOCTYPE_PATTERN.findall(head)
-    doctype = doctype_results and doctype_results[0] or _s2bytes('')
-    if _s2bytes('netscape') in doctype.lower():
-        version = u'rss091n'
-    else:
-        version = None
-
-    # Re-insert the safe ENTITY declarations if a DOCTYPE was found.
-    replacement = _s2bytes('')
-    if len(doctype_results) == 1 and entity_results:
-        match_safe_entities = lambda e: RE_SAFE_ENTITY_PATTERN.match(e)
-        safe_entities = filter(match_safe_entities, entity_results)
-        if safe_entities:
-            replacement = _s2bytes('<!DOCTYPE feed [\n<!ENTITY') \
-                        + _s2bytes('>\n<!ENTITY ').join(safe_entities) \
-                        + _s2bytes('>\n]>')
-    data = RE_DOCTYPE_PATTERN.sub(replacement, head) + data
-
-    # Precompute the safe entities for the loose parser.
-    safe_entities = dict((k.decode('utf-8'), v.decode('utf-8'))
-                      for k, v in RE_SAFE_ENTITY_PATTERN.findall(replacement))
-    return version, data, safe_entities
-
-def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None):
-    '''Parse a feed from a URL, file, stream, or string.
-
-    request_headers, if given, is a dict from http header name to value to add
-    to the request; this overrides internally generated values.
-    '''
-
-    if handlers is None:
-        handlers = []
-    if request_headers is None:
-        request_headers = {}
-    if response_headers is None:
-        response_headers = {}
-
-    result = FeedParserDict()
-    result['feed'] = FeedParserDict()
-    result['entries'] = []
-    result['bozo'] = 0
-    if not isinstance(handlers, list):
-        handlers = [handlers]
-    try:
-        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers)
-        data = f.read()
-    except Exception, e:
-        result['bozo'] = 1
-        result['bozo_exception'] = e
-        data = None
-        f = None
-
-    if hasattr(f, 'headers'):
-        result['headers'] = dict(f.headers)
-    # overwrite existing headers using response_headers
-    if 'headers' in result:
-        result['headers'].update(response_headers)
-    elif response_headers:
-        result['headers'] = copy.deepcopy(response_headers)
-
-    # lowercase all of the HTTP headers for comparisons per RFC 2616
-    if 'headers' in result:
-        http_headers = dict((k.lower(), v) for k, v in result['headers'].items())
-    else:
-        http_headers = {}
-
-    # if feed is gzip-compressed, decompress it
-    if f and data and http_headers:
-        if gzip and 'gzip' in http_headers.get('content-encoding', ''):
-            try:
-                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
-            except (IOError, struct.error), e:
-                # IOError can occur if the gzip header is bad.
-                # struct.error can occur if the data is damaged.
-                result['bozo'] = 1
-                result['bozo_exception'] = e
-                if isinstance(e, struct.error):
-                    # A gzip header was found but the data is corrupt.
-                    # Ideally, we should re-request the feed without the
-                    # 'Accept-encoding: gzip' header, but we don't.
-                    data = None
-        elif zlib and 'deflate' in http_headers.get('content-encoding', ''):
-            try:
-                data = zlib.decompress(data)
-            except zlib.error, e:
-                try:
-                    # The data may have no headers and no checksum.
-                    data = zlib.decompress(data, -15)
-                except zlib.error, e:
-                    result['bozo'] = 1
-                    result['bozo_exception'] = e
-
-    # save HTTP headers
-    if http_headers:
-        if 'etag' in http_headers:
-            etag = http_headers.get('etag', u'')
-            if not isinstance(etag, unicode):
-                etag = etag.decode('utf-8', 'ignore')
-            if etag:
-                result['etag'] = etag
-        if 'last-modified' in http_headers:
-            modified = http_headers.get('last-modified', u'')
-            if modified:
-                result['modified'] = modified
-                result['modified_parsed'] = _parse_date(modified)
-    if hasattr(f, 'url'):
-        if not isinstance(f.url, unicode):
-            result['href'] = f.url.decode('utf-8', 'ignore')
-        else:
-            result['href'] = f.url
-        result['status'] = 200
-    if hasattr(f, 'status'):
-        result['status'] = f.status
-    if hasattr(f, 'close'):
-        f.close()
-
-    if data is None:
-        return result
-
-    # Stop processing if the server sent HTTP 304 Not Modified.
-    if getattr(f, 'code', 0) == 304:
-        result['version'] = u''
-        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
-            'so the server sent no data.  This is a feature, not a bug!'
-        return result
-
-    data, result['encoding'], error = convert_to_utf8(http_headers, data)
-    use_strict_parser = result['encoding'] and True or False
-    if error is not None:
-        result['bozo'] = 1
-        result['bozo_exception'] = error
-
-    result['version'], data, entities = replace_doctype(data)
-
-    # Ensure that baseuri is an absolute URI using an acceptable URI scheme.
-    contentloc = http_headers.get('content-location', u'')
-    href = result.get('href', u'')
-    baseuri = _makeSafeAbsoluteURI(href, contentloc) or _makeSafeAbsoluteURI(contentloc) or href
-
-    baselang = http_headers.get('content-language', None)
-    if not isinstance(baselang, unicode) and baselang is not None:
-        baselang = baselang.decode('utf-8', 'ignore')
-
-    if not _XML_AVAILABLE:
-        use_strict_parser = 0
-    if use_strict_parser:
-        # initialize the SAX parser
-        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
-        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
-        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
-        try:
-            # disable downloading external doctype references, if possible
-            saxparser.setFeature(xml.sax.handler.feature_external_ges, 0)
-        except xml.sax.SAXNotSupportedException:
-            pass
-        saxparser.setContentHandler(feedparser)
-        saxparser.setErrorHandler(feedparser)
-        source = xml.sax.xmlreader.InputSource()
-        source.setByteStream(_StringIO(data))
-        try:
-            saxparser.parse(source)
-        except xml.sax.SAXException, e:
-            result['bozo'] = 1
-            result['bozo_exception'] = feedparser.exc or e
-            use_strict_parser = 0
-    if not use_strict_parser and _SGML_AVAILABLE:
-        feedparser = _LooseFeedParser(baseuri, baselang, 'utf-8', entities)
-        feedparser.feed(data.decode('utf-8', 'replace'))
-    result['feed'] = feedparser.feeddata
-    result['entries'] = feedparser.entries
-    result['version'] = result['version'] or feedparser.version
-    result['namespaces'] = feedparser.namespacesInUse
-    return result

+ 0 - 16
frameworks/Python/web2py/web2py/gluon/contrib/fpdf/__init__.py

@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"FPDF for python"
-
-__license__ = "LGPL 3.0"
-__version__ = "1.7"
-
-from fpdf import *
-try:
-    from html import HTMLMixin
-except ImportError:
-    import warnings
-    warnings.warn("web2py gluon package not installed, required for html2pdf")
-
-from template import Template

+ 0 - 156
frameworks/Python/web2py/web2py/gluon/contrib/fpdf/fonts.py

@@ -1,156 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: latin-1 -*-
-
-# Fonts:
-
-fpdf_charwidths = {}
-
-fpdf_charwidths['courier']={}
-
-for i in xrange(0,256):
-    fpdf_charwidths['courier'][chr(i)]=600
-    fpdf_charwidths['courierB']=fpdf_charwidths['courier']
-    fpdf_charwidths['courierI']=fpdf_charwidths['courier']
-    fpdf_charwidths['courierBI']=fpdf_charwidths['courier']
-
-fpdf_charwidths['helvetica']={
-    '\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
-    '\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584,
-    ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,
-    'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
-    'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,
-    'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556,
-    '\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
-    '\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
-    '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
-    '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556,
-    '\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
-
-fpdf_charwidths['helveticaB']={
-    '\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
-    '\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584,
-    ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,
-    'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
-    'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,
-    'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556,
-    '\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
-    '\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
-    '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
-    '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611,
-    '\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556
-}
-
-fpdf_charwidths['helveticaBI']={
-    '\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
-    '\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584,
-    ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722,
-    'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
-    'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889,
-    'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556,
-    '\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
-    '\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
-    '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
-    '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611,
-    '\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556}
-
-fpdf_charwidths['helveticaI']={
-    '\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278,
-    '\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584,
-    ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667,
-    'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944,
-    'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833,
-    'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556,
-    '\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000,
-    '\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333,
-    '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
-    '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556,
-    '\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
-
-fpdf_charwidths['symbol']={
-    '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
-    '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':713,'#':500,'$':549,'%':833,'&':778,'\'':439,'(':333,')':333,'*':500,'+':549,
-    ',':250,'-':549,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':549,'=':549,'>':549,'?':444,'@':549,'A':722,
-    'B':667,'C':722,'D':612,'E':611,'F':763,'G':603,'H':722,'I':333,'J':631,'K':722,'L':686,'M':889,'N':722,'O':722,'P':768,'Q':741,'R':556,'S':592,'T':611,'U':690,'V':439,'W':768,
-    'X':645,'Y':795,'Z':611,'[':333,'\\':863,']':333,'^':658,'_':500,'`':500,'a':631,'b':549,'c':549,'d':494,'e':439,'f':521,'g':411,'h':603,'i':329,'j':603,'k':549,'l':549,'m':576,
-    'n':521,'o':549,'p':549,'q':521,'r':549,'s':603,'t':439,'u':576,'v':713,'w':686,'x':493,'y':686,'z':494,'{':480,'|':200,'}':480,'~':549,'\x7f':0,'\x80':0,'\x81':0,'\x82':0,'\x83':0,
-    '\x84':0,'\x85':0,'\x86':0,'\x87':0,'\x88':0,'\x89':0,'\x8a':0,'\x8b':0,'\x8c':0,'\x8d':0,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0,
-    '\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':750,'\xa1':620,'\xa2':247,'\xa3':549,'\xa4':167,'\xa5':713,'\xa6':500,'\xa7':753,'\xa8':753,'\xa9':753,'\xaa':753,'\xab':1042,'\xac':987,'\xad':603,'\xae':987,'\xaf':603,
-    '\xb0':400,'\xb1':549,'\xb2':411,'\xb3':549,'\xb4':549,'\xb5':713,'\xb6':494,'\xb7':460,'\xb8':549,'\xb9':549,'\xba':549,'\xbb':549,'\xbc':1000,'\xbd':603,'\xbe':1000,'\xbf':658,'\xc0':823,'\xc1':686,'\xc2':795,'\xc3':987,'\xc4':768,'\xc5':768,
-    '\xc6':823,'\xc7':768,'\xc8':768,'\xc9':713,'\xca':713,'\xcb':713,'\xcc':713,'\xcd':713,'\xce':713,'\xcf':713,'\xd0':768,'\xd1':713,'\xd2':790,'\xd3':790,'\xd4':890,'\xd5':823,'\xd6':549,'\xd7':250,'\xd8':713,'\xd9':603,'\xda':603,'\xdb':1042,
-    '\xdc':987,'\xdd':603,'\xde':987,'\xdf':603,'\xe0':494,'\xe1':329,'\xe2':790,'\xe3':790,'\xe4':786,'\xe5':713,'\xe6':384,'\xe7':384,'\xe8':384,'\xe9':384,'\xea':384,'\xeb':384,'\xec':494,'\xed':494,'\xee':494,'\xef':494,'\xf0':0,'\xf1':329,
-    '\xf2':274,'\xf3':686,'\xf4':686,'\xf5':686,'\xf6':384,'\xf7':384,'\xf8':384,'\xf9':384,'\xfa':384,'\xfb':384,'\xfc':494,'\xfd':494,'\xfe':494,'\xff':0}
-    
-fpdf_charwidths['times']={
-    '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
-    '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':408,'#':500,'$':500,'%':833,'&':778,'\'':180,'(':333,')':333,'*':500,'+':564,
-    ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':564,'=':564,'>':564,'?':444,'@':921,'A':722,
-    'B':667,'C':667,'D':722,'E':611,'F':556,'G':722,'H':722,'I':333,'J':389,'K':722,'L':611,'M':889,'N':722,'O':722,'P':556,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':722,'W':944,
-    'X':722,'Y':722,'Z':611,'[':333,'\\':278,']':333,'^':469,'_':500,'`':333,'a':444,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':500,'i':278,'j':278,'k':500,'l':278,'m':778,
-    'n':500,'o':500,'p':500,'q':500,'r':333,'s':389,'t':278,'u':500,'v':500,'w':722,'x':500,'y':500,'z':444,'{':480,'|':200,'}':480,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
-    '\x84':444,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':889,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':444,'\x94':444,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':980,
-    '\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':200,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':564,'\xad':333,'\xae':760,'\xaf':333,
-    '\xb0':400,'\xb1':564,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':453,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':444,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
-    '\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':564,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':722,'\xde':556,'\xdf':500,'\xe0':444,'\xe1':444,'\xe2':444,'\xe3':444,'\xe4':444,'\xe5':444,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500,
-    '\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':564,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':500,'\xfe':500,'\xff':500}
-
-fpdf_charwidths['timesB']={
-    '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
-    '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':555,'#':500,'$':500,'%':1000,'&':833,'\'':278,'(':333,')':333,'*':500,'+':570,
-    ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':930,'A':722,
-    'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':778,'I':389,'J':500,'K':778,'L':667,'M':944,'N':722,'O':778,'P':611,'Q':778,'R':722,'S':556,'T':667,'U':722,'V':722,'W':1000,
-    'X':722,'Y':722,'Z':667,'[':333,'\\':278,']':333,'^':581,'_':500,'`':333,'a':500,'b':556,'c':444,'d':556,'e':444,'f':333,'g':500,'h':556,'i':278,'j':333,'k':556,'l':278,'m':833,
-    'n':556,'o':500,'p':556,'q':556,'r':444,'s':389,'t':333,'u':556,'v':500,'w':722,'x':500,'y':500,'z':444,'{':394,'|':220,'}':394,'~':520,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
-    '\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':667,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000,
-    '\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':300,'\xab':500,'\xac':570,'\xad':333,'\xae':747,'\xaf':333,
-    '\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':556,'\xb6':540,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':330,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
-    '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':570,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':722,'\xde':611,'\xdf':556,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556,
-    '\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500}
-    
-fpdf_charwidths['timesBI']={
-    '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
-    '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':389,'"':555,'#':500,'$':500,'%':833,'&':778,'\'':278,'(':333,')':333,'*':500,'+':570,
-    ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':832,'A':667,
-    'B':667,'C':667,'D':722,'E':667,'F':667,'G':722,'H':778,'I':389,'J':500,'K':667,'L':611,'M':889,'N':722,'O':722,'P':611,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':667,'W':889,
-    'X':667,'Y':611,'Z':611,'[':333,'\\':278,']':333,'^':570,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':556,'i':278,'j':278,'k':500,'l':278,'m':778,
-    'n':556,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':556,'v':444,'w':667,'x':500,'y':444,'z':389,'{':348,'|':220,'}':348,'~':570,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
-    '\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000,
-    '\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':389,'\x9f':611,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':266,'\xab':500,'\xac':606,'\xad':333,'\xae':747,'\xaf':333,
-    '\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':576,'\xb6':500,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':300,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667,
-    '\xc6':944,'\xc7':667,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':570,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':611,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556,
-    '\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':444,'\xfe':500,'\xff':444}
-
-fpdf_charwidths['timesI']={
-    '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
-    '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':420,'#':500,'$':500,'%':833,'&':778,'\'':214,'(':333,')':333,'*':500,'+':675,
-    ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':675,'=':675,'>':675,'?':500,'@':920,'A':611,
-    'B':611,'C':667,'D':722,'E':611,'F':611,'G':722,'H':722,'I':333,'J':444,'K':667,'L':556,'M':833,'N':667,'O':722,'P':611,'Q':722,'R':611,'S':500,'T':556,'U':722,'V':611,'W':833,
-    'X':611,'Y':556,'Z':556,'[':389,'\\':278,']':389,'^':422,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':278,'g':500,'h':500,'i':278,'j':278,'k':444,'l':278,'m':722,
-    'n':500,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':500,'v':444,'w':667,'x':444,'y':444,'z':389,'{':400,'|':275,'}':400,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
-    '\x84':556,'\x85':889,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':500,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':556,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':556,'\x94':556,'\x95':350,'\x96':500,'\x97':889,'\x98':333,'\x99':980,
-    '\x9a':389,'\x9b':333,'\x9c':667,'\x9d':350,'\x9e':389,'\x9f':556,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':275,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':675,'\xad':333,'\xae':760,'\xaf':333,
-    '\xb0':400,'\xb1':675,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':523,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':611,'\xc1':611,'\xc2':611,'\xc3':611,'\xc4':611,'\xc5':611,
-    '\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':667,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':675,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
-    '\xdc':722,'\xdd':556,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500,
-    '\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':675,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':444,'\xfe':500,'\xff':444}
-
-fpdf_charwidths['zapfdingbats']={
-    '\x00':0,'\x01':0,'\x02':0,'\x03':0,'\x04':0,'\x05':0,'\x06':0,'\x07':0,'\x08':0,'\t':0,'\n':0,'\x0b':0,'\x0c':0,'\r':0,'\x0e':0,'\x0f':0,'\x10':0,'\x11':0,'\x12':0,'\x13':0,'\x14':0,'\x15':0,
-    '\x16':0,'\x17':0,'\x18':0,'\x19':0,'\x1a':0,'\x1b':0,'\x1c':0,'\x1d':0,'\x1e':0,'\x1f':0,' ':278,'!':974,'"':961,'#':974,'$':980,'%':719,'&':789,'\'':790,'(':791,')':690,'*':960,'+':939,
-    ',':549,'-':855,'.':911,'/':933,'0':911,'1':945,'2':974,'3':755,'4':846,'5':762,'6':761,'7':571,'8':677,'9':763,':':760,';':759,'<':754,'=':494,'>':552,'?':537,'@':577,'A':692,
-    'B':786,'C':788,'D':788,'E':790,'F':793,'G':794,'H':816,'I':823,'J':789,'K':841,'L':823,'M':833,'N':816,'O':831,'P':923,'Q':744,'R':723,'S':749,'T':790,'U':792,'V':695,'W':776,
-    'X':768,'Y':792,'Z':759,'[':707,'\\':708,']':682,'^':701,'_':826,'`':815,'a':789,'b':789,'c':707,'d':687,'e':696,'f':689,'g':786,'h':787,'i':713,'j':791,'k':785,'l':791,'m':873,
-    'n':761,'o':762,'p':762,'q':759,'r':759,'s':892,'t':892,'u':788,'v':784,'w':438,'x':138,'y':277,'z':415,'{':392,'|':392,'}':668,'~':668,'\x7f':0,'\x80':390,'\x81':390,'\x82':317,'\x83':317,
-    '\x84':276,'\x85':276,'\x86':509,'\x87':509,'\x88':410,'\x89':410,'\x8a':234,'\x8b':234,'\x8c':334,'\x8d':334,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0,
-    '\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':0,'\xa1':732,'\xa2':544,'\xa3':544,'\xa4':910,'\xa5':667,'\xa6':760,'\xa7':760,'\xa8':776,'\xa9':595,'\xaa':694,'\xab':626,'\xac':788,'\xad':788,'\xae':788,'\xaf':788,
-    '\xb0':788,'\xb1':788,'\xb2':788,'\xb3':788,'\xb4':788,'\xb5':788,'\xb6':788,'\xb7':788,'\xb8':788,'\xb9':788,'\xba':788,'\xbb':788,'\xbc':788,'\xbd':788,'\xbe':788,'\xbf':788,'\xc0':788,'\xc1':788,'\xc2':788,'\xc3':788,'\xc4':788,'\xc5':788,
-    '\xc6':788,'\xc7':788,'\xc8':788,'\xc9':788,'\xca':788,'\xcb':788,'\xcc':788,'\xcd':788,'\xce':788,'\xcf':788,'\xd0':788,'\xd1':788,'\xd2':788,'\xd3':788,'\xd4':894,'\xd5':838,'\xd6':1016,'\xd7':458,'\xd8':748,'\xd9':924,'\xda':748,'\xdb':918,
-    '\xdc':927,'\xdd':928,'\xde':928,'\xdf':834,'\xe0':873,'\xe1':828,'\xe2':924,'\xe3':924,'\xe4':917,'\xe5':930,'\xe6':931,'\xe7':463,'\xe8':883,'\xe9':836,'\xea':836,'\xeb':867,'\xec':867,'\xed':696,'\xee':696,'\xef':874,'\xf0':0,'\xf1':874,
-    '\xf2':760,'\xf3':946,'\xf4':771,'\xf5':865,'\xf6':771,'\xf7':888,'\xf8':967,'\xf9':888,'\xfa':831,'\xfb':873,'\xfc':927,'\xfd':970,'\xfe':918,'\xff':0}
-

+ 0 - 1920
frameworks/Python/web2py/web2py/gluon/contrib/fpdf/fpdf.py

@@ -1,1920 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: latin-1 -*-
-# ****************************************************************************
-# * Software: FPDF for python                                                *
-# * Version:  1.7.1                                                          *
-# * Date:     2010-09-10                                                     *
-# * Last update: 2012-08-16                                                  *
-# * License:  LGPL v3.0                                                      *
-# *                                                                          *
-# * Original Author (PHP):  Olivier PLATHEY 2004-12-31                       *
-# * Ported to Python 2.4 by Max ([email protected]) on 2006-05               *
-# * Maintainer:  Mariano Reingart ([email protected]) et al since 2008 est. *
-# * NOTE: 'I' and 'D' destinations are disabled, and simply print to STDOUT  *
-# ****************************************************************************
-
-from datetime import datetime
-import math
-import errno
-import os, sys, zlib, struct, re, tempfile, struct
-
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-# Check if PIL is available (tries importing both pypi version and corrected or manually installed versions).
-# Necessary for JPEG and GIF support.
-try:
-    try:
-        import Image
-    except:
-        from PIL import Image
-except ImportError:
-    Image = None
-
-
-from ttfonts import TTFontFile
-from fonts import fpdf_charwidths
-from php import substr, sprintf, print_r, UTF8ToUTF16BE, UTF8StringToArray
-
-
-# Global variables
-FPDF_VERSION = '1.7.1'
-FPDF_FONT_DIR = os.path.join(os.path.dirname(__file__),'font')
-SYSTEM_TTFONTS = None
-
-PY3K = sys.version_info >= (3, 0)
-
-def set_global(var, val):
-    globals()[var] = val
-
-
-class FPDF(object):
-    "PDF Generation class"
-
-    def __init__(self, orientation='P',unit='mm',format='A4'):
-        # Some checks
-        self._dochecks()
-        # Initialization of properties
-        self.offsets={}                 # array of object offsets
-        self.page=0                     # current page number
-        self.n=2                        # current object number
-        self.buffer=''                  # buffer holding in-memory PDF
-        self.pages={}                   # array containing pages
-        self.orientation_changes={}     # array indicating orientation changes
-        self.state=0                    # current document state
-        self.fonts={}                   # array of used fonts
-        self.font_files={}              # array of font files
-        self.diffs={}                   # array of encoding differences
-        self.images={}                  # array of used images
-        self.page_links={}              # array of links in pages
-        self.links={}                   # array of internal links
-        self.in_footer=0                # flag set when processing footer
-        self.lastw=0
-        self.lasth=0                    # height of last cell printed
-        self.font_family=''             # current font family
-        self.font_style=''              # current font style
-        self.font_size_pt=12            # current font size in points
-        self.underline=0                # underlining flag
-        self.draw_color='0 G'
-        self.fill_color='0 g'
-        self.text_color='0 g'
-        self.color_flag=0               # indicates whether fill and text colors are different
-        self.ws=0                       # word spacing
-        self.angle=0
-        # Standard fonts
-        self.core_fonts={'courier':'Courier','courierB':'Courier-Bold','courierI':'Courier-Oblique','courierBI':'Courier-BoldOblique',
-            'helvetica':'Helvetica','helveticaB':'Helvetica-Bold','helveticaI':'Helvetica-Oblique','helveticaBI':'Helvetica-BoldOblique',
-            'times':'Times-Roman','timesB':'Times-Bold','timesI':'Times-Italic','timesBI':'Times-BoldItalic',
-            'symbol':'Symbol','zapfdingbats':'ZapfDingbats'}
-        # Scale factor
-        if(unit=='pt'):
-            self.k=1
-        elif(unit=='mm'):
-            self.k=72/25.4
-        elif(unit=='cm'):
-            self.k=72/2.54
-        elif(unit=='in'):
-            self.k=72
-        else:
-            self.error('Incorrect unit: '+unit)
-        # Page format
-        if(isinstance(format,basestring)):
-            format=format.lower()
-            if(format=='a3'):
-                format=(841.89,1190.55)
-            elif(format=='a4'):
-                format=(595.28,841.89)
-            elif(format=='a5'):
-                format=(420.94,595.28)
-            elif(format=='letter'):
-                format=(612,792)
-            elif(format=='legal'):
-                format=(612,1008)
-            else:
-                self.error('Unknown page format: '+format)
-            self.fw_pt=format[0]
-            self.fh_pt=format[1]
-        else:
-            self.fw_pt=format[0]*self.k
-            self.fh_pt=format[1]*self.k
-        self.fw=self.fw_pt/self.k
-        self.fh=self.fh_pt/self.k
-        # Page orientation
-        orientation=orientation.lower()
-        if(orientation=='p' or orientation=='portrait'):
-            self.def_orientation='P'
-            self.w_pt=self.fw_pt
-            self.h_pt=self.fh_pt
-        elif(orientation=='l' or orientation=='landscape'):
-            self.def_orientation='L'
-            self.w_pt=self.fh_pt
-            self.h_pt=self.fw_pt
-        else:
-            self.error('Incorrect orientation: '+orientation)
-        self.cur_orientation=self.def_orientation
-        self.w=self.w_pt/self.k
-        self.h=self.h_pt/self.k
-        # Page margins (1 cm)
-        margin=28.35/self.k
-        self.set_margins(margin,margin)
-        # Interior cell margin (1 mm)
-        self.c_margin=margin/10.0
-        # line width (0.2 mm)
-        self.line_width=.567/self.k
-        # Automatic page break
-        self.set_auto_page_break(1,2*margin)
-        # Full width display mode
-        self.set_display_mode('fullwidth')
-        # Enable compression
-        self.set_compression(1)
-        # Set default PDF version number
-        self.pdf_version='1.3'
-
-    def set_margins(self, left,top,right=-1):
-        "Set left, top and right margins"
-        self.l_margin=left
-        self.t_margin=top
-        if(right==-1):
-            right=left
-        self.r_margin=right
-
-    def set_left_margin(self, margin):
-        "Set left margin"
-        self.l_margin=margin
-        if(self.page>0 and self.x<margin):
-            self.x=margin
-
-    def set_top_margin(self, margin):
-        "Set top margin"
-        self.t_margin=margin
-
-    def set_right_margin(self, margin):
-        "Set right margin"
-        self.r_margin=margin
-
-    def set_auto_page_break(self, auto,margin=0):
-        "Set auto page break mode and triggering margin"
-        self.auto_page_break=auto
-        self.b_margin=margin
-        self.page_break_trigger=self.h-margin
-
-    def set_display_mode(self, zoom,layout='continuous'):
-        "Set display mode in viewer"
-        if(zoom=='fullpage' or zoom=='fullwidth' or zoom=='real' or zoom=='default' or not isinstance(zoom,basestring)):
-            self.zoom_mode=zoom
-        else:
-            self.error('Incorrect zoom display mode: '+zoom)
-        if(layout=='single' or layout=='continuous' or layout=='two' or layout=='default'):
-            self.layout_mode=layout
-        else:
-            self.error('Incorrect layout display mode: '+layout)
-
-    def set_compression(self, compress):
-        "Set page compression"
-        self.compress=compress
-
-    def set_title(self, title):
-        "Title of document"
-        self.title=title
-
-    def set_subject(self, subject):
-        "Subject of document"
-        self.subject=subject
-
-    def set_author(self, author):
-        "Author of document"
-        self.author=author
-
-    def set_keywords(self, keywords):
-        "Keywords of document"
-        self.keywords=keywords
-
-    def set_creator(self, creator):
-        "Creator of document"
-        self.creator=creator
-
-    def alias_nb_pages(self, alias='{nb}'):
-        "Define an alias for total number of pages"
-        self.str_alias_nb_pages=alias
-        return alias
-
-    def error(self, msg):
-        "Fatal error"
-        raise RuntimeError('FPDF error: '+msg)
-
-    def open(self):
-        "Begin document"
-        self.state=1
-
-    def close(self):
-        "Terminate document"
-        if(self.state==3):
-            return
-        if(self.page==0):
-            self.add_page()
-        #Page footer
-        self.in_footer=1
-        self.footer()
-        self.in_footer=0
-        #close page
-        self._endpage()
-        #close document
-        self._enddoc()
-
-    def add_page(self, orientation=''):
-        "Start a new page"
-        if(self.state==0):
-            self.open()
-        family=self.font_family
-        if self.underline:
-            style = self.font_style + 'U'
-        else:
-            style = self.font_style
-        size=self.font_size_pt
-        lw=self.line_width
-        dc=self.draw_color
-        fc=self.fill_color
-        tc=self.text_color
-        cf=self.color_flag
-        if(self.page>0):
-            #Page footer
-            self.in_footer=1
-            self.footer()
-            self.in_footer=0
-            #close page
-            self._endpage()
-        #Start new page
-        self._beginpage(orientation)
-        #Set line cap style to square
-        self._out('2 J')
-        #Set line width
-        self.line_width=lw
-        self._out(sprintf('%.2f w',lw*self.k))
-        #Set font
-        if(family):
-            self.set_font(family,style,size)
-        #Set colors
-        self.draw_color=dc
-        if(dc!='0 G'):
-            self._out(dc)
-        self.fill_color=fc
-        if(fc!='0 g'):
-            self._out(fc)
-        self.text_color=tc
-        self.color_flag=cf
-        #Page header
-        self.header()
-        #Restore line width
-        if(self.line_width!=lw):
-            self.line_width=lw
-            self._out(sprintf('%.2f w',lw*self.k))
-        #Restore font
-        if(family):
-            self.set_font(family,style,size)
-        #Restore colors
-        if(self.draw_color!=dc):
-            self.draw_color=dc
-            self._out(dc)
-        if(self.fill_color!=fc):
-            self.fill_color=fc
-            self._out(fc)
-        self.text_color=tc
-        self.color_flag=cf
-
-    def header(self):
-        "Header to be implemented in your own inherited class"
-        pass
-
-    def footer(self):
-        "Footer to be implemented in your own inherited class"
-        pass
-
-    def page_no(self):
-        "Get current page number"
-        return self.page
-
-    def set_draw_color(self, r,g=-1,b=-1):
-        "Set color for all stroking operations"
-        if((r==0 and g==0 and b==0) or g==-1):
-            self.draw_color=sprintf('%.3f G',r/255.0)
-        else:
-            self.draw_color=sprintf('%.3f %.3f %.3f RG',r/255.0,g/255.0,b/255.0)
-        if(self.page>0):
-            self._out(self.draw_color)
-
-    def set_fill_color(self,r,g=-1,b=-1):
-        "Set color for all filling operations"
-        if((r==0 and g==0 and b==0) or g==-1):
-            self.fill_color=sprintf('%.3f g',r/255.0)
-        else:
-            self.fill_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
-        self.color_flag=(self.fill_color!=self.text_color)
-        if(self.page>0):
-            self._out(self.fill_color)
-
-    def set_text_color(self, r,g=-1,b=-1):
-        "Set color for text"
-        if((r==0 and g==0 and b==0) or g==-1):
-            self.text_color=sprintf('%.3f g',r/255.0)
-        else:
-            self.text_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0)
-        self.color_flag=(self.fill_color!=self.text_color)
-
-    def get_string_width(self, s):
-        "Get width of a string in the current font"
-        cw=self.current_font['cw']
-        w=0
-        l=len(s)
-        if self.unifontsubset:
-            for char in s:
-                char = ord(char)
-                if len(cw) > char:
-                    w += cw[char] # ord(cw[2*char])<<8 + ord(cw[2*char+1])
-                #elif (char>0 and char<128 and isset($cw[chr($char)])) { $w += $cw[chr($char)]; }
-                elif (self.current_font['desc']['MissingWidth']) :
-                    w += self.current_font['desc']['MissingWidth']
-                #elif (isset($this->CurrentFont['MissingWidth'])) { $w += $this->CurrentFont['MissingWidth']; }
-                else:
-                    w += 500
-        else:
-            for i in xrange(0, l):
-                w += cw.get(s[i],0)
-        return w*self.font_size/1000.0
-
-    def set_line_width(self, width):
-        "Set line width"
-        self.line_width=width
-        if(self.page>0):
-            self._out(sprintf('%.2f w',width*self.k))
-
-    def line(self, x1,y1,x2,y2):
-        "Draw a line"
-        self._out(sprintf('%.2f %.2f m %.2f %.2f l S',x1*self.k,(self.h-y1)*self.k,x2*self.k,(self.h-y2)*self.k))
-
-    def _set_dash(self, dash_length=False, space_length=False):
-        if(dash_length and space_length):
-            s = sprintf('[%.3f %.3f] 0 d', dash_length*self.k, space_length*self.k)
-        else:
-            s = '[] 0 d'
-        self._out(s)
-
-    def dashed_line(self, x1,y1,x2,y2, dash_length=1, space_length=1):
-        """Draw a dashed line. Same interface as line() except:
-           - dash_length: Length of the dash
-           - space_length: Length of the space between dashes"""
-        self._set_dash(dash_length, space_length)
-        self.line(x1, y1, x2, y2)
-        self._set_dash()
-
-    def rect(self, x,y,w,h,style=''):
-        "Draw a rectangle"
-        if(style=='F'):
-            op='f'
-        elif(style=='FD' or style=='DF'):
-            op='B'
-        else:
-            op='S'
-        self._out(sprintf('%.2f %.2f %.2f %.2f re %s',x*self.k,(self.h-y)*self.k,w*self.k,-h*self.k,op))
-
-    def add_font(self, family, style='', fname='', uni=False):
-        "Add a TrueType or Type1 font"
-        family = family.lower()
-        if (fname == ''):
-            fname = family.replace(' ','') + style.lower() + '.pkl'
-        if (family == 'arial'):
-            family = 'helvetica'
-        style = style.upper()
-        if (style == 'IB'):
-            style = 'BI'
-        fontkey = family+style
-        if fontkey in self.fonts:
-            # Font already added!
-            return
-        if (uni):
-            global SYSTEM_TTFONTS
-            if os.path.exists(fname):
-                ttffilename = fname
-            elif (FPDF_FONT_DIR and
-                os.path.exists(os.path.join(FPDF_FONT_DIR, fname))):
-                ttffilename = os.path.join(FPDF_FONT_DIR, fname)
-            elif (SYSTEM_TTFONTS and
-                os.path.exists(os.path.join(SYSTEM_TTFONTS, fname))):
-                ttffilename = os.path.join(SYSTEM_TTFONTS, fname)
-            else:
-                raise RuntimeError("TTF Font file not found: %s" % fname)
-            unifilename = os.path.splitext(ttffilename)[0] + '.pkl'
-            name = ''
-            if os.path.exists(unifilename):
-                fh = open(unifilename)
-                try:
-                    font_dict = pickle.load(fh)
-                finally:
-                    fh.close()
-            else:
-                ttf = TTFontFile()
-                ttf.getMetrics(ttffilename)
-                desc = {
-                    'Ascent': int(round(ttf.ascent, 0)),
-                    'Descent': int(round(ttf.descent, 0)),
-                    'CapHeight': int(round(ttf.capHeight, 0)),
-                    'Flags': ttf.flags,
-                    'FontBBox': "[%s %s %s %s]" % (
-                        int(round(ttf.bbox[0], 0)),
-                        int(round(ttf.bbox[1], 0)),
-                        int(round(ttf.bbox[2], 0)),
-                        int(round(ttf.bbox[3], 0))),
-                    'ItalicAngle': int(ttf.italicAngle),
-                    'StemV': int(round(ttf.stemV, 0)),
-                    'MissingWidth': int(round(ttf.defaultWidth, 0)),
-                    }
-                # Generate metrics .pkl file
-                font_dict = {
-                    'name': re.sub('[ ()]', '', ttf.fullName),
-                    'type': 'TTF',
-                    'desc': desc,
-                    'up': round(ttf.underlinePosition),
-                    'ut': round(ttf.underlineThickness),
-                    'ttffile': ttffilename,
-                    'fontkey': fontkey,
-                    'originalsize': os.stat(ttffilename).st_size,
-                    'cw': ttf.charWidths,
-                    }
-                try:
-                    fh = open(unifilename, "w")
-                    pickle.dump(font_dict, fh)
-                    fh.close()
-                except IOError, e:
-                    if not e.errno == errno.EACCES:
-                        raise  # Not a permission error.
-                del ttf
-            if hasattr(self,'str_alias_nb_pages'):
-                sbarr = range(0,57)   # include numbers in the subset!
-            else:
-                sbarr = range(0,32)
-            self.fonts[fontkey] = {
-                'i': len(self.fonts)+1, 'type': font_dict['type'],
-                'name': font_dict['name'], 'desc': font_dict['desc'],
-                'up': font_dict['up'], 'ut': font_dict['ut'],
-                'cw': font_dict['cw'],
-                'ttffile': font_dict['ttffile'], 'fontkey': fontkey,
-                'subset': sbarr, 'unifilename': unifilename,
-                }
-            self.font_files[fontkey] = {'length1': font_dict['originalsize'],
-                                        'type': "TTF", 'ttffile': ttffilename}
-            self.font_files[fname] = {'type': "TTF"}
-        else:
-            fontfile = open(fname)
-            try:
-                font_dict = pickle.load(fontfile)
-            finally:
-                fontfile.close()
-            self.fonts[fontkey] = {'i': len(self.fonts)+1}
-            self.fonts[fontkey].update(font_dict)
-            if (diff):
-                #Search existing encodings
-                d = 0
-                nb = len(self.diffs)
-                for i in xrange(1, nb+1):
-                    if(self.diffs[i] == diff):
-                        d = i
-                        break
-                if (d == 0):
-                    d = nb + 1
-                    self.diffs[d] = diff
-                self.fonts[fontkey]['diff'] = d
-            filename = font_dict.get('filename')
-            if (filename):
-                if (type == 'TrueType'):
-                    self.font_files[filename]={'length1': originalsize}
-                else:
-                    self.font_files[filename]={'length1': size1,
-                                               'length2': size2}
-
-    def set_font(self, family,style='',size=0):
-        "Select a font; size given in points"
-        family=family.lower()
-        if(family==''):
-            family=self.font_family
-        if(family=='arial'):
-            family='helvetica'
-        elif(family=='symbol' or family=='zapfdingbats'):
-            style=''
-        style=style.upper()
-        if('U' in style):
-            self.underline=1
-            style=style.replace('U','')
-        else:
-            self.underline=0
-        if(style=='IB'):
-            style='BI'
-        if(size==0):
-            size=self.font_size_pt
-        #Test if font is already selected
-        if(self.font_family==family and self.font_style==style and self.font_size_pt==size):
-            return
-        #Test if used for the first time
-        fontkey=family+style
-        if fontkey not in self.fonts:
-            #Check if one of the standard fonts
-            if fontkey in self.core_fonts:
-                if fontkey not in fpdf_charwidths:
-                    #Load metric file
-                    name=os.path.join(FPDF_FONT_DIR,family)
-                    if(family=='times' or family=='helvetica'):
-                        name+=style.lower()
-                    execfile(name+'.font')
-                    if fontkey not in fpdf_charwidths:
-                        self.error('Could not include font metric file for'+fontkey)
-                i=len(self.fonts)+1
-                self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]}
-            else:
-                self.error('Undefined font: '+family+' '+style)
-        #Select it
-        self.font_family=family
-        self.font_style=style
-        self.font_size_pt=size
-        self.font_size=size/self.k
-        self.current_font=self.fonts[fontkey]
-        self.unifontsubset = (self.fonts[fontkey]['type'] == 'TTF')
-        if(self.page>0):
-            self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
-
-    def set_font_size(self, size):
-        "Set font size in points"
-        if(self.font_size_pt==size):
-            return
-        self.font_size_pt=size
-        self.font_size=size/self.k
-        if(self.page>0):
-            self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt))
-
-    def add_link(self):
-        "Create a new internal link"
-        n=len(self.links)+1
-        self.links[n]=(0,0)
-        return n
-
-    def set_link(self, link,y=0,page=-1):
-        "Set destination of internal link"
-        if(y==-1):
-            y=self.y
-        if(page==-1):
-            page=self.page
-        self.links[link]=[page,y]
-
-    def link(self, x,y,w,h,link):
-        "Put a link on the page"
-        if not self.page in self.page_links:
-            self.page_links[self.page] = []
-        self.page_links[self.page] += [(x*self.k,self.h_pt-y*self.k,w*self.k,h*self.k,link),]
-
-    def text(self, x, y, txt=''):
-        "Output a string"
-        txt = self.normalize_text(txt)
-        if (self.unifontsubset):
-            txt2 = self._escape(UTF8ToUTF16BE(txt, False))
-            for uni in UTF8StringToArray(txt):
-                self.current_font['subset'].append(uni)
-        else:
-            txt2 = self._escape(txt)
-        s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k, txt2)
-        if(self.underline and txt!=''):
-            s+=' '+self._dounderline(x,y,txt)
-        if(self.color_flag):
-            s='q '+self.text_color+' '+s+' Q'
-        self._out(s)
-
-    def rotate(self, angle, x=None, y=None):
-        if x is None:
-            x = self.x
-        if y is None:
-            y = self.y;
-        if self.angle!=0:
-            self._out('Q')
-        self.angle = angle
-        if angle!=0:
-            angle *= math.pi/180;
-            c = math.cos(angle);
-            s = math.sin(angle);
-            cx = x*self.k;
-            cy = (self.h-y)*self.k
-            s = sprintf('q %.5F %.5F %.5F %.5F %.2F %.2F cm 1 0 0 1 %.2F %.2F cm',c,s,-s,c,cx,cy,-cx,-cy)
-            self._out(s)
-
-    def accept_page_break(self):
-        "Accept automatic page break or not"
-        return self.auto_page_break
-
-    def cell(self, w,h=0,txt='',border=0,ln=0,align='',fill=0,link=''):
-        "Output a cell"
-        txt = self.normalize_text(txt)
-        k=self.k
-        if(self.y+h>self.page_break_trigger and not self.in_footer and self.accept_page_break()):
-            #Automatic page break
-            x=self.x
-            ws=self.ws
-            if(ws>0):
-                self.ws=0
-                self._out('0 Tw')
-            self.add_page(self.cur_orientation)
-            self.x=x
-            if(ws>0):
-                self.ws=ws
-                self._out(sprintf('%.3f Tw',ws*k))
-        if(w==0):
-            w=self.w-self.r_margin-self.x
-        s=''
-        if(fill==1 or border==1):
-            if(fill==1):
-                if border==1:
-                    op='B'
-                else:
-                    op='f'
-            else:
-                op='S'
-            s=sprintf('%.2f %.2f %.2f %.2f re %s ',self.x*k,(self.h-self.y)*k,w*k,-h*k,op)
-        if(isinstance(border,basestring)):
-            x=self.x
-            y=self.y
-            if('L' in border):
-                s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,x*k,(self.h-(y+h))*k)
-            if('T' in border):
-                s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,(x+w)*k,(self.h-y)*k)
-            if('R' in border):
-                s+=sprintf('%.2f %.2f m %.2f %.2f l S ',(x+w)*k,(self.h-y)*k,(x+w)*k,(self.h-(y+h))*k)
-            if('B' in border):
-                s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-(y+h))*k,(x+w)*k,(self.h-(y+h))*k)
-        if(txt!=''):
-            if(align=='R'):
-                dx=w-self.c_margin-self.get_string_width(txt)
-            elif(align=='C'):
-                dx=(w-self.get_string_width(txt))/2.0
-            else:
-                dx=self.c_margin
-            if(self.color_flag):
-                s+='q '+self.text_color+' '
-
-            # If multibyte, Tw has no effect - do word spacing using an adjustment before each space
-            if (self.ws and self.unifontsubset):
-                for uni in UTF8StringToArray(txt):
-                    self.current_font['subset'].append(uni)
-                space = self._escape(UTF8ToUTF16BE(' ', False))
-                s += sprintf('BT 0 Tw %.2F %.2F Td [',(self.x + dx) * k,(self.h - (self.y + 0.5*h+ 0.3 * self.font_size)) * k)
-                t = txt.split(' ')
-                numt = len(t)
-                for i in range(numt):
-                    tx = t[i]
-                    tx = '(' + self._escape(UTF8ToUTF16BE(tx, False)) + ')'
-                    s += sprintf('%s ', tx);
-                    if ((i+1)<numt):
-                        adj = -(self.ws * self.k) * 1000 / self.font_size_pt
-                        s += sprintf('%d(%s) ', adj, space)
-                s += '] TJ'
-                s += ' ET'
-            else:
-                if (self.unifontsubset):
-                    txt2 = self._escape(UTF8ToUTF16BE(txt, False))
-                    for uni in UTF8StringToArray(txt):
-                        self.current_font['subset'].append(uni)
-                else:
-                    txt2 = self._escape(txt)
-                s += sprintf('BT %.2f %.2f Td (%s) Tj ET',(self.x+dx)*k,(self.h-(self.y+.5*h+.3*self.font_size))*k,txt2)
-
-            if(self.underline):
-                s+=' '+self._dounderline(self.x+dx,self.y+.5*h+.3*self.font_size,txt)
-            if(self.color_flag):
-                s+=' Q'
-            if(link):
-                self.link(self.x+dx,self.y+.5*h-.5*self.font_size,self.get_string_width(txt),self.font_size,link)
-        if(s):
-            self._out(s)
-        self.lasth=h
-        if(ln>0):
-            #Go to next line
-            self.y+=h
-            if(ln==1):
-                self.x=self.l_margin
-        else:
-            self.x+=w
-
-    def multi_cell(self, w, h, txt='', border=0, align='J', fill=0, split_only=False):
-        "Output text with automatic or explicit line breaks"
-        txt = self.normalize_text(txt)
-        ret = [] # if split_only = True, returns splited text cells
-        cw=self.current_font['cw']
-        if(w==0):
-            w=self.w-self.r_margin-self.x
-        wmax=(w-2*self.c_margin)*1000.0/self.font_size
-        s=txt.replace("\r",'')
-        nb=len(s)
-        if(nb>0 and s[nb-1]=="\n"):
-            nb-=1
-        b=0
-        if(border):
-            if(border==1):
-                border='LTRB'
-                b='LRT'
-                b2='LR'
-            else:
-                b2=''
-                if('L' in border):
-                    b2+='L'
-                if('R' in border):
-                    b2+='R'
-                if ('T' in border):
-                    b=b2+'T'
-                else:
-                    b=b2
-        sep=-1
-        i=0
-        j=0
-        l=0
-        ns=0
-        nl=1
-        while(i<nb):
-            #Get next character
-            c=s[i]
-            if(c=="\n"):
-                #Explicit line break
-                if(self.ws>0):
-                    self.ws=0
-                    if not split_only:
-                        self._out('0 Tw')
-                if not split_only:
-                    self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
-                else:
-                    ret.append(substr(s,j,i-j))
-                i+=1
-                sep=-1
-                j=i
-                l=0
-                ns=0
-                nl+=1
-                if(border and nl==2):
-                    b=b2
-                continue
-            if(c==' '):
-                sep=i
-                ls=l
-                ns+=1
-            if self.unifontsubset:
-                l += self.get_string_width(c) / self.font_size*1000.0
-            else:
-                l += cw.get(c,0)
-            if(l>wmax):
-                #Automatic line break
-                if(sep==-1):
-                    if(i==j):
-                        i+=1
-                    if(self.ws>0):
-                        self.ws=0
-                        if not split_only:
-                            self._out('0 Tw')
-                    if not split_only:
-                        self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
-                    else:
-                        ret.append(substr(s,j,i-j))
-                else:
-                    if(align=='J'):
-                        if ns>1:
-                            self.ws=(wmax-ls)/1000.0*self.font_size/(ns-1)
-                        else:
-                            self.ws=0
-                        if not split_only:
-                            self._out(sprintf('%.3f Tw',self.ws*self.k))
-                    if not split_only:
-                        self.cell(w,h,substr(s,j,sep-j),b,2,align,fill)
-                    else:
-                        ret.append(substr(s,j,sep-j))
-                    i=sep+1
-                sep=-1
-                j=i
-                l=0
-                ns=0
-                nl+=1
-                if(border and nl==2):
-                    b=b2
-            else:
-                i+=1
-        #Last chunk
-        if(self.ws>0):
-            self.ws=0
-            if not split_only:
-                self._out('0 Tw')
-        if(border and 'B' in border):
-            b+='B'
-        if not split_only:
-            self.cell(w,h,substr(s,j,i-j),b,2,align,fill)
-            self.x=self.l_margin
-        else:
-            ret.append(substr(s,j,i-j))
-        return ret
-
-    def write(self, h, txt='', link=''):
-        "Output text in flowing mode"
-        txt = self.normalize_text(txt)
-        cw=self.current_font['cw']
-        w=self.w-self.r_margin-self.x
-        wmax=(w-2*self.c_margin)*1000.0/self.font_size
-        s=txt.replace("\r",'')
-        nb=len(s)
-        sep=-1
-        i=0
-        j=0
-        l=0
-        nl=1
-        while(i<nb):
-            #Get next character
-            c=s[i]
-            if(c=="\n"):
-                #Explicit line break
-                self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
-                i+=1
-                sep=-1
-                j=i
-                l=0
-                if(nl==1):
-                    self.x=self.l_margin
-                    w=self.w-self.r_margin-self.x
-                    wmax=(w-2*self.c_margin)*1000.0/self.font_size
-                nl+=1
-                continue
-            if(c==' '):
-                sep=i
-            if self.unifontsubset:
-                l += self.get_string_width(c) / self.font_size*1000.0
-            else:
-                l += cw.get(c,0)
-            if(l>wmax):
-                #Automatic line break
-                if(sep==-1):
-                    if(self.x>self.l_margin):
-                        #Move to next line
-                        self.x=self.l_margin
-                        self.y+=h
-                        w=self.w-self.r_margin-self.x
-                        wmax=(w-2*self.c_margin)*1000.0/self.font_size
-                        i+=1
-                        nl+=1
-                        continue
-                    if(i==j):
-                        i+=1
-                    self.cell(w,h,substr(s,j,i-j),0,2,'',0,link)
-                else:
-                    self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link)
-                    i=sep+1
-                sep=-1
-                j=i
-                l=0
-                if(nl==1):
-                    self.x=self.l_margin
-                    w=self.w-self.r_margin-self.x
-                    wmax=(w-2*self.c_margin)*1000.0/self.font_size
-                nl+=1
-            else:
-                i+=1
-        #Last chunk
-        if(i!=j):
-            self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link)
-
-    def image(self, name, x=None, y=None, w=0,h=0,type='',link=''):
-        "Put an image on the page"
-        if not name in self.images:
-            #First use of image, get info
-            if(type==''):
-                pos=name.rfind('.')
-                if(not pos):
-                    self.error('image file has no extension and no type was specified: '+name)
-                type=substr(name,pos+1)
-            type=type.lower()
-            if(type=='jpg' or type=='jpeg'):
-                info=self._parsejpg(name)
-            elif(type=='png'):
-                info=self._parsepng(name)
-            else:
-                #Allow for additional formats
-                #maybe the image is not showing the correct extension,
-                #but the header is OK,
-                succeed_parsing = False
-                #try all the parsing functions
-                parsing_functions = [self._parsejpg,self._parsepng,self._parsegif]
-                for pf in parsing_functions:
-                    try:
-                        info = pf(name)
-                        succeed_parsing = True
-                        break;
-                    except:
-                        pass
-                #last resource
-                if not succeed_parsing:
-                    mtd='_parse'+type
-                    if not hasattr(self,mtd):
-                        self.error('Unsupported image type: '+type)
-                    info=getattr(self, mtd)(name)
-                mtd='_parse'+type
-                if not hasattr(self,mtd):
-                    self.error('Unsupported image type: '+type)
-                info=getattr(self, mtd)(name)
-            info['i']=len(self.images)+1
-            self.images[name]=info
-        else:
-            info=self.images[name]
-        #Automatic width and height calculation if needed
-        if(w==0 and h==0):
-            #Put image at 72 dpi
-            w=info['w']/self.k
-            h=info['h']/self.k
-        elif(w==0):
-            w=h*info['w']/info['h']
-        elif(h==0):
-            h=w*info['h']/info['w']
-        # Flowing mode
-        if y is None:
-            if (self.y + h > self.page_break_trigger and not self.in_footer and self.accept_page_break()):
-                #Automatic page break
-                x = self.x
-                self.add_page(self.cur_orientation)
-                self.x = x
-            y = self.y
-            self.y += h
-        if x is None:
-            x = self.x
-        self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q',w*self.k,h*self.k,x*self.k,(self.h-(y+h))*self.k,info['i']))
-        if(link):
-            self.link(x,y,w,h,link)
-
-    def ln(self, h=''):
-        "Line Feed; default value is last cell height"
-        self.x=self.l_margin
-        if(isinstance(h, basestring)):
-            self.y+=self.lasth
-        else:
-            self.y+=h
-
-    def get_x(self):
-        "Get x position"
-        return self.x
-
-    def set_x(self, x):
-        "Set x position"
-        if(x>=0):
-            self.x=x
-        else:
-            self.x=self.w+x
-
-    def get_y(self):
-        "Get y position"
-        return self.y
-
-    def set_y(self, y):
-        "Set y position and reset x"
-        self.x=self.l_margin
-        if(y>=0):
-            self.y=y
-        else:
-            self.y=self.h+y
-
-    def set_xy(self, x,y):
-        "Set x and y positions"
-        self.set_y(y)
-        self.set_x(x)
-
-    def output(self, name='',dest=''):
-        "Output PDF to some destination"
-        #Finish document if necessary
-        if(self.state<3):
-            self.close()
-        dest=dest.upper()
-        if(dest==''):
-            if(name==''):
-                name='doc.pdf'
-                dest='I'
-            else:
-                dest='F'
-        if dest=='I':
-            print self.buffer
-        elif dest=='D':
-            print self.buffer
-        elif dest=='F':
-            #Save to local file
-            f=open(name,'wb')
-            if(not f):
-                self.error('Unable to create output file: '+name)
-            if PY3K:
-                # TODO: proper unicode support
-                f.write(self.buffer.encode("latin1"))
-            else:
-                f.write(self.buffer)
-            f.close()
-        elif dest=='S':
-            #Return as a string
-            return self.buffer
-        else:
-            self.error('Incorrect output destination: '+dest)
-        return ''
-
-    def normalize_text(self, txt):
-        "Check that text input is in the correct format/encoding"
-        # - for TTF unicode fonts: unicode object (utf8 encoding)
-        # - for built-in fonts: string instances (latin 1 encoding)
-        if self.unifontsubset and isinstance(txt, str):
-            txt = txt.decode('utf8')
-        elif not self.unifontsubset and isinstance(txt, unicode) and not PY3K:
-            txt = txt.encode('latin1')
-        return txt
-
-
-    def _dochecks(self):
-        #Check for locale-related bug
-#        if(1.1==1):
-#            self.error("Don\'t alter the locale before including class file");
-        #Check for decimal separator
-        if(sprintf('%.1f',1.0)!='1.0'):
-            import locale
-            locale.setlocale(locale.LC_NUMERIC,'C')
-
-    def _getfontpath(self):
-        return FPDF_FONT_DIR+'/'
-
-    def _putpages(self):
-        nb=self.page
-        if hasattr(self,'str_alias_nb_pages'):
-            # Replace number of pages in fonts using subsets (unicode)
-            alias = UTF8ToUTF16BE(self.str_alias_nb_pages, False);
-            r = UTF8ToUTF16BE(str(nb), False)
-            for n in xrange(1, nb+1):
-                self.pages[n] = self.pages[n].replace(alias, r)
-            # Now repeat for no pages in non-subset fonts
-            for n in xrange(1,nb+1):
-                self.pages[n]=self.pages[n].replace(self.str_alias_nb_pages,str(nb))
-        if(self.def_orientation=='P'):
-            w_pt=self.fw_pt
-            h_pt=self.fh_pt
-        else:
-            w_pt=self.fh_pt
-            h_pt=self.fw_pt
-        if self.compress:
-            filter='/Filter /FlateDecode '
-        else:
-            filter=''
-        for n in xrange(1,nb+1):
-            #Page
-            self._newobj()
-            self._out('<</Type /Page')
-            self._out('/Parent 1 0 R')
-            if n in self.orientation_changes:
-                self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',h_pt,w_pt))
-            self._out('/Resources 2 0 R')
-            if self.page_links and n in self.page_links:
-                #Links
-                annots='/Annots ['
-                for pl in self.page_links[n]:
-                    rect=sprintf('%.2f %.2f %.2f %.2f',pl[0],pl[1],pl[0]+pl[2],pl[1]-pl[3])
-                    annots+='<</Type /Annot /Subtype /Link /Rect ['+rect+'] /Border [0 0 0] '
-                    if(isinstance(pl[4],basestring)):
-                        annots+='/A <</S /URI /URI '+self._textstring(pl[4])+'>>>>'
-                    else:
-                        l=self.links[pl[4]]
-                        if l[0] in self.orientation_changes:
-                            h=w_pt
-                        else:
-                            h=h_pt
-                        annots+=sprintf('/Dest [%d 0 R /XYZ 0 %.2f null]>>',1+2*l[0],h-l[1]*self.k)
-                self._out(annots+']')
-            if(self.pdf_version>'1.3'):
-                self._out('/Group <</Type /Group /S /Transparency /CS /DeviceRGB>>')
-            self._out('/Contents '+str(self.n+1)+' 0 R>>')
-            self._out('endobj')
-            #Page content
-            if self.compress:
-                p = zlib.compress(self.pages[n])
-            else:
-                p = self.pages[n]
-            self._newobj()
-            self._out('<<'+filter+'/Length '+str(len(p))+'>>')
-            self._putstream(p)
-            self._out('endobj')
-        #Pages root
-        self.offsets[1]=len(self.buffer)
-        self._out('1 0 obj')
-        self._out('<</Type /Pages')
-        kids='/Kids ['
-        for i in xrange(0,nb):
-            kids+=str(3+2*i)+' 0 R '
-        self._out(kids+']')
-        self._out('/Count '+str(nb))
-        self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',w_pt,h_pt))
-        self._out('>>')
-        self._out('endobj')
-
-    def _putfonts(self):
-        nf=self.n
-        for diff in self.diffs:
-            #Encodings
-            self._newobj()
-            self._out('<</Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences ['+self.diffs[diff]+']>>')
-            self._out('endobj')
-        for name,info in self.font_files.iteritems():
-            if 'type' in info and info['type'] != 'TTF':
-                #Font file embedding
-                self._newobj()
-                self.font_files[name]['n']=self.n
-                font=''
-                f=open(self._getfontpath()+name,'rb',1)
-                if(not f):
-                    self.error('Font file not found')
-                font=f.read()
-                f.close()
-                compressed=(substr(name,-2)=='.z')
-                if(not compressed and 'length2' in info):
-                    header=(ord(font[0])==128)
-                    if(header):
-                        #Strip first binary header
-                        font=substr(font,6)
-                    if(header and ord(font[info['length1']])==128):
-                        #Strip second binary header
-                        font=substr(font,0,info['length1'])+substr(font,info['length1']+6)
-                self._out('<</Length '+str(len(font)))
-                if(compressed):
-                    self._out('/Filter /FlateDecode')
-                self._out('/Length1 '+str(info['length1']))
-                if('length2' in info):
-                    self._out('/Length2 '+str(info['length2'])+' /Length3 0')
-                self._out('>>')
-                self._putstream(font)
-                self._out('endobj')
-        for k,font in self.fonts.iteritems():
-            #Font objects
-            self.fonts[k]['n']=self.n+1
-            type=font['type']
-            name=font['name']
-            if(type=='core'):
-                #Standard font
-                self._newobj()
-                self._out('<</Type /Font')
-                self._out('/BaseFont /'+name)
-                self._out('/Subtype /Type1')
-                if(name!='Symbol' and name!='ZapfDingbats'):
-                    self._out('/Encoding /WinAnsiEncoding')
-                self._out('>>')
-                self._out('endobj')
-            elif(type=='Type1' or type=='TrueType'):
-                #Additional Type1 or TrueType font
-                self._newobj()
-                self._out('<</Type /Font')
-                self._out('/BaseFont /'+name)
-                self._out('/Subtype /'+type)
-                self._out('/FirstChar 32 /LastChar 255')
-                self._out('/Widths '+str(self.n+1)+' 0 R')
-                self._out('/FontDescriptor '+str(self.n+2)+' 0 R')
-                if(font['enc']):
-                    if('diff' in font):
-                        self._out('/Encoding '+str(nf+font['diff'])+' 0 R')
-                    else:
-                        self._out('/Encoding /WinAnsiEncoding')
-                self._out('>>')
-                self._out('endobj')
-                #Widths
-                self._newobj()
-                cw=font['cw']
-                s='['
-                for i in xrange(32,256):
-                    # Get doesn't rise exception; returns 0 instead of None if not set
-                    s+=str(cw.get(chr(i)) or 0)+' '
-                self._out(s+']')
-                self._out('endobj')
-                #Descriptor
-                self._newobj()
-                s='<</Type /FontDescriptor /FontName /'+name
-                for k in ('Ascent', 'Descent', 'CapHeight', 'Falgs', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
-                    s += ' /%s %s' % (k, font['desc'][k])
-                filename=font['file']
-                if(filename):
-                    s+=' /FontFile'
-                    if type!='Type1':
-                        s+='2'
-                    s+=' '+str(self.font_files[filename]['n'])+' 0 R'
-                self._out(s+'>>')
-                self._out('endobj')
-            elif (type == 'TTF'):
-                self.fonts[k]['n'] = self.n + 1
-                ttf = TTFontFile()
-                fontname = 'MPDFAA' + '+' + font['name']
-                subset = font['subset']
-                del subset[0]
-                ttfontstream = ttf.makeSubset(font['ttffile'], subset)
-                ttfontsize = len(ttfontstream)
-                fontstream = zlib.compress(ttfontstream)
-                codeToGlyph = ttf.codeToGlyph
-                ##del codeToGlyph[0]
-                # Type0 Font
-                # A composite font - a font composed of other fonts, organized hierarchically
-                self._newobj()
-                self._out('<</Type /Font');
-                self._out('/Subtype /Type0');
-                self._out('/BaseFont /' + fontname + '');
-                self._out('/Encoding /Identity-H');
-                self._out('/DescendantFonts [' + str(self.n + 1) + ' 0 R]')
-                self._out('/ToUnicode ' + str(self.n + 2) + ' 0 R')
-                self._out('>>')
-                self._out('endobj')
-
-                # CIDFontType2
-                # A CIDFont whose glyph descriptions are based on TrueType font technology
-                self._newobj()
-                self._out('<</Type /Font')
-                self._out('/Subtype /CIDFontType2')
-                self._out('/BaseFont /' + fontname + '')
-                self._out('/CIDSystemInfo ' + str(self.n + 2) + ' 0 R')
-                self._out('/FontDescriptor ' + str(self.n + 3) + ' 0 R')
-                if (font['desc'].get('MissingWidth')):
-                    self._out('/DW %d' % font['desc']['MissingWidth'])
-                self._putTTfontwidths(font, ttf.maxUni)
-                self._out('/CIDToGIDMap ' + str(self.n + 4) + ' 0 R')
-                self._out('>>')
-                self._out('endobj')
-
-                # ToUnicode
-                self._newobj()
-                toUni = "/CIDInit /ProcSet findresource begin\n" \
-                        "12 dict begin\n" \
-                        "begincmap\n" \
-                        "/CIDSystemInfo\n" \
-                        "<</Registry (Adobe)\n" \
-                        "/Ordering (UCS)\n" \
-                        "/Supplement 0\n" \
-                        ">> def\n" \
-                        "/CMapName /Adobe-Identity-UCS def\n" \
-                        "/CMapType 2 def\n" \
-                        "1 begincodespacerange\n" \
-                        "<0000> <FFFF>\n" \
-                        "endcodespacerange\n" \
-                        "1 beginbfrange\n" \
-                        "<0000> <FFFF> <0000>\n" \
-                        "endbfrange\n" \
-                        "endcmap\n" \
-                        "CMapName currentdict /CMap defineresource pop\n" \
-                        "end\n" \
-                        "end"
-                self._out('<</Length ' + str(len(toUni)) + '>>')
-                self._putstream(toUni)
-                self._out('endobj')
-
-                # CIDSystemInfo dictionary
-                self._newobj()
-                self._out('<</Registry (Adobe)')
-                self._out('/Ordering (UCS)')
-                self._out('/Supplement 0')
-                self._out('>>')
-                self._out('endobj')
-
-                # Font descriptor
-                self._newobj()
-                self._out('<</Type /FontDescriptor')
-                self._out('/FontName /' + fontname)
-                for kd in ('Ascent', 'Descent', 'CapHeight', 'Flags', 'FontBBox', 'ItalicAngle', 'StemV', 'MissingWidth'):
-                    v = font['desc'][kd]
-                    if (kd == 'Flags'):
-                        v = v | 4;
-                        v = v & ~32; # SYMBOLIC font flag
-                    self._out(' /%s %s' % (kd, v))
-                self._out('/FontFile2 ' + str(self.n + 2) + ' 0 R')
-                self._out('>>')
-                self._out('endobj')
-
-                # Embed CIDToGIDMap
-                # A specification of the mapping from CIDs to glyph indices
-                cidtogidmap = '';
-                cidtogidmap = ["\x00"] * 256*256*2
-                for cc, glyph in codeToGlyph.items():
-                    cidtogidmap[cc*2] = chr(glyph >> 8)
-                    cidtogidmap[cc*2 + 1] = chr(glyph & 0xFF)
-                cidtogidmap = zlib.compress(''.join(cidtogidmap));
-                self._newobj()
-                self._out('<</Length ' + str(len(cidtogidmap)) + '')
-                self._out('/Filter /FlateDecode')
-                self._out('>>')
-                self._putstream(cidtogidmap)
-                self._out('endobj')
-
-                #Font file
-                self._newobj()
-                self._out('<</Length ' + str(len(fontstream)))
-                self._out('/Filter /FlateDecode')
-                self._out('/Length1 ' + str(ttfontsize))
-                self._out('>>')
-                self._putstream(fontstream)
-                self._out('endobj')
-                del ttf
-            else:
-                #Allow for additional types
-                mtd='_put'+type.lower()
-                if(not method_exists(self,mtd)):
-                    self.error('Unsupported font type: '+type)
-                self.mtd(font)
-
-    def _putTTfontwidths(self, font, maxUni):
-        cw127fname = os.path.splitext(font['unifilename'])[0] + '.cw127.pkl'
-        if (os.path.exists(cw127fname)):
-            fh = open(cw127fname);
-            try:
-                font_dict = pickle.load(fh)
-            finally:
-                fh.close()
-            rangeid = font_dict['rangeid']
-            range_ = font_dict['range']
-            prevcid = font_dict['prevcid']
-            prevwidth = font_dict['prevwidth']
-            interval = font_dict['interval']
-            range_interval = font_dict['range_interval']
-            startcid = 128
-        else:
-            rangeid = 0
-            range_ = {}
-            range_interval = {}
-            prevcid = -2
-            prevwidth = -1
-            interval = False
-            startcid = 1
-        cwlen = maxUni + 1
-
-        # for each character
-        for cid in range(startcid, cwlen):
-            if (cid==128 and not os.path.exists(cw127fname)):
-                try:
-                    fh = open(cw127fname, "wb")
-                    font_dict = {}
-                    font_dict['rangeid'] = rangeid
-                    font_dict['prevcid'] = prevcid
-                    font_dict['prevwidth'] = prevwidth
-                    font_dict['interval'] = interval
-                    font_dict['range_interval'] = range_interval
-                    font_dict['range'] = range_
-                    pickle.dump(font_dict, fh)
-                    fh.close()
-                except IOError, e:
-                    if not e.errno == errno.EACCES:
-                        raise  # Not a permission error.
-            if (font['cw'][cid] == 0):
-                continue
-            width = font['cw'][cid]
-            if (width == 65535): width = 0
-            if (cid > 255 and (cid not in font['subset']) or not cid): #
-                continue
-            if ('dw' not in font or (font['dw'] and width != font['dw'])):
-                if (cid == (prevcid + 1)):
-                    if (width == prevwidth):
-                        if (width == range_[rangeid][0]):
-                            range_.setdefault(rangeid, []).append(width)
-                        else:
-                            range_[rangeid].pop()
-                            # new range
-                            rangeid = prevcid
-                            range_[rangeid] = [prevwidth, width]
-                        interval = True
-                        range_interval[rangeid] = True
-                    else:
-                        if (interval):
-                            # new range
-                            rangeid = cid
-                            range_[rangeid] = [width]
-                        else:
-                            range_[rangeid].append(width)
-                        interval = False
-                else:
-                    rangeid = cid
-                    range_[rangeid] = [width]
-                    interval = False
-                prevcid = cid
-                prevwidth = width
-        prevk = -1
-        nextk = -1
-        prevint = False
-        for k, ws in sorted(range_.items()):
-            cws = len(ws)
-            if (k == nextk and not prevint and (not k in range_interval or cws < 3)):
-                if (k in range_interval):
-                    del range_interval[k]
-                range_[prevk] = range_[prevk] + range_[k]
-                del range_[k]
-            else:
-                prevk = k
-            nextk = k + cws
-            if (k in range_interval):
-                prevint = (cws > 3)
-                del range_interval[k]
-                nextk -= 1
-            else:
-                prevint = False
-        w = []
-        for k, ws in sorted(range_.items()):
-            if (len(set(ws)) == 1):
-                w.append(' %s %s %s' % (k, k + len(ws) - 1, ws[0]))
-            else:
-                w.append(' %s [ %s ]\n' % (k, ' '.join([str(int(h)) for h in ws]))) ##
-        self._out('/W [%s]' % ''.join(w))
-
-    def _putimages(self):
-        filter=''
-        if self.compress:
-            filter='/Filter /FlateDecode '
-        for filename,info in self.images.iteritems():
-            self._putimage(info)
-            del info['data']
-            if 'smask' in info:
-                del info['smask']
-
-    def _putimage(self, info):
-        if 'data' in info:
-            self._newobj()
-            info['n']=self.n
-            self._out('<</Type /XObject')
-            self._out('/Subtype /Image')
-            self._out('/Width '+str(info['w']))
-            self._out('/Height '+str(info['h']))
-            if(info['cs']=='Indexed'):
-                self._out('/ColorSpace [/Indexed /DeviceRGB '+str(len(info['pal'])/3-1)+' '+str(self.n+1)+' 0 R]')
-            else:
-                self._out('/ColorSpace /'+info['cs'])
-                if(info['cs']=='DeviceCMYK'):
-                    self._out('/Decode [1 0 1 0 1 0 1 0]')
-            self._out('/BitsPerComponent '+str(info['bpc']))
-            if 'f' in info:
-                self._out('/Filter /'+info['f'])
-            if 'dp' in info:
-                self._out('/DecodeParms <<' + info['dp'] + '>>')
-            if('trns' in info and isinstance(info['trns'], list)):
-                trns=''
-                for i in xrange(0,len(info['trns'])):
-                    trns+=str(info['trns'][i])+' '+str(info['trns'][i])+' '
-                self._out('/Mask ['+trns+']')
-            if('smask' in info):
-                self._out('/SMask ' + str(self.n+1) + ' 0 R');
-            self._out('/Length '+str(len(info['data']))+'>>')
-            self._putstream(info['data'])
-            self._out('endobj')
-            # Soft mask
-            if('smask' in info):
-                dp = '/Predictor 15 /Colors 1 /BitsPerComponent 8 /Columns ' + str(info['w'])
-                smask = {'w': info['w'], 'h': info['h'], 'cs': 'DeviceGray', 'bpc': 8, 'f': info['f'], 'dp': dp, 'data': info['smask']}
-                self._putimage(smask)
-            #Palette
-            if(info['cs']=='Indexed'):
-                self._newobj()
-                filter = self.compress and '/Filter /FlateDecode ' or ''
-                if self.compress:
-                    pal=zlib.compress(info['pal'])
-                else:
-                    pal=info['pal']
-                self._out('<<'+filter+'/Length '+str(len(pal))+'>>')
-                self._putstream(pal)
-                self._out('endobj')
-
-    def _putxobjectdict(self):
-        for image in self.images.values():
-            self._out('/I'+str(image['i'])+' '+str(image['n'])+' 0 R')
-
-    def _putresourcedict(self):
-        self._out('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]')
-        self._out('/Font <<')
-        for font in self.fonts.values():
-            self._out('/F'+str(font['i'])+' '+str(font['n'])+' 0 R')
-        self._out('>>')
-        self._out('/XObject <<')
-        self._putxobjectdict()
-        self._out('>>')
-
-    def _putresources(self):
-        self._putfonts()
-        self._putimages()
-        #Resource dictionary
-        self.offsets[2]=len(self.buffer)
-        self._out('2 0 obj')
-        self._out('<<')
-        self._putresourcedict()
-        self._out('>>')
-        self._out('endobj')
-
-    def _putinfo(self):
-        self._out('/Producer '+self._textstring('PyFPDF '+FPDF_VERSION+' http://pyfpdf.googlecode.com/'))
-        if hasattr(self,'title'):
-            self._out('/Title '+self._textstring(self.title))
-        if hasattr(self,'subject'):
-            self._out('/Subject '+self._textstring(self.subject))
-        if hasattr(self,'author'):
-            self._out('/Author '+self._textstring(self.author))
-        if hasattr (self,'keywords'):
-            self._out('/Keywords '+self._textstring(self.keywords))
-        if hasattr(self,'creator'):
-            self._out('/Creator '+self._textstring(self.creator))
-        self._out('/CreationDate '+self._textstring('D:'+datetime.now().strftime('%Y%m%d%H%M%S')))
-
-    def _putcatalog(self):
-        self._out('/Type /Catalog')
-        self._out('/Pages 1 0 R')
-        if(self.zoom_mode=='fullpage'):
-            self._out('/OpenAction [3 0 R /Fit]')
-        elif(self.zoom_mode=='fullwidth'):
-            self._out('/OpenAction [3 0 R /FitH null]')
-        elif(self.zoom_mode=='real'):
-            self._out('/OpenAction [3 0 R /XYZ null null 1]')
-        elif(not isinstance(self.zoom_mode,basestring)):
-            self._out('/OpenAction [3 0 R /XYZ null null '+(self.zoom_mode/100)+']')
-        if(self.layout_mode=='single'):
-            self._out('/PageLayout /SinglePage')
-        elif(self.layout_mode=='continuous'):
-            self._out('/PageLayout /OneColumn')
-        elif(self.layout_mode=='two'):
-            self._out('/PageLayout /TwoColumnLeft')
-
-    def _putheader(self):
-        self._out('%PDF-'+self.pdf_version)
-
-    def _puttrailer(self):
-        self._out('/Size '+str(self.n+1))
-        self._out('/Root '+str(self.n)+' 0 R')
-        self._out('/Info '+str(self.n-1)+' 0 R')
-
-    def _enddoc(self):
-        self._putheader()
-        self._putpages()
-        self._putresources()
-        #Info
-        self._newobj()
-        self._out('<<')
-        self._putinfo()
-        self._out('>>')
-        self._out('endobj')
-        #Catalog
-        self._newobj()
-        self._out('<<')
-        self._putcatalog()
-        self._out('>>')
-        self._out('endobj')
-        #Cross-ref
-        o=len(self.buffer)
-        self._out('xref')
-        self._out('0 '+(str(self.n+1)))
-        self._out('0000000000 65535 f ')
-        for i in xrange(1,self.n+1):
-            self._out(sprintf('%010d 00000 n ',self.offsets[i]))
-        #Trailer
-        self._out('trailer')
-        self._out('<<')
-        self._puttrailer()
-        self._out('>>')
-        self._out('startxref')
-        self._out(o)
-        self._out('%%EOF')
-        self.state=3
-
-    def _beginpage(self, orientation):
-        self.page+=1
-        self.pages[self.page]=''
-        self.state=2
-        self.x=self.l_margin
-        self.y=self.t_margin
-        self.font_family=''
-        #Page orientation
-        if(not orientation):
-            orientation=self.def_orientation
-        else:
-            orientation=orientation[0].upper()
-            if(orientation!=self.def_orientation):
-                self.orientation_changes[self.page]=1
-        if(orientation!=self.cur_orientation):
-            #Change orientation
-            if(orientation=='P'):
-                self.w_pt=self.fw_pt
-                self.h_pt=self.fh_pt
-                self.w=self.fw
-                self.h=self.fh
-            else:
-                self.w_pt=self.fh_pt
-                self.h_pt=self.fw_pt
-                self.w=self.fh
-                self.h=self.fw
-            self.page_break_trigger=self.h-self.b_margin
-            self.cur_orientation=orientation
-
-    def _endpage(self):
-        #End of page contents
-        self.state=1
-
-    def _newobj(self):
-        #Begin a new object
-        self.n+=1
-        self.offsets[self.n]=len(self.buffer)
-        self._out(str(self.n)+' 0 obj')
-
-    def _dounderline(self, x,y,txt):
-        #Underline text
-        up=self.current_font['up']
-        ut=self.current_font['ut']
-        w=self.get_string_width(txt)+self.ws*txt.count(' ')
-        return sprintf('%.2f %.2f %.2f %.2f re f',x*self.k,(self.h-(y-up/1000.0*self.font_size))*self.k,w*self.k,-ut/1000.0*self.font_size_pt)
-
-    def _parsejpg(self, filename):
-        # Extract info from a JPEG file
-        if Image is None:
-            self.error('PIL not installed')
-        try:
-            f = open(filename, 'rb')
-            im = Image.open(f)
-        except Exception, e:
-            self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
-        else:
-            a = im.size
-        # We shouldn't get into here, as Jpeg is RGB=8bpp right(?), but, just in case...
-        bpc=8
-        if im.mode == 'RGB':
-            colspace='DeviceRGB'
-        elif im.mode == 'CMYK':
-            colspace='DeviceCMYK'
-        else:
-            colspace='DeviceGray'
-
-        # Read whole file from the start
-        f.seek(0)
-        data = f.read()
-        f.close()
-        return {'w':a[0],'h':a[1],'cs':colspace,'bpc':bpc,'f':'DCTDecode','data':data}
-
-    def _parsegif(self, filename):
-        # Extract info from a GIF file (via PNG conversion)
-        if Image is None:
-            self.error('PIL is required for GIF support')
-        try:
-            im = Image.open(filename)
-        except Exception, e:
-            self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e)))
-        else:
-            # Use temporary file
-            f = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
-            tmp = f.name
-            f.close()
-            if "transparency" in im.info:
-                im.save(tmp, transparency = im.info['transparency'])
-            else:
-                im.save(tmp)
-            info = self._parsepng(tmp)
-            os.unlink(tmp)
-        return info
-
-    def _parsepng(self, name):
-        #Extract info from a PNG file
-        if name.startswith("http://") or name.startswith("https://"):
-            import urllib
-            f = urllib.urlopen(name)
-        else:
-            f=open(name,'rb')
-        if(not f):
-            self.error("Can't open image file: "+name)
-        #Check signature
-        if(f.read(8)!='\x89'+'PNG'+'\r'+'\n'+'\x1a'+'\n'):
-            self.error('Not a PNG file: '+name)
-        #Read header chunk
-        f.read(4)
-        if(f.read(4)!='IHDR'):
-            self.error('Incorrect PNG file: '+name)
-        w=self._freadint(f)
-        h=self._freadint(f)
-        bpc=ord(f.read(1))
-        if(bpc>8):
-            self.error('16-bit depth not supported: '+name)
-        ct=ord(f.read(1))
-        if(ct==0 or ct==4):
-            colspace='DeviceGray'
-        elif(ct==2 or ct==6):
-            colspace='DeviceRGB'
-        elif(ct==3):
-            colspace='Indexed'
-        else:
-            self.error('Unknown color type: '+name)
-        if(ord(f.read(1))!=0):
-            self.error('Unknown compression method: '+name)
-        if(ord(f.read(1))!=0):
-            self.error('Unknown filter method: '+name)
-        if(ord(f.read(1))!=0):
-            self.error('Interlacing not supported: '+name)
-        f.read(4)
-        dp='/Predictor 15 /Colors '
-        if colspace == 'DeviceRGB':
-            dp+='3'
-        else:
-            dp+='1'
-        dp+=' /BitsPerComponent '+str(bpc)+' /Columns '+str(w)+''
-        #Scan chunks looking for palette, transparency and image data
-        pal=''
-        trns=''
-        data=''
-        n=1
-        while n != None:
-            n=self._freadint(f)
-            type=f.read(4)
-            if(type=='PLTE'):
-                #Read palette
-                pal=f.read(n)
-                f.read(4)
-            elif(type=='tRNS'):
-                #Read transparency info
-                t=f.read(n)
-                if(ct==0):
-                    trns=[ord(substr(t,1,1)),]
-                elif(ct==2):
-                    trns=[ord(substr(t,1,1)),ord(substr(t,3,1)),ord(substr(t,5,1))]
-                else:
-                    pos=t.find('\x00')
-                    if(pos!=-1):
-                        trns=[pos,]
-                f.read(4)
-            elif(type=='IDAT'):
-                #Read image data block
-                data+=f.read(n)
-                f.read(4)
-            elif(type=='IEND'):
-                break
-            else:
-                f.read(n+4)
-        if(colspace=='Indexed' and not pal):
-            self.error('Missing palette in '+name)
-        f.close()
-        info = {'w':w,'h':h,'cs':colspace,'bpc':bpc,'f':'FlateDecode','dp':dp,'pal':pal,'trns':trns,}
-        if(ct>=4):
-            # Extract alpha channel
-            data = zlib.decompress(data)
-            color = '';
-            alpha = '';
-            if(ct==4):
-                # Gray image
-                length = 2*w
-                for i in range(h):
-                    pos = (1+length)*i
-                    color += data[pos]
-                    alpha += data[pos]
-                    line = substr(data, pos+1, length)
-                    color += re.sub('(.).',lambda m: m.group(1),line, flags=re.DOTALL)
-                    alpha += re.sub('.(.)',lambda m: m.group(1),line, flags=re.DOTALL)
-            else:
-                # RGB image
-                length = 4*w
-                for i in range(h):
-                    pos = (1+length)*i
-                    color += data[pos]
-                    alpha += data[pos]
-                    line = substr(data, pos+1, length)
-                    color += re.sub('(.{3}).',lambda m: m.group(1),line, flags=re.DOTALL)
-                    alpha += re.sub('.{3}(.)',lambda m: m.group(1),line, flags=re.DOTALL)
-            del data
-            data = zlib.compress(color)
-            info['smask'] = zlib.compress(alpha)
-            if (self.pdf_version < '1.4'):
-                self.pdf_version = '1.4'
-        info['data'] = data
-        return info
-
-    def _freadint(self, f):
-        #Read a 4-byte integer from file
-        try:
-            return struct.unpack('>I', f.read(4))[0]
-        except:
-            return None
-
-    def _textstring(self, s):
-        #Format a text string
-        return '('+self._escape(s)+')'
-
-    def _escape(self, s):
-        #Add \ before \, ( and )
-        return s.replace('\\','\\\\').replace(')','\\)').replace('(','\\(').replace('\r','\\r')
-
-    def _putstream(self, s):
-        self._out('stream')
-        self._out(s)
-        self._out('endstream')
-
-    def _out(self, s):
-        #Add a line to the document
-        if(self.state==2):
-            self.pages[self.page]+=s+"\n"
-        else:
-            self.buffer+=str(s)+"\n"
-
-    def interleaved2of5(self, txt, x, y, w=1.0, h=10.0):
-        "Barcode I2of5 (numeric), adds a 0 if odd lenght"
-        narrow = w / 3.0
-        wide = w
-
-        # wide/narrow codes for the digits
-        bar_char={'0': 'nnwwn', '1': 'wnnnw', '2': 'nwnnw', '3': 'wwnnn',
-                  '4': 'nnwnw', '5': 'wnwnn', '6': 'nwwnn', '7': 'nnnww',
-                  '8': 'wnnwn', '9': 'nwnwn', 'A': 'nn', 'Z': 'wn'}
-
-        self.set_fill_color(0)
-        code = txt
-        # add leading zero if code-length is odd
-        if len(code) % 2 != 0:
-            code = '0' + code
-
-        # add start and stop codes
-        code = 'AA' + code.lower() + 'ZA'
-
-        for i in xrange(0, len(code), 2):
-            # choose next pair of digits
-            char_bar = code[i]
-            char_space = code[i+1]
-            # check whether it is a valid digit
-            if not char_bar in bar_char.keys():
-                raise RuntimeError ('Char "%s" invalid for I25: ' % char_bar)
-            if not char_space in bar_char.keys():
-                raise RuntimeError ('Char "%s" invalid for I25: ' % char_space)
-
-            # create a wide/narrow-seq (first digit=bars, second digit=spaces)
-            seq = ''
-            for s in xrange(0, len(bar_char[char_bar])):
-                seq += bar_char[char_bar][s] + bar_char[char_space][s]
-
-            for bar in xrange(0, len(seq)):
-                # set line_width depending on value
-                if seq[bar] == 'n':
-                    line_width = narrow
-                else:
-                    line_width = wide
-
-                # draw every second value, the other is represented by space
-                if bar % 2 == 0:
-                    self.rect(x, y, line_width, h, 'F')
-
-                x += line_width
-
-
-    def code39(self, txt, x, y, w=1.5, h=5.0):
-        "Barcode 3of9"
-        wide = w
-        narrow = w / 3.0
-        gap = narrow
-
-        bar_char={'0': 'nnnwwnwnn', '1': 'wnnwnnnnw', '2': 'nnwwnnnnw',
-                  '3': 'wnwwnnnnn', '4': 'nnnwwnnnw', '5': 'wnnwwnnnn',
-                  '6': 'nnwwwnnnn', '7': 'nnnwnnwnw', '8': 'wnnwnnwnn',
-                  '9': 'nnwwnnwnn', 'A': 'wnnnnwnnw', 'B': 'nnwnnwnnw',
-                  'C': 'wnwnnwnnn', 'D': 'nnnnwwnnw', 'E': 'wnnnwwnnn',
-                  'F': 'nnwnwwnnn', 'G': 'nnnnnwwnw', 'H': 'wnnnnwwnn',
-                  'I': 'nnwnnwwnn', 'J': 'nnnnwwwnn', 'K': 'wnnnnnnww',
-                  'L': 'nnwnnnnww', 'M': 'wnwnnnnwn', 'N': 'nnnnwnnww',
-                  'O': 'wnnnwnnwn', 'P': 'nnwnwnnwn', 'Q': 'nnnnnnwww',
-                  'R': 'wnnnnnwwn', 'S': 'nnwnnnwwn', 'T': 'nnnnwnwwn',
-                  'U': 'wwnnnnnnw', 'V': 'nwwnnnnnw', 'W': 'wwwnnnnnn',
-                  'X': 'nwnnwnnnw', 'Y': 'wwnnwnnnn', 'Z': 'nwwnwnnnn',
-                  '-': 'nwnnnnwnw', '.': 'wwnnnnwnn', ' ': 'nwwnnnwnn',
-                  '*': 'nwnnwnwnn', '$': 'nwnwnwnnn', '/': 'nwnwnnnwn',
-                  '+': 'nwnnnwnwn', '%': 'nnnwnwnwn'}
-
-        self.set_fill_color(0)
-        code = txt
-
-        code = code.upper()
-        for i in xrange (0, len(code), 2):
-            char_bar = code[i]
-
-            if not char_bar in bar_char.keys():
-                raise RuntimeError ('Char "%s" invalid for Code39' % char_bar)
-
-            seq= ''
-            for s in xrange(0, len(bar_char[char_bar])):
-                seq += bar_char[char_bar][s]
-
-            for bar in xrange(0, len(seq)):
-                if seq[bar] == 'n':
-                    line_width = narrow
-                else:
-                    line_width = wide
-
-                if bar % 2 == 0:
-                    self.rect(x, y, line_width, h, 'F')
-                x += line_width
-        x += gap
-
-

+ 0 - 398
frameworks/Python/web2py/web2py/gluon/contrib/fpdf/html.py

@@ -1,398 +0,0 @@
-# -*- coding: latin-1 -*-
-
-"HTML Renderer for FPDF.py"
-
-__author__ = "Mariano Reingart <[email protected]>"
-__copyright__ = "Copyright (C) 2010 Mariano Reingart"
-__license__ = "LGPL 3.0"
-
-# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc.
-
-from fpdf import FPDF
-from HTMLParser import HTMLParser
-
-DEBUG = False
-
-def px2mm(px):
-    return int(px)*25.4/72.0
-
-def hex2dec(color = "#000000"):
-    if color:
-        r = int(color[1:3], 16)
-        g = int(color[3:5], 16)
-        b = int(color[5:7], 16)
-        return r, g, b
-
-class HTML2FPDF(HTMLParser):
-    "Render basic HTML to FPDF"
-
-    def __init__(self, pdf, image_map=None):
-        HTMLParser.__init__(self)
-        self.style = {}
-        self.pre = False
-        self.href = ''
-        self.align = ''
-        self.page_links = {}
-        self.font = None
-        self.font_stack = [] 
-        self.pdf = pdf
-        self.image_map = image_map or (lambda src: src)
-        self.r = self.g = self.b = 0
-        self.indent = 0
-        self.bullet = []
-        self.set_font("times", 12)
-        self.font_face = "times"    # initialize font
-        self.color = 0              #initialize font color
-        self.table = None           # table attributes
-        self.table_col_width = None # column (header) widths
-        self.table_col_index = None # current column index
-        self.td = None              # cell attributes
-        self.th = False             # header enabled
-        self.tr = None
-        self.theader = None           # table header cells
-        self.tfooter = None           # table footer cells
-        self.thead = None
-        self.tfoot = None
-        self.theader_out = self.tfooter_out = False
-        self.hsize = dict(h1=2, h2=1.5, h3=1.17, h4=1, h5=0.83, h6=0.67)
-        
-    def width2mm(self, length):
-        if length[-1]=='%':
-            total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
-            if self.table['width'][-1]=='%':
-                total *= int(self.table['width'][:-1])/100.0
-            return int(length[:-1]) * total / 101.0
-        else:
-            return int(length) / 6.0
-
-    def handle_data(self, txt):
-        if self.td is not None: # drawing a table?
-            if 'width' not in self.td and 'colspan' not in self.td:
-                try:
-                    l = [self.table_col_width[self.table_col_index]]
-                except IndexError:
-                    raise RuntimeError("Table column/cell width not specified, unable to continue")
-            elif 'colspan' in self.td:
-                i = self.table_col_index
-                colspan = int(self.td['colspan'])
-                l = self.table_col_width[i:i+colspan]
-            else:
-                l = [self.td.get('width','240')]
-            w = sum([self.width2mm(lenght) for lenght in l])
-            h = int(self.td.get('height', 0)) / 4 or self.h*1.30
-            self.table_h = h
-            border = int(self.table.get('border', 0))
-            if not self.th:
-                align = self.td.get('align', 'L')[0].upper()
-                border = border and 'LR'
-            else:
-                self.set_style('B',True)
-                border = border or 'B'
-                align = self.td.get('align', 'C')[0].upper()
-            bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', '')))
-            # parsing table header/footer (drawn later):
-            if self.thead is not None:
-                self.theader.append(((w,h,txt,border,0,align), bgcolor))
-            if self.tfoot is not None:
-                self.tfooter.append(((w,h,txt,border,0,align), bgcolor))
-            # check if reached end of page, add table footer and header:
-            height = h + (self.tfooter and self.tfooter[0][0][1] or 0)
-            if self.pdf.y+height>self.pdf.page_break_trigger and not self.th:
-                self.output_table_footer()
-                self.pdf.add_page()
-                self.theader_out = self.tfooter_out = False
-            if self.tfoot is None and self.thead is None:
-                if not self.theader_out: 
-                    self.output_table_header()
-                self.box_shadow(w, h, bgcolor)
-                if DEBUG: print "td cell", self.pdf.x, w, txt, "*"
-                self.pdf.cell(w,h,txt,border,0,align)
-        elif self.table is not None:
-            # ignore anything else than td inside a table 
-            pass
-        elif self.align:
-            if DEBUG: print "cell", txt, "*"
-            self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href)
-        else:
-            txt = txt.replace("\n"," ")
-            if self.href:
-                self.put_link(self.href,txt)
-            else:
-                if DEBUG: print "write", txt, "*"
-                self.pdf.write(self.h,txt)
-
-    def box_shadow(self, w, h, bgcolor):
-        if DEBUG: print "box_shadow", w, h, bgcolor
-        if bgcolor:
-            fill_color = self.pdf.fill_color
-            self.pdf.set_fill_color(*bgcolor)
-            self.pdf.rect(self.pdf.x, self.pdf.y, w, h, 'F')
-            self.pdf.fill_color = fill_color
-
-    def output_table_header(self):
-        if self.theader:
-            b = self.b
-            x = self.pdf.x
-            self.pdf.set_x(self.table_offset)
-            self.set_style('B',True)
-            for cell, bgcolor in self.theader:
-                self.box_shadow(cell[0], cell[1], bgcolor)
-                self.pdf.cell(*cell)
-            self.set_style('B',b)
-            self.pdf.ln(self.theader[0][0][1])
-            self.pdf.set_x(self.table_offset)
-            #self.pdf.set_x(x)
-        self.theader_out = True
-        
-    def output_table_footer(self):
-        if self.tfooter:
-            x = self.pdf.x
-            self.pdf.set_x(self.table_offset)
-            #TODO: self.output_table_sep()
-            for cell, bgcolor in self.tfooter:
-                self.box_shadow(cell[0], cell[1], bgcolor)
-                self.pdf.cell(*cell)
-            self.pdf.ln(self.tfooter[0][0][1])
-            self.pdf.set_x(x)
-        if int(self.table.get('border', 0)):
-            self.output_table_sep()
-        self.tfooter_out = True
-            
-    def output_table_sep(self):
-        self.pdf.set_x(self.table_offset)
-        x1 = self.pdf.x
-        y1 = self.pdf.y
-        w = sum([self.width2mm(lenght) for lenght in self.table_col_width])
-        self.pdf.line(x1,y1,x1+w,y1)
-
-
-    def handle_starttag(self, tag, attrs):
-        attrs = dict(attrs)
-        if DEBUG: print "STARTTAG", tag, attrs
-        if tag=='b' or tag=='i' or tag=='u':
-            self.set_style(tag,1)
-        if tag=='a':
-            self.href=attrs['href']
-        if tag=='br':
-            self.pdf.ln(5)
-        if tag=='p':
-            self.pdf.ln(5)
-            if attrs:
-                if attrs: self.align = attrs.get('align')
-        if tag in self.hsize:
-            k = self.hsize[tag]
-            self.pdf.ln(5*k)
-            self.pdf.set_text_color(150,0,0)
-            self.pdf.set_font_size(12 * k)
-            if attrs: self.align = attrs.get('align')
-        if tag=='hr':
-            self.put_line()
-        if tag=='pre':
-            self.pdf.set_font('Courier','',11)
-            self.pdf.set_font_size(11)
-            self.set_style('B',False)
-            self.set_style('I',False)
-            self.pre = True
-        if tag=='blockquote':
-            self.set_text_color(100,0,45)
-            self.pdf.ln(3)
-        if tag=='ul':
-            self.indent+=1
-            self.bullet.append('\x95')
-        if tag=='ol':
-            self.indent+=1
-            self.bullet.append(0)
-        if tag=='li':
-            self.pdf.ln(self.h+2)
-            self.pdf.set_text_color(190,0,0)
-            bullet = self.bullet[self.indent-1]
-            if not isinstance(bullet, basestring):
-                bullet += 1
-                self.bullet[self.indent-1] = bullet
-                bullet = "%s. " % bullet
-            self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet))
-            self.set_text_color()
-        if tag=='font':
-            # save previous font state:
-            self.font_stack.append((self.font_face, self.font_size, self.color))
-            if 'color' in attrs:
-                self.color = hex2dec(attrs['color'])
-                self.set_text_color(*color)
-                self.color = color
-            if 'face' in attrs:
-                face = attrs.get('face').lower()
-                self.pdf.set_font(face)
-                self.font_face = face
-            if 'size' in attrs:
-                size = int(attrs.get('size'))
-                self.pdf.set_font(self.font_face, size=int(size))
-                self.font_size = size
-        if tag=='table':
-            self.table = dict([(k.lower(), v) for k,v in attrs.items()])
-            if not 'width' in self.table:
-                self.table['width'] = '100%'
-            if self.table['width'][-1]=='%':
-                w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin
-                w *= int(self.table['width'][:-1])/100.0
-                self.table_offset = (self.pdf.w-w)/2.0
-            self.table_col_width = []
-            self.theader_out = self.tfooter_out = False
-            self.theader = []
-            self.tfooter = []
-            self.thead = None
-            self.tfoot = None
-            self.table_h = 0
-            self.pdf.ln()
-        if tag=='tr':
-            self.tr = dict([(k.lower(), v) for k,v in attrs.items()])
-            self.table_col_index = 0
-            self.pdf.set_x(self.table_offset)
-        if tag=='td':
-            self.td = dict([(k.lower(), v) for k,v in attrs.items()])
-        if tag=='th':
-            self.td = dict([(k.lower(), v) for k,v in attrs.items()])
-            self.th = True
-            if 'width' in self.td:
-                self.table_col_width.append(self.td['width'])
-        if tag=='thead':
-            self.thead = {}
-        if tag=='tfoot':
-            self.tfoot = {}
-        if tag=='img':
-            if 'src' in attrs:
-                x = self.pdf.get_x()
-                y = self.pdf.get_y()
-                w = px2mm(attrs.get('width', 0))
-                h = px2mm(attrs.get('height',0))
-                if self.align and self.align[0].upper() == 'C':
-                    x = (self.pdf.w-x)/2.0 - w/2.0
-                self.pdf.image(self.image_map(attrs['src']),
-                               x, y, w, h, link=self.href)
-                self.pdf.set_x(x+w)
-                self.pdf.set_y(y+h)
-        if tag=='b' or tag=='i' or tag=='u':
-            self.set_style(tag, True)
-        if tag=='center':
-            self.align = 'Center'
-
-    def handle_endtag(self, tag):
-        #Closing tag
-        if DEBUG: print "ENDTAG", tag
-        if tag=='h1' or tag=='h2' or tag=='h3' or tag=='h4':
-            self.pdf.ln(6)
-            self.set_font()
-            self.set_style()
-            self.align = None
-        if tag=='pre':
-            self.pdf.set_font(self.font or 'Times','',12)
-            self.pdf.set_font_size(12)
-            self.pre=False
-        if tag=='blockquote':
-            self.set_text_color(0,0,0)
-            self.pdf.ln(3)
-        if tag=='strong':
-            tag='b'
-        if tag=='em':
-            tag='i'
-        if tag=='b' or tag=='i' or tag=='u':
-            self.set_style(tag, False)
-        if tag=='a':
-            self.href=''
-        if tag=='p':
-            self.align=''
-        if tag in ('ul', 'ol'):
-            self.indent-=1
-            self.bullet.pop()
-        if tag=='table':
-            if not self.tfooter_out:
-                self.output_table_footer()
-            self.table = None
-            self.th = False
-            self.theader = None
-            self.tfooter = None
-            self.pdf.ln()
-        if tag=='thead':
-            self.thead = None
-        if tag=='tfoot':
-            self.tfoot = None
-        if tag=='tbody':
-            # draw a line separator between table bodies
-            self.pdf.set_x(self.table_offset)
-            self.output_table_sep()
-        if tag=='tr':
-            h = self.table_h
-            if self.tfoot is None:
-                self.pdf.ln(h)
-            self.tr = None
-        if tag=='td' or tag=='th':
-            if self.th:
-                if DEBUG: print "revert style"
-                self.set_style('B', False) # revert style
-            self.table_col_index += int(self.td.get('colspan','1'))
-            self.td = None
-            self.th = False
-        if tag=='font':
-            # recover last font state
-            face, size, color = self.font_stack.pop()
-            if face:
-                self.pdf.set_text_color(0,0,0)
-                self.color = None
-            self.set_font(face, size)                
-            self.font = None
-        if tag=='center':
-            self.align = None
-
-    def set_font(self, face=None, size=None):
-        if face:
-            self.font_face = face
-        if size:
-            self.font_size = size
-            self.h = size / 72.0*25.4
-            if DEBUG: print "H", self.h
-        self.pdf.set_font(self.font_face or 'times','',12)
-        self.pdf.set_font_size(self.font_size or 12)
-        self.set_style('u', False)
-        self.set_style('b', False)
-        self.set_style('i', False)
-        self.set_text_color()        
-
-    def set_style(self, tag=None, enable=None):
-        #Modify style and select corresponding font
-        if tag:
-            t = self.style.get(tag.lower())
-            self.style[tag.lower()] = enable
-        style=''
-        for s in ('b','i','u'):
-            if self.style.get(s):
-                style+=s
-        if DEBUG: print "SET_FONT_STYLE", style
-        self.pdf.set_font('',style)
-
-    def set_text_color(self, r=None, g=0, b=0):
-        if r is None:
-            self.pdf.set_text_color(self.r,self.g,self.b)
-        else:
-            self.pdf.set_text_color(r, g, b)
-            self.r = r
-            self.g = g
-            self.b = b
-    
-    def put_link(self, url, txt):
-        #Put a hyperlink
-        self.set_text_color(0,0,255)
-        self.set_style('u', True)
-        self.pdf.write(5,txt,url)
-        self.set_style('u', False)
-        self.set_text_color(0)
-
-    def put_line(self):
-        self.pdf.ln(2)
-        self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y())
-        self.pdf.ln(3)
-
-class HTMLMixin(object):
-    def write_html(self, text, image_map=None):
-        "Parse HTML and convert it to PDF"
-        h2p = HTML2FPDF(self, image_map)
-        h2p.feed(text)
-

+ 0 - 49
frameworks/Python/web2py/web2py/gluon/contrib/fpdf/php.py

@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: latin-1 -*-
-
-# fpdf php helpers:
-
-def substr(s, start, length=-1):
-       if length < 0:
-               length=len(s)-start
-       return s[start:start+length]
-
-def sprintf(fmt, *args): return fmt % args
-
-def print_r(array):
-    if not isinstance(array, dict):
-        array = dict([(k, k) for k in array])
-    for k, v in array.items():
-        print "[%s] => %s" % (k, v),
-        
-def UTF8ToUTF16BE(instr, setbom=True):
-    "Converts UTF-8 strings to UTF16-BE."
-    outstr = ""
-    if (setbom):
-        outstr += "\xFE\xFF"; 
-    if not isinstance(instr, unicode):
-        instr = instr.decode('UTF-8')
-    outstr += instr.encode('UTF-16BE')
-    return outstr
-
-def UTF8StringToArray(instr):
-    "Converts UTF-8 strings to codepoints array"
-    return [ord(c) for c in instr]
-
-# ttfints php helpers:    
-
-def die(msg):
-    raise RuntimeError(msg)
-    
-def str_repeat(s, count):
-    return s * count
-    
-def str_pad(s, pad_length=0, pad_char= " ", pad_type= +1 ):
-    if pad_type<0: # pad left
-        return s.rjust(pad_length, pad_char)
-    elif pad_type>0: # pad right
-        return s.ljust(pad_length, pad_char)
-    else: # pad both
-        return s.center(pad_length, pad_char)
-
-strlen = count = lambda s: len(s)

+ 0 - 301
frameworks/Python/web2py/web2py/gluon/contrib/fpdf/template.py

@@ -1,301 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-
-"PDF Template Helper for FPDF.py"
-
-__author__ = "Mariano Reingart <[email protected]>"
-__copyright__ = "Copyright (C) 2010 Mariano Reingart"
-__license__ = "LGPL 3.0"
-
-import sys,os,csv
-from fpdf import FPDF
-
-def rgb(col):
-    return (col // 65536), (col // 256 % 256), (col% 256)
-
-class Template:
-    def __init__(self, infile=None, elements=None, format='A4', orientation='portrait',
-                 title='', author='', subject='', creator='', keywords=''):
-        if elements:
-            self.elements = elements
-            self.keys = [v['name'].lower() for v in self.elements]
-        self.handlers = {'T': self.text, 'L': self.line, 'I': self.image, 
-                         'B': self.rect, 'BC': self.barcode, }
-        self.pg_no = 0
-        self.texts = {}
-        pdf = self.pdf = FPDF(format=format,orientation=orientation, unit="mm")
-        pdf.set_title(title)
-        pdf.set_author(author)
-        pdf.set_creator(creator)
-        pdf.set_subject(subject)
-        pdf.set_keywords(keywords)
-
-    def parse_csv(self, infile, delimiter=",", decimal_sep="."):
-        "Parse template format csv file and create elements dict"
-        keys = ('name','type','x1','y1','x2','y2','font','size',
-            'bold','italic','underline','foreground','background',
-            'align','text','priority', 'multiline')
-        self.elements = []
-        for row in csv.reader(open(infile, 'rb'), delimiter=delimiter):
-            kargs = {}
-            for i,v in enumerate(row):
-                if not v.startswith("'") and decimal_sep!=".": 
-                    v = v.replace(decimal_sep,".")
-                else:
-                    v = v
-                if v=='':
-                    v = None
-                else:
-                    v = eval(v.strip())
-                kargs[keys[i]] = v
-            self.elements.append(kargs)
-        self.keys = [v['name'].lower() for v in self.elements]
-
-    def add_page(self):
-        self.pg_no += 1
-        self.texts[self.pg_no] = {}
-        
-    def __setitem__(self, name, value):
-        if self.has_key(name):
-            if isinstance(value,unicode):
-                value = value.encode("latin1","ignore")
-            elif value is None:
-                value = ""
-            else:
-                value = str(value)
-            self.texts[self.pg_no][name.lower()] = value
-
-    # setitem shortcut (may be further extended)
-    set = __setitem__
-
-    def has_key(self, name):
-        return name.lower() in self.keys
-        
-    def __getitem__(self, name):
-        if self.has_key(name):
-            key = name.lower()
-            if key in self.texts:
-                # text for this page:
-                return self.texts[self.pg_no][key]
-            else:
-                # find first element for default text:
-                elements = [element for element in self.elements
-                    if element['name'].lower() == key]
-                if elements:
-                    return elements[0]['text']
-
-    def split_multicell(self, text, element_name):
-        "Divide (\n) a string using a given element width"
-        pdf = self.pdf
-        element = [element for element in self.elements
-            if element['name'].lower() == element_name.lower()][0]
-        style = ""
-        if element['bold']: style += "B"
-        if element['italic']: style += "I"
-        if element['underline']: style += "U"
-        pdf.set_font(element['font'],style,element['size'])
-        align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(element['align']) # D/I in spanish
-        if isinstance(text, unicode):
-            text = text.encode("latin1","ignore")
-        else:
-            text = str(text)
-        return pdf.multi_cell(w=element['x2']-element['x1'],
-                             h=element['y2']-element['y1'],
-                             txt=text,align=align,split_only=True)
-        
-    def render(self, outfile, dest="F"):
-        pdf = self.pdf
-        for pg in range(1, self.pg_no+1):
-            pdf.add_page()
-            pdf.set_font('Arial','B',16)
-            pdf.set_auto_page_break(False,margin=0)
-
-            for element in sorted(self.elements,key=lambda x: x['priority']):
-                # make a copy of the element:
-                element = dict(element)
-                element['text'] = self.texts[pg].get(element['name'].lower(), element['text'])
-                if 'rotate' in element:
-                    pdf.rotate(element['rotate'], element['x1'], element['y1'])
-                self.handlers[element['type'].upper()](pdf, **element)
-                if 'rotate' in element:
-                    pdf.rotate(0)
-                    
-        return pdf.output(outfile, dest)
-        
-    def text(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=10, 
-             bold=False, italic=False, underline=False, align="", 
-             foreground=0, backgroud=65535, multiline=None,
-             *args, **kwargs):
-        if text:
-            if pdf.text_color!=rgb(foreground):
-                pdf.set_text_color(*rgb(foreground))
-            if pdf.fill_color!=rgb(backgroud):
-                pdf.set_fill_color(*rgb(backgroud))
-
-            font = font.strip().lower()
-            if font == 'arial black':
-                font = 'arial'
-            style = ""
-            for tag in 'B', 'I', 'U':
-                if (text.startswith("<%s>" % tag) and text.endswith("</%s>" %tag)):
-                    text = text[3:-4]
-                    style += tag
-            if bold: style += "B"
-            if italic: style += "I"
-            if underline: style += "U"
-            align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(align) # D/I in spanish
-            pdf.set_font(font,style,size)
-            ##m_k = 72 / 2.54
-            ##h = (size/m_k)
-            pdf.set_xy(x1,y1)
-            if multiline is None:
-                # multiline==None: write without wrapping/trimming (default)
-                pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
-            elif multiline:
-                # multiline==True: automatic word - warp
-                pdf.multi_cell(w=x2-x1,h=y2-y1,txt=text,border=0,align=align)
-            else:
-                # multiline==False: trim to fit exactly the space defined
-                text = pdf.multi_cell(w=x2-x1, h=y2-y1,
-                             txt=text, align=align, split_only=True)[0]
-                print "trimming: *%s*" % text
-                pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align)
-
-            #pdf.Text(x=x1,y=y1,txt=text)
-
-    def line(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, *args, **kwargs):
-        if pdf.draw_color!=rgb(foreground):
-            #print "SetDrawColor", hex(foreground)
-            pdf.set_draw_color(*rgb(foreground))
-        #print "SetLineWidth", size
-        pdf.set_line_width(size)
-        pdf.line(x1, y1, x2, y2)
-
-    def rect(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, backgroud=65535, *args, **kwargs):
-        if pdf.draw_color!=rgb(foreground):
-            pdf.set_draw_color(*rgb(foreground))
-        if pdf.fill_color!=rgb(backgroud):
-            pdf.set_fill_color(*rgb(backgroud))
-        pdf.set_line_width(size)
-        pdf.rect(x1, y1, x2-x1, y2-y1)
-
-    def image(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', *args,**kwargs):
-        pdf.image(text,x1,y1,w=x2-x1,h=y2-y1,type='',link='')
-
-    def barcode(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=1,
-             foreground=0, *args, **kwargs):
-        if pdf.draw_color!=rgb(foreground):
-            pdf.set_draw_color(*rgb(foreground))
-        font = font.lower().strip()
-        if font == 'interleaved 2of5 nt':
-            pdf.interleaved2of5(text,x1,y1,w=size,h=y2-y1)
-
-
-if __name__ == "__main__":
-
-    # generate sample invoice (according Argentina's regulations)
-
-    import random
-    from decimal import Decimal
-
-    f = Template(format="A4",
-             title="Sample Invoice", author="Sample Company",
-             subject="Sample Customer", keywords="Electronic TAX Invoice")
-    f.parse_csv(infile="invoice.csv", delimiter=";", decimal_sep=",")
-    
-    detail = "Lorem ipsum dolor sit amet, consectetur. " * 30
-    items = []
-    for i in range(1, 30):
-        ds = "Sample product %s" % i
-        qty = random.randint(1,10)
-        price = round(random.random()*100,3)
-        code = "%s%s%02d" % (chr(random.randint(65,90)), chr(random.randint(65,90)),i)
-        items.append(dict(code=code, unit='u',
-                          qty=qty, price=price, 
-                          amount=qty*price,
-                          ds="%s: %s" % (i,ds)))
-
-    # divide and count lines
-    lines = 0
-    li_items = []
-    for it in items:
-        qty = it['qty']
-        code = it['code']
-        unit = it['unit']
-        for ds in f.split_multicell(it['ds'], 'item_description01'):
-            # add item description line (without price nor amount)
-            li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
-            # clean qty and code (show only at first)
-            unit = qty = code = None
-        # set last item line price and amount
-        li_items[-1].update(amount = it['amount'],
-                            price = it['price'])
-
-    obs="\n<U>Detail:</U>\n\n" + detail
-    for ds in f.split_multicell(obs, 'item_description01'):
-        li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None))
-
-    # calculate pages:
-    lines = len(li_items)
-    max_lines_per_page = 24
-    pages = lines / (max_lines_per_page - 1)
-    if lines % (max_lines_per_page - 1): pages = pages + 1
-
-    # completo campos y hojas
-    for page in range(1, pages+1):
-        f.add_page()
-        f['page'] = 'Page %s of %s' % (page, pages)
-        if pages>1 and page<pages:
-            s = 'Continues on page %s' % (page+1)
-        else:
-            s = ''
-        f['item_description%02d' % (max_lines_per_page+1)] = s
-
-        f["company_name"] = "Sample Company"
-        f["company_logo"] = "tutorial/logo.png"
-        f["company_header1"] = "Some Address - somewhere -"
-        f["company_header2"] = "http://www.example.com"        
-        f["company_footer1"] = "Tax Code ..."
-        f["company_footer2"] = "Tax/VAT ID ..."
-        f['number'] = '0001-00001234'
-        f['issue_date'] = '2010-09-10'
-        f['due_date'] = '2099-09-10'
-        f['customer_name'] = "Sample Client"
-        f['customer_address'] = "Siempreviva 1234"
-       
-        # print line item...
-        li = 0 
-        k = 0
-        total = Decimal("0.00")
-        for it in li_items:
-            k = k + 1
-            if k > page * (max_lines_per_page - 1):
-                break
-            if it['amount']:
-                total += Decimal("%.6f" % it['amount'])
-            if k > (page - 1) * (max_lines_per_page - 1):
-                li += 1
-                if it['qty'] is not None:
-                    f['item_quantity%02d' % li] = it['qty']
-                if it['code'] is not None:
-                    f['item_code%02d' % li] = it['code']
-                if it['unit'] is not None:
-                    f['item_unit%02d' % li] = it['unit']
-                f['item_description%02d' % li] = it['ds']
-                if it['price'] is not None:
-                    f['item_price%02d' % li] = "%0.3f" % it['price']
-                if it['amount'] is not None:
-                    f['item_amount%02d' % li] = "%0.2f" % it['amount']
-
-        if pages == page:
-            f['net'] = "%0.2f" % (total/Decimal("1.21"))
-            f['vat'] = "%0.2f" % (total*(1-1/Decimal("1.21")))
-            f['total_label'] = 'Total:'
-        else:
-            f['total_label'] = 'SubTotal:'
-        f['total'] = "%0.2f" % total
-            
-    f.render("./invoice.pdf")
-    if sys.platform.startswith("linux"):
-        os.system("evince ./invoice.pdf")
-    else:
-        os.system("./invoice.pdf")

+ 0 - 1083
frameworks/Python/web2py/web2py/gluon/contrib/fpdf/ttfonts.py

@@ -1,1083 +0,0 @@
-#******************************************************************************
-# TTFontFile class                                                             
-#                                                                              
-# This class is based on The ReportLab Open Source PDF library                 
-# written in Python - http://www.reportlab.com/software/opensource/            
-# together with ideas from the OpenOffice source code and others.              
-#                                                                              
-# Version:  1.04                                                               
-# Date:     2011-09-18                                                         
-# Author:   Ian Back <[email protected]>                                           
-# License:  LGPL                                                               
-# Copyright (c) Ian Back, 2010                                                 
-# Ported to Python 2.7 by Mariano Reingart ([email protected]) on 2012        
-# This header must be retained in any redistribution or                        
-# modification of the file.                                                    
-#                                                                              
-#******************************************************************************
-
-from struct import pack, unpack, unpack_from
-import re
-import warnings
-from php import die, substr, str_repeat, str_pad, strlen, count
-
-
-# Define the value used in the "head" table of a created TTF file
-# 0x74727565 "true" for Mac
-# 0x00010000 for Windows
-# Either seems to work for a font embedded in a PDF file
-# when read by Adobe Reader on a Windows PC(!)
-_TTF_MAC_HEADER = False
-
-
-# TrueType Font Glyph operators
-GF_WORDS = (1 << 0)
-GF_SCALE = (1 << 3)
-GF_MORE  = (1 << 5)
-GF_XYSCALE  = (1 << 6)
-GF_TWOBYTWO = (1 << 7)
-
-
-def sub32(x, y):
-    xlo = x[1]
-    xhi = x[0]
-    ylo = y[1]
-    yhi = y[0]
-    if (ylo > xlo):  
-        xlo += 1 << 16 
-        yhi += 1 
-    reslo = xlo-ylo
-    if (yhi > xhi):  
-        xhi += 1 << 16  
-    reshi = xhi-yhi
-    reshi = reshi & 0xFFFF
-    return (reshi, reslo)
-
-def calcChecksum(data): 
-    if (strlen(data) % 4):
-        data += str_repeat("\0", (4-(len(data) % 4)))
-    hi=0x0000
-    lo=0x0000
-    for i in range(0, len(data), 4): 
-        hi += (ord(data[i])<<8) + ord(data[i+1])
-        lo += (ord(data[i+2])<<8) + ord(data[i+3])
-        hi += lo >> 16
-        lo = lo & 0xFFFF
-        hi = hi & 0xFFFF
-    return (hi, lo)
-
-
-class TTFontFile:
-
-    def __init__(self):
-        self.maxStrLenRead = 200000    # Maximum size of glyf table to read in as string (otherwise reads each glyph from file)
-
-    def getMetrics(self, file):
-        self.filename = file
-        self.fh = open(file,'rb')
-        self._pos = 0
-        self.charWidths = []
-        self.glyphPos = {}
-        self.charToGlyph = {}
-        self.tables = {}
-        self.otables = {}
-        self.ascent = 0
-        self.descent = 0
-        self.TTCFonts = {}
-        self.version = version = self.read_ulong()
-        if (version==0x4F54544F):
-            die("Postscript outlines are not supported")
-        if (version==0x74746366):
-            die("ERROR - TrueType Fonts Collections not supported")
-        if (version not in (0x00010000,0x74727565)):
-            die("Not a TrueType font: version=" + version)
-        self.readTableDirectory()
-        self.extractInfo()
-        self.fh.close()
-    
-    def readTableDirectory(self, ):
-        self.numTables = self.read_ushort()
-        self.searchRange = self.read_ushort()
-        self.entrySelector = self.read_ushort()
-        self.rangeShift = self.read_ushort()
-        self.tables = {}    
-        for i in range(self.numTables):
-            record = {}
-            record['tag'] = self.read_tag()
-            record['checksum'] = (self.read_ushort(),self.read_ushort())
-            record['offset'] = self.read_ulong()
-            record['length'] = self.read_ulong()
-            self.tables[record['tag']] = record    
-
-    def get_table_pos(self, tag):
-        offset = self.tables[tag]['offset']
-        length = self.tables[tag]['length']
-        return (offset, length)
-    
-    def seek(self, pos): 
-        self._pos = pos
-        self.fh.seek(self._pos)
-    
-    def skip(self, delta): 
-        self._pos = self._pos + delta
-        self.fh.seek(self._pos)
-    
-    def seek_table(self, tag, offset_in_table = 0):
-        tpos = self.get_table_pos(tag)
-        self._pos = tpos[0] + offset_in_table
-        self.fh.seek(self._pos)
-        return self._pos
-
-    def read_tag(self):
-        self._pos += 4
-        return self.fh.read(4)
-
-    def read_short(self): 
-        self._pos += 2
-        s = self.fh.read(2)
-        a = (ord(s[0])<<8) + ord(s[1])
-        if (a & (1 << 15) ):
-            a = (a - (1 << 16)) 
-        return a
-    
-    def unpack_short(self, s):
-        a = (ord(s[0])<<8) + ord(s[1])
-        if (a & (1 << 15) ):
-            a = (a - (1 << 16))     
-        return a
-    
-    def read_ushort(self):
-        self._pos += 2
-        s = self.fh.read(2)
-        return (ord(s[0])<<8) + ord(s[1])
-
-    def read_ulong(self): 
-        self._pos += 4
-        s = self.fh.read(4)
-        # if large uInt32 as an integer, PHP converts it to -ve
-        return (ord(s[0])*16777216) + (ord(s[1])<<16) + (ord(s[2])<<8) + ord(s[3]) #     16777216  = 1<<24
-
-    def get_ushort(self, pos): 
-        self.fh.seek(pos)
-        s = self.fh.read(2)
-        return (ord(s[0])<<8) + ord(s[1])
-
-    def get_ulong(self, pos):
-        self.fh.seek(pos)
-        s = self.fh.read(4)
-        # iF large uInt32 as an integer, PHP converts it to -ve
-        return (ord(s[0])*16777216) + (ord(s[1])<<16) + (ord(s[2])<<8) + ord(s[3]) #     16777216  = 1<<24    
-
-    def pack_short(self, val):
-        if (val<0):
-            val = abs(val)
-            val = ~val
-            val += 1
-        return pack(">H",val) 
-    
-    def splice(self, stream, offset, value):
-        return substr(stream,0,offset) + value + substr(stream,offset+strlen(value))
-    
-    def _set_ushort(self, stream, offset, value):
-        up = pack(">H", value)
-        return self.splice(stream, offset, up)    
-
-    def _set_short(self, stream, offset, val):
-        if (val<0):
-            val = abs(val)
-            val = ~val
-            val += 1
-        up = pack(">H",val) 
-        return self.splice(stream, offset, up)
-
-    def get_chunk(self, pos, length): 
-        self.fh.seek(pos)
-        if (length <1):  return '' 
-        return (self.fh.read(length))
-
-    def get_table(self, tag):
-        (pos, length) = self.get_table_pos(tag)
-        if (length == 0):
-            die('Truetype font (' + self.filename + '): error reading table: ' + tag) 
-        self.fh.seek(pos)
-        return (self.fh.read(length))
-
-    def add(self, tag, data):
-        if (tag == 'head') :
-            data = self.splice(data, 8, "\0\0\0\0")        
-        self.otables[tag] = data
-
-############################################/
-############################################/
-
-############################################/
-
-    def extractInfo(self): 
-        #################/
-        # name - Naming table
-        #################/
-        self.sFamilyClass = 0
-        self.sFamilySubClass = 0
-
-        name_offset = self.seek_table("name")
-        format = self.read_ushort()
-        if (format != 0):
-            die("Unknown name table format " + format)
-        numRecords = self.read_ushort()
-        string_data_offset = name_offset + self.read_ushort()
-        names = {1:'',2:'',3:'',4:'',6:''}
-        K = names.keys()
-        nameCount = len(names)
-        for i in range(numRecords): 
-            platformId = self.read_ushort()
-            encodingId = self.read_ushort()
-            languageId = self.read_ushort()
-            nameId = self.read_ushort()
-            length = self.read_ushort()
-            offset = self.read_ushort()
-            if (nameId not in K): continue
-            N = ''
-            if (platformId == 3 and encodingId == 1 and languageId == 0x409):  # Microsoft, Unicode, US English, PS Name
-                opos = self._pos
-                self.seek(string_data_offset + offset)
-                if (length % 2 != 0):
-                    die("PostScript name is UTF-16BE string of odd length")
-                length /= 2
-                N = ''
-                while (length > 0):
-                    char = self.read_ushort()
-                    N += (chr(char))
-                    length -= 1
-                self._pos = opos
-                self.seek(opos)
-            
-            elif (platformId == 1 and encodingId == 0 and languageId == 0):  # Macintosh, Roman, English, PS Name
-                opos = self._pos
-                N = self.get_chunk(string_data_offset + offset, length)
-                self._pos = opos
-                self.seek(opos)
-            
-            if (N and names[nameId]==''):
-                names[nameId] = N
-                nameCount -= 1
-                if (nameCount==0): break
-            
-        
-        if (names[6]):
-            psName = names[6]
-        elif (names[4]):
-            psName = re.sub(' ','-',names[4])
-        elif (names[1]):
-            psName = re.sub(' ','-',names[1])
-        else:
-            psName = ''
-        if (not psName):
-            die("Could not find PostScript font name")
-        self.name = psName
-        if (names[1]):
-            self.familyName = names[1]  
-        else:  
-            self.familyName = psName 
-        if (names[2]):
-            self.styleName = names[2]
-        else:
-            self.styleName = 'Regular' 
-        if (names[4]):
-            self.fullName = names[4]
-        else:
-            self.fullName = psName 
-        if (names[3]):
-            self.uniqueFontID = names[3]
-        else:
-            self.uniqueFontID = psName 
-        if (names[6]):
-            self.fullName = names[6] 
-
-        #################/
-        # head - Font header table
-        #################/
-        self.seek_table("head")
-        self.skip(18) 
-        self.unitsPerEm = unitsPerEm = self.read_ushort()
-        scale = 1000 / float(unitsPerEm)
-        self.skip(16)
-        xMin = self.read_short()
-        yMin = self.read_short()
-        xMax = self.read_short()
-        yMax = self.read_short()
-        self.bbox = [(xMin*scale), (yMin*scale), (xMax*scale), (yMax*scale)]
-        self.skip(3*2)
-        indexToLocFormat = self.read_ushort()
-        glyphDataFormat = self.read_ushort()
-        if (glyphDataFormat != 0):
-            die('Unknown glyph data format ' + glyphDataFormat)
-
-        #################/
-        # hhea metrics table
-        #################/
-        # ttf2t1 seems to use this value rather than the one in OS/2 - so put in for compatibility
-        if ("hhea" in self.tables):
-            self.seek_table("hhea")
-            self.skip(4)
-            hheaAscender = self.read_short()
-            hheaDescender = self.read_short()
-            self.ascent = (hheaAscender *scale)
-            self.descent = (hheaDescender *scale)
-        
-
-        #################/
-        # OS/2 - OS/2 and Windows metrics table
-        #################/
-        if ("OS/2" in self.tables): 
-            self.seek_table("OS/2")
-            version = self.read_ushort()
-            self.skip(2)
-            usWeightClass = self.read_ushort()
-            self.skip(2)
-            fsType = self.read_ushort()
-            if (fsType == 0x0002 or (fsType & 0x0300) != 0): 
-                die('ERROR - Font file ' + self.filename + ' cannot be embedded due to copyright restrictions.')
-                self.restrictedUse = True
-            
-            self.skip(20)
-            sF = self.read_short()
-            self.sFamilyClass = (sF >> 8)
-            self.sFamilySubClass = (sF & 0xFF)
-            self._pos += 10  #PANOSE = 10 byte length
-            panose = self.fh.read(10)
-            self.skip(26)
-            sTypoAscender = self.read_short()
-            sTypoDescender = self.read_short()
-            if (not self.ascent): 
-                self.ascent = (sTypoAscender*scale)
-            if (not self.descent): 
-                self.descent = (sTypoDescender*scale)
-            if (version > 1):
-                self.skip(16)
-                sCapHeight = self.read_short()
-                self.capHeight = (sCapHeight*scale)
-            else:
-                self.capHeight = self.ascent            
-        
-        else:
-            usWeightClass = 500
-            if (not self.ascent): self.ascent = (yMax*scale)
-            if (not self.descent): self.descent = (yMin*scale)
-            self.capHeight = self.ascent
-        
-        self.stemV = 50 + int(pow((usWeightClass / 65.0),2))
-
-        #################/
-        # post - PostScript table
-        #################/
-        self.seek_table("post")
-        self.skip(4) 
-        self.italicAngle = self.read_short() + self.read_ushort() / 65536.0
-        self.underlinePosition = self.read_short() * scale
-        self.underlineThickness = self.read_short() * scale
-        isFixedPitch = self.read_ulong()
-
-        self.flags = 4
-
-        if (self.italicAngle!= 0):
-            self.flags = self.flags | 64
-        if (usWeightClass >= 600):
-            self.flags = self.flags | 262144
-        if (isFixedPitch):
-            self.flags = self.flags | 1
-
-        #################/
-        # hhea - Horizontal header table
-        #################/
-        self.seek_table("hhea")
-        self.skip(32) 
-        metricDataFormat = self.read_ushort()
-        if (metricDataFormat != 0):
-            die('Unknown horizontal metric data format '.metricDataFormat)
-        numberOfHMetrics = self.read_ushort()
-        if (numberOfHMetrics == 0):
-            die('Number of horizontal metrics is 0')
-
-        #################/
-        # maxp - Maximum profile table
-        #################/
-        self.seek_table("maxp")
-        self.skip(4)
-        numGlyphs = self.read_ushort()
-
-        #################/
-        # cmap - Character to glyph index mapping table
-        #################/
-        cmap_offset = self.seek_table("cmap")
-        self.skip(2)
-        cmapTableCount = self.read_ushort()
-        unicode_cmap_offset = 0
-        unicode_cmap_offset12 = 0
-        
-        for i in range(cmapTableCount):
-            platformID = self.read_ushort()
-            encodingID = self.read_ushort()
-            offset = self.read_ulong()
-            save_pos = self._pos
-            if platformID == 3 and encodingID == 10:  # Microsoft, UCS-4
-                format = self.get_ushort(cmap_offset + offset)
-                if (format == 12):
-                    if not unicode_cmap_offset12:
-                        unicode_cmap_offset12 = cmap_offset + offset
-                    break
-            if ((platformID == 3 and encodingID == 1) or platformID == 0):  # Microsoft, Unicode
-                format = self.get_ushort(cmap_offset + offset)
-                if (format == 4):
-                    if (not unicode_cmap_offset):
-                        unicode_cmap_offset = cmap_offset + offset
-                    break
-                    
-            self.seek(save_pos)
-        
-        if not unicode_cmap_offset and not unicode_cmap_offset12:
-            die('Font (' + self.filename + ') does not have cmap for Unicode (platform 3, encoding 1, format 4, or platform 3, encoding 10, format 12, or platform 0, any encoding, format 4)')
-
-        glyphToChar = {}
-        charToGlyph = {}
-        if unicode_cmap_offset12:
-            self.getCMAP12(unicode_cmap_offset12, glyphToChar, charToGlyph)
-        else:    
-            self.getCMAP4(unicode_cmap_offset, glyphToChar, charToGlyph)
-
-        #################/
-        # hmtx - Horizontal metrics table
-        #################/
-        self.getHMTX(numberOfHMetrics, numGlyphs, glyphToChar, scale)
-
-
-############################################/
-############################################/
-
-    def makeSubset(self, file, subset):
-        self.filename = file
-        self.fh = open(file ,'rb')
-        self._pos = 0
-        self.charWidths = []
-        self.glyphPos = {}
-        self.charToGlyph = {}
-        self.tables = {}
-        self.otables = {}
-        self.ascent = 0
-        self.descent = 0
-        self.skip(4)
-        self.maxUni = 0
-        self.readTableDirectory()
-
-        #################/
-        # head - Font header table
-        #################/
-        self.seek_table("head")
-        self.skip(50) 
-        indexToLocFormat = self.read_ushort()
-        glyphDataFormat = self.read_ushort()
-
-        #################/
-        # hhea - Horizontal header table
-        #################/
-        self.seek_table("hhea")
-        self.skip(32) 
-        metricDataFormat = self.read_ushort()
-        orignHmetrics = numberOfHMetrics = self.read_ushort()
-
-        #################/
-        # maxp - Maximum profile table
-        #################/
-        self.seek_table("maxp")
-        self.skip(4)
-        numGlyphs = self.read_ushort()
-
-        #################/
-        # cmap - Character to glyph index mapping table
-        #################/
-        cmap_offset = self.seek_table("cmap")
-        self.skip(2)
-        cmapTableCount = self.read_ushort()
-        unicode_cmap_offset = 0
-        unicode_cmap_offset12 = 0
-        for i in range(cmapTableCount):
-            platformID = self.read_ushort()
-            encodingID = self.read_ushort()
-            offset = self.read_ulong()
-            save_pos = self._pos
-            if platformID == 3 and encodingID == 10:  # Microsoft, UCS-4
-                format = self.get_ushort(cmap_offset + offset)
-                if (format == 12):
-                    if not unicode_cmap_offset12:
-                        unicode_cmap_offset12 = cmap_offset + offset
-                    break
-            if ((platformID == 3 and encodingID == 1) or platformID == 0):  # Microsoft, Unicode
-                format = self.get_ushort(cmap_offset + offset)
-                if (format == 4):
-                    unicode_cmap_offset = cmap_offset + offset
-                    break
-                
-            self.seek(save_pos )
-        
-        if not unicode_cmap_offset and not unicode_cmap_offset12:
-            die('Font (' + self.filename + ') does not have cmap for Unicode (platform 3, encoding 1, format 4, or platform 3, encoding 10, format 12, or platform 0, any encoding, format 4)')
-
-        glyphToChar = {}
-        charToGlyph = {}
-        if unicode_cmap_offset12:
-            self.getCMAP12(unicode_cmap_offset12, glyphToChar, charToGlyph)
-        else:    
-            self.getCMAP4(unicode_cmap_offset, glyphToChar, charToGlyph)
-
-        self.charToGlyph = charToGlyph
-
-        #################/
-        # hmtx - Horizontal metrics table
-        #################/
-        scale = 1    # not used
-        self.getHMTX(numberOfHMetrics, numGlyphs, glyphToChar, scale)
-
-        #################/
-        # loca - Index to location
-        #################/
-        self.getLOCA(indexToLocFormat, numGlyphs)
-
-        subsetglyphs = [(0, 0)]     # special "sorted dict"!
-        subsetCharToGlyph = {}
-        for code in subset: 
-            if (code in self.charToGlyph):
-                if (self.charToGlyph[code], code) not in subsetglyphs:
-                    subsetglyphs.append((self.charToGlyph[code], code))   # Old Glyph ID => Unicode
-                subsetCharToGlyph[code] = self.charToGlyph[code]    # Unicode to old GlyphID
-            self.maxUni = max(self.maxUni, code)
-        (start,dummy) = self.get_table_pos('glyf')
-
-        subsetglyphs.sort()
-        glyphSet = {}
-        n = 0
-        fsLastCharIndex = 0    # maximum Unicode index (character code) in this font, according to the cmap subtable for platform ID 3 and platform- specific encoding ID 0 or 1.
-        for originalGlyphIdx, uni in subsetglyphs:
-            fsLastCharIndex = max(fsLastCharIndex , uni)
-            glyphSet[originalGlyphIdx] = n    # old glyphID to new glyphID
-            n += 1
-
-        codeToGlyph = {}
-        for uni, originalGlyphIdx in sorted(subsetCharToGlyph.items()):
-            codeToGlyph[uni] = glyphSet[originalGlyphIdx] 
-        
-        self.codeToGlyph = codeToGlyph
-        
-        for originalGlyphIdx, uni in subsetglyphs: 
-            nonlocals = {'start': start, 'glyphSet': glyphSet, 
-                         'subsetglyphs': subsetglyphs}
-            self.getGlyphs(originalGlyphIdx, nonlocals)
-
-        numGlyphs = numberOfHMetrics = len(subsetglyphs)
-
-        #tables copied from the original
-        tags = ['name']
-        for tag in tags:  
-            self.add(tag, self.get_table(tag)) 
-        tags = ['cvt ', 'fpgm', 'prep', 'gasp']
-        for tag in tags:
-            if (tag in self.tables):  
-                self.add(tag, self.get_table(tag))        
-
-        # post - PostScript
-        opost = self.get_table('post')
-        post = "\x00\x03\x00\x00" + substr(opost,4,12) + "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
-        self.add('post', post)
-
-        # Sort CID2GID map into segments of contiguous codes
-        if 0 in codeToGlyph:
-            del codeToGlyph[0]
-        #unset(codeToGlyph[65535])
-        rangeid = 0
-        range_ = {}
-        prevcid = -2
-        prevglidx = -1
-        # for each character
-        for cid, glidx in sorted(codeToGlyph.items()):
-            if (cid == (prevcid + 1) and glidx == (prevglidx + 1)):
-                range_[rangeid].append(glidx)
-            else:
-                # new range
-                rangeid = cid
-                range_[rangeid] = []
-                range_[rangeid].append(glidx)
-            prevcid = cid
-            prevglidx = glidx
-
-        # cmap - Character to glyph mapping - Format 4 (MS / )
-        segCount = len(range_) + 1    # + 1 Last segment has missing character 0xFFFF
-        searchRange = 1
-        entrySelector = 0
-        while (searchRange * 2 <= segCount ):
-            searchRange = searchRange * 2
-            entrySelector = entrySelector + 1
-        
-        searchRange = searchRange * 2
-        rangeShift = segCount * 2 - searchRange
-        length = 16 + (8*segCount ) + (numGlyphs+1)
-        cmap = [0, 1,        # Index : version, number of encoding subtables
-            3, 1,                # Encoding Subtable : platform (MS=3), encoding (Unicode)
-            0, 12,            # Encoding Subtable : offset (hi,lo)
-            4, length, 0,         # Format 4 Mapping subtable: format, length, language
-            segCount*2,
-            searchRange,
-            entrySelector,
-            rangeShift]
-
-        range_ = sorted(range_.items())
-        
-        # endCode(s)
-        for start, subrange in range_:
-            endCode = start + (len(subrange)-1)
-            cmap.append(endCode)    # endCode(s)
-        
-        cmap.append(0xFFFF)    # endCode of last Segment
-        cmap.append(0)    # reservedPad
-
-        # startCode(s)
-        for start, subrange in range_: 
-            cmap.append(start)    # startCode(s)
-        
-        cmap.append(0xFFFF)    # startCode of last Segment
-        # idDelta(s) 
-        for start, subrange in range_: 
-            idDelta = -(start-subrange[0])
-            n += count(subrange)
-            cmap.append(idDelta)    # idDelta(s)
-        
-        cmap.append(1)    # idDelta of last Segment
-        # idRangeOffset(s) 
-        for subrange in range_: 
-            cmap.append(0)    # idRangeOffset[segCount]      Offset in bytes to glyph indexArray, or 0
-        
-        cmap.append(0)    # idRangeOffset of last Segment
-        for subrange, glidx in range_: 
-            cmap.extend(glidx)
-        
-        cmap.append(0)    # Mapping for last character
-        cmapstr = ''
-        for cm in cmap:
-            if cm >= 0:
-                cmapstr += pack(">H", cm) 
-            else:
-                try:
-                    cmapstr += pack(">h", cm) 
-                except:
-                    warnings.warn("cmap value too big/small: %s" % cm)
-                    cmapstr += pack(">H", -cm) 
-        self.add('cmap', cmapstr)
-
-        # glyf - Glyph data
-        (glyfOffset,glyfLength) = self.get_table_pos('glyf')
-        if (glyfLength < self.maxStrLenRead):
-            glyphData = self.get_table('glyf')
-
-        offsets = []
-        glyf = ''
-        pos = 0
-
-        hmtxstr = ''
-        xMinT = 0
-        yMinT = 0
-        xMaxT = 0
-        yMaxT = 0
-        advanceWidthMax = 0
-        minLeftSideBearing = 0
-        minRightSideBearing = 0
-        xMaxExtent = 0
-        maxPoints = 0            # points in non-compound glyph
-        maxContours = 0            # contours in non-compound glyph
-        maxComponentPoints = 0    # points in compound glyph
-        maxComponentContours = 0    # contours in compound glyph
-        maxComponentElements = 0    # number of glyphs referenced at top level
-        maxComponentDepth = 0        # levels of recursion, set to 0 if font has only simple glyphs
-        self.glyphdata = {}
-
-        for originalGlyphIdx, uni in subsetglyphs: 
-            # hmtx - Horizontal Metrics
-            hm = self.getHMetric(orignHmetrics, originalGlyphIdx)    
-            hmtxstr += hm
-
-            offsets.append(pos)
-            try:
-                glyphPos = self.glyphPos[originalGlyphIdx]
-                glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
-            except IndexError:
-                warnings.warn("missing glyph %s" % (originalGlyphIdx))
-                glyphLen = 0
-
-            if (glyfLength < self.maxStrLenRead):
-                data = substr(glyphData,glyphPos,glyphLen)
-            else:
-                if (glyphLen > 0):
-                    data = self.get_chunk(glyfOffset+glyphPos,glyphLen)
-                else:
-                    data = ''
-            
-            if (glyphLen > 0):
-                up = unpack(">H", substr(data,0,2))[0]
-            if (glyphLen > 2 and (up & (1 << 15)) ):     # If number of contours <= -1 i.e. composiste glyph
-                pos_in_glyph = 10
-                flags = GF_MORE
-                nComponentElements = 0
-                while (flags & GF_MORE):
-                    nComponentElements += 1    # number of glyphs referenced at top level
-                    up = unpack(">H", substr(data,pos_in_glyph,2))
-                    flags = up[0]
-                    up = unpack(">H", substr(data,pos_in_glyph+2,2))
-                    glyphIdx = up[0]
-                    self.glyphdata.setdefault(originalGlyphIdx, {}).setdefault('compGlyphs', []).append(glyphIdx)
-                    try:
-                        data = self._set_ushort(data, pos_in_glyph + 2, glyphSet[glyphIdx])
-                    except KeyError:
-                        data = 0
-                        warnings.warn("missing glyph data %s" % glyphIdx)
-                    pos_in_glyph += 4
-                    if (flags & GF_WORDS): 
-                        pos_in_glyph += 4 
-                    else: 
-                        pos_in_glyph += 2 
-                    if (flags & GF_SCALE):
-                        pos_in_glyph += 2 
-                    elif (flags & GF_XYSCALE):
-                        pos_in_glyph += 4 
-                    elif (flags & GF_TWOBYTWO):
-                        pos_in_glyph += 8 
-                
-                maxComponentElements = max(maxComponentElements, nComponentElements)
-            
-            glyf += data
-            pos += glyphLen
-            if (pos % 4 != 0): 
-                padding = 4 - (pos % 4)
-                glyf += str_repeat("\0",padding)
-                pos += padding
-
-        offsets.append(pos)
-        self.add('glyf', glyf)
-
-        # hmtx - Horizontal Metrics
-        self.add('hmtx', hmtxstr)
-
-        # loca - Index to location
-        locastr = ''
-        if (((pos + 1) >> 1) > 0xFFFF): 
-            indexToLocFormat = 1        # long format
-            for offset in offsets:
-                locastr += pack(">L",offset) 
-        else:
-            indexToLocFormat = 0        # short format
-            for offset in offsets:  
-                locastr += pack(">H",(offset/2)) 
-        
-        self.add('loca', locastr)
-
-        # head - Font header
-        head = self.get_table('head')
-        head = self._set_ushort(head, 50, indexToLocFormat)
-        self.add('head', head)
-
-        # hhea - Horizontal Header
-        hhea = self.get_table('hhea')
-        hhea = self._set_ushort(hhea, 34, numberOfHMetrics)
-        self.add('hhea', hhea)
-
-        # maxp - Maximum Profile
-        maxp = self.get_table('maxp')
-        maxp = self._set_ushort(maxp, 4, numGlyphs)
-        self.add('maxp', maxp)
-
-        # OS/2 - OS/2
-        os2 = self.get_table('OS/2')
-        self.add('OS/2', os2 )
-
-        self.fh.close()
-
-        # Put the TTF file together
-        stm = self.endTTFile('')
-        return stm 
-    
-
-    #########################################
-    # Recursively get composite glyph data
-    def getGlyphData(self, originalGlyphIdx, nonlocals):
-        # &maxdepth, &depth, &points, &contours
-        nonlocals['depth'] += 1
-        nonlocals['maxdepth'] = max(nonlocals['maxdepth'], nonlocals['depth'])
-        if (len(self.glyphdata[originalGlyphIdx]['compGlyphs'])):
-            for glyphIdx in self.glyphdata[originalGlyphIdx]['compGlyphs']: 
-                self.getGlyphData(glyphIdx, nonlocals)            
-        
-        elif ((self.glyphdata[originalGlyphIdx]['nContours'] > 0) and nonlocals['depth'] > 0):     # simple
-            contours += self.glyphdata[originalGlyphIdx]['nContours']
-            points += self.glyphdata[originalGlyphIdx]['nPoints']
-        
-        nonlocals['depth'] -= 1
-
-
-    #########################################
-    # Recursively get composite glyphs
-    def getGlyphs(self, originalGlyphIdx, nonlocals):
-        # &start, &glyphSet, &subsetglyphs) 
-        
-        try:
-            glyphPos = self.glyphPos[originalGlyphIdx]
-            glyphLen = self.glyphPos[originalGlyphIdx + 1] - glyphPos
-        except IndexError:
-            warnings.warn("missing glyph %s" % (originalGlyphIdx))
-            return
-
-        if (not glyphLen):  
-            return
-        
-        self.seek(nonlocals['start'] + glyphPos)
-        numberOfContours = self.read_short()
-        if (numberOfContours < 0):
-            self.skip(8)
-            flags = GF_MORE
-            while (flags & GF_MORE): 
-                flags = self.read_ushort()
-                glyphIdx = self.read_ushort()
-                if (glyphIdx not in nonlocals['glyphSet']):
-                    nonlocals['glyphSet'][glyphIdx] = len(nonlocals['subsetglyphs'])    # old glyphID to new glyphID
-                    nonlocals['subsetglyphs'].append((glyphIdx, 1))
-                
-                savepos = self.fh.tell()
-                self.getGlyphs(glyphIdx, nonlocals)
-                self.seek(savepos)
-                if (flags & GF_WORDS):
-                    self.skip(4)
-                else:
-                    self.skip(2)
-                if (flags & GF_SCALE):
-                    self.skip(2)
-                elif (flags & GF_XYSCALE):
-                    self.skip(4)
-                elif (flags & GF_TWOBYTWO):
-                    self.skip(8)
-
-    #########################################
-
-    def getHMTX(self, numberOfHMetrics, numGlyphs, glyphToChar, scale):
-        start = self.seek_table("hmtx")
-        aw = 0
-        self.charWidths = [0] * 256*256*2
-        nCharWidths = 0
-        if ((numberOfHMetrics*4) < self.maxStrLenRead): 
-            data = self.get_chunk(start,(numberOfHMetrics*4))
-            arr = unpack(">" + "H" * (len(data)/2), data)
-        else:
-            self.seek(start) 
-        for glyph in range(numberOfHMetrics): 
-            if ((numberOfHMetrics*4) < self.maxStrLenRead):
-                aw = arr[(glyph*2)] # PHP starts arrays from index 0!? +1
-            else:
-                aw = self.read_ushort()
-                lsb = self.read_ushort()
-            
-            if (glyph in glyphToChar or glyph == 0):
-                if (aw >= (1 << 15) ):
-                    aw = 0     # 1.03 Some (arabic) fonts have -ve values for width
-                    # although should be unsigned value - comes out as e.g. 65108 (intended -50)
-                if (glyph == 0): 
-                    self.defaultWidth = scale*aw
-                    continue
-                
-                for char in glyphToChar[glyph]: 
-                    if (char != 0 and char != 65535): 
-                        w = int(round(scale*aw))
-                        if (w == 0):  w = 65535 
-                        if (char < 196608): 
-                            self.charWidths[char] = w 
-                            nCharWidths += 1
-            
-        
-        data = self.get_chunk((start+numberOfHMetrics*4),(numGlyphs*2))
-        arr = unpack(">" + "H" * (len(data)/2), data)
-        diff = numGlyphs-numberOfHMetrics
-        for pos in range(diff): 
-            glyph = pos + numberOfHMetrics
-            if (glyph in glyphToChar): 
-                for char in glyphToChar[glyph]: 
-                    if (char != 0 and char != 65535): 
-                        w = int(round(scale*aw))
-                        if (w == 0):  w = 65535 
-                        if (char < 196608):
-                            self.charWidths[char] = w
-                            nCharWidths += 1 
-                        
-        
-        # NB 65535 is a set width of 0
-        # First bytes define number of chars in font
-        self.charWidths[0] = nCharWidths 
-    
-
-    def getHMetric(self, numberOfHMetrics, gid): 
-        start = self.seek_table("hmtx")
-        if (gid < numberOfHMetrics):
-            self.seek(start+(gid*4))
-            hm = self.fh.read(4)
-        else:
-            self.seek(start+((numberOfHMetrics-1)*4))
-            hm = self.fh.read(2)
-            self.seek(start+(numberOfHMetrics*2)+(gid*2))
-            hm += self.fh.read(2)
-        return hm
-    
-
-    def getLOCA(self, indexToLocFormat, numGlyphs): 
-        start = self.seek_table('loca')
-        self.glyphPos = []
-        if (indexToLocFormat == 0):
-            data = self.get_chunk(start,(numGlyphs*2)+2)
-            arr = unpack(">" + "H" * (len(data)/2), data)
-            for n in range(numGlyphs): 
-                self.glyphPos.append((arr[n] * 2))  # n+1 !?
-        elif (indexToLocFormat == 1):
-            data = self.get_chunk(start,(numGlyphs*4)+4)
-            arr = unpack(">" + "L" * (len(data)/4), data)
-            for n in range(numGlyphs):
-                self.glyphPos.append((arr[n]))  # n+1 !?
-        else:
-            die('Unknown location table format ' + indexToLocFormat)
-
-    # CMAP Format 4
-    def getCMAP4(self, unicode_cmap_offset, glyphToChar, charToGlyph):
-        self.maxUniChar = 0
-        self.seek(unicode_cmap_offset + 2)
-        length = self.read_ushort()
-        limit = unicode_cmap_offset + length
-        self.skip(2)
-
-        segCount = self.read_ushort() / 2
-        self.skip(6)
-        endCount = []
-        for i in range(segCount):
-            endCount.append(self.read_ushort())
-        self.skip(2)
-        startCount = []
-        for i in range(segCount):
-            startCount.append(self.read_ushort()) 
-        idDelta = []
-        for i in range(segCount):
-            idDelta.append(self.read_short())         # ???? was unsigned short
-        idRangeOffset_start = self._pos
-        idRangeOffset = []
-        for i in range(segCount):
-            idRangeOffset.append(self.read_ushort()) 
-
-        for n in range(segCount): 
-            endpoint = (endCount[n] + 1)
-            for unichar in range(startCount[n], endpoint, 1): 
-                if (idRangeOffset[n] == 0):
-                    glyph = (unichar + idDelta[n]) & 0xFFFF
-                else:
-                    offset = (unichar - startCount[n]) * 2 + idRangeOffset[n]
-                    offset = idRangeOffset_start + 2 * n + offset
-                    if (offset >= limit):
-                        glyph = 0
-                    else:
-                        glyph = self.get_ushort(offset)
-                        if (glyph != 0):
-                           glyph = (glyph + idDelta[n]) & 0xFFFF
-                    
-                charToGlyph[unichar] = glyph
-                if (unichar < 196608):
-                    self.maxUniChar = max(unichar,self.maxUniChar) 
-                glyphToChar.setdefault(glyph, []).append(unichar)
-
-    # CMAP Format 12
-    def getCMAP12(self, unicode_cmap_offset, glyphToChar, charToGlyph):
-        self.maxUniChar = 0
-        # table (skip format version, should be 12)
-        self.seek(unicode_cmap_offset + 2)
-        # reserved
-        self.skip(2)
-        # table length
-        length = self.read_ulong()
-        # language (should be 0)
-        self.skip(4)
-        # groups count
-        grpCount = self.read_ulong()
-
-        if 2 + 2 + 4 + 4 + 4 + grpCount * 3 * 4 > length:
-            die("TTF format 12 cmap table too small")  
-        for n in range(grpCount):
-            startCharCode = self.read_ulong()
-            endCharCode = self.read_ulong()
-            glyph = self.read_ulong()
-            for unichar in range(startCharCode, endCharCode + 1):
-                charToGlyph[unichar] = glyph
-                if (unichar < 196608):
-                    self.maxUniChar = max(unichar, self.maxUniChar) 
-                glyphToChar.setdefault(glyph, []).append(unichar)
-                glyph += 1
-            
-            
-
-    # Put the TTF file together
-    def endTTFile(self, stm): 
-        stm = ''
-        numTables = count(self.otables)
-        searchRange = 1
-        entrySelector = 0
-        while (searchRange * 2 <= numTables): 
-            searchRange = searchRange * 2
-            entrySelector = entrySelector + 1
-        
-        searchRange = searchRange * 16
-        rangeShift = numTables * 16 - searchRange
-
-        # Header
-        if (_TTF_MAC_HEADER): 
-            stm += (pack(">LHHHH", 0x74727565, numTables, searchRange, entrySelector, rangeShift))    # Mac
-        else:
-            stm += (pack(">LHHHH", 0x00010000 , numTables, searchRange, entrySelector, rangeShift))    # Windows
-
-        
-        # Table directory
-        tables = self.otables
-
-        offset = 12 + numTables * 16
-        sorted_tables = sorted(tables.items())
-        for tag, data in sorted_tables:
-            if (tag == 'head'):
-                head_start = offset 
-            stm += tag
-            checksum = calcChecksum(data)
-            stm += pack(">HH", checksum[0],checksum[1])
-            stm += pack(">LL", offset, strlen(data))
-            paddedLength = (strlen(data)+3)&~3
-            offset = offset + paddedLength
-
-        # Table data
-        for tag, data in sorted_tables: 
-            data += "\0\0\0"
-            stm += substr(data,0,(strlen(data)&~3))
-
-        checksum = calcChecksum(stm)
-        checksum = sub32((0xB1B0,0xAFBA), checksum)
-        chk = pack(">HH", checksum[0],checksum[1])
-        stm = self.splice(stm,(head_start + 8),chk)
-        return stm 
-    
-if __name__ == '__main__':
-    ttf = TTFontFile()
-    ttffile = 'DejaVuSansCondensed.ttf';
-    ttf.getMetrics(ttffile)
-    # test basic metrics:
-    assert round(ttf.descent, 0) == -236
-    assert round(ttf.capHeight, 0) == 928
-    assert ttf.flags == 4
-    assert [round(i, 0) for i in ttf.bbox] == [-918, -415, 1513, 1167]
-    assert ttf.italicAngle == 0
-    assert ttf.stemV == 87
-    assert round(ttf.defaultWidth, 0) == 540
-    assert round(ttf.underlinePosition, 0) == -63
-    assert round(ttf.underlineThickness, 0) == 44
-    # test char widths 8(against binary file generated by tfpdf.php):
-    assert ''.join(ttf.charWidths) == open("dejavusanscondensed.cw.dat").read()
-    

+ 0 - 74
frameworks/Python/web2py/web2py/gluon/contrib/gae_memcache.py

@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-Developed by Robin Bhattacharyya (memecache for GAE)
-Released under the web2py license (LGPL)
-
-from gluon.contrib.gae_memcache import MemcacheClient
-cache.ram=cache.disk=MemcacheClient(request)
-"""
-
-import time
-from google.appengine.api.memcache import Client
-
-
-class MemcacheClient(object):
-
-    client = Client()
-
-    def __init__(self, request, default_time_expire = 300):
-        self.request = request
-        self.default_time_expire = default_time_expire
-
-    def initialize(self):
-        pass
-
-    def __call__(
-        self,
-        key,
-        f,
-        time_expire=None,
-    ):
-        if time_expire is None:
-            time_expire = self.default_time_expire
-
-        key = '%s/%s' % (self.request.application, key)
-        value = None
-        obj = self.client.get(key) if time_expire != 0 else None
-        if obj:
-            value = obj[1]
-        elif f is not None:
-            value = f()
-            self.client.set(key, (time.time(), value), time=time_expire)
-        return value
-
-    def increment(self, key, value=1):
-        key = '%s/%s' % (self.request.application, key)
-        obj = self.client.get(key)
-        if obj:
-            value = obj[1] + value
-        self.client.set(key, (time.time(), value))
-        return value
-
-    def incr(self, key, value=1):
-        return self.increment(key, value)
-
-    def clear(self, key=None):
-        if key:
-            key = '%s/%s' % (self.request.application, key)
-            self.client.delete(key)
-        else:
-            self.client.flush_all()
-
-    def delete(self, *a, **b):
-        return self.client.delete(*a, **b)
-
-    def get(self, *a, **b):
-        return self.client.get(*a, **b)
-
-    def set(self, *a, **b):
-        return self.client.set(*a, **b)
-
-    def flush_all(self, *a, **b):
-        return self.client.delete(*a, **b)

+ 0 - 89
frameworks/Python/web2py/web2py/gluon/contrib/gae_retry.py

@@ -1,89 +0,0 @@
-def autoretry_datastore_timeouts(attempts=5.0, interval=0.1, exponent=2.0):
-    """
-    Copyright (C)  2009  twitter.com/rcb
-
-    Permission is hereby granted, free of charge, to any person
-    obtaining a copy of this software and associated documentation
-    files (the "Software"), to deal in the Software without
-    restriction, including without limitation the rights to use,
-    copy, modify, merge, publish, distribute, sublicense, and/or sell
-    copies of the Software, and to permit persons to whom the
-    Software is furnished to do so, subject to the following
-    conditions:
-
-    The above copyright notice and this permission notice shall be
-    included in all copies or substantial portions of the Software.
-    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-    OTHER DEALINGS IN THE SOFTWARE.
-
-    ======================================================================
-
-    This function wraps the AppEngine Datastore API to autoretry
-    datastore timeouts at the lowest accessible level.
-
-    The benefits of this approach are:
-
-    1. Small Footprint:  Does not monkey with Model internals
-                         which may break in future releases.
-    2. Max Performance:  Retrying at this lowest level means
-                         serialization and key formatting is not
-                         needlessly repeated on each retry.
-    At initialization time, execute this:
-
-    >>> autoretry_datastore_timeouts()
-
-    Should only be called once, subsequent calls have no effect.
-
-    >>> autoretry_datastore_timeouts() # no effect
-
-    Default (5) attempts: .1, .2, .4, .8, 1.6 seconds
-
-    Parameters can each be specified as floats.
-
-    :param attempts: maximum number of times to retry.
-    :param interval: base seconds to sleep between retries.
-    :param exponent: rate of exponential back-off.
-    """
-
-    import time
-    import logging
-    from google.appengine.api import apiproxy_stub_map
-    from google.appengine.runtime import apiproxy_errors
-    from google.appengine.datastore import datastore_pb
-
-    attempts = float(attempts)
-    interval = float(interval)
-    exponent = float(exponent)
-    wrapped = apiproxy_stub_map.MakeSyncCall
-    errors = {datastore_pb.Error.TIMEOUT: 'Timeout',
-              datastore_pb.Error.CONCURRENT_TRANSACTION: 'TransactionFailedError'}
-
-    def wrapper(*args, **kwargs):
-        count = 0.0
-        while True:
-            try:
-                return wrapped(*args, **kwargs)
-            except apiproxy_errors.ApplicationError, err:
-                errno = err.application_error
-                if errno not in errors:
-                    raise
-                sleep = (exponent ** count) * interval
-                count += 1.0
-                if count > attempts:
-                    raise
-                msg = "Datastore %s: retry #%d in %s seconds.\n%s"
-                vals = ''
-                if count == 1.0:
-                    vals = '\n'.join([str(a) for a in args])
-                logging.warning(msg % (errors[errno], count, sleep, vals))
-                time.sleep(sleep)
-
-    setattr(wrapper, '_autoretry_datastore_timeouts', False)
-    if getattr(wrapped, '_autoretry_datastore_timeouts', True):
-        apiproxy_stub_map.MakeSyncCall = wrapper

+ 0 - 2
frameworks/Python/web2py/web2py/gluon/contrib/gateways/__init__.py

@@ -1,2 +0,0 @@
-
-

+ 0 - 1332
frameworks/Python/web2py/web2py/gluon/contrib/gateways/fcgi.py

@@ -1,1332 +0,0 @@
-# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <[email protected]>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in the
-#    documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-# $Id$
-
-"""
-fcgi - a FastCGI/WSGI gateway.
-
-For more information about FastCGI, see <http://www.fastcgi.com/>.
-
-For more information about the Web Server Gateway Interface, see
-<http://www.python.org/peps/pep-0333.html>.
-
-Example usage:
-
-  #!/usr/bin/env python
-  from myapplication import app # Assume app is your WSGI application object
-  from fcgi import WSGIServer
-  WSGIServer(app).run()
-
-See the documentation for WSGIServer/Server for more information.
-
-On most platforms, fcgi will fallback to regular CGI behavior if run in a
-non-FastCGI context. If you want to force CGI behavior, set the environment
-variable FCGI_FORCE_CGI to "Y" or "y".
-"""
-
-__author__ = 'Allan Saddi <[email protected]>'
-__version__ = '$Revision$'
-
-import sys
-import os
-import signal
-import struct
-import cStringIO as StringIO
-import select
-import socket
-import errno
-import traceback
-
-try:
-    import thread
-    import threading
-    thread_available = True
-except ImportError:
-    import dummy_thread as thread
-    import dummy_threading as threading
-    thread_available = False
-
-# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
-if not hasattr(socket, 'SHUT_WR'):
-    socket.SHUT_WR = 1
-
-__all__ = ['WSGIServer']
-
-# Constants from the spec.
-FCGI_LISTENSOCK_FILENO = 0
-
-FCGI_HEADER_LEN = 8
-
-FCGI_VERSION_1 = 1
-
-FCGI_BEGIN_REQUEST = 1
-FCGI_ABORT_REQUEST = 2
-FCGI_END_REQUEST = 3
-FCGI_PARAMS = 4
-FCGI_STDIN = 5
-FCGI_STDOUT = 6
-FCGI_STDERR = 7
-FCGI_DATA = 8
-FCGI_GET_VALUES = 9
-FCGI_GET_VALUES_RESULT = 10
-FCGI_UNKNOWN_TYPE = 11
-FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
-
-FCGI_NULL_REQUEST_ID = 0
-
-FCGI_KEEP_CONN = 1
-
-FCGI_RESPONDER = 1
-FCGI_AUTHORIZER = 2
-FCGI_FILTER = 3
-
-FCGI_REQUEST_COMPLETE = 0
-FCGI_CANT_MPX_CONN = 1
-FCGI_OVERLOADED = 2
-FCGI_UNKNOWN_ROLE = 3
-
-FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
-FCGI_MAX_REQS = 'FCGI_MAX_REQS'
-FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
-
-FCGI_Header = '!BBHHBx'
-FCGI_BeginRequestBody = '!HB5x'
-FCGI_EndRequestBody = '!LB3x'
-FCGI_UnknownTypeBody = '!B7x'
-
-FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
-FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
-
-if __debug__:
-    import time
-
-    # Set non-zero to write debug output to a file.
-    DEBUG = 0
-    DEBUGLOG = '/tmp/fcgi.log'
-
-    def _debug(level, msg):
-        if DEBUG < level:
-            return
-
-        try:
-            f = open(DEBUGLOG, 'a')
-            f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
-            f.close()
-        except:
-            pass
-
-class InputStream(object):
-    """
-    File-like object representing FastCGI input streams (FCGI_STDIN and
-    FCGI_DATA). Supports the minimum methods required by WSGI spec.
-    """
-    def __init__(self, conn):
-        self._conn = conn
-
-        # See Server.
-        self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
-
-        self._buf = ''
-        self._bufList = []
-        self._pos = 0 # Current read position.
-        self._avail = 0 # Number of bytes currently available.
-
-        self._eof = False # True when server has sent EOF notification.
-
-    def _shrinkBuffer(self):
-        """Gets rid of already read data (since we can't rewind)."""
-        if self._pos >= self._shrinkThreshold:
-            self._buf = self._buf[self._pos:]
-            self._avail -= self._pos
-            self._pos = 0
-
-            assert self._avail >= 0
-
-    def _waitForData(self):
-        """Waits for more data to become available."""
-        self._conn.process_input()
-
-    def read(self, n=-1):
-        if self._pos == self._avail and self._eof:
-            return ''
-        while True:
-            if n < 0 or (self._avail - self._pos) < n:
-                # Not enough data available.
-                if self._eof:
-                    # And there's no more coming.
-                    newPos = self._avail
-                    break
-                else:
-                    # Wait for more data.
-                    self._waitForData()
-                    continue
-            else:
-                newPos = self._pos + n
-                break
-        # Merge buffer list, if necessary.
-        if self._bufList:
-            self._buf += ''.join(self._bufList)
-            self._bufList = []
-        r = self._buf[self._pos:newPos]
-        self._pos = newPos
-        self._shrinkBuffer()
-        return r
-
-    def readline(self, length=None):
-        if self._pos == self._avail and self._eof:
-            return ''
-        while True:
-            # Unfortunately, we need to merge the buffer list early.
-            if self._bufList:
-                self._buf += ''.join(self._bufList)
-                self._bufList = []
-            # Find newline.
-            i = self._buf.find('\n', self._pos)
-            if i < 0:
-                # Not found?
-                if self._eof:
-                    # No more data coming.
-                    newPos = self._avail
-                    break
-                else:
-                    # Wait for more to come.
-                    self._waitForData()
-                    continue
-            else:
-                newPos = i + 1
-                break
-        if length is not None:
-            if self._pos + length < newPos:
-                newPos = self._pos + length
-        r = self._buf[self._pos:newPos]
-        self._pos = newPos
-        self._shrinkBuffer()
-        return r
-
-    def readlines(self, sizehint=0):
-        total = 0
-        lines = []
-        line = self.readline()
-        while line:
-            lines.append(line)
-            total += len(line)
-            if 0 < sizehint <= total:
-                break
-            line = self.readline()
-        return lines
-
-    def __iter__(self):
-        return self
-
-    def next(self):
-        r = self.readline()
-        if not r:
-            raise StopIteration
-        return r
-
-    def add_data(self, data):
-        if not data:
-            self._eof = True
-        else:
-            self._bufList.append(data)
-            self._avail += len(data)
-
-class MultiplexedInputStream(InputStream):
-    """
-    A version of InputStream meant to be used with MultiplexedConnections.
-    Assumes the MultiplexedConnection (the producer) and the Request
-    (the consumer) are running in different threads.
-    """
-    def __init__(self, conn):
-        super(MultiplexedInputStream, self).__init__(conn)
-
-        # Arbitrates access to this InputStream (it's used simultaneously
-        # by a Request and its owning Connection object).
-        lock = threading.RLock()
-
-        # Notifies Request thread that there is new data available.
-        self._lock = threading.Condition(lock)
-
-    def _waitForData(self):
-        # Wait for notification from add_data().
-        self._lock.wait()
-
-    def read(self, n=-1):
-        self._lock.acquire()
-        try:
-            return super(MultiplexedInputStream, self).read(n)
-        finally:
-            self._lock.release()
-
-    def readline(self, length=None):
-        self._lock.acquire()
-        try:
-            return super(MultiplexedInputStream, self).readline(length)
-        finally:
-            self._lock.release()
-
-    def add_data(self, data):
-        self._lock.acquire()
-        try:
-            super(MultiplexedInputStream, self).add_data(data)
-            self._lock.notify()
-        finally:
-            self._lock.release()
-
-class OutputStream(object):
-    """
-    FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
-    write() or writelines() immediately result in Records being sent back
-    to the server. Buffering should be done in a higher level!
-    """
-    def __init__(self, conn, req, type, buffered=False):
-        self._conn = conn
-        self._req = req
-        self._type = type
-        self._buffered = buffered
-        self._bufList = [] # Used if buffered is True
-        self.dataWritten = False
-        self.closed = False
-
-    def _write(self, data):
-        length = len(data)
-        while length:
-            toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)
-
-            rec = Record(self._type, self._req.requestId)
-            rec.contentLength = toWrite
-            rec.contentData = data[:toWrite]
-            self._conn.writeRecord(rec)
-
-            data = data[toWrite:]
-            length -= toWrite
-
-    def write(self, data):
-        assert not self.closed
-
-        if not data:
-            return
-
-        self.dataWritten = True
-
-        if self._buffered:
-            self._bufList.append(data)
-        else:
-            self._write(data)
-
-    def writelines(self, lines):
-        assert not self.closed
-
-        for line in lines:
-            self.write(line)
-
-    def flush(self):
-        # Only need to flush if this OutputStream is actually buffered.
-        if self._buffered:
-            data = ''.join(self._bufList)
-            self._bufList = []
-            self._write(data)
-
-    # Though available, the following should NOT be called by WSGI apps.
-    def close(self):
-        """Sends end-of-stream notification, if necessary."""
-        if not self.closed and self.dataWritten:
-            self.flush()
-            rec = Record(self._type, self._req.requestId)
-            self._conn.writeRecord(rec)
-            self.closed = True
-
-class TeeOutputStream(object):
-    """
-    Simple wrapper around two or more output file-like objects that copies
-    written data to all streams.
-    """
-    def __init__(self, streamList):
-        self._streamList = streamList
-
-    def write(self, data):
-        for f in self._streamList:
-            f.write(data)
-
-    def writelines(self, lines):
-        for line in lines:
-            self.write(line)
-
-    def flush(self):
-        for f in self._streamList:
-            f.flush()
-
-class StdoutWrapper(object):
-    """
-    Wrapper for sys.stdout so we know if data has actually been written.
-    """
-    def __init__(self, stdout):
-        self._file = stdout
-        self.dataWritten = False
-
-    def write(self, data):
-        if data:
-            self.dataWritten = True
-        self._file.write(data)
-
-    def writelines(self, lines):
-        for line in lines:
-            self.write(line)
-
-    def __getattr__(self, name):
-        return getattr(self._file, name)
-
-def decode_pair(s, pos=0):
-    """
-    Decodes a name/value pair.
-
-    The number of bytes decoded as well as the name/value pair
-    are returned.
-    """
-    nameLength = ord(s[pos])
-    if nameLength & 128:
-        nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
-        pos += 4
-    else:
-        pos += 1
-
-    valueLength = ord(s[pos])
-    if valueLength & 128:
-        valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
-        pos += 4
-    else:
-        pos += 1
-
-    name = s[pos:pos+nameLength]
-    pos += nameLength
-    value = s[pos:pos+valueLength]
-    pos += valueLength
-
-    return (pos, (name, value))
-
-def encode_pair(name, value):
-    """
-    Encodes a name/value pair.
-
-    The encoded string is returned.
-    """
-    nameLength = len(name)
-    if nameLength < 128:
-        s = chr(nameLength)
-    else:
-        s = struct.pack('!L', nameLength | 0x80000000L)
-
-    valueLength = len(value)
-    if valueLength < 128:
-        s += chr(valueLength)
-    else:
-        s += struct.pack('!L', valueLength | 0x80000000L)
-
-    return s + name + value
-
-class Record(object):
-    """
-    A FastCGI Record.
-
-    Used for encoding/decoding records.
-    """
-    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
-        self.version = FCGI_VERSION_1
-        self.type = type
-        self.requestId = requestId
-        self.contentLength = 0
-        self.paddingLength = 0
-        self.contentData = ''
-
-    def _recvall(sock, length):
-        """
-        Attempts to receive length bytes from a socket, blocking if necessary.
-        (Socket may be blocking or non-blocking.)
-        """
-        dataList = []
-        recvLen = 0
-        while length:
-            try:
-                data = sock.recv(length)
-            except socket.error, e:
-                if e[0] == errno.EAGAIN:
-                    select.select([sock], [], [])
-                    continue
-                else:
-                    raise
-            if not data: # EOF
-                break
-            dataList.append(data)
-            dataLen = len(data)
-            recvLen += dataLen
-            length -= dataLen
-        return ''.join(dataList), recvLen
-    _recvall = staticmethod(_recvall)
-
-    def read(self, sock):
-        """Read and decode a Record from a socket."""
-        try:
-            header, length = self._recvall(sock, FCGI_HEADER_LEN)
-        except:
-            raise EOFError
-
-        if length < FCGI_HEADER_LEN:
-            raise EOFError
-
-        self.version, self.type, self.requestId, self.contentLength, \
-                      self.paddingLength = struct.unpack(FCGI_Header, header)
-
-        if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
-                             'contentLength = %d' %
-                             (sock.fileno(), self.type, self.requestId,
-                              self.contentLength))
-
-        if self.contentLength:
-            try:
-                self.contentData, length = self._recvall(sock,
-                                                         self.contentLength)
-            except:
-                raise EOFError
-
-            if length < self.contentLength:
-                raise EOFError
-
-        if self.paddingLength:
-            try:
-                self._recvall(sock, self.paddingLength)
-            except:
-                raise EOFError
-
-    def _sendall(sock, data):
-        """
-        Writes data to a socket and does not return until all the data is sent.
-        """
-        length = len(data)
-        while length:
-            try:
-                sent = sock.send(data)
-            except socket.error, e:
-                if e[0] == errno.EAGAIN:
-                    select.select([], [sock], [])
-                    continue
-                else:
-                    raise
-            data = data[sent:]
-            length -= sent
-    _sendall = staticmethod(_sendall)
-
-    def write(self, sock):
-        """Encode and write a Record to a socket."""
-        self.paddingLength = -self.contentLength & 7
-
-        if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
-                             'contentLength = %d' %
-                             (sock.fileno(), self.type, self.requestId,
-                              self.contentLength))
-
-        header = struct.pack(FCGI_Header, self.version, self.type,
-                             self.requestId, self.contentLength,
-                             self.paddingLength)
-        self._sendall(sock, header)
-        if self.contentLength:
-            self._sendall(sock, self.contentData)
-        if self.paddingLength:
-            self._sendall(sock, '\x00'*self.paddingLength)
-
-class Request(object):
-    """
-    Represents a single FastCGI request.
-
-    These objects are passed to your handler and is the main interface
-    between your handler and the fcgi module. The methods should not
-    be called by your handler. However, server, params, stdin, stdout,
-    stderr, and data are free for your handler's use.
-    """
-    def __init__(self, conn, inputStreamClass):
-        self._conn = conn
-
-        self.server = conn.server
-        self.params = {}
-        self.stdin = inputStreamClass(conn)
-        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
-        self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
-        self.data = inputStreamClass(conn)
-
-    def run(self):
-        """Runs the handler, flushes the streams, and ends the request."""
-        try:
-            protocolStatus, appStatus = self.server.handler(self)
-        except:
-            traceback.print_exc(file=self.stderr)
-            self.stderr.flush()
-            if not self.stdout.dataWritten:
-                self.server.error(self)
-
-            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0
-
-        if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
-                             (protocolStatus, appStatus))
-
-        self._flush()
-        self._end(appStatus, protocolStatus)
-
-    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
-        self._conn.end_request(self, appStatus, protocolStatus)
-
-    def _flush(self):
-        self.stdout.close()
-        self.stderr.close()
-
-class CGIRequest(Request):
-    """A normal CGI request disguised as a FastCGI request."""
-    def __init__(self, server):
-        # These are normally filled in by Connection.
-        self.requestId = 1
-        self.role = FCGI_RESPONDER
-        self.flags = 0
-        self.aborted = False
-
-        self.server = server
-        self.params = dict(os.environ)
-        self.stdin = sys.stdin
-        self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
-        self.stderr = sys.stderr
-        self.data = StringIO.StringIO()
-
-    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
-        sys.exit(appStatus)
-
-    def _flush(self):
-        # Not buffered, do nothing.
-        pass
-
-class Connection(object):
-    """
-    A Connection with the web server.
-
-    Each Connection is associated with a single socket (which is
-    connected to the web server) and is responsible for handling all
-    the FastCGI message processing for that socket.
-    """
-    _multiplexed = False
-    _inputStreamClass = InputStream
-
-    def __init__(self, sock, addr, server):
-        self._sock = sock
-        self._addr = addr
-        self.server = server
-
-        # Active Requests for this Connection, mapped by request ID.
-        self._requests = {}
-
-    def _cleanupSocket(self):
-        """Close the Connection's socket."""
-        try:
-            self._sock.shutdown(socket.SHUT_WR)
-        except:
-            return
-        try:
-            while True:
-                r, w, e = select.select([self._sock], [], [])
-                if not r or not self._sock.recv(1024):
-                    break
-        except:
-            pass
-        self._sock.close()
-
-    def run(self):
-        """Begin processing data from the socket."""
-        self._keepGoing = True
-        while self._keepGoing:
-            try:
-                self.process_input()
-            except EOFError:
-                break
-            except (select.error, socket.error), e:
-                if e[0] == errno.EBADF: # Socket was closed by Request.
-                    break
-                raise
-
-        self._cleanupSocket()
-
-    def process_input(self):
-        """Attempt to read a single Record from the socket and process it."""
-        # Currently, any children Request threads notify this Connection
-        # that it is no longer needed by closing the Connection's socket.
-        # We need to put a timeout on select, otherwise we might get
-        # stuck in it indefinitely... (I don't like this solution.)
-        while self._keepGoing:
-            try:
-                r, w, e = select.select([self._sock], [], [], 1.0)
-            except ValueError:
-                # Sigh. ValueError gets thrown sometimes when passing select
-                # a closed socket.
-                raise EOFError
-            if r: break
-        if not self._keepGoing:
-            return
-        rec = Record()
-        rec.read(self._sock)
-
-        if rec.type == FCGI_GET_VALUES:
-            self._do_get_values(rec)
-        elif rec.type == FCGI_BEGIN_REQUEST:
-            self._do_begin_request(rec)
-        elif rec.type == FCGI_ABORT_REQUEST:
-            self._do_abort_request(rec)
-        elif rec.type == FCGI_PARAMS:
-            self._do_params(rec)
-        elif rec.type == FCGI_STDIN:
-            self._do_stdin(rec)
-        elif rec.type == FCGI_DATA:
-            self._do_data(rec)
-        elif rec.requestId == FCGI_NULL_REQUEST_ID:
-            self._do_unknown_type(rec)
-        else:
-            # Need to complain about this.
-            pass
-
-    def writeRecord(self, rec):
-        """
-        Write a Record to the socket.
-        """
-        rec.write(self._sock)
-
-    def end_request(self, req, appStatus=0L,
-                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
-        """
-        End a Request.
-
-        Called by Request objects. An FCGI_END_REQUEST Record is
-        sent to the web server. If the web server no longer requires
-        the connection, the socket is closed, thereby ending this
-        Connection (run() returns).
-        """
-        rec = Record(FCGI_END_REQUEST, req.requestId)
-        rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
-                                      protocolStatus)
-        rec.contentLength = FCGI_EndRequestBody_LEN
-        self.writeRecord(rec)
-
-        if remove:
-            del self._requests[req.requestId]
-
-        if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)
-
-        if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
-            self._cleanupSocket()
-            self._keepGoing = False
-
-    def _do_get_values(self, inrec):
-        """Handle an FCGI_GET_VALUES request from the web server."""
-        outrec = Record(FCGI_GET_VALUES_RESULT)
-
-        pos = 0
-        while pos < inrec.contentLength:
-            pos, (name, value) = decode_pair(inrec.contentData, pos)
-            cap = self.server.capability.get(name)
-            if cap is not None:
-                outrec.contentData += encode_pair(name, str(cap))
-
-        outrec.contentLength = len(outrec.contentData)
-        self.writeRecord(outrec)
-
-    def _do_begin_request(self, inrec):
-        """Handle an FCGI_BEGIN_REQUEST from the web server."""
-        role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
-
-        req = self.server.request_class(self, self._inputStreamClass)
-        req.requestId, req.role, req.flags = inrec.requestId, role, flags
-        req.aborted = False
-
-        if not self._multiplexed and self._requests:
-            # Can't multiplex requests.
-            self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
-        else:
-            self._requests[inrec.requestId] = req
-
-    def _do_abort_request(self, inrec):
-        """
-        Handle an FCGI_ABORT_REQUEST from the web server.
-
-        We just mark a flag in the associated Request.
-        """
-        req = self._requests.get(inrec.requestId)
-        if req is not None:
-            req.aborted = True
-
-    def _start_request(self, req):
-        """Run the request."""
-        # Not multiplexed, so run it inline.
-        req.run()
-
-    def _do_params(self, inrec):
-        """
-        Handle an FCGI_PARAMS Record.
-
-        If the last FCGI_PARAMS Record is received, start the request.
-        """
-        req = self._requests.get(inrec.requestId)
-        if req is not None:
-            if inrec.contentLength:
-                pos = 0
-                while pos < inrec.contentLength:
-                    pos, (name, value) = decode_pair(inrec.contentData, pos)
-                    req.params[name] = value
-            else:
-                self._start_request(req)
-
-    def _do_stdin(self, inrec):
-        """Handle the FCGI_STDIN stream."""
-        req = self._requests.get(inrec.requestId)
-        if req is not None:
-            req.stdin.add_data(inrec.contentData)
-
-    def _do_data(self, inrec):
-        """Handle the FCGI_DATA stream."""
-        req = self._requests.get(inrec.requestId)
-        if req is not None:
-            req.data.add_data(inrec.contentData)
-
-    def _do_unknown_type(self, inrec):
-        """Handle an unknown request type. Respond accordingly."""
-        outrec = Record(FCGI_UNKNOWN_TYPE)
-        outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
-        outrec.contentLength = FCGI_UnknownTypeBody_LEN
-        self.writeRecord(outrec)
-
-class MultiplexedConnection(Connection):
-    """
-    A version of Connection capable of handling multiple requests
-    simultaneously.
-    """
-    _multiplexed = True
-    _inputStreamClass = MultiplexedInputStream
-
-    def __init__(self, sock, addr, server):
-        super(MultiplexedConnection, self).__init__(sock, addr, server)
-
-        # Used to arbitrate access to self._requests.
-        lock = threading.RLock()
-
-        # Notification is posted everytime a request completes, allowing us
-        # to quit cleanly.
-        self._lock = threading.Condition(lock)
-
-    def _cleanupSocket(self):
-        # Wait for any outstanding requests before closing the socket.
-        self._lock.acquire()
-        while self._requests:
-            self._lock.wait()
-        self._lock.release()
-
-        super(MultiplexedConnection, self)._cleanupSocket()
-
-    def writeRecord(self, rec):
-        # Must use locking to prevent intermingling of Records from different
-        # threads.
-        self._lock.acquire()
-        try:
-            # Probably faster than calling super. ;)
-            rec.write(self._sock)
-        finally:
-            self._lock.release()
-
-    def end_request(self, req, appStatus=0L,
-                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
-        self._lock.acquire()
-        try:
-            super(MultiplexedConnection, self).end_request(req, appStatus,
-                                                           protocolStatus,
-                                                           remove)
-            self._lock.notify()
-        finally:
-            self._lock.release()
-
-    def _do_begin_request(self, inrec):
-        self._lock.acquire()
-        try:
-            super(MultiplexedConnection, self)._do_begin_request(inrec)
-        finally:
-            self._lock.release()
-
-    def _do_abort_request(self, inrec):
-        self._lock.acquire()
-        try:
-            super(MultiplexedConnection, self)._do_abort_request(inrec)
-        finally:
-            self._lock.release()
-
-    def _start_request(self, req):
-        thread.start_new_thread(req.run, ())
-
-    def _do_params(self, inrec):
-        self._lock.acquire()
-        try:
-            super(MultiplexedConnection, self)._do_params(inrec)
-        finally:
-            self._lock.release()
-
-    def _do_stdin(self, inrec):
-        self._lock.acquire()
-        try:
-            super(MultiplexedConnection, self)._do_stdin(inrec)
-        finally:
-            self._lock.release()
-
-    def _do_data(self, inrec):
-        self._lock.acquire()
-        try:
-            super(MultiplexedConnection, self)._do_data(inrec)
-        finally:
-            self._lock.release()
-
-class Server(object):
-    """
-    The FastCGI server.
-
-    Waits for connections from the web server, processing each
-    request.
-
-    If run in a normal CGI context, it will instead instantiate a
-    CGIRequest and run the handler through there.
-    """
-    request_class = Request
-    cgirequest_class = CGIRequest
-
-    # Limits the size of the InputStream's string buffer to this size + the
-    # server's maximum Record size. Since the InputStream is not seekable,
-    # we throw away already-read data once this certain amount has been read.
-    inputStreamShrinkThreshold = 102400 - 8192
-
-    def __init__(self, handler=None, maxwrite=8192, bindAddress=None,
-                 umask=None, multiplexed=False):
-        """
-        handler, if present, must reference a function or method that
-        takes one argument: a Request object. If handler is not
-        specified at creation time, Server *must* be subclassed.
-        (The handler method below is abstract.)
-
-        maxwrite is the maximum number of bytes (per Record) to write
-        to the server. I've noticed mod_fastcgi has a relatively small
-        receive buffer (8K or so).
-
-        bindAddress, if present, must either be a string or a 2-tuple. If
-        present, run() will open its own listening socket. You would use
-        this if you wanted to run your application as an 'external' FastCGI
-        app. (i.e. the webserver would no longer be responsible for starting
-        your app) If a string, it will be interpreted as a filename and a UNIX
-        socket will be opened. If a tuple, the first element, a string,
-        is the interface name/IP to bind to, and the second element (an int)
-        is the port number.
-
-        Set multiplexed to True if you want to handle multiple requests
-        per connection. Some FastCGI backends (namely mod_fastcgi) don't
-        multiplex requests at all, so by default this is off (which saves
-        on thread creation/locking overhead). If threads aren't available,
-        this keyword is ignored; it's not possible to multiplex requests
-        at all.
-        """
-        if handler is not None:
-            self.handler = handler
-        self.maxwrite = maxwrite
-        if thread_available:
-            try:
-                import resource
-                # Attempt to glean the maximum number of connections
-                # from the OS.
-                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
-            except ImportError:
-                maxConns = 100 # Just some made up number.
-            maxReqs = maxConns
-            if multiplexed:
-                self._connectionClass = MultiplexedConnection
-                maxReqs *= 5 # Another made up number.
-            else:
-                self._connectionClass = Connection
-            self.capability = {
-                FCGI_MAX_CONNS: maxConns,
-                FCGI_MAX_REQS: maxReqs,
-                FCGI_MPXS_CONNS: multiplexed and 1 or 0
-                }
-        else:
-            self._connectionClass = Connection
-            self.capability = {
-                # If threads aren't available, these are pretty much correct.
-                FCGI_MAX_CONNS: 1,
-                FCGI_MAX_REQS: 1,
-                FCGI_MPXS_CONNS: 0
-                }
-        self._bindAddress = bindAddress
-        self._umask = umask
-
-    def _setupSocket(self):
-        if self._bindAddress is None: # Run as a normal FastCGI?
-            isFCGI = True
-
-            sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
-                                 socket.SOCK_STREAM)
-            try:
-                sock.getpeername()
-            except socket.error, e:
-                if e[0] == errno.ENOTSOCK:
-                    # Not a socket, assume CGI context.
-                    isFCGI = False
-                elif e[0] != errno.ENOTCONN:
-                    raise
-
-            # FastCGI/CGI discrimination is broken on Mac OS X.
-            # Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
-            # if you want to run your app as a simple CGI. (You can do
-            # this with Apache's mod_env [not loaded by default in OS X
-            # client, ha ha] and the SetEnv directive.)
-            if not isFCGI or \
-               os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
-                req = self.cgirequest_class(self)
-                req.run()
-                sys.exit(0)
-        else:
-            # Run as a server
-            oldUmask = None
-            if type(self._bindAddress) is str:
-                # Unix socket
-                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-                try:
-                    os.unlink(self._bindAddress)
-                except OSError:
-                    pass
-                if self._umask is not None:
-                    oldUmask = os.umask(self._umask)
-            else:
-                # INET socket
-                assert type(self._bindAddress) is tuple
-                assert len(self._bindAddress) == 2
-                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-
-            sock.bind(self._bindAddress)
-            sock.listen(socket.SOMAXCONN)
-
-            if oldUmask is not None:
-                os.umask(oldUmask)
-
-        return sock
-
-    def _cleanupSocket(self, sock):
-        """Closes the main socket."""
-        sock.close()
-
-    def _installSignalHandlers(self):
-        self._oldSIGs = [(x,signal.getsignal(x)) for x in
-                         (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)]
-        signal.signal(signal.SIGHUP, self._hupHandler)
-        signal.signal(signal.SIGINT, self._intHandler)
-        signal.signal(signal.SIGTERM, self._intHandler)
-
-    def _restoreSignalHandlers(self):
-        for signum,handler in self._oldSIGs:
-            signal.signal(signum, handler)
-
-    def _hupHandler(self, signum, frame):
-        self._hupReceived = True
-        self._keepGoing = False
-
-    def _intHandler(self, signum, frame):
-        self._keepGoing = False
-
-    def run(self, timeout=1.0):
-        """
-        The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
-        SIGHUP was received, False otherwise.
-        """
-        web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
-        if web_server_addrs is not None:
-            web_server_addrs = map(lambda x: x.strip(),
-                                   web_server_addrs.split(','))
-
-        sock = self._setupSocket()
-
-        self._keepGoing = True
-        self._hupReceived = False
-
-        # Install signal handlers.
-        self._installSignalHandlers()
-
-        while self._keepGoing:
-            try:
-                r, w, e = select.select([sock], [], [], timeout)
-            except select.error, e:
-                if e[0] == errno.EINTR:
-                    continue
-                raise
-
-            if r:
-                try:
-                    clientSock, addr = sock.accept()
-                except socket.error, e:
-                    if e[0] in (errno.EINTR, errno.EAGAIN):
-                        continue
-                    raise
-
-                if web_server_addrs and \
-                       (len(addr) != 2 or addr[0] not in web_server_addrs):
-                    clientSock.close()
-                    continue
-
-                # Instantiate a new Connection and begin processing FastCGI
-                # messages (either in a new thread or this thread).
-                conn = self._connectionClass(clientSock, addr, self)
-                thread.start_new_thread(conn.run, ())
-
-            self._mainloopPeriodic()
-
-        # Restore signal handlers.
-        self._restoreSignalHandlers()
-
-        self._cleanupSocket(sock)
-
-        return self._hupReceived
-
-    def _mainloopPeriodic(self):
-        """
-        Called with just about each iteration of the main loop. Meant to
-        be overridden.
-        """
-        pass
-
-    def _exit(self, reload=False):
-        """
-        Protected convenience method for subclasses to force an exit. Not
-        really thread-safe, which is why it isn't public.
-        """
-        if self._keepGoing:
-            self._keepGoing = False
-            self._hupReceived = reload
-
-    def handler(self, req):
-        """
-        Default handler, which just raises an exception. Unless a handler
-        is passed at initialization time, this must be implemented by
-        a subclass.
-        """
-        raise NotImplementedError, self.__class__.__name__ + '.handler'
-
-    def error(self, req):
-        """
-        Called by Request if an exception occurs within the handler. May and
-        should be overridden.
-        """
-        import cgitb
-        req.stdout.write('Content-Type: text/html\r\n\r\n' +
-                         cgitb.html(sys.exc_info()))
-
-class WSGIServer(Server):
-    """
-    FastCGI server that supports the Web Server Gateway Interface. See
-    <http://www.python.org/peps/pep-0333.html>.
-    """
-    def __init__(self, application, environ=None,
-                 multithreaded=True, **kw):
-        """
-        environ, if present, must be a dictionary-like object. Its
-        contents will be copied into application's environ. Useful
-        for passing application-specific variables.
-
-        Set multithreaded to False if your application is not MT-safe.
-        """
-        if kw.has_key('handler'):
-            del kw['handler'] # Doesn't make sense to let this through
-        super(WSGIServer, self).__init__(**kw)
-
-        if environ is None:
-            environ = {}
-
-        self.application = application
-        self.environ = environ
-        self.multithreaded = multithreaded
-
-        # Used to force single-threadedness
-        self._app_lock = thread.allocate_lock()
-
-    def handler(self, req):
-        """Special handler for WSGI."""
-        if req.role != FCGI_RESPONDER:
-            return FCGI_UNKNOWN_ROLE, 0
-
-        # Mostly taken from example CGI gateway.
-        environ = req.params
-        environ.update(self.environ)
-
-        environ['wsgi.version'] = (1,0)
-        environ['wsgi.input'] = req.stdin
-        if self._bindAddress is None:
-            stderr = req.stderr
-        else:
-            stderr = TeeOutputStream((sys.stderr, req.stderr))
-        environ['wsgi.errors'] = stderr
-        environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
-                                      thread_available and self.multithreaded
-        # Rationale for the following: If started by the web server
-        # (self._bindAddress is None) in either FastCGI or CGI mode, the
-        # possibility of being spawned multiple times simultaneously is quite
-        # real. And, if started as an external server, multiple copies may be
-        # spawned for load-balancing/redundancy. (Though I don't think
-        # mod_fastcgi supports this?)
-        environ['wsgi.multiprocess'] = True
-        environ['wsgi.run_once'] = isinstance(req, CGIRequest)
-
-        if environ.get('HTTPS', 'off') in ('on', '1'):
-            environ['wsgi.url_scheme'] = 'https'
-        else:
-            environ['wsgi.url_scheme'] = 'http'
-
-        self._sanitizeEnv(environ)
-
-        headers_set = []
-        headers_sent = []
-        result = None
-
-        def write(data):
-            assert type(data) is str, 'write() argument must be string'
-            assert headers_set, 'write() before start_response()'
-
-            if not headers_sent:
-                status, responseHeaders = headers_sent[:] = headers_set
-                found = False
-                for header,value in responseHeaders:
-                    if header.lower() == 'content-length':
-                        found = True
-                        break
-                if not found and result is not None:
-                    try:
-                        if len(result) == 1:
-                            responseHeaders.append(('Content-Length',
-                                                    str(len(data))))
-                    except:
-                        pass
-                s = 'Status: %s\r\n' % status
-                for header in responseHeaders:
-                    s += '%s: %s\r\n' % header
-                s += '\r\n'
-                req.stdout.write(s)
-
-            req.stdout.write(data)
-            req.stdout.flush()
-
-        def start_response(status, response_headers, exc_info=None):
-            if exc_info:
-                try:
-                    if headers_sent:
-                        # Re-raise if too late
-                        raise exc_info[0], exc_info[1], exc_info[2]
-                finally:
-                    exc_info = None # avoid dangling circular ref
-            else:
-                assert not headers_set, 'Headers already set!'
-
-            assert type(status) is str, 'Status must be a string'
-            assert len(status) >= 4, 'Status must be at least 4 characters'
-            assert int(status[:3]), 'Status must begin with 3-digit code'
-            assert status[3] == ' ', 'Status must have a space after code'
-            assert type(response_headers) is list, 'Headers must be a list'
-            if __debug__:
-                for name,val in response_headers:
-                    assert type(name) is str, 'Header names must be strings'
-                    assert type(val) is str, 'Header values must be strings'
-
-            headers_set[:] = [status, response_headers]
-            return write
-
-        if not self.multithreaded:
-            self._app_lock.acquire()
-        try:
-            try:
-                result = self.application(environ, start_response)
-                try:
-                    for data in result:
-                        if data:
-                            write(data)
-                    if not headers_sent:
-                        write('') # in case body was empty
-                finally:
-                    if hasattr(result, 'close'):
-                        result.close()
-            except socket.error, e:
-                if e[0] != errno.EPIPE:
-                    raise # Don't let EPIPE propagate beyond server
-        finally:
-            if not self.multithreaded:
-                self._app_lock.release()
-
-        return FCGI_REQUEST_COMPLETE, 0
-
-    def _sanitizeEnv(self, environ):
-        """Ensure certain values are present, if required by WSGI."""
-        if not environ.has_key('SCRIPT_NAME'):
-            environ['SCRIPT_NAME'] = ''
-        if not environ.has_key('PATH_INFO'):
-            environ['PATH_INFO'] = ''
-
-        # If any of these are missing, it probably signifies a broken
-        # server...
-        for name,default in [('REQUEST_METHOD', 'GET'),
-                             ('SERVER_NAME', 'localhost'),
-                             ('SERVER_PORT', '80'),
-                             ('SERVER_PROTOCOL', 'HTTP/1.0')]:
-            if not environ.has_key(name):
-                environ['wsgi.errors'].write('%s: missing FastCGI param %s '
-                                             'required by WSGI!\n' %
-                                             (self.__class__.__name__, name))
-                environ[name] = default
-
-if __name__ == '__main__':
-    def test_app(environ, start_response):
-        """Probably not the most efficient example."""
-        import cgi
-        start_response('200 OK', [('Content-Type', 'text/html')])
-        yield '<html><head><title>Hello World!</title></head>\n' \
-              '<body>\n' \
-              '<p>Hello World!</p>\n' \
-              '<table border="1">'
-        names = environ.keys()
-        names.sort()
-        for name in names:
-            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
-                name, cgi.escape(`environ[name]`))
-
-        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
-                                keep_blank_values=1)
-        if form.list:
-            yield '<tr><th colspan="2">Form data</th></tr>'
-
-        for field in form.list:
-            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
-                field.name, field.value)
-
-        yield '</table>\n' \
-              '</body></html>\n'
-
-    WSGIServer(test_app).run()
-

+ 0 - 76
frameworks/Python/web2py/web2py/gluon/contrib/generics.py

@@ -1,76 +0,0 @@
-# fix response
-
-import os
-from gluon import current, HTTP
-from gluon.html import markmin_serializer, TAG, HTML, BODY, UL, XML, H1
-from gluon.contrib.fpdf import FPDF, HTMLMixin
-from gluon.sanitizer import sanitize
-from gluon.contrib.markmin.markmin2latex import markmin2latex
-from gluon.contrib.markmin.markmin2pdf import markmin2pdf
-
-
-def wrapper(f):
-    def g(data):
-        try:
-            output = f(data)
-            return XML(ouput)
-        except (TypeError, ValueError), e:
-            raise HTTP(405, '%s serialization error' % e)
-        except ImportError, e:
-            raise HTTP(405, '%s not available' % e)
-        except Exception, e:
-            raise HTTP(405, '%s error' % e)
-    return g
-
-
-def latex_from_html(html):
-    markmin = TAG(html).element('body').flatten(markmin_serializer)
-    return XML(markmin2latex(markmin))
-
-
-def pdflatex_from_html(html):
-    if os.system('which pdflatex > /dev/null') == 0:
-        markmin = TAG(html).element('body').flatten(markmin_serializer)
-        out, warnings, errors = markmin2pdf(markmin)
-        if errors:
-            current.response.headers['Content-Type'] = 'text/html'
-            raise HTTP(405, HTML(BODY(H1('errors'),
-                                      UL(*errors),
-                                      H1('warnings'),
-                                      UL(*warnings))).xml())
-        else:
-            return XML(out)
-
-
-def pyfpdf_from_html(html):
-    request = current.request
-
-    def image_map(path):
-        if path.startswith('/%s/static/' % request.application):
-            return os.path.join(request.folder, path.split('/', 2)[2])
-        return 'http%s://%s%s' % (request.is_https and 's' or '', request.env.http_host, path)
-
-    class MyFPDF(FPDF, HTMLMixin):
-        pass
-    pdf = MyFPDF()
-    pdf.add_page()
-    # pyfpdf needs some attributes to render the table correctly:
-    html = sanitize(
-        html, allowed_attributes={
-            'a': ['href', 'title'],
-            'img': ['src', 'alt'],
-            'blockquote': ['type'],
-            'td': ['align', 'bgcolor', 'colspan', 'height', 'width'],
-            'tr': ['bgcolor', 'height', 'width'],
-            'table': ['border', 'bgcolor', 'height', 'width'],
-        }, escape=False)
-    pdf.write_html(html, image_map=image_map)
-    return XML(pdf.output(dest='S'))
-
-
-def pdf_from_html(html):
-    # try use latex and pdflatex
-    if os.system('which pdflatex > /dev/null') == 0:
-        return pdflatex_from_html(html)
-    else:
-        return pyfpdf_from_html(html)

+ 0 - 15
frameworks/Python/web2py/web2py/gluon/contrib/google_wallet.py

@@ -1,15 +0,0 @@
-from gluon import XML
-
-def button(merchant_id="123456789012345",
-           products=[dict(name="shoes",
-                          quantity=1,
-                          price=23.5,
-                          currency='USD',
-                          description="running shoes black")]):
-    t = '<input name="item_%(key)s_%(k)s" type="hidden" value="%(value)s"/>\n'
-    list_products = ''
-    for k, product in enumerate(products):
-        for key in ('name','description','quantity','price','currency'):
-            list_products += t % dict(k=k + 1, key=key, value=product[key])
-    button = """<form action="https://checkout.google.com/api/checkout/v2/checkoutForm/Merchant/%(merchant_id)s" id="BB_BuyButtonForm" method="post" name="BB_BuyButtonForm" target="_top">\n%(list_products)s<input name="_charset_" type="hidden" value="utf-8"/>\n<input alt="" src="https://checkout.google.com/buttons/buy.gif?merchant_id=%(merchant_id)s&amp;w=117&amp;h=48&amp;style=white&amp;variant=text&amp;loc=en_US" type="image"/>\n</form>""" % dict(merchant_id=merchant_id, list_products=list_products)
-    return XML(button)

+ 0 - 30
frameworks/Python/web2py/web2py/gluon/contrib/heroku.py

@@ -1,30 +0,0 @@
-"""
-Usage: in web2py models/db.py
-
-from gluon.contrib.heroku import get_db
-db = get_db()
-
-"""
-import os
-from gluon import *
-from pydal.adapters import ADAPTERS, PostgreSQLAdapter
-from pydal.helpers.classes import UseDatabaseStoredFile
-
-class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):
-    drivers = ('psycopg2',)
-    uploads_in_blob = True
-
-ADAPTERS['postgres'] = HerokuPostgresAdapter
-
-def get_db(name = None, pool_size=10):
-    if not name:
-        names = [n for n in os.environ.keys()
-                 if n[:18]+n[-4:]=='HEROKU_POSTGRESQL__URL']
-        if names:
-            name = names[0]
-    if name:
-        db = DAL(os.environ[name], pool_size=pool_size)
-        current.session.connect(current.request, current.response, db=db)
-    else:
-        db = DAL('sqlite://heroku.test.sqlite')
-    return db

+ 0 - 342
frameworks/Python/web2py/web2py/gluon/contrib/hypermedia.py

@@ -1,342 +0,0 @@
-import json
-from collections import OrderedDict
-from gluon import URL, IS_SLUG
-
-# compliant with https://github.com/collection-json/spec
-# also compliant with http://code.ge/media-types/collection-next-json/
-
-"""
-
-Example controller:
-
-def api():
-    from gluon.contrib.hypermedia import Collection
-    policies = {
-        'thing': {
-            'GET':{'query':None,'fields':['id', 'name']},
-            'POST':{'query':None,'fields':['name']},
-            'PUT':{'query':None,'fields':['name']},
-            'DELETE':{'query':None},
-            },
-        'attr': {
-            'GET':{'query':None,'fields':['id', 'name', 'thing']},
-            'POST':{'query':None,'fields':['name', 'thing']},
-            'PUT':{'query':None,'fields':['name', 'thing']},
-            'DELETE':{'query':None},
-            },
-        }
-    return Collection(db).process(request,response,policies)
-
-"""
-
-__all__ = ['Collection']
-
-class Collection(object):
-
-    VERSION = '1.0'
-    MAXITEMS = 100
-
-    def __init__(self,db, extensions=True, compact=False):
-        self.db = db
-        self.extensions = extensions
-        self.compact = compact
-
-    def row2data(self,table,row,text=False):
-        """ converts a DAL Row object into a collection.item """
-        data = []
-        if self.compact:
-            for fieldname in (self.table_policy.get('fields',table.fields)):
-                field = table[fieldname]
-                if not ((field.type=='text' and text==False) or
-                        field.type=='blob' or
-                        field.type.startswith('reference ') or
-                        field.type.startswith('list:reference ')) and field.name in row:
-                    data.append(row[field.name])
-        else:
-            for fieldname in (self.table_policy.get('fields',table.fields)):
-                field = table[fieldname]
-                if not ((field.type=='text' and text==False) or
-                        field.type=='blob' or
-                        field.type.startswith('reference ') or
-                        field.type.startswith('list:reference ')) and field.name in row:
-                    data.append({'name':field.name,'value':row[field.name],
-                                 'prompt':field.label, 'type':field.type})
-        return data
-
-    def row2links(self,table,row):
-        """ converts a DAL Row object into a set of links referencing the row """
-        links = []
-        for field in table._referenced_by:
-            if field._tablename in self.policies:
-                if row:
-                    href = URL(args=field._tablename,vars={field.name:row.id},scheme=True)
-                else:
-                    href = URL(args=field._tablename,scheme=True)+'?%s={id}' % field.name
-                links.append({'rel':'current','href':href,'prompt':str(field),
-                              'type':'children'})
-        if row:
-            fields = self.table_policy.get('fields', table.fields)
-            for fieldname in fields:
-                field = table[fieldname]
-                if field.type.startswith('reference '):
-                    href = URL(args=field.type[10:],vars={'id':row[fieldname]},
-                               scheme=True)
-                    links.append({'rel':'current','href':href,'prompt':str(field),
-                                  'type':'parent'})
-
-            for fieldname in fields:
-                field = table[fieldname]
-                if field.type=='upload' and row[fieldname]:
-                    href = URL('download',args=row[fieldname],scheme=True)
-                    links.append({'rel':'current','href':href,'prompt':str(field),
-                                  'type':'attachment'})
-
-            # should this be supported?
-            for rel,build in (self.table_policy.get('links',{}).items()):
-                links.append({'rel':'current','href':build(row),'prompt':rel})
-        # not sure
-        return links
-
-    def table2template(self,table):
-        """ confeverts a table into its form template """
-        data = []
-        fields = self.table_policy.get('fields', table.fields)
-        for fieldname in fields:
-            field = table[fieldname]
-            info = {'name': field.name, 'value': '', 'prompt': field.label}
-            policies = self.policies[table._tablename]
-            # https://github.com/collection-json/extensions/blob/master/template-validation.md
-            info['type'] = str(field.type) # FIX THIS
-            if hasattr(field,'regexp_validator'):
-                info['regexp'] = field.regexp_validator
-            info['required'] = field.required
-            info['post_writable'] = field.name in policies['POST'].get('fields',fields)
-            info['put_writable'] = field.name in policies['PUT'].get('fields',fields)
-            info['options'] = {} # FIX THIS
-            data.append(info)
-        return {'data':data}
-
-    def request2query(self,table,vars):
-        """ parses a request and converts it into a query """
-        if len(self.request.args)>1:
-            vars.id = self.request.args[1]
-
-        fieldnames = table.fields
-        queries = [table]
-        limitby = [0,self.MAXITEMS+1]
-        orderby = 'id'
-        for key,value in vars.items():
-            if key=='_offset':
-                limitby[0] = int(value) # MAY FAIL
-            elif key == '_limit':
-                limitby[1] = int(value)+1 # MAY FAIL
-            elif key=='_orderby':
-                orderby = value
-            elif key in fieldnames:
-                queries.append(table[key] == value)
-            elif key.endswith('.eq') and key[:-3] in fieldnames: # for completeness (useless)
-                queries.append(table[key[:-3]] == value)
-            elif key.endswith('.lt') and key[:-3] in fieldnames:
-                queries.append(table[key[:-3]] < value)
-            elif key.endswith('.le') and key[:-3] in fieldnames:
-                queries.append(table[key[:-3]] <= value)
-            elif key.endswith('.gt') and key[:-3] in fieldnames:
-                queries.append(table[key[:-3]] > value)
-            elif key.endswith('.ge') and key[:-3] in fieldnames:
-                queries.append(table[key[:-3]] >= value)
-            elif key.endswith('.contains') and key[:-9] in fieldnames:
-                queries.append(table[key[:-9]].contains(value))
-            elif key.endswith('.startswith') and key[:-11] in fieldnames:
-                queries.append(table[key[:-11]].startswith(value))
-            elif key.endswith('.ne') and key[:-3] in fieldnames:
-                queries.append(table[key][:-3] != value)
-            else:
-                raise ValueError("Invalid Query")
-        filter_query = self.table_policy.get('query')
-        if filter_query:
-            queries.append(filter_query)
-        query = reduce(lambda a,b:a&b,queries[1:]) if len(queries)>1 else queries[0]
-        orderby = [table[f] if f[0]!='~' else ~table[f[1:]] for f in orderby.split(',')]
-        return (query, limitby, orderby)
-
-    def table2queries(self,table, href):
-        """ generates a set of collection.queries examples for the table """
-        data = []
-        for fieldname in (self.table_policy.get('fields', table.fields)):
-            data.append({'name':fieldname,'value':''})
-            if self.extensions:
-                data.append({'name':fieldname+'.ne','value':''}) # NEW !!!
-                data.append({'name':fieldname+'.lt','value':''})
-                data.append({'name':fieldname+'.le','value':''})
-                data.append({'name':fieldname+'.gt','value':''})
-                data.append({'name':fieldname+'.ge','value':''})
-                if table[fieldname].type in ['string','text']:
-                    data.append({'name':fieldname+'.contains','value':''})
-                    data.append({'name':fieldname+'.startswith','value':''})
-                data.append({'name':'_limitby','value':''})
-                data.append({'name':'_offset','value':''})
-                data.append({'name':'_orderby','value':''})
-        return [{'rel' : 'search', 'href' : href, 'prompt' : 'Search', 'data' : data}]
-
-    def process(self,request,response,policies=None):
-        """ the main method, processes a request, filters by policies and produces a JSON response """
-        self.request = request
-        self.response = response
-        self.policies = policies
-        db = self.db
-        tablename = request.args(0)
-        r = OrderedDict()
-        r['version'] = self.VERSION
-        tablenames = policies.keys() if policies else db.tables
-        # if there is no tables
-        if not tablename:
-            r['href'] = URL(scheme=True),
-            # https://github.com/collection-json/extensions/blob/master/model.md
-            r['links'] = [{'rel' : t, 'href' : URL(args=t,scheme=True), 'model':t}
-                          for t in tablenames]
-            response.headers['Content-Type'] = 'application/vnd.collection+json'
-            return response.json({'collection':r})
-        # or if the tablenames is invalid
-        if not tablename in tablenames:
-            return self.error(400,'BAD REQUEST','Invalid table name')
-        # of if the method is invalid
-        if not request.env.request_method in policies[tablename]:
-            return self.error(400,'BAD REQUEST','Method not recognized')
-        # get the policies
-        self.table_policy = policies[tablename][request.env.request_method]
-        # process GET
-        if request.env.request_method=='GET':
-            table = db[tablename]
-            r['href'] = URL(args=tablename)
-            r['items'] = items = []
-            try:
-                (query, limitby, orderby) = self.request2query(table,request.get_vars)
-                fields = [table[fn] for fn in (self.table_policy.get('fields', table.fields))]
-                fields = filter(lambda field: field.readable, fields)
-                rows = db(query).select(*fields,**dict(limitby=limitby, orderby=orderby))
-            except:
-                db.rollback()
-                return self.error(400,'BAD REQUEST','Invalid Query')
-            r['items_found'] = db(query).count()
-            delta = limitby[1]-limitby[0]-1
-            r['links'] = self.row2links(table,None) if self.compact else []
-            text = r['items_found']<2
-            for row in rows[:delta]:
-                id = row.id
-                for name in ('slug','fullname','title','name'):
-                    if name in row:
-                        href = URL(args=(tablename,id,IS_SLUG.urlify(row[name] or '')),
-                                   scheme=True)
-                        break
-                else:
-                    href = URL(args=(tablename,id),scheme=True)
-                if self.compact:
-                    items.append(self.row2data(table,row,text))
-                else:
-                    items.append({
-                            'href':href,
-                            'data':self.row2data(table,row,text),
-                            'links':self.row2links(table,row)
-                            });
-            if self.extensions and len(rows)>delta:
-                vars = dict(request.get_vars)
-                vars['_offset'] = limitby[1]-1
-                vars['_limit'] = limitby[1]-1+delta
-                r['next'] = {'rel':'next',
-                             'href':URL(args=request.args,vars=vars,scheme=True)}
-            if self.extensions and limitby[0]>0:
-                vars = dict(request.get_vars)
-                vars['_offset'] = max(0,limitby[0]-delta)
-                vars['_limit'] = limitby[0]
-                r['previous'] = {'rel':'previous',
-                                 'href':URL(args=request.args,vars=vars,scheme=True)}
-            data = []
-            if not self.compact:
-                r['queries'] = self.table2queries(table, r['href'])
-            r['template'] = self.table2template(table)
-            response.headers['Content-Type'] = 'application/vnd.collection+json'
-            return response.json({'collection':r})
-        # process DELETE
-        elif request.env.request_method=='DELETE':
-            table = db[tablename]
-            if not request.get_vars:
-                return self.error(400, "BAD REQUEST", "Nothing to delete")
-            else:
-                try:
-                    (query, limitby, orderby) = self.request2query(table, request.vars)
-                    n = db(query).delete() # MAY FAIL
-                    response.status = 204
-                    return ''
-                except:
-                    db.rollback()
-                    return self.error(400,'BAD REQUEST','Invalid Query')
-            return response.json(r)
-        # process POST and PUT (on equal footing!)
-        elif request.env.request_method in ('POST','PUT'): # we treat them the same!
-            table = db[tablename]
-            if 'json' in request.env.content_type:
-                data = request.post_vars.data
-            else:
-                data = request.post_vars
-            if request.get_vars or len(request.args)>1: # update
-                # ADD validate fields and return error
-                try:
-                    (query, limitby, orderby) = self.request2query(table, request.get_vars)
-                    fields = filter(lambda (fn,value):table[fn].writable,data.items())
-                    res = db(query).validate_and_update(**dict(fields)) # MAY FAIL
-                    if res.errors:
-                        return self.error(400,'BAD REQUEST','Validation Error',res.errors)
-                    else:
-                        response.status = 200
-                        return ''
-                except:
-                    db.rollback()
-                    return self.error(400,'BAD REQUEST','Invalid Query')
-            else: # create
-                # ADD validate fields and return error
-                try:
-                    fields = filter(lambda (fn,value):table[fn].writable,data.items())
-                    res = table.validate_and_insert(**dict(fields)) # MAY FAIL
-                    if res.errors:
-                        return self.error(400,'BAD REQUEST','Validation Error',res.errors)
-                    else:
-                        response.status = 201
-                        response.headers['location'] = \
-                            URL(args=(tablename,res.id),scheme=True)
-                        return ''
-                except SyntaxError,e: #Exception,e:
-                    db.rollback()
-                    return self.error(400,'BAD REQUEST','Invalid Query:'+e)
-
-    def error(self,code="400", title="BAD REQUEST", message="UNKNOWN", form_errors={}):
-        request, response = self.request, self.response
-        r = OrderedDict({
-                "version" : self.VERSION,
-                "href" : URL(args=request.args,vars=request.vars),
-                "error" : {
-                    "title" : title,
-                    "code" : code,
-                    "message" : message}})
-        if self.extensions and form_errors:
-            # https://github.com/collection-json/extensions/blob/master/errors.md
-            r['errors'] = errors = {}
-            for key, value in form_errors.items():
-                errors[key] = {'title':'Validation Error','code':'','message':value}
-                response.headers['Content-Type'] = 'application/vnd.collection+json'
-        response.status = 400
-        return response.json({'collection':r})
-
-example_policies = {
-    'thing': {
-        'GET':{'query':None,'fields':['id', 'name']},
-        'POST':{'query':None,'fields':['name']},
-        'PUT':{'query':None,'fields':['name']},
-        'DELETE':{'query':None},
-        },
-    'attr': {
-        'GET':{'query':None,'fields':['id', 'name', 'thing']},
-        'POST':{'query':None,'fields':['name', 'thing']},
-        'PUT':{'query':None,'fields':['name', 'thing']},
-        'DELETE':{'query':None},
-        },
-    }

+ 0 - 61
frameworks/Python/web2py/web2py/gluon/contrib/imageutils.py

@@ -1,61 +0,0 @@
-# -*- coding: utf-8 -*-
-
-#######################################################################
-#
-# Put this file in yourapp/modules/images.py
-#
-# Given the model
-#
-# db.define_table("table_name", Field("picture", "upload"), Field("thumbnail", "upload"))
-#
-# # to resize the picture on upload
-#
-# from images import RESIZE
-#
-# db.table_name.picture.requires = RESIZE(200, 200)
-#
-# # to store original image in picture and create a thumbnail in 'thumbnail' field
-#
-# from images import THUMB
-# db.table_name.thumbnail.compute = lambda row: THUMB(row.picture, 200, 200)
-
-#########################################################################
-from gluon import current
-
-
-class RESIZE(object):
-    def __init__(self, nx=160, ny=80, error_message=' image resize'):
-        (self.nx, self.ny, self.error_message) = (nx, ny, error_message)
-
-    def __call__(self, value):
-        if isinstance(value, str) and len(value) == 0:
-            return (value, None)
-        from PIL import Image
-        import cStringIO
-        try:
-            img = Image.open(value.file)
-            img.thumbnail((self.nx, self.ny), Image.ANTIALIAS)
-            s = cStringIO.StringIO()
-            img.save(s, 'JPEG', quality=100)
-            s.seek(0)
-            value.file = s
-        except:
-            return (value, self.error_message)
-        else:
-            return (value, None)
-
-
-def THUMB(image, nx=120, ny=120, gae=False, name='thumb'):
-    if image:
-        if not gae:
-            request = current.request
-            from PIL import Image
-            import os
-            img = Image.open(os.path.join(request.folder,'uploads',image))
-            img.thumbnail((nx, ny), Image.ANTIALIAS)
-            root, ext = os.path.splitext(image)
-            thumb = '%s_%s%s' % (root, name, ext)
-            img.save(request.folder + 'uploads/' + thumb)
-            return thumb
-        else:
-            return image

+ 0 - 1865
frameworks/Python/web2py/web2py/gluon/contrib/ipaddr.py

@@ -1,1865 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright 2007 Google Inc.
-#  Licensed to PSF under a Contributor Agreement.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
-
-This library is used to create/poke/manipulate IPv4 and IPv6 addresses
-and networks.
-
-"""
-
-__version__ = '2.1.11'
-
-import struct
-
-IPV4LENGTH = 32
-IPV6LENGTH = 128
-
-
-class AddressValueError(ValueError):
-    """A Value Error related to the address."""
-
-
-class NetmaskValueError(ValueError):
-    """A Value Error related to the netmask."""
-
-
-def IPAddress(address, version=None):
-    """Take an IP string/int and return an object of the correct type.
-
-    Args:
-        address: A string or integer, the IP address.  Either IPv4 or
-          IPv6 addresses may be supplied; integers less than 2**32 will
-          be considered to be IPv4 by default.
-        version: An Integer, 4 or 6. If set, don't try to automatically
-          determine what the IP address type is. important for things
-          like IPAddress(1), which could be IPv4, '0.0.0.1',  or IPv6,
-          '::1'.
-
-    Returns:
-        An IPv4Address or IPv6Address object.
-
-    Raises:
-        ValueError: if the string passed isn't either a v4 or a v6
-          address.
-
-    """
-    if version:
-        if version == 4:
-            return IPv4Address(address)
-        elif version == 6:
-            return IPv6Address(address)
-
-    try:
-        return IPv4Address(address)
-    except (AddressValueError, NetmaskValueError):
-        pass
-
-    try:
-        return IPv6Address(address)
-    except (AddressValueError, NetmaskValueError):
-        pass
-
-    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
-                     address)
-
-
-def IPNetwork(address, version=None, strict=False):
-    """Take an IP string/int and return an object of the correct type.
-
-    Args:
-        address: A string or integer, the IP address.  Either IPv4 or
-          IPv6 addresses may be supplied; integers less than 2**32 will
-          be considered to be IPv4 by default.
-        version: An Integer, if set, don't try to automatically
-          determine what the IP address type is. important for things
-          like IPNetwork(1), which could be IPv4, '0.0.0.1/32', or IPv6,
-          '::1/128'.
-
-    Returns:
-        An IPv4Network or IPv6Network object.
-
-    Raises:
-        ValueError: if the string passed isn't either a v4 or a v6
-          address. Or if a strict network was requested and a strict
-          network wasn't given.
-
-    """
-    if version:
-        if version == 4:
-            return IPv4Network(address, strict)
-        elif version == 6:
-            return IPv6Network(address, strict)
-
-    try:
-        return IPv4Network(address, strict)
-    except (AddressValueError, NetmaskValueError):
-        pass
-
-    try:
-        return IPv6Network(address, strict)
-    except (AddressValueError, NetmaskValueError):
-        pass
-
-    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
-                     address)
-
-
-def v4_int_to_packed(address):
-    """The binary representation of this address.
-
-    Args:
-        address: An integer representation of an IPv4 IP address.
-
-    Returns:
-        The binary representation of this address.
-
-    Raises:
-        ValueError: If the integer is too large to be an IPv4 IP
-          address.
-    """
-    if address > _BaseV4._ALL_ONES:
-        raise ValueError('Address too large for IPv4')
-    return Bytes(struct.pack('!I', address))
-
-
-def v6_int_to_packed(address):
-    """The binary representation of this address.
-
-    Args:
-        address: An integer representation of an IPv6 IP address.
-
-    Returns:
-        The binary representation of this address.
-    """
-    return Bytes(struct.pack('!QQ', address >> 64, address & (2**64 - 1)))
-
-
-def _find_address_range(addresses):
-    """Find a sequence of addresses.
-
-    Args:
-        addresses: a list of IPv4 or IPv6 addresses.
-
-    Returns:
-        A tuple containing the first and last IP addresses in the sequence.
-
-    """
-    first = last = addresses[0]
-    for ip in addresses[1:]:
-        if ip._ip == last._ip + 1:
-            last = ip
-        else:
-            break
-    return (first, last)
-
-def _get_prefix_length(number1, number2, bits):
-    """Get the number of leading bits that are same for two numbers.
-
-    Args:
-        number1: an integer.
-        number2: another integer.
-        bits: the maximum number of bits to compare.
-
-    Returns:
-        The number of leading bits that are the same for two numbers.
-
-    """
-    for i in range(bits):
-        if number1 >> i == number2 >> i:
-            return bits - i
-    return 0
-
-def _count_righthand_zero_bits(number, bits):
-    """Count the number of zero bits on the right hand side.
-
-    Args:
-        number: an integer.
-        bits: maximum number of bits to count.
-
-    Returns:
-        The number of zero bits on the right hand side of the number.
-
-    """
-    if number == 0:
-        return bits
-    for i in range(bits):
-        if (number >> i) % 2:
-            return i
-
-def summarize_address_range(first, last):
-    """Summarize a network range given the first and last IP addresses.
-
-    Example:
-        >>> summarize_address_range(IPv4Address('1.1.1.0'),
-            IPv4Address('1.1.1.130'))
-        [IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
-        IPv4Network('1.1.1.130/32')]
-
-    Args:
-        first: the first IPv4Address or IPv6Address in the range.
-        last: the last IPv4Address or IPv6Address in the range.
-
-    Returns:
-        The address range collapsed to a list of IPv4Network's or
-        IPv6Network's.
-
-    Raise:
-        TypeError:
-            If the first and last objects are not IP addresses.
-            If the first and last objects are not the same version.
-        ValueError:
-            If the last object is not greater than the first.
-            If the version is not 4 or 6.
-
-    """
-    if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
-        raise TypeError('first and last must be IP addresses, not networks')
-    if first.version != last.version:
-        raise TypeError("%s and %s are not of the same version" % (
-                str(first), str(last)))
-    if first > last:
-        raise ValueError('last IP address must be greater than first')
-
-    networks = []
-
-    if first.version == 4:
-        ip = IPv4Network
-    elif first.version == 6:
-        ip = IPv6Network
-    else:
-        raise ValueError('unknown IP version')
-
-    ip_bits = first._max_prefixlen
-    first_int = first._ip
-    last_int = last._ip
-    while first_int <= last_int:
-        nbits = _count_righthand_zero_bits(first_int, ip_bits)
-        current = None
-        while nbits >= 0:
-            addend = 2**nbits - 1
-            current = first_int + addend
-            nbits -= 1
-            if current <= last_int:
-                break
-        prefix = _get_prefix_length(first_int, current, ip_bits)
-        net = ip('%s/%d' % (str(first), prefix))
-        networks.append(net)
-        if current == ip._ALL_ONES:
-            break
-        first_int = current + 1
-        first = IPAddress(first_int, version=first._version)
-    return networks
-
-def _collapse_address_list_recursive(addresses):
-    """Loops through the addresses, collapsing concurrent netblocks.
-
-    Example:
-
-        ip1 = IPv4Network('1.1.0.0/24')
-        ip2 = IPv4Network('1.1.1.0/24')
-        ip3 = IPv4Network('1.1.2.0/24')
-        ip4 = IPv4Network('1.1.3.0/24')
-        ip5 = IPv4Network('1.1.4.0/24')
-        ip6 = IPv4Network('1.1.0.1/22')
-
-        _collapse_address_list_recursive([ip1, ip2, ip3, ip4, ip5, ip6]) ->
-          [IPv4Network('1.1.0.0/22'), IPv4Network('1.1.4.0/24')]
-
-        This shouldn't be called directly; it is called via
-          collapse_address_list([]).
-
-    Args:
-        addresses: A list of IPv4Network's or IPv6Network's
-
-    Returns:
-        A list of IPv4Network's or IPv6Network's depending on what we were
-        passed.
-
-    """
-    ret_array = []
-    optimized = False
-
-    for cur_addr in addresses:
-        if not ret_array:
-            ret_array.append(cur_addr)
-            continue
-        if cur_addr in ret_array[-1]:
-            optimized = True
-        elif cur_addr == ret_array[-1].supernet().subnet()[1]:
-            ret_array.append(ret_array.pop().supernet())
-            optimized = True
-        else:
-            ret_array.append(cur_addr)
-
-    if optimized:
-        return _collapse_address_list_recursive(ret_array)
-
-    return ret_array
-
-
-def collapse_address_list(addresses):
-    """Collapse a list of IP objects.
-
-    Example:
-        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) ->
-          [IPv4('1.1.0.0/23')]
-
-    Args:
-        addresses: A list of IPv4Network or IPv6Network objects.
-
-    Returns:
-        A list of IPv4Network or IPv6Network objects depending on what we
-        were passed.
-
-    Raises:
-        TypeError: If passed a list of mixed version objects.
-
-    """
-    i = 0
-    addrs = []
-    ips = []
-    nets = []
-
-    # split IP addresses and networks
-    for ip in addresses:
-        if isinstance(ip, _BaseIP):
-            if ips and ips[-1]._version != ip._version:
-                raise TypeError("%s and %s are not of the same version" % (
-                        str(ip), str(ips[-1])))
-            ips.append(ip)
-        elif ip._prefixlen == ip._max_prefixlen:
-            if ips and ips[-1]._version != ip._version:
-                raise TypeError("%s and %s are not of the same version" % (
-                        str(ip), str(ips[-1])))
-            ips.append(ip.ip)
-        else:
-            if nets and nets[-1]._version != ip._version:
-                raise TypeError("%s and %s are not of the same version" % (
-                        str(ip), str(nets[-1])))
-            nets.append(ip)
-
-    # sort and dedup
-    ips = sorted(set(ips))
-    nets = sorted(set(nets))
-
-    while i < len(ips):
-        (first, last) = _find_address_range(ips[i:])
-        i = ips.index(last) + 1
-        addrs.extend(summarize_address_range(first, last))
-
-    return _collapse_address_list_recursive(sorted(
-        addrs + nets, key=_BaseNet._get_networks_key))
-
-# backwards compatibility
-CollapseAddrList = collapse_address_list
-
-# We need to distinguish between the string and packed-bytes representations
-# of an IP address.  For example, b'0::1' is the IPv4 address 48.58.58.49,
-# while '0::1' is an IPv6 address.
-#
-# In Python 3, the native 'bytes' type already provides this functionality,
-# so we use it directly.  For earlier implementations where bytes is not a
-# distinct type, we create a subclass of str to serve as a tag.
-#
-# Usage example (Python 2):
-#   ip = ipaddr.IPAddress(ipaddr.Bytes('xxxx'))
-#
-# Usage example (Python 3):
-#   ip = ipaddr.IPAddress(b'xxxx')
-try:
-    if bytes is str:
-        raise TypeError("bytes is not a distinct type")
-    Bytes = bytes
-except (NameError, TypeError):
-    class Bytes(str):
-        def __repr__(self):
-            return 'Bytes(%s)' % str.__repr__(self)
-
-def get_mixed_type_key(obj):
-    """Return a key suitable for sorting between networks and addresses.
-
-    Address and Network objects are not sortable by default; they're
-    fundamentally different so the expression
-
-        IPv4Address('1.1.1.1') <= IPv4Network('1.1.1.1/24')
-
-    doesn't make any sense.  There are some times however, where you may wish
-    to have ipaddr sort these for you anyway. If you need to do this, you
-    can use this function as the key= argument to sorted().
-
-    Args:
-      obj: either a Network or Address object.
-    Returns:
-      appropriate key.
-
-    """
-    if isinstance(obj, _BaseNet):
-        return obj._get_networks_key()
-    elif isinstance(obj, _BaseIP):
-        return obj._get_address_key()
-    return NotImplemented
-
-class _IPAddrBase(object):
-
-    """The mother class."""
-
-    def __index__(self):
-        return self._ip
-
-    def __int__(self):
-        return self._ip
-
-    def __hex__(self):
-        return hex(self._ip)
-
-    @property
-    def exploded(self):
-        """Return the longhand version of the IP address as a string."""
-        return self._explode_shorthand_ip_string()
-
-    @property
-    def compressed(self):
-        """Return the shorthand version of the IP address as a string."""
-        return str(self)
-
-
-class _BaseIP(_IPAddrBase):
-
-    """A generic IP object.
-
-    This IP class contains the version independent methods which are
-    used by single IP addresses.
-
-    """
-
-    def __eq__(self, other):
-        try:
-            return (self._ip == other._ip
-                    and self._version == other._version)
-        except AttributeError:
-            return NotImplemented
-
-    def __ne__(self, other):
-        eq = self.__eq__(other)
-        if eq is NotImplemented:
-            return NotImplemented
-        return not eq
-
-    def __le__(self, other):
-        gt = self.__gt__(other)
-        if gt is NotImplemented:
-            return NotImplemented
-        return not gt
-
-    def __ge__(self, other):
-        lt = self.__lt__(other)
-        if lt is NotImplemented:
-            return NotImplemented
-        return not lt
-
-    def __lt__(self, other):
-        if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                    str(self), str(other)))
-        if not isinstance(other, _BaseIP):
-            raise TypeError('%s and %s are not of the same type' % (
-                    str(self), str(other)))
-        if self._ip != other._ip:
-            return self._ip < other._ip
-        return False
-
-    def __gt__(self, other):
-        if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                    str(self), str(other)))
-        if not isinstance(other, _BaseIP):
-            raise TypeError('%s and %s are not of the same type' % (
-                    str(self), str(other)))
-        if self._ip != other._ip:
-            return self._ip > other._ip
-        return False
-
-    # Shorthand for Integer addition and subtraction. This is not
-    # meant to ever support addition/subtraction of addresses.
-    def __add__(self, other):
-        if not isinstance(other, int):
-            return NotImplemented
-        return IPAddress(int(self) + other, version=self._version)
-
-    def __sub__(self, other):
-        if not isinstance(other, int):
-            return NotImplemented
-        return IPAddress(int(self) - other, version=self._version)
-
-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, str(self))
-
-    def __str__(self):
-        return  '%s' % self._string_from_ip_int(self._ip)
-
-    def __hash__(self):
-        return hash(hex(long(self._ip)))
-
-    def _get_address_key(self):
-        return (self._version, self)
-
-    @property
-    def version(self):
-        raise NotImplementedError('BaseIP has no version')
-
-
-class _BaseNet(_IPAddrBase):
-
-    """A generic IP object.
-
-    This IP class contains the version independent methods which are
-    used by networks.
-
-    """
-
-    def __init__(self, address):
-        self._cache = {}
-
-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, str(self))
-
-    def iterhosts(self):
-        """Generate Iterator over usable hosts in a network.
-
-           This is like __iter__ except it doesn't return the network
-           or broadcast addresses.
-
-        """
-        cur = int(self.network) + 1
-        bcast = int(self.broadcast) - 1
-        while cur <= bcast:
-            cur += 1
-            yield IPAddress(cur - 1, version=self._version)
-
-    def __iter__(self):
-        cur = int(self.network)
-        bcast = int(self.broadcast)
-        while cur <= bcast:
-            cur += 1
-            yield IPAddress(cur - 1, version=self._version)
-
-    def __getitem__(self, n):
-        network = int(self.network)
-        broadcast = int(self.broadcast)
-        if n >= 0:
-            if network + n > broadcast:
-                raise IndexError
-            return IPAddress(network + n, version=self._version)
-        else:
-            n += 1
-            if broadcast + n < network:
-                raise IndexError
-            return IPAddress(broadcast + n, version=self._version)
-
-    def __lt__(self, other):
-        if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                    str(self), str(other)))
-        if not isinstance(other, _BaseNet):
-            raise TypeError('%s and %s are not of the same type' % (
-                    str(self), str(other)))
-        if self.network != other.network:
-            return self.network < other.network
-        if self.netmask != other.netmask:
-            return self.netmask < other.netmask
-        return False
-
-    def __gt__(self, other):
-        if self._version != other._version:
-            raise TypeError('%s and %s are not of the same version' % (
-                    str(self), str(other)))
-        if not isinstance(other, _BaseNet):
-            raise TypeError('%s and %s are not of the same type' % (
-                    str(self), str(other)))
-        if self.network != other.network:
-            return self.network > other.network
-        if self.netmask != other.netmask:
-            return self.netmask > other.netmask
-        return False
-
-    def __le__(self, other):
-        gt = self.__gt__(other)
-        if gt is NotImplemented:
-            return NotImplemented
-        return not gt
-
-    def __ge__(self, other):
-        lt = self.__lt__(other)
-        if lt is NotImplemented:
-            return NotImplemented
-        return not lt
-
-    def __eq__(self, other):
-        try:
-            return (self._version == other._version
-                    and self.network == other.network
-                    and int(self.netmask) == int(other.netmask))
-        except AttributeError:
-            if isinstance(other, _BaseIP):
-                return (self._version == other._version
-                        and self._ip == other._ip)
-
-    def __ne__(self, other):
-        eq = self.__eq__(other)
-        if eq is NotImplemented:
-            return NotImplemented
-        return not eq
-
-    def __str__(self):
-        return  '%s/%s' % (str(self.ip),
-                           str(self._prefixlen))
-
-    def __hash__(self):
-        return hash(int(self.network) ^ int(self.netmask))
-
-    def __contains__(self, other):
-        # always false if one is v4 and the other is v6.
-        if self._version != other._version:
-          return False
-        # dealing with another network.
-        if isinstance(other, _BaseNet):
-            return (self.network <= other.network and
-                    self.broadcast >= other.broadcast)
-        # dealing with another address
-        else:
-            return (int(self.network) <= int(other._ip) <=
-                    int(self.broadcast))
-
-    def overlaps(self, other):
-        """Tell if self is partly contained in other."""
-        return self.network in other or self.broadcast in other or (
-            other.network in self or other.broadcast in self)
-
-    @property
-    def network(self):
-        x = self._cache.get('network')
-        if x is None:
-            x = IPAddress(self._ip & int(self.netmask), version=self._version)
-            self._cache['network'] = x
-        return x
-
-    @property
-    def broadcast(self):
-        x = self._cache.get('broadcast')
-        if x is None:
-            x = IPAddress(self._ip | int(self.hostmask), version=self._version)
-            self._cache['broadcast'] = x
-        return x
-
-    @property
-    def hostmask(self):
-        x = self._cache.get('hostmask')
-        if x is None:
-            x = IPAddress(int(self.netmask) ^ self._ALL_ONES,
-                          version=self._version)
-            self._cache['hostmask'] = x
-        return x
-
-    @property
-    def with_prefixlen(self):
-        return '%s/%d' % (str(self.ip), self._prefixlen)
-
-    @property
-    def with_netmask(self):
-        return '%s/%s' % (str(self.ip), str(self.netmask))
-
-    @property
-    def with_hostmask(self):
-        return '%s/%s' % (str(self.ip), str(self.hostmask))
-
-    @property
-    def numhosts(self):
-        """Number of hosts in the current subnet."""
-        return int(self.broadcast) - int(self.network) + 1
-
-    @property
-    def version(self):
-        raise NotImplementedError('BaseNet has no version')
-
-    @property
-    def prefixlen(self):
-        return self._prefixlen
-
-    def address_exclude(self, other):
-        """Remove an address from a larger block.
-
-        For example:
-
-            addr1 = IPNetwork('10.1.1.0/24')
-            addr2 = IPNetwork('10.1.1.0/26')
-            addr1.address_exclude(addr2) =
-                [IPNetwork('10.1.1.64/26'), IPNetwork('10.1.1.128/25')]
-
-        or IPv6:
-
-            addr1 = IPNetwork('::1/32')
-            addr2 = IPNetwork('::1/128')
-            addr1.address_exclude(addr2) = [IPNetwork('::0/128'),
-                IPNetwork('::2/127'),
-                IPNetwork('::4/126'),
-                IPNetwork('::8/125'),
-                ...
-                IPNetwork('0:0:8000::/33')]
-
-        Args:
-            other: An IPvXNetwork object of the same type.
-
-        Returns:
-            A sorted list of IPvXNetwork objects addresses which is self
-            minus other.
-
-        Raises:
-            TypeError: If self and other are of difffering address
-              versions, or if other is not a network object.
-            ValueError: If other is not completely contained by self.
-
-        """
-        if not self._version == other._version:
-            raise TypeError("%s and %s are not of the same version" % (
-                str(self), str(other)))
-
-        if not isinstance(other, _BaseNet):
-            raise TypeError("%s is not a network object" % str(other))
-
-        if other not in self:
-            raise ValueError('%s not contained in %s' % (str(other),
-                                                         str(self)))
-        if other == self:
-            return []
-
-        ret_addrs = []
-
-        # Make sure we're comparing the network of other.
-        other = IPNetwork('%s/%s' % (str(other.network), str(other.prefixlen)),
-                   version=other._version)
-
-        s1, s2 = self.subnet()
-        while s1 != other and s2 != other:
-            if other in s1:
-                ret_addrs.append(s2)
-                s1, s2 = s1.subnet()
-            elif other in s2:
-                ret_addrs.append(s1)
-                s1, s2 = s2.subnet()
-            else:
-                # If we got here, there's a bug somewhere.
-                assert True == False, ('Error performing exclusion: '
-                                       's1: %s s2: %s other: %s' %
-                                       (str(s1), str(s2), str(other)))
-        if s1 == other:
-            ret_addrs.append(s2)
-        elif s2 == other:
-            ret_addrs.append(s1)
-        else:
-            # If we got here, there's a bug somewhere.
-            assert True == False, ('Error performing exclusion: '
-                                   's1: %s s2: %s other: %s' %
-                                   (str(s1), str(s2), str(other)))
-
-        return sorted(ret_addrs, key=_BaseNet._get_networks_key)
-
-    def compare_networks(self, other):
-        """Compare two IP objects.
-
-        This is only concerned about the comparison of the integer
-        representation of the network addresses.  This means that the
-        host bits aren't considered at all in this method.  If you want
-        to compare host bits, you can easily enough do a
-        'HostA._ip < HostB._ip'
-
-        Args:
-            other: An IP object.
-
-        Returns:
-            If the IP versions of self and other are the same, returns:
-
-            -1 if self < other:
-              eg: IPv4('1.1.1.0/24') < IPv4('1.1.2.0/24')
-              IPv6('1080::200C:417A') < IPv6('1080::200B:417B')
-            0 if self == other
-              eg: IPv4('1.1.1.1/24') == IPv4('1.1.1.2/24')
-              IPv6('1080::200C:417A/96') == IPv6('1080::200C:417B/96')
-            1 if self > other
-              eg: IPv4('1.1.1.0/24') > IPv4('1.1.0.0/24')
-              IPv6('1080::1:200C:417A/112') >
-              IPv6('1080::0:200C:417A/112')
-
-            If the IP versions of self and other are different, returns:
-
-            -1 if self._version < other._version
-              eg: IPv4('10.0.0.1/24') < IPv6('::1/128')
-            1 if self._version > other._version
-              eg: IPv6('::1/128') > IPv4('255.255.255.0/24')
-
-        """
-        if self._version < other._version:
-            return -1
-        if self._version > other._version:
-            return 1
-        # self._version == other._version below here:
-        if self.network < other.network:
-            return -1
-        if self.network > other.network:
-            return 1
-        # self.network == other.network below here:
-        if self.netmask < other.netmask:
-            return -1
-        if self.netmask > other.netmask:
-            return 1
-        # self.network == other.network and self.netmask == other.netmask
-        return 0
-
-    def _get_networks_key(self):
-        """Network-only key function.
-
-        Returns an object that identifies this address' network and
-        netmask. This function is a suitable "key" argument for sorted()
-        and list.sort().
-
-        """
-        return (self._version, self.network, self.netmask)
-
-    def _ip_int_from_prefix(self, prefixlen):
-        """Turn the prefix length into a bitwise netmask.
-
-        Args:
-            prefixlen: An integer, the prefix length.
-
-        Returns:
-            An integer.
-
-        """
-        return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
-
-    def _prefix_from_ip_int(self, ip_int):
-        """Return prefix length from a bitwise netmask.
-
-        Args:
-            ip_int: An integer, the netmask in expanded bitwise format.
-
-        Returns:
-            An integer, the prefix length.
-
-        Raises:
-            NetmaskValueError: If the input is not a valid netmask.
-
-        """
-        prefixlen = self._max_prefixlen
-        while prefixlen:
-            if ip_int & 1:
-                break
-            ip_int >>= 1
-            prefixlen -= 1
-
-        if ip_int == (1 << prefixlen) - 1:
-            return prefixlen
-        else:
-            raise NetmaskValueError('Bit pattern does not match /1*0*/')
-
-    def _prefix_from_prefix_string(self, prefixlen_str):
-        """Turn a prefix length string into an integer.
-
-        Args:
-            prefixlen_str: A decimal string containing the prefix length.
-
-        Returns:
-            The prefix length as an integer.
-
-        Raises:
-            NetmaskValueError: If the input is malformed or out of range.
-
-        """
-        try:
-            if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
-                raise ValueError
-            prefixlen = int(prefixlen_str)
-            if not (0 <= prefixlen <= self._max_prefixlen):
-               raise ValueError
-        except ValueError:
-            raise NetmaskValueError('%s is not a valid prefix length' %
-                                    prefixlen_str)
-        return prefixlen
-
-    def _prefix_from_ip_string(self, ip_str):
-        """Turn a netmask/hostmask string into a prefix length.
-
-        Args:
-            ip_str: A netmask or hostmask, formatted as an IP address.
-
-        Returns:
-            The prefix length as an integer.
-
-        Raises:
-            NetmaskValueError: If the input is not a netmask or hostmask.
-
-        """
-        # Parse the netmask/hostmask like an IP address.
-        try:
-            ip_int = self._ip_int_from_string(ip_str)
-        except AddressValueError:
-            raise NetmaskValueError('%s is not a valid netmask' % ip_str)
-
-        # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
-        # Note that the two ambiguous cases (all-ones and all-zeroes) are
-        # treated as netmasks.
-        try:
-            return self._prefix_from_ip_int(ip_int)
-        except NetmaskValueError:
-            pass
-
-        # Invert the bits, and try matching a /0+1+/ hostmask instead.
-        ip_int ^= self._ALL_ONES
-        try:
-            return self._prefix_from_ip_int(ip_int)
-        except NetmaskValueError:
-            raise NetmaskValueError('%s is not a valid netmask' % ip_str)
-
-    def iter_subnets(self, prefixlen_diff=1, new_prefix=None):
-        """The subnets which join to make the current subnet.
-
-        In the case that self contains only one IP
-        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
-        for IPv6), return a list with just ourself.
-
-        Args:
-            prefixlen_diff: An integer, the amount the prefix length
-              should be increased by. This should not be set if
-              new_prefix is also set.
-            new_prefix: The desired new prefix length. This must be a
-              larger number (smaller prefix) than the existing prefix.
-              This should not be set if prefixlen_diff is also set.
-
-        Returns:
-            An iterator of IPv(4|6) objects.
-
-        Raises:
-            ValueError: The prefixlen_diff is too small or too large.
-                OR
-            prefixlen_diff and new_prefix are both set or new_prefix
-              is a smaller number than the current prefix (smaller
-              number means a larger network)
-
-        """
-        if self._prefixlen == self._max_prefixlen:
-            yield self
-            return
-
-        if new_prefix is not None:
-            if new_prefix < self._prefixlen:
-                raise ValueError('new prefix must be longer')
-            if prefixlen_diff != 1:
-                raise ValueError('cannot set prefixlen_diff and new_prefix')
-            prefixlen_diff = new_prefix - self._prefixlen
-
-        if prefixlen_diff < 0:
-            raise ValueError('prefix length diff must be > 0')
-        new_prefixlen = self._prefixlen + prefixlen_diff
-
-        if new_prefixlen > self._max_prefixlen:
-            raise ValueError(
-                'prefix length diff %d is invalid for netblock %s' % (
-                    new_prefixlen, str(self)))
-
-        first = IPNetwork('%s/%s' % (str(self.network),
-                                     str(self._prefixlen + prefixlen_diff)),
-                         version=self._version)
-
-        yield first
-        current = first
-        while True:
-            broadcast = current.broadcast
-            if broadcast == self.broadcast:
-                return
-            new_addr = IPAddress(int(broadcast) + 1, version=self._version)
-            current = IPNetwork('%s/%s' % (str(new_addr), str(new_prefixlen)),
-                                version=self._version)
-
-            yield current
-
-    def masked(self):
-        """Return the network object with the host bits masked out."""
-        return IPNetwork('%s/%d' % (self.network, self._prefixlen),
-                         version=self._version)
-
-    def subnet(self, prefixlen_diff=1, new_prefix=None):
-        """Return a list of subnets, rather than an iterator."""
-        return list(self.iter_subnets(prefixlen_diff, new_prefix))
-
-    def supernet(self, prefixlen_diff=1, new_prefix=None):
-        """The supernet containing the current network.
-
-        Args:
-            prefixlen_diff: An integer, the amount the prefix length of
-              the network should be decreased by.  For example, given a
-              /24 network and a prefixlen_diff of 3, a supernet with a
-              /21 netmask is returned.
-
-        Returns:
-            An IPv4 network object.
-
-        Raises:
-            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have a
-              negative prefix length.
-                OR
-            If prefixlen_diff and new_prefix are both set or new_prefix is a
-              larger number than the current prefix (larger number means a
-              smaller network)
-
-        """
-        if self._prefixlen == 0:
-            return self
-
-        if new_prefix is not None:
-            if new_prefix > self._prefixlen:
-                raise ValueError('new prefix must be shorter')
-            if prefixlen_diff != 1:
-                raise ValueError('cannot set prefixlen_diff and new_prefix')
-            prefixlen_diff = self._prefixlen - new_prefix
-
-
-        if self.prefixlen - prefixlen_diff < 0:
-            raise ValueError(
-                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
-                (self.prefixlen, prefixlen_diff))
-        return IPNetwork('%s/%s' % (str(self.network),
-                                    str(self.prefixlen - prefixlen_diff)),
-                         version=self._version)
-
-    # backwards compatibility
-    Subnet = subnet
-    Supernet = supernet
-    AddressExclude = address_exclude
-    CompareNetworks = compare_networks
-    Contains = __contains__
-
-
-class _BaseV4(object):
-
-    """Base IPv4 object.
-
-    The following methods are used by IPv4 objects in both single IP
-    addresses and networks.
-
-    """
-
-    # Equivalent to 255.255.255.255 or 32 bits of 1's.
-    _ALL_ONES = (2**IPV4LENGTH) - 1
-    _DECIMAL_DIGITS = frozenset('0123456789')
-
-    def __init__(self, address):
-        self._version = 4
-        self._max_prefixlen = IPV4LENGTH
-
-    def _explode_shorthand_ip_string(self):
-        return str(self)
-
-    def _ip_int_from_string(self, ip_str):
-        """Turn the given IP string into an integer for comparison.
-
-        Args:
-            ip_str: A string, the IP ip_str.
-
-        Returns:
-            The IP ip_str as an integer.
-
-        Raises:
-            AddressValueError: if ip_str isn't a valid IPv4 Address.
-
-        """
-        octets = ip_str.split('.')
-        if len(octets) != 4:
-            raise AddressValueError(ip_str)
-
-        packed_ip = 0
-        for oc in octets:
-            try:
-                packed_ip = (packed_ip << 8) | self._parse_octet(oc)
-            except ValueError:
-                raise AddressValueError(ip_str)
-        return packed_ip
-
-    def _parse_octet(self, octet_str):
-        """Convert a decimal octet into an integer.
-
-        Args:
-            octet_str: A string, the number to parse.
-
-        Returns:
-            The octet as an integer.
-
-        Raises:
-            ValueError: if the octet isn't strictly a decimal from [0..255].
-
-        """
-        # Whitelist the characters, since int() allows a lot of bizarre stuff.
-        if not self._DECIMAL_DIGITS.issuperset(octet_str):
-            raise ValueError
-        octet_int = int(octet_str, 10)
-        # Disallow leading zeroes, because no clear standard exists on
-        # whether these should be interpreted as decimal or octal.
-        if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
-            raise ValueError
-        return octet_int
-
-    def _string_from_ip_int(self, ip_int):
-        """Turns a 32-bit integer into dotted decimal notation.
-
-        Args:
-            ip_int: An integer, the IP address.
-
-        Returns:
-            The IP address as a string in dotted decimal notation.
-
-        """
-        octets = []
-        for _ in xrange(4):
-            octets.insert(0, str(ip_int & 0xFF))
-            ip_int >>= 8
-        return '.'.join(octets)
-
-    @property
-    def max_prefixlen(self):
-        return self._max_prefixlen
-
-    @property
-    def packed(self):
-        """The binary representation of this address."""
-        return v4_int_to_packed(self._ip)
-
-    @property
-    def version(self):
-        return self._version
-
-    @property
-    def is_reserved(self):
-       """Test if the address is otherwise IETF reserved.
-
-        Returns:
-            A boolean, True if the address is within the
-            reserved IPv4 Network range.
-
-       """
-       return self in IPv4Network('240.0.0.0/4')
-
-    @property
-    def is_private(self):
-        """Test if this address is allocated for private networks.
-
-        Returns:
-            A boolean, True if the address is reserved per RFC 1918.
-
-        """
-        return (self in IPv4Network('10.0.0.0/8') or
-                self in IPv4Network('172.16.0.0/12') or
-                self in IPv4Network('192.168.0.0/16'))
-
-    @property
-    def is_multicast(self):
-        """Test if the address is reserved for multicast use.
-
-        Returns:
-            A boolean, True if the address is multicast.
-            See RFC 3171 for details.
-
-        """
-        return self in IPv4Network('224.0.0.0/4')
-
-    @property
-    def is_unspecified(self):
-        """Test if the address is unspecified.
-
-        Returns:
-            A boolean, True if this is the unspecified address as defined in
-            RFC 5735 3.
-
-        """
-        return self in IPv4Network('0.0.0.0')
-
-    @property
-    def is_loopback(self):
-        """Test if the address is a loopback address.
-
-        Returns:
-            A boolean, True if the address is a loopback per RFC 3330.
-
-        """
-        return self in IPv4Network('127.0.0.0/8')
-
-    @property
-    def is_link_local(self):
-        """Test if the address is reserved for link-local.
-
-        Returns:
-            A boolean, True if the address is link-local per RFC 3927.
-
-        """
-        return self in IPv4Network('169.254.0.0/16')
-
-
-class IPv4Address(_BaseV4, _BaseIP):
-
-    """Represent and manipulate single IPv4 Addresses."""
-
-    def __init__(self, address):
-
-        """
-        Args:
-            address: A string or integer representing the IP
-              '192.168.1.1'
-
-              Additionally, an integer can be passed, so
-              IPv4Address('192.168.1.1') == IPv4Address(3232235777).
-              or, more generally
-              IPv4Address(int(IPv4Address('192.168.1.1'))) ==
-                IPv4Address('192.168.1.1')
-
-        Raises:
-            AddressValueError: If ipaddr isn't a valid IPv4 address.
-
-        """
-        _BaseV4.__init__(self, address)
-
-        # Efficient constructor from integer.
-        if isinstance(address, (int, long)):
-            self._ip = address
-            if address < 0 or address > self._ALL_ONES:
-                raise AddressValueError(address)
-            return
-
-        # Constructing from a packed address
-        if isinstance(address, Bytes):
-            try:
-                self._ip, = struct.unpack('!I', address)
-            except struct.error:
-                raise AddressValueError(address)  # Wrong length.
-            return
-
-        # Assume input argument to be string or any object representation
-        # which converts into a formatted IP string.
-        addr_str = str(address)
-        self._ip = self._ip_int_from_string(addr_str)
-
-
-class IPv4Network(_BaseV4, _BaseNet):
-
-    """This class represents and manipulates 32-bit IPv4 networks.
-
-    Attributes: [examples for IPv4Network('1.2.3.4/27')]
-        ._ip: 16909060
-        .ip: IPv4Address('1.2.3.4')
-        .network: IPv4Address('1.2.3.0')
-        .hostmask: IPv4Address('0.0.0.31')
-        .broadcast: IPv4Address('1.2.3.31')
-        .netmask: IPv4Address('255.255.255.224')
-        .prefixlen: 27
-
-    """
-
-    def __init__(self, address, strict=False):
-        """Instantiate a new IPv4 network object.
-
-        Args:
-            address: A string or integer representing the IP [& network].
-              '192.168.1.1/24'
-              '192.168.1.1/255.255.255.0'
-              '192.168.1.1/0.0.0.255'
-              are all functionally the same in IPv4. Similarly,
-              '192.168.1.1'
-              '192.168.1.1/255.255.255.255'
-              '192.168.1.1/32'
-              are also functionaly equivalent. That is to say, failing to
-              provide a subnetmask will create an object with a mask of /32.
-
-              If the mask (portion after the / in the argument) is given in
-              dotted quad form, it is treated as a netmask if it starts with a
-              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
-              starts with a zero field (e.g. 0.255.255.255 == /8), with the
-              single exception of an all-zero mask which is treated as a
-              netmask == /0. If no mask is given, a default of /32 is used.
-
-              Additionally, an integer can be passed, so
-              IPv4Network('192.168.1.1') == IPv4Network(3232235777).
-              or, more generally
-              IPv4Network(int(IPv4Network('192.168.1.1'))) ==
-                IPv4Network('192.168.1.1')
-
-            strict: A boolean. If true, ensure that we have been passed
-              A true network address, eg, 192.168.1.0/24 and not an
-              IP address on a network, eg, 192.168.1.1/24.
-
-        Raises:
-            AddressValueError: If ipaddr isn't a valid IPv4 address.
-            NetmaskValueError: If the netmask isn't valid for
-              an IPv4 address.
-            ValueError: If strict was True and a network address was not
-              supplied.
-
-        """
-        _BaseNet.__init__(self, address)
-        _BaseV4.__init__(self, address)
-
-        # Constructing from an integer or packed bytes.
-        if isinstance(address, (int, long, Bytes)):
-            self.ip = IPv4Address(address)
-            self._ip = self.ip._ip
-            self._prefixlen = self._max_prefixlen
-            self.netmask = IPv4Address(self._ALL_ONES)
-            return
-
-        # Assume input argument to be string or any object representation
-        # which converts into a formatted IP prefix string.
-        addr = str(address).split('/')
-
-        if len(addr) > 2:
-            raise AddressValueError(address)
-
-        self._ip = self._ip_int_from_string(addr[0])
-        self.ip = IPv4Address(self._ip)
-
-        if len(addr) == 2:
-            try:
-                # Check for a netmask in prefix length form.
-                self._prefixlen = self._prefix_from_prefix_string(addr[1])
-            except NetmaskValueError:
-                # Check for a netmask or hostmask in dotted-quad form.
-                # This may raise NetmaskValueError.
-                self._prefixlen = self._prefix_from_ip_string(addr[1])
-        else:
-            self._prefixlen = self._max_prefixlen
-
-        self.netmask = IPv4Address(self._ip_int_from_prefix(self._prefixlen))
-
-        if strict:
-            if self.ip != self.network:
-                raise ValueError('%s has host bits set' %
-                                 self.ip)
-        if self._prefixlen == (self._max_prefixlen - 1):
-            self.iterhosts = self.__iter__
-
-    # backwards compatibility
-    IsRFC1918 = lambda self: self.is_private
-    IsMulticast = lambda self: self.is_multicast
-    IsLoopback = lambda self: self.is_loopback
-    IsLinkLocal = lambda self: self.is_link_local
-
-
-class _BaseV6(object):
-
-    """Base IPv6 object.
-
-    The following methods are used by IPv6 objects in both single IP
-    addresses and networks.
-
-    """
-
-    _ALL_ONES = (2**IPV6LENGTH) - 1
-    _HEXTET_COUNT = 8
-    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
-
-    def __init__(self, address):
-        self._version = 6
-        self._max_prefixlen = IPV6LENGTH
-
-    def _ip_int_from_string(self, ip_str):
-        """Turn an IPv6 ip_str into an integer.
-
-        Args:
-            ip_str: A string, the IPv6 ip_str.
-
-        Returns:
-            A long, the IPv6 ip_str.
-
-        Raises:
-            AddressValueError: if ip_str isn't a valid IPv6 Address.
-
-        """
-        parts = ip_str.split(':')
-
-        # An IPv6 address needs at least 2 colons (3 parts).
-        if len(parts) < 3:
-            raise AddressValueError(ip_str)
-
-        # If the address has an IPv4-style suffix, convert it to hexadecimal.
-        if '.' in parts[-1]:
-            ipv4_int = IPv4Address(parts.pop())._ip
-            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
-            parts.append('%x' % (ipv4_int & 0xFFFF))
-
-        # An IPv6 address can't have more than 8 colons (9 parts).
-        if len(parts) > self._HEXTET_COUNT + 1:
-            raise AddressValueError(ip_str)
-
-        # Disregarding the endpoints, find '::' with nothing in between.
-        # This indicates that a run of zeroes has been skipped.
-        try:
-            skip_index, = (
-                [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
-                [None])
-        except ValueError:
-            # Can't have more than one '::'
-            raise AddressValueError(ip_str)
-
-        # parts_hi is the number of parts to copy from above/before the '::'
-        # parts_lo is the number of parts to copy from below/after the '::'
-        if skip_index is not None:
-            # If we found a '::', then check if it also covers the endpoints.
-            parts_hi = skip_index
-            parts_lo = len(parts) - skip_index - 1
-            if not parts[0]:
-                parts_hi -= 1
-                if parts_hi:
-                    raise AddressValueError(ip_str)  # ^: requires ^::
-            if not parts[-1]:
-                parts_lo -= 1
-                if parts_lo:
-                    raise AddressValueError(ip_str)  # :$ requires ::$
-            parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
-            if parts_skipped < 1:
-                raise AddressValueError(ip_str)
-        else:
-            # Otherwise, allocate the entire address to parts_hi.  The endpoints
-            # could still be empty, but _parse_hextet() will check for that.
-            if len(parts) != self._HEXTET_COUNT:
-                raise AddressValueError(ip_str)
-            parts_hi = len(parts)
-            parts_lo = 0
-            parts_skipped = 0
-
-        try:
-            # Now, parse the hextets into a 128-bit integer.
-            ip_int = 0L
-            for i in xrange(parts_hi):
-                ip_int <<= 16
-                ip_int |= self._parse_hextet(parts[i])
-            ip_int <<= 16 * parts_skipped
-            for i in xrange(-parts_lo, 0):
-                ip_int <<= 16
-                ip_int |= self._parse_hextet(parts[i])
-            return ip_int
-        except ValueError:
-            raise AddressValueError(ip_str)
-
-    def _parse_hextet(self, hextet_str):
-        """Convert an IPv6 hextet string into an integer.
-
-        Args:
-            hextet_str: A string, the number to parse.
-
-        Returns:
-            The hextet as an integer.
-
-        Raises:
-            ValueError: if the input isn't strictly a hex number from [0..FFFF].
-
-        """
-        # Whitelist the characters, since int() allows a lot of bizarre stuff.
-        if not self._HEX_DIGITS.issuperset(hextet_str):
-            raise ValueError
-        if len(hextet_str) > 4:
-          raise ValueError
-        hextet_int = int(hextet_str, 16)
-        if hextet_int > 0xFFFF:
-            raise ValueError
-        return hextet_int
-
-    def _compress_hextets(self, hextets):
-        """Compresses a list of hextets.
-
-        Compresses a list of strings, replacing the longest continuous
-        sequence of "0" in the list with "" and adding empty strings at
-        the beginning or at the end of the string such that subsequently
-        calling ":".join(hextets) will produce the compressed version of
-        the IPv6 address.
-
-        Args:
-            hextets: A list of strings, the hextets to compress.
-
-        Returns:
-            A list of strings.
-
-        """
-        best_doublecolon_start = -1
-        best_doublecolon_len = 0
-        doublecolon_start = -1
-        doublecolon_len = 0
-        for index in range(len(hextets)):
-            if hextets[index] == '0':
-                doublecolon_len += 1
-                if doublecolon_start == -1:
-                    # Start of a sequence of zeros.
-                    doublecolon_start = index
-                if doublecolon_len > best_doublecolon_len:
-                    # This is the longest sequence of zeros so far.
-                    best_doublecolon_len = doublecolon_len
-                    best_doublecolon_start = doublecolon_start
-            else:
-                doublecolon_len = 0
-                doublecolon_start = -1
-
-        if best_doublecolon_len > 1:
-            best_doublecolon_end = (best_doublecolon_start +
-                                    best_doublecolon_len)
-            # For zeros at the end of the address.
-            if best_doublecolon_end == len(hextets):
-                hextets += ['']
-            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
-            # For zeros at the beginning of the address.
-            if best_doublecolon_start == 0:
-                hextets = [''] + hextets
-
-        return hextets
-
-    def _string_from_ip_int(self, ip_int=None):
-        """Turns a 128-bit integer into hexadecimal notation.
-
-        Args:
-            ip_int: An integer, the IP address.
-
-        Returns:
-            A string, the hexadecimal representation of the address.
-
-        Raises:
-            ValueError: The address is bigger than 128 bits of all ones.
-
-        """
-        if not ip_int and ip_int != 0:
-            ip_int = int(self._ip)
-
-        if ip_int > self._ALL_ONES:
-            raise ValueError('IPv6 address is too large')
-
-        hex_str = '%032x' % ip_int
-        hextets = []
-        for x in range(0, 32, 4):
-            hextets.append('%x' % int(hex_str[x:x+4], 16))
-
-        hextets = self._compress_hextets(hextets)
-        return ':'.join(hextets)
-
-    def _explode_shorthand_ip_string(self):
-        """Expand a shortened IPv6 address.
-
-        Args:
-            ip_str: A string, the IPv6 address.
-
-        Returns:
-            A string, the expanded IPv6 address.
-
-        """
-        if isinstance(self, _BaseNet):
-            ip_str = str(self.ip)
-        else:
-            ip_str = str(self)
-
-        ip_int = self._ip_int_from_string(ip_str)
-        parts = []
-        for i in xrange(self._HEXTET_COUNT):
-            parts.append('%04x' % (ip_int & 0xFFFF))
-            ip_int >>= 16
-        parts.reverse()
-        if isinstance(self, _BaseNet):
-            return '%s/%d' % (':'.join(parts), self.prefixlen)
-        return ':'.join(parts)
-
-    @property
-    def max_prefixlen(self):
-        return self._max_prefixlen
-
-    @property
-    def packed(self):
-        """The binary representation of this address."""
-        return v6_int_to_packed(self._ip)
-
-    @property
-    def version(self):
-        return self._version
-
-    @property
-    def is_multicast(self):
-        """Test if the address is reserved for multicast use.
-
-        Returns:
-            A boolean, True if the address is a multicast address.
-            See RFC 2373 2.7 for details.
-
-        """
-        return self in IPv6Network('ff00::/8')
-
-    @property
-    def is_reserved(self):
-        """Test if the address is otherwise IETF reserved.
-
-        Returns:
-            A boolean, True if the address is within one of the
-            reserved IPv6 Network ranges.
-
-        """
-        return (self in IPv6Network('::/8') or
-                self in IPv6Network('100::/8') or
-                self in IPv6Network('200::/7') or
-                self in IPv6Network('400::/6') or
-                self in IPv6Network('800::/5') or
-                self in IPv6Network('1000::/4') or
-                self in IPv6Network('4000::/3') or
-                self in IPv6Network('6000::/3') or
-                self in IPv6Network('8000::/3') or
-                self in IPv6Network('A000::/3') or
-                self in IPv6Network('C000::/3') or
-                self in IPv6Network('E000::/4') or
-                self in IPv6Network('F000::/5') or
-                self in IPv6Network('F800::/6') or
-                self in IPv6Network('FE00::/9'))
-
-    @property
-    def is_unspecified(self):
-        """Test if the address is unspecified.
-
-        Returns:
-            A boolean, True if this is the unspecified address as defined in
-            RFC 2373 2.5.2.
-
-        """
-        return self._ip == 0 and getattr(self, '_prefixlen', 128) == 128
-
-    @property
-    def is_loopback(self):
-        """Test if the address is a loopback address.
-
-        Returns:
-            A boolean, True if the address is a loopback address as defined in
-            RFC 2373 2.5.3.
-
-        """
-        return self._ip == 1 and getattr(self, '_prefixlen', 128) == 128
-
-    @property
-    def is_link_local(self):
-        """Test if the address is reserved for link-local.
-
-        Returns:
-            A boolean, True if the address is reserved per RFC 4291.
-
-        """
-        return self in IPv6Network('fe80::/10')
-
-    @property
-    def is_site_local(self):
-        """Test if the address is reserved for site-local.
-
-        Note that the site-local address space has been deprecated by RFC 3879.
-        Use is_private to test if this address is in the space of unique local
-        addresses as defined by RFC 4193.
-
-        Returns:
-            A boolean, True if the address is reserved per RFC 3513 2.5.6.
-
-        """
-        return self in IPv6Network('fec0::/10')
-
-    @property
-    def is_private(self):
-        """Test if this address is allocated for private networks.
-
-        Returns:
-            A boolean, True if the address is reserved per RFC 4193.
-
-        """
-        return self in IPv6Network('fc00::/7')
-
-    @property
-    def ipv4_mapped(self):
-        """Return the IPv4 mapped address.
-
-        Returns:
-            If the IPv6 address is a v4 mapped address, return the
-            IPv4 mapped address. Return None otherwise.
-
-        """
-        if (self._ip >> 32) != 0xFFFF:
-            return None
-        return IPv4Address(self._ip & 0xFFFFFFFF)
-
-    @property
-    def teredo(self):
-        """Tuple of embedded teredo IPs.
-
-        Returns:
-            Tuple of the (server, client) IPs or None if the address
-            doesn't appear to be a teredo address (doesn't start with
-            2001::/32)
-
-        """
-        if (self._ip >> 96) != 0x20010000:
-            return None
-        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
-                IPv4Address(~self._ip & 0xFFFFFFFF))
-
-    @property
-    def sixtofour(self):
-        """Return the IPv4 6to4 embedded address.
-
-        Returns:
-            The IPv4 6to4-embedded address if present or None if the
-            address doesn't appear to contain a 6to4 embedded address.
-
-        """
-        if (self._ip >> 112) != 0x2002:
-            return None
-        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
-
-
-class IPv6Address(_BaseV6, _BaseIP):
-
-    """Represent and manipulate single IPv6 Addresses.
-    """
-
-    def __init__(self, address):
-        """Instantiate a new IPv6 address object.
-
-        Args:
-            address: A string or integer representing the IP
-
-              Additionally, an integer can be passed, so
-              IPv6Address('2001:4860::') ==
-                IPv6Address(42541956101370907050197289607612071936L).
-              or, more generally
-              IPv6Address(IPv6Address('2001:4860::')._ip) ==
-                IPv6Address('2001:4860::')
-
-        Raises:
-            AddressValueError: If address isn't a valid IPv6 address.
-
-        """
-        _BaseV6.__init__(self, address)
-
-        # Efficient constructor from integer.
-        if isinstance(address, (int, long)):
-            self._ip = address
-            if address < 0 or address > self._ALL_ONES:
-                raise AddressValueError(address)
-            return
-
-        # Constructing from a packed address
-        if isinstance(address, Bytes):
-            try:
-                hi, lo = struct.unpack('!QQ', address)
-            except struct.error:
-                raise AddressValueError(address)  # Wrong length.
-            self._ip = (hi << 64) | lo
-            return
-
-        # Assume input argument to be string or any object representation
-        # which converts into a formatted IP string.
-        addr_str = str(address)
-        if not addr_str:
-            raise AddressValueError('')
-
-        self._ip = self._ip_int_from_string(addr_str)
-
-
-class IPv6Network(_BaseV6, _BaseNet):
-
-    """This class represents and manipulates 128-bit IPv6 networks.
-
-    Attributes: [examples for IPv6('2001:658:22A:CAFE:200::1/64')]
-        .ip: IPv6Address('2001:658:22a:cafe:200::1')
-        .network: IPv6Address('2001:658:22a:cafe::')
-        .hostmask: IPv6Address('::ffff:ffff:ffff:ffff')
-        .broadcast: IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff')
-        .netmask: IPv6Address('ffff:ffff:ffff:ffff::')
-        .prefixlen: 64
-
-    """
-
-
-    def __init__(self, address, strict=False):
-        """Instantiate a new IPv6 Network object.
-
-        Args:
-            address: A string or integer representing the IPv6 network or the IP
-              and prefix/netmask.
-              '2001:4860::/128'
-              '2001:4860:0000:0000:0000:0000:0000:0000/128'
-              '2001:4860::'
-              are all functionally the same in IPv6.  That is to say,
-              failing to provide a subnetmask will create an object with
-              a mask of /128.
-
-              Additionally, an integer can be passed, so
-              IPv6Network('2001:4860::') ==
-                IPv6Network(42541956101370907050197289607612071936L).
-              or, more generally
-              IPv6Network(IPv6Network('2001:4860::')._ip) ==
-                IPv6Network('2001:4860::')
-
-            strict: A boolean. If true, ensure that we have been passed
-              A true network address, eg, 192.168.1.0/24 and not an
-              IP address on a network, eg, 192.168.1.1/24.
-
-        Raises:
-            AddressValueError: If address isn't a valid IPv6 address.
-            NetmaskValueError: If the netmask isn't valid for
-              an IPv6 address.
-            ValueError: If strict was True and a network address was not
-              supplied.
-
-        """
-        _BaseNet.__init__(self, address)
-        _BaseV6.__init__(self, address)
-
-        # Constructing from an integer or packed bytes.
-        if isinstance(address, (int, long, Bytes)):
-            self.ip = IPv6Address(address)
-            self._ip = self.ip._ip
-            self._prefixlen = self._max_prefixlen
-            self.netmask = IPv6Address(self._ALL_ONES)
-            return
-
-        # Assume input argument to be string or any object representation
-        # which converts into a formatted IP prefix string.
-        addr = str(address).split('/')
-
-        if len(addr) > 2:
-            raise AddressValueError(address)
-
-        self._ip = self._ip_int_from_string(addr[0])
-        self.ip = IPv6Address(self._ip)
-
-        if len(addr) == 2:
-            # This may raise NetmaskValueError
-            self._prefixlen = self._prefix_from_prefix_string(addr[1])
-        else:
-            self._prefixlen = self._max_prefixlen
-
-        self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
-
-        if strict:
-            if self.ip != self.network:
-                raise ValueError('%s has host bits set' %
-                                 self.ip)
-        if self._prefixlen == (self._max_prefixlen - 1):
-            self.iterhosts = self.__iter__
-
-    @property
-    def with_netmask(self):
-        return self.with_prefixlen

+ 0 - 1
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/__init__.py

@@ -1 +0,0 @@
-

+ 0 - 24
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/basic_auth.py

@@ -1,24 +0,0 @@
-import urllib
-import urllib2
-import base64
-
-
-def basic_auth(server="http://127.0.0.1"):
-    """
-    to use basic login with a different server
-    from gluon.contrib.login_methods.basic_auth import basic_auth
-    auth.settings.login_methods.append(basic_auth('http://server'))
-    """
-
-    def basic_login_aux(username,
-                        password,
-                        server=server):
-        key = base64.b64encode(username + ':' + password)
-        headers = {'Authorization': 'Basic ' + key}
-        request = urllib2.Request(server, None, headers)
-        try:
-            urllib2.urlopen(request)
-            return True
-        except (urllib2.URLError, urllib2.HTTPError):
-            return False
-    return basic_login_aux

+ 0 - 95
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/browserid_account.py

@@ -1,95 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-    BrowserID Authentication for web2py
-    developed by Madhukar R Pai (Copyright 2012)
-    Email <[email protected]>
-    License : LGPL
-
-    thanks and credits to the web2py community
-
-    This custom authenticator allows web2py to authenticate using browserid (https://login.persona.org/)
-    BrowserID is a project by Mozilla Labs (http://mozillalabs.com/)
-    to Know how browserid works please visit http://identity.mozilla.com/post/7616727542/introducing-browserid-a-better-way-to-sign-in
-
-    bottom line BrowserID provides a free, secure, de-centralized, easy to use(for users and developers) login solution.
-    You can use any email id as your login id. Browserid just verifys the email id and lets you login with that id.
-
-    credits for the doPost jquery function - itsadok (http://stackoverflow.com/users/7581/itsadok)
-
-"""
-import time
-from gluon import *
-from gluon.storage import Storage
-from gluon.tools import fetch
-import gluon.contrib.simplejson as json
-
-
-class BrowserID(object):
-    """
-    from gluon.contrib.login_methods.browserid_account import BrowserID
-    auth.settings.login_form = BrowserID(request,
-        audience = "http://127.0.0.1:8000"
-        assertion_post_url = "http://127.0.0.1:8000/%s/default/user/login" % request.application)
-    """
-
-    def __init__(self,
-                 request,
-                 audience="",
-                 assertion_post_url="",
-                 prompt="BrowserID Login",
-                 issuer="login.persona.org",
-                 verify_url="https://login.persona.org/verify",
-                 browserid_js="https://login.persona.org/include.js",
-                 browserid_button="https://login.persona.org/i/sign_in_red.png",
-                 crypto_js="https://crypto-js.googlecode.com/files/2.2.0-crypto-md5.js",
-                 on_login_failure=None,
-                 ):
-
-        self.request = request
-        self.audience = audience
-        self.assertion_post_url = assertion_post_url
-        self.prompt = prompt
-        self.issuer = issuer
-        self.verify_url = verify_url
-        self.browserid_js = browserid_js
-        self.browserid_button = browserid_button
-        self.crypto_js = crypto_js
-        self.on_login_failure = on_login_failure
-        self.asertion_js = """
-            (function($){$.extend({doPost:function(url,params){var $form=$("<form method='POST'>").attr("action",url);
-            $.each(params,function(name,value){$("<input type='hidden'>").attr("name",name).attr("value",value).appendTo($form)});
-            $form.appendTo("body");$form.submit()}})})(jQuery);
-            function gotVerifiedEmail(assertion){if(assertion !== null){$.doPost('%s',{'assertion':assertion});}}""" % self.assertion_post_url
-
-    def get_user(self):
-        request = self.request
-        if request.vars.assertion:
-            audience = self.audience
-            issuer = self.issuer
-            assertion = XML(request.vars.assertion, sanitize=True)
-            verify_data = {'assertion': assertion, 'audience': audience}
-            auth_info_json = fetch(self.verify_url, data=verify_data)
-            j = json.loads(auth_info_json)
-            epoch_time = int(time.time() * 1000)  # we need 13 digit epoch time
-            if j["status"] == "okay" and j["audience"] == audience and j['issuer'].endswith(issuer) and j['expires'] >= epoch_time:
-                return dict(email=j['email'])
-            elif self.on_login_failure:
-                #print "status:  ", j["status"]=="okay", j["status"]
-                #print "audience:", j["audience"]==audience, j["audience"], audience
-                #print "issuer:  ", j["issuer"]==issuer, j["issuer"], issuer
-                #print "expires:  ", j["expires"] >= epoch_time, j["expires"], epoch_time
-                redirect(self.on_login_failure)
-            else:
-                redirect('https://login.persona.org')
-        return None
-
-    def login_form(self):
-        request = self.request
-        onclick = "javascript:navigator.id.getVerifiedEmail(gotVerifiedEmail) ; return false"
-        form = DIV(SCRIPT(_src=self.browserid_js, _type="text/javascript"),
-                   SCRIPT(_src=self.crypto_js, _type="text/javascript"),
-                   A(IMG(_src=self.browserid_button, _alt=self.prompt), _href="#", _onclick=onclick, _class="browserid", _title="Login With BrowserID"),
-                   SCRIPT(self.asertion_js))
-        return form

+ 0 - 140
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/cas_auth.py

@@ -1,140 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-This file is part of web2py Web Framework (Copyrighted, 2007-2009).
-Developed by Massimo Di Pierro <[email protected]>.
-License: LGPL v3
-
-Tinkered by Szabolcs Gyuris < szimszo n @ o regpreshaz dot eu>
-"""
-
-from gluon import current, redirect, URL
-
-
-class CasAuth(object):
-    """
-    Login will be done via Web2py's CAS application, instead of web2py's
-    login form.
-
-    Include in your model (eg db.py)::
-
-        from gluon.contrib.login_methods.cas_auth import CasAuth
-        auth.define_tables(username=True)
-        auth.settings.login_form=CasAuth(
-            urlbase = "https://[your CAS provider]/app/default/user/cas",
-                       actions=['login','validate','logout'])
-
-    where urlbase is the actual CAS server url without the login,logout...
-    Enjoy.
-
-    ###UPDATE###
-    if you want to connect to a CAS version 2 JASIG Server use this:
-        auth.settings.login_form=CasAuth(
-            urlbase = "https://[Your CAS server]/cas",
-            actions = ['login','serviceValidate','logout'],
-            casversion = 2,
-            casusername = "cas:user")
-
-    where casusername is the xml node returned by CAS server which contains
-    user's username.
-
-    """
-    def __init__(self, g=None,  # g for backward compatibility ###
-                 urlbase="https://web2py.com/cas/cas",
-                 actions=['login', 'validate', 'logout'],
-                 maps=dict(username=lambda v: v.get('username', v['user']),
-                           email=lambda v: v.get('email', None),
-                           user_id=lambda v: v['user']),
-                 casversion=1,
-                 casusername='cas:user'
-                 ):
-        self.urlbase = urlbase
-        self.cas_login_url = "%s/%s" % (self.urlbase, actions[0])
-        self.cas_check_url = "%s/%s" % (self.urlbase, actions[1])
-        self.cas_logout_url = "%s/%s" % (self.urlbase, actions[2])
-        self.maps = maps
-        self.casversion = casversion
-        self.casusername = casusername
-        # vars commented because of
-        # https://code.google.com/p/web2py/issues/detail?id=1774
-        self.cas_my_url = URL(args=current.request.args,
-                              #vars=current.request.vars, 
-                              scheme=True)
-
-    def login_url(self, next="/"):
-        current.session.token = self._CAS_login()
-        return next
-
-    def logout_url(self, next="/"):
-        current.session.token = None
-        current.session.auth = None
-        self._CAS_logout()
-        return next
-
-    def get_user(self):
-        user = current.session.token
-        if user:
-            d = {'source': 'web2py cas'}
-            for key in self.maps:
-                d[key] = self.maps[key](user)
-            return d
-        return None
-
-    def _CAS_login(self):
-        """
-        exposed as CAS.login(request)
-        returns a token on success, None on failed authentication
-        """
-        import urllib
-        self.ticket = current.request.vars.ticket
-        if not current.request.vars.ticket:
-            redirect("%s?service=%s" % (self.cas_login_url,
-                                        self.cas_my_url))
-        else:
-            url = "%s?service=%s&ticket=%s" % (self.cas_check_url,
-                                               self.cas_my_url,
-                                               self.ticket)
-            data = urllib.urlopen(url).read()
-            if data.startswith('yes') or data.startswith('no'):
-                data = data.split('\n')
-                if data[0] == 'yes':
-                    if ':' in data[1]:  # for Compatibility with Custom CAS
-                        items = data[1].split(':')
-                        a = items[0]
-                        b = len(items) > 1 and items[1] or a
-                        c = len(items) > 2 and items[2] or b
-                    else:
-                        a = b = c = data[1]
-                    return dict(user=a, email=b, username=c)
-                return None
-            import xml.dom.minidom as dom
-            import xml.parsers.expat as expat
-            try:
-                dxml = dom.parseString(data)
-                envelop = dxml.getElementsByTagName(
-                    "cas:authenticationSuccess")
-                if len(envelop) > 0:
-                    res = dict()
-                    for x in envelop[0].childNodes:
-                        if x.nodeName.startswith('cas:') and len(x.childNodes):
-                            key = x.nodeName[4:].encode('utf8')
-                            value = x.childNodes[0].nodeValue.encode('utf8')
-                            if not key in res:
-                                res[key] = value
-                            else:
-                                if not isinstance(res[key], list):
-                                    res[key] = [res[key]]
-                                res[key].append(value)
-                    return res
-            except expat.ExpatError:
-                pass
-            return None  # fallback
-
-    def _CAS_logout(self):
-        """
-        exposed CAS.logout()
-        redirects to the CAS logout page
-        """
-        import urllib
-        redirect("%s?service=%s" % (self.cas_logout_url, self.cas_my_url))

+ 0 - 130
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/dropbox_account.py

@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-"""
-Dropbox Authentication for web2py
-Developed by Massimo Di Pierro (2012)
-Same License as Web2py License
-"""
-
-# mind here session is dropbox session, not current.session
-
-import os
-import re
-import urllib
-from dropbox import client, rest, session
-from gluon import *
-from gluon.tools import fetch
-from gluon.storage import Storage
-import gluon.contrib.simplejson as json
-
-
-class DropboxAccount(object):
-
-    """
-    from gluon.contrib.login_methods.dropbox_account import DropboxAccount
-    auth.settings.actions_disabled=['register','change_password',
-        'request_reset_password']
-    auth.settings.login_form = DropboxAccount(request,
-              key="...",
-              secret="...",
-              access_type="...",
-              login_url = "http://localhost:8000/%s/default/user/login" % request.application)
-    when logged in
-    client = auth.settings.login_form.client
-    """
-
-    def __init__(self,
-                 request,
-                 key="",
-                 secret="",
-                 access_type="app_folder",
-                 login_url="",
-                 on_login_failure=None,
-                 ):
-
-        self.request = request
-        self.key = key
-        self.secret = secret
-        self.access_type = access_type
-        self.login_url = login_url
-        self.on_login_failure = on_login_failure
-        self.sess = session.DropboxSession(
-            self.key, self.secret, self.access_type)
-
-    def get_token(self):
-        if not current.session.dropbox_access_token:            
-            request_token = current.session.dropbox_request_token
-            self.sess.set_request_token(request_token[0], request_token[1])
-            access_token = self.sess.obtain_access_token(self.sess.token)
-            current.session.dropbox_access_token = \
-                (access_token.key, access_token.secret)
-        else:
-            access_token = current.session.dropbox_access_token
-            self.sess.set_token(access_token[0], access_token[1])
-
-    def get_user(self):
-        if not current.session.dropbox_request_token:
-            return None
-        self.get_token()
-        user = Storage()
-        self.client = client.DropboxClient(self.sess)
-        data = self.client.account_info()
-        display_name = data.get('display_name', '').split(' ', 1)
-        user = dict(email=data.get('email', None),
-                    first_name=display_name[0],
-                    last_name=display_name[-1],
-                    registration_id=data.get('uid', None))
-        if not user['registration_id'] and self.on_login_failure:
-            redirect(self.on_login_failure)
-        return user
-
-    def login_form(self):
-
-        request_token = self.sess.obtain_request_token()
-        current.session.dropbox_request_token =  \
-            (request_token.key, request_token.secret)
-        dropbox_url = self.sess.build_authorize_url(request_token,
-                                                    self.login_url)
-        redirect(dropbox_url)
-        form = IFRAME(_src=dropbox_url,
-                      _scrolling="no",
-                      _frameborder="no",
-                      _style="width:400px;height:240px;")
-        return form
-
-    def logout_url(self, next="/"):
-        self.sess.unlink()
-        current.session.auth = None
-        return next
-
-    def get_client(self):
-        self.get_token()
-        self.client = client.DropboxClient(self.sess)
-
-    def put(self, filename, file):
-        if not hasattr(self,'client'): self.get_client()
-        return self.client.put_file(filename, file)['bytes']
-
-    def get(self, filename):
-        if not hasattr(self,'client'): self.get_client()
-        return self.client.get_file(filename)
-
-    def dir(self, path):
-        if not hasattr(self,'client'): self.get_client()
-        return self.client.metadata(path)
-
-
-def use_dropbox(auth, filename='private/dropbox.key', **kwargs):
-    path = os.path.join(current.request.folder, filename)
-    if os.path.exists(path):
-        request = current.request
-        key, secret, access_type = open(path, 'r').read().strip().split(':')
-        host = current.request.env.http_host
-        login_url = "http://%s/%s/default/user/login" % \
-            (host, request.application)
-        auth.settings.actions_disabled = \
-            ['register', 'change_password', 'request_reset_password']
-        auth.settings.login_form = DropboxAccount(
-            request, key=key, secret=secret, access_type=access_type,
-            login_url=login_url, **kwargs)

+ 0 - 46
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/email_auth.py

@@ -1,46 +0,0 @@
-import smtplib
-import logging
-
-
-def email_auth(server="smtp.gmail.com:587",
-               domain="@gmail.com",
-               tls_mode=None):
-    """
-    to use email_login:
-    from gluon.contrib.login_methods.email_auth import email_auth
-    auth.settings.login_methods.append(email_auth("smtp.gmail.com:587",
-                                                  "@gmail.com"))
-    """
-
-    def email_auth_aux(email,
-                       password,
-                       server=server,
-                       domain=domain,
-                       tls_mode=tls_mode):
-        if domain:
-            if not isinstance(domain, (list, tuple)):
-                domain = [str(domain)]
-            if not [d for d in domain if email[-len(d):] == d]:
-                return False
-        (host, port) = server.split(':')
-        if tls_mode is None:  # then auto detect
-            tls_mode = port == '587'
-        try:
-            server = None
-            server = smtplib.SMTP(host, port)
-            server.ehlo()
-            if tls_mode:
-                server.starttls()
-                server.ehlo()
-            server.login(email, password)
-            server.quit()
-            return True
-        except:
-            logging.exception('email_auth() failed')
-            if server:
-                try:
-                    server.quit()
-                except:  # server might already close connection after error
-                    pass
-            return False
-    return email_auth_aux

+ 0 - 105
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/extended_login_form.py

@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-"""
-ExtendedLoginForm is used to extend normal login form in web2py with one more login method.
-So user can choose the built-in login or extended login methods.
-"""
-
-from gluon import current, DIV
-
-
-class ExtendedLoginForm(object):
-    """
-    Put extended_login_form under web2py/gluon/contrib/login_methods folder.
-    Then inside your model where defines the auth:
-
-    auth = Auth(globals(),db)              # authentication/authorization
-    ...
-    auth.define_tables()                   # You might like to put the code after auth.define_tables
-    ...                                    # if the alt_login_form deals with tables of auth.
-
-    alt_login_form = RPXAccount(request,
-                                api_key="...",
-                                domain="...",
-                                url = "http://localhost:8000/%s/default/user/login" % request.application)
-    extended_login_form = ExtendedLoginForm(
-        auth, alt_login_form, signals=['token'])
-
-    auth.settings.login_form = extended_login_form
-
-    Note:
-        Since rpx_account doesn't create the password for the user, you
-        might need to provide a way for user to create password to do
-        normal login.
-
-    """
-
-    def __init__(self,
-                 auth,
-                 alt_login_form,
-                 signals=[],
-                 login_arg='login'
-                 ):
-        self.auth = auth
-        self.alt_login_form = alt_login_form
-        self.signals = signals
-        self.login_arg = login_arg
-
-    def get_user(self):
-        """
-        Delegate the get_user to alt_login_form.get_user.
-        """
-        if hasattr(self.alt_login_form, 'get_user'):
-            return self.alt_login_form.get_user()
-        return None  # let gluon.tools.Auth.get_or_create_user do the rest
-
-    def login_url(self, next):
-        """
-        Optional implement for alt_login_form.
-
-        In normal case, this should be replaced by get_user, and never get called.
-        """
-        if hasattr(self.alt_login_form, 'login_url'):
-            return self.alt_login_form.login_url(next)
-        return self.auth.settings.login_url
-
-    def logout_url(self, next):
-        """
-        Optional implement for alt_login_form.
-
-        Called if bool(alt_login_form.get_user) is True.
-
-        If alt_login_form implemented logout_url function, it will return that function call.
-        """
-        if hasattr(self.alt_login_form, 'logout_url'):
-            return self.alt_login_form.logout_url(next)
-        return next
-
-    def login_form(self):
-        """
-        Combine the auth() form with alt_login_form.
-
-        If signals are set and a parameter in request matches any signals,
-        it will return the call of alt_login_form.login_form instead.
-        So alt_login_form can handle some particular situations, for example,
-        multiple steps of OpenID login inside alt_login_form.login_form.
-
-        Otherwise it will render the normal login form combined with
-        alt_login_form.login_form.
-        """
-
-        request = current.request
-        args = request.args
-
-        if (self.signals and
-            any([True for signal in self.signals if signal in request.vars])
-            ):
-            return self.alt_login_form.login_form()
-
-        self.auth.settings.login_form = self.auth
-        form = DIV(self.auth())
-        self.auth.settings.login_form = self
-
-        form.components.append(self.alt_login_form.login_form())
-        return form

+ 0 - 41
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/gae_google_account.py

@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-This file is part of web2py Web Framework (Copyrighted, 2007-2009).
-Developed by Massimo Di Pierro <[email protected]>.
-License: LGPL v3
-
-Thanks to Hans Donner <[email protected]> for GaeGoogleAccount.
-"""
-
-from google.appengine.api import users
-
-
-class GaeGoogleAccount(object):
-    """
-    Login will be done via Google's Appengine login object, instead of web2py's
-    login form.
-
-    Include in your model (eg db.py)::
-
-        from gluon.contrib.login_methods.gae_google_account import \
-            GaeGoogleAccount
-        auth.settings.login_form=GaeGoogleAccount()
-
-    """
-
-    def login_url(self, next="/"):
-        return users.create_login_url(next)
-
-    def logout_url(self, next="/"):
-        return users.create_logout_url(next)
-
-    def get_user(self):
-        user = users.get_current_user()
-        if user:
-            return dict(nickname = user.nickname(),
-                        email = user.email(),
-                        registration_id = user.user_id(),
-                        user_id = user.user_id(),
-                        source = "google account")

+ 0 - 138
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/janrain_account.py

@@ -1,138 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-"""
-   RPX Authentication for web2py
-   Developed by Nathan Freeze (Copyright © 2009)
-   Email <[email protected]>
-   Modified by Massimo Di Pierro
-
-   This file contains code to allow using RPXNow.com (now Jainrain.com)
-   services with web2py
-"""
-
-import os
-import re
-import urllib
-from gluon import *
-from gluon.tools import fetch
-from gluon.storage import Storage
-import gluon.contrib.simplejson as json
-
-
-class RPXAccount(object):
-
-    """
-    from gluon.contrib.login_methods.rpx_account import RPXAccount
-    auth.settings.actions_disabled=['register','change_password',
-        'request_reset_password']
-    auth.settings.login_form = RPXAccount(request,
-              api_key="...",
-              domain="...",
-              url = "http://localhost:8000/%s/default/user/login" % request.application)
-    """
-
-    def __init__(self,
-                 request,
-                 api_key="",
-                 domain="",
-                 url="",
-                 embed=True,
-                 auth_url="https://rpxnow.com/api/v2/auth_info",
-                 language="en",
-                 prompt='rpx',
-                 on_login_failure=None,
-                 ):
-
-        self.request = request
-        self.api_key = api_key
-        self.embed = embed
-        self.auth_url = auth_url
-        self.domain = domain
-        self.token_url = url
-        self.language = language
-        self.profile = None
-        self.prompt = prompt
-        self.on_login_failure = on_login_failure
-        self.mappings = Storage()
-
-        dn = {'givenName': '', 'familyName': ''}
-        self.mappings.Facebook = lambda profile, dn=dn:\
-            dict(registration_id=profile.get("identifier", ""),
-                 username=profile.get("preferredUsername", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("name", dn).get("givenName", ""),
-                 last_name=profile.get("name", dn).get("familyName", ""))
-        self.mappings.Google = lambda profile, dn=dn:\
-            dict(registration_id=profile.get("identifier", ""),
-                 username=profile.get("preferredUsername", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("name", dn).get("givenName", ""),
-                 last_name=profile.get("name", dn).get("familyName", ""))
-        self.mappings.default = lambda profile:\
-            dict(registration_id=profile.get("identifier", ""),
-                 username=profile.get("preferredUsername", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("preferredUsername", ""),
-                 last_name='')
-
-    def get_user(self):
-        request = self.request
-        if request.vars.token:
-            user = Storage()
-            data = urllib.urlencode(
-                dict(apiKey=self.api_key, token=request.vars.token))
-            auth_info_json = fetch(self.auth_url + '?' + data)
-            auth_info = json.loads(auth_info_json)
-
-            if auth_info['stat'] == 'ok':
-                self.profile = auth_info['profile']
-                provider = re.sub('[^\w\-]', '', self.profile['providerName'])
-                user = self.mappings.get(
-                    provider, self.mappings.default)(self.profile)
-                return user
-            elif self.on_login_failure:
-                redirect(self.on_login_failure)
-        return None
-
-    def login_form(self):
-        request = self.request
-        args = request.args
-        rpxform = """
-        <script type="text/javascript">
-        (function() {
-            if (typeof window.janrain !== 'object') window.janrain = {};
-            if (typeof window.janrain.settings !== 'object') window.janrain.settings = {};
-            janrain.settings.tokenUrl = '%s';
-            function isReady() { janrain.ready = true; };
-            if (document.addEventListener) {
-                document.addEventListener("DOMContentLoaded", isReady, false);
-            } else {
-                window.attachEvent('onload', isReady);
-            }
-            var e = document.createElement('script');
-            e.type = 'text/javascript';
-            e.id = 'janrainAuthWidget';
-            if (document.location.protocol === 'https:') {
-                e.src = 'https://rpxnow.com/js/lib/%s/engage.js';
-            } else {
-                e.src = 'http://widget-cdn.rpxnow.com/js/lib/%s/engage.js';
-            }
-            var s = document.getElementsByTagName('script')[0];
-            s.parentNode.insertBefore(e, s);
-        })();
-        </script>
-        <div id="janrainEngageEmbed"></div>""" % (self.token_url, self.domain, self.domain)
-        return XML(rpxform)
-
-def use_janrain(auth, filename='private/janrain.key', **kwargs):
-    path = os.path.join(current.request.folder, filename)
-    if os.path.exists(path):
-        request = current.request
-        domain, key = open(path, 'r').read().strip().split(':')
-        host = current.request.env.http_host
-        url = URL('default', 'user', args='login', scheme=True)
-        auth.settings.actions_disabled = \
-            ['register', 'change_password', 'request_reset_password']
-        auth.settings.login_form = RPXAccount(
-            request, api_key=key, domain=domain, url=url, **kwargs)

+ 0 - 688
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/ldap_auth.py

@@ -1,688 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# last tinkered with by korylprince at gmail.com on 2012-07-12
-#
-
-import sys
-import logging
-try:
-    import ldap
-    import ldap.filter
-    ldap.set_option(ldap.OPT_REFERRALS, 0)
-except Exception, e:
-    logging.error('missing ldap, try "easy_install python-ldap"')
-    raise e
-
-
-def ldap_auth(server='ldap', port=None,
-              base_dn='ou=users,dc=domain,dc=com',
-              mode='uid', secure=False, 
-              cert_path=None, cert_file=None,              
-              cacert_path=None, cacert_file=None, key_file=None,
-              bind_dn=None, bind_pw=None, filterstr='objectClass=*',
-              username_attrib='uid',
-              custom_scope='subtree',
-              allowed_groups=None,
-              manage_user=False,
-              user_firstname_attrib='cn:1',
-              user_lastname_attrib='cn:2',
-              user_mail_attrib='mail',
-              manage_groups=False,
-              db=None,
-              group_dn=None,
-              group_name_attrib='cn',
-              group_member_attrib='memberUid',
-              group_filterstr='objectClass=*',
-              logging_level='error'):
-
-    """
-    to use ldap login with MS Active Directory:
-
-        from gluon.contrib.login_methods.ldap_auth import ldap_auth
-        auth.settings.login_methods.append(ldap_auth(
-            mode='ad', server='my.domain.controller',
-            base_dn='ou=Users,dc=domain,dc=com'))
-
-    to use ldap login with Notes Domino:
-
-        auth.settings.login_methods.append(ldap_auth(
-            mode='domino',server='my.domino.server'))
-
-    to use ldap login with OpenLDAP:
-
-        auth.settings.login_methods.append(ldap_auth(
-            server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com'))
-
-    to use ldap login with OpenLDAP and subtree search and (optionally)
-    multiple DNs:
-
-        auth.settings.login_methods.append(ldap_auth(
-            mode='uid_r', server='my.ldap.server',
-            base_dn=['ou=Users,dc=domain,dc=com','ou=Staff,dc=domain,dc=com']))
-
-    or (if using CN):
-
-        auth.settings.login_methods.append(ldap_auth(
-            mode='cn', server='my.ldap.server',
-            base_dn='ou=Users,dc=domain,dc=com'))
-
-    or you can full customize the search for user:
-
-        auth.settings.login_methods.append(ldap_auth(
-            mode='custom', server='my.ldap.server',
-            base_dn='ou=Users,dc=domain,dc=com',
-            username_attrib='uid',
-            custom_scope='subtree'))
-
-    the custom_scope can be: base, onelevel, subtree.
-
-    If using secure ldaps:// pass secure=True and cert_path="..."
-    If ldap is using GnuTLS then you need cert_file="..." instead cert_path
-    because cert_path isn't implemented in GnuTLS :(
-
-    If you need to bind to the directory with an admin account in order to
-    search it then specify bind_dn & bind_pw to use for this.
-    - currently only implemented for Active Directory
-
-    If you need to restrict the set of allowed users (e.g. to members of a
-    department) then specify an rfc4515 search filter string.
-    - currently only implemented for mode in ['ad', 'company', 'uid_r']
-
-    You can manage user attributes first name, last name, email from ldap:
-        auth.settings.login_methods.append(ldap_auth(...as usual...,
-            manage_user=True,
-            user_firstname_attrib='cn:1',
-            user_lastname_attrib='cn:2',
-            user_mail_attrib='mail'
-           ))
-
-    Where:
-    manage_user - let web2py handle user data from ldap
-    user_firstname_attrib - the attribute containing the user's first name
-                            optionally you can specify parts.
-                            Example: cn: "John Smith" - 'cn:1'='John'
-    user_lastname_attrib - the attribute containing the user's last name
-                            optionally you can specify parts.
-                            Example: cn: "John Smith" - 'cn:2'='Smith'
-    user_mail_attrib - the attribute containing the user's email address
-
-
-    If you need group control from ldap to web2py app's database feel free
-    to set:
-
-        auth.settings.login_methods.append(ldap_auth(...as usual...,
-            manage_groups=True,
-            db=db,
-            group_dn='ou=Groups,dc=domain,dc=com',
-            group_name_attrib='cn',
-            group_member_attrib='memberUid',
-            group_filterstr='objectClass=*'
-           ))
-
-        Where:
-        manage_group - let web2py handle the groups from ldap
-        db - is the database object (need to have auth_user, auth_group,
-            auth_membership)
-        group_dn - the ldap branch of the groups
-        group_name_attrib - the attribute where the group name is stored
-        group_member_attrib - the attribute containing the group members name
-        group_filterstr - as the filterstr but for group select
-
-    You can restrict login access to specific groups if you specify:
-
-        auth.settings.login_methods.append(ldap_auth(...as usual...,
-            allowed_groups=[...],
-            group_dn='ou=Groups,dc=domain,dc=com',
-            group_name_attrib='cn',
-            group_member_attrib='memberUid',#use 'member' for Active Directory
-            group_filterstr='objectClass=*'
-           ))
-
-        Where:
-        allowed_groups - a list with allowed ldap group names
-        group_dn - the ldap branch of the groups
-        group_name_attrib - the attribute where the group name is stored
-        group_member_attrib - the attribute containing the group members name
-        group_filterstr - as the filterstr but for group select
-
-    If using Active Directory you must specify bind_dn and bind_pw for
-    allowed_groups unless anonymous bind works.
-
-    You can set the logging level with the "logging_level" parameter, default
-    is "error" and can be set to error, warning, info, debug.
-    """
-    logger = logging.getLogger('web2py.auth.ldap_auth')
-    if logging_level == 'error':
-        logger.setLevel(logging.ERROR)
-    elif logging_level == 'warning':
-        logger.setLevel(logging.WARNING)
-    elif logging_level == 'info':
-        logger.setLevel(logging.INFO)
-    elif logging_level == 'debug':
-        logger.setLevel(logging.DEBUG)
-
-    def ldap_auth_aux(username,
-                      password,
-                      ldap_server=server,
-                      ldap_port=port,
-                      ldap_basedn=base_dn,
-                      ldap_mode=mode,
-                      ldap_binddn=bind_dn,
-                      ldap_bindpw=bind_pw,
-                      secure=secure,
-                      cert_path=cert_path,
-                      cert_file=cert_file,
-                      cacert_file=cacert_file,
-                      key_file=key_file,
-                      filterstr=filterstr,
-                      username_attrib=username_attrib,
-                      custom_scope=custom_scope,
-                      manage_user=manage_user,
-                      user_firstname_attrib=user_firstname_attrib,
-                      user_lastname_attrib=user_lastname_attrib,
-                      user_mail_attrib=user_mail_attrib,
-                      manage_groups=manage_groups,
-                      allowed_groups=allowed_groups,
-                      db=db):
-        if password == '':  # http://tools.ietf.org/html/rfc4513#section-5.1.2
-            logger.warning('blank password not allowed')
-            return False
-        logger.debug('mode: [%s] manage_user: [%s] custom_scope: [%s]'
-                     ' manage_groups: [%s]' % (str(mode), str(manage_user),
-                     str(custom_scope), str(manage_groups)))
-        if manage_user:
-            if user_firstname_attrib.count(':') > 0:
-                (user_firstname_attrib,
-                 user_firstname_part) = user_firstname_attrib.split(':', 1)
-                user_firstname_part = (int(user_firstname_part) - 1)
-            else:
-                user_firstname_part = None
-            if user_lastname_attrib.count(':') > 0:
-                (user_lastname_attrib,
-                 user_lastname_part) = user_lastname_attrib.split(':', 1)
-                user_lastname_part = (int(user_lastname_part) - 1)
-            else:
-                user_lastname_part = None
-            user_firstname_attrib = ldap.filter.escape_filter_chars(
-                user_firstname_attrib)
-            user_lastname_attrib = ldap.filter.escape_filter_chars(
-                user_lastname_attrib)
-            user_mail_attrib = ldap.filter.escape_filter_chars(
-                user_mail_attrib)
-        try:
-            if allowed_groups:
-                if not is_user_in_allowed_groups(username, password):
-                    return False
-            con = init_ldap()
-            if ldap_mode == 'ad':
-                # Microsoft Active Directory
-                if '@' not in username:
-                    domain = []
-                    for x in ldap_basedn.split(','):
-                        if "DC=" in x.upper():
-                            domain.append(x.split('=')[-1])
-                    username = "%s@%s" % (username, '.'.join(domain))
-                username_bare = username.split("@")[0]
-                con.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
-                # In cases where ForestDnsZones and DomainDnsZones are found,
-                # result will look like the following:
-                # ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones,
-                #    DC=domain,DC=com']
-                if ldap_binddn:
-                    # need to search directory with an admin account 1st
-                    con.simple_bind_s(ldap_binddn, ldap_bindpw)
-                else:
-                    # credentials should be in the form of [email protected]
-                    con.simple_bind_s(username, password)
-                # this will throw an index error if the account is not found
-                # in the ldap_basedn
-                requested_attrs = ['sAMAccountName']
-                if manage_user:
-                    requested_attrs.extend([user_firstname_attrib,
-                                           user_lastname_attrib,
-                                           user_mail_attrib])
-                result = con.search_ext_s(
-                    ldap_basedn, ldap.SCOPE_SUBTREE,
-                    "(&(sAMAccountName=%s)(%s))" % (
-                                ldap.filter.escape_filter_chars(username_bare),
-                                filterstr),
-                    requested_attrs)[0][1]
-                if not isinstance(result, dict):
-                    # result should be a dict in the form
-                    # {'sAMAccountName': [username_bare]}
-                    logger.warning('User [%s] not found!' % username)
-                    return False
-                if ldap_binddn:
-                    # We know the user exists & is in the correct OU
-                    # so now we just check the password
-                    con.simple_bind_s(username, password)
-                username = username_bare
-
-            if ldap_mode == 'domino':
-                # Notes Domino
-                if "@" in username:
-                    username = username.split("@")[0]
-                con.simple_bind_s(username, password)
-                if manage_user:
-                    # TODO: sorry I have no clue how to query attrs in domino
-                    result = {user_firstname_attrib: username,
-                              user_lastname_attrib: None,
-                              user_mail_attrib: None}
-
-            if ldap_mode == 'cn':
-                # OpenLDAP (CN)
-                if ldap_binddn and ldap_bindpw:
-                    con.simple_bind_s(ldap_binddn, ldap_bindpw)
-                dn = "cn=" + username + "," + ldap_basedn
-                con.simple_bind_s(dn, password)
-                if manage_user:
-                    result = con.search_s(dn, ldap.SCOPE_BASE,
-                                          "(objectClass=*)",
-                                          [user_firstname_attrib,
-                                          user_lastname_attrib,
-                                          user_mail_attrib])[0][1]
-
-            if ldap_mode == 'uid':
-                # OpenLDAP (UID)
-                if ldap_binddn and ldap_bindpw:
-                    con.simple_bind_s(ldap_binddn, ldap_bindpw)
-                    dn = "uid=" + username + "," + ldap_basedn
-                    dn = con.search_s(ldap_basedn, ldap.SCOPE_SUBTREE, "(uid=%s)"%username, [''])[0][0]
-                else:
-                    dn = "uid=" + username + "," + ldap_basedn
-                con.simple_bind_s(dn, password)
-                if manage_user:
-                    result = con.search_s(dn, ldap.SCOPE_BASE,
-                                          "(objectClass=*)",
-                                          [user_firstname_attrib,
-                                          user_lastname_attrib,
-                                          user_mail_attrib])[0][1]
-
-            if ldap_mode == 'company':
-                # no DNs or password needed to search directory
-                dn = ""
-                pw = ""
-                # bind anonymously
-                con.simple_bind_s(dn, pw)
-                # search by e-mail address
-                filter = '(&(mail=%s)(%s))' % (
-                                ldap.filter.escape_filter_chars(username),
-                                filterstr)
-                # find the uid
-                attrs = ['uid']
-                if manage_user:
-                    attrs.extend([user_firstname_attrib,
-                                  user_lastname_attrib,
-                                  user_mail_attrib])
-                # perform the actual search
-                company_search_result = con.search_s(ldap_basedn,
-                                                     ldap.SCOPE_SUBTREE,
-                                                     filter, attrs)
-                dn = company_search_result[0][0]
-                result = company_search_result[0][1]
-                # perform the real authentication test
-                con.simple_bind_s(dn, password)
-
-            if ldap_mode == 'uid_r':
-                # OpenLDAP (UID) with subtree search and multiple DNs
-                if isinstance(ldap_basedn, list):
-                    basedns = ldap_basedn
-                else:
-                    basedns = [ldap_basedn]
-                filter = '(&(uid=%s)(%s))' % (
-                    ldap.filter.escape_filter_chars(username), filterstr)
-                found = False
-                for basedn in basedns:
-                    try:
-                        result = con.search_s(basedn, ldap.SCOPE_SUBTREE,
-                                              filter)
-                        if result:
-                            user_dn = result[0][0]
-                            # Check the password
-                            con.simple_bind_s(user_dn, password)
-                            found = True
-                            break
-                    except ldap.LDAPError, detail:
-                        (exc_type, exc_value) = sys.exc_info()[:2]
-                        logger.warning(
-                        "ldap_auth: searching %s for %s resulted in %s: %s\n" %
-                                       (basedn, filter, exc_type, exc_value)
-                                       )
-                if not found:
-                    logger.warning('User [%s] not found!' % username)
-                    return False
-                result = result[0][1]
-            if ldap_mode == 'custom':
-                # OpenLDAP (username_attrs) with subtree search and
-                # multiple DNs
-                if isinstance(ldap_basedn, list):
-                    basedns = ldap_basedn
-                else:
-                    basedns = [ldap_basedn]
-                filter = '(&(%s=%s)(%s))' % (username_attrib,
-                                             ldap.filter.escape_filter_chars(
-                                                 username),
-                                             filterstr)
-                if custom_scope == 'subtree':
-                    ldap_scope = ldap.SCOPE_SUBTREE
-                elif custom_scope == 'base':
-                    ldap_scope = ldap.SCOPE_BASE
-                elif custom_scope == 'onelevel':
-                    ldap_scope = ldap.SCOPE_ONELEVEL
-                found = False
-                for basedn in basedns:
-                    try:
-                        result = con.search_s(basedn, ldap_scope, filter)
-                        if result:
-                            user_dn = result[0][0]
-                            # Check the password
-                            con.simple_bind_s(user_dn, password)
-                            found = True
-                            break
-                    except ldap.LDAPError, detail:
-                        (exc_type, exc_value) = sys.exc_info()[:2]
-                        logger.warning(
-                        "ldap_auth: searching %s for %s resulted in %s: %s\n" %
-                                       (basedn, filter, exc_type, exc_value)
-                                       )
-                if not found:
-                    logger.warning('User [%s] not found!' % username)
-                    return False
-                result = result[0][1]
-            if manage_user:
-                logger.info('[%s] Manage user data' % str(username))
-                try:
-                    if user_firstname_part is not None:
-                        store_user_firstname = result[user_firstname_attrib][
-                            0].split(' ', 1)[user_firstname_part]
-                    else:
-                        store_user_firstname = result[user_firstname_attrib][0]
-                except KeyError, e:
-                    store_user_firstname = None
-                try:
-                    if user_lastname_part is not None:
-                        store_user_lastname = result[user_lastname_attrib][
-                            0].split(' ', 1)[user_lastname_part]
-                    else:
-                        store_user_lastname = result[user_lastname_attrib][0]
-                except KeyError, e:
-                    store_user_lastname = None
-                try:
-                    store_user_mail = result[user_mail_attrib][0]
-                except KeyError, e:
-                    store_user_mail = None
-                try:
-                    #
-                    # user as username
-                    # #################
-                    user_in_db = db(db.auth_user.username == username)
-                    if user_in_db.count() > 0:
-                        user_in_db.update(first_name=store_user_firstname,
-                                          last_name=store_user_lastname,
-                                          email=store_user_mail)
-                    else:
-                        db.auth_user.insert(first_name=store_user_firstname,
-                                            last_name=store_user_lastname,
-                                            email=store_user_mail,
-                                            username=username)
-                except:
-                    #
-                    # user as email
-                    # ##############
-                    user_in_db = db(db.auth_user.email == username)
-                    if user_in_db.count() > 0:
-                        user_in_db.update(first_name=store_user_firstname,
-                                          last_name=store_user_lastname)
-                    else:
-                        db.auth_user.insert(first_name=store_user_firstname,
-                                            last_name=store_user_lastname,
-                                            email=username)
-            con.unbind()
-
-            if manage_groups:
-                if not do_manage_groups(username, password):
-                    return False
-            return True
-        except ldap.INVALID_CREDENTIALS, e:
-            return False
-        except ldap.LDAPError, e:
-            import traceback
-            logger.warning('[%s] Error in ldap processing' % str(username))
-            logger.debug(traceback.format_exc())
-            return False
-        except IndexError, ex:  # for AD membership test
-            import traceback
-            logger.warning('[%s] Ldap result indexing error' % str(username))
-            logger.debug(traceback.format_exc())
-            return False
-
-    def is_user_in_allowed_groups(username,
-                                  password=None,
-                                  allowed_groups=allowed_groups):
-        """
-        Figure out if the username is a member of an allowed group
-        in ldap or not
-        """
-        #
-        # Get all group name where the user is in actually in ldap
-        # #########################################################
-        ldap_groups_of_the_user = get_user_groups_from_ldap(username, password)
-
-        # search for allowed group names
-        if type(allowed_groups) != type(list()):
-            allowed_groups = [allowed_groups]
-        for group in allowed_groups:
-            if ldap_groups_of_the_user.count(group) > 0:
-                # Match
-                return True
-        # No match
-        return False
-
-    def do_manage_groups(username,
-                         password=None,
-                         db=db):
-        """
-        Manage user groups
-
-        Get all user's group from ldap and refresh the already stored
-        ones in web2py's application database or create new groups
-        according to ldap.
-        """
-        logger.info('[%s] Manage user groups' % str(username))
-        try:
-            #
-            # Get all group name where the user is in actually in ldap
-            # #########################################################
-            ldap_groups_of_the_user = get_user_groups_from_ldap(
-                username, password)
-
-            #
-            # Get all group name where the user is in actually in local db
-            # #############################################################
-            try:
-                db_user_id = db(db.auth_user.username == username).select(
-                    db.auth_user.id).first().id
-            except:
-                try:
-                    db_user_id = db(db.auth_user.email == username).select(
-                        db.auth_user.id).first().id
-                except AttributeError, e:
-                    #
-                    # There is no user in local db
-                    # We create one
-                    # ##############################
-                    try:
-                        db_user_id = db.auth_user.insert(username=username,
-                                                         first_name=username)
-                    except AttributeError, e:
-                        db_user_id = db.auth_user.insert(email=username,
-                                                         first_name=username)
-            if not db_user_id:
-                logging.error(
-                    'There is no username or email for %s!' % username)
-                raise
-            db_group_search = db((db.auth_membership.user_id == db_user_id) &
-                            (db.auth_user.id == db.auth_membership.user_id) &
-                            (db.auth_group.id == db.auth_membership.group_id))
-            db_groups_of_the_user = list()
-            db_group_id = dict()
-
-            if db_group_search.count() > 0:
-                for group in db_group_search.select(db.auth_group.id,
-                                                    db.auth_group.role,
-                                                    distinct=True):
-                    db_group_id[group.role] = group.id
-                    db_groups_of_the_user.append(group.role)
-            logging.debug('db groups of user %s: %s' %
-                          (username, str(db_groups_of_the_user)))
-
-            #
-            # Delete user membership from groups where user is not anymore
-            # #############################################################
-            for group_to_del in db_groups_of_the_user:
-                if ldap_groups_of_the_user.count(group_to_del) == 0:
-                    db((db.auth_membership.user_id == db_user_id) &
-                       (db.auth_membership.group_id == \
-                         db_group_id[group_to_del])).delete()
-
-            #
-            # Create user membership in groups where user is not in already
-            # ##############################################################
-            for group_to_add in ldap_groups_of_the_user:
-                if db_groups_of_the_user.count(group_to_add) == 0:
-                    if db(db.auth_group.role == group_to_add).count() == 0:
-                        gid = db.auth_group.insert(role=group_to_add,
-                                            description='Generated from LDAP')
-                    else:
-                        gid = db(db.auth_group.role == group_to_add).select(
-                            db.auth_group.id).first().id
-                    db.auth_membership.insert(user_id=db_user_id,
-                                              group_id=gid)
-        except:
-            logger.warning("[%s] Groups are not managed successfully!" %
-                           str(username))
-            import traceback
-            logger.debug(traceback.format_exc())
-            return False
-        return True
-
-    def init_ldap(ldap_server=server,
-                  ldap_port=port,
-                  ldap_basedn=base_dn,
-                  ldap_mode=mode,
-                  secure=secure,
-                  cert_path=cert_path,
-                  cert_file=cert_file,
-                  cacert_file=cacert_file,
-                  key_file=key_file):
-        """
-        Inicialize ldap connection
-        """
-        logger.info('[%s] Initialize ldap connection' % str(ldap_server))
-        if secure:
-            if not ldap_port:
-                ldap_port = 636
-                
-            if cacert_path:
-                ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, cacert_path)
-                
-            if cacert_file:
-                ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
-                ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, cacert_file)
-            if cert_file:
-                ldap.set_option(ldap.OPT_X_TLS_CERTFILE, cert_file)
-            if key_file:
-                ldap.set_option(ldap.OPT_X_TLS_KEYFILE, key_file)
-                
-            con = ldap.initialize("ldaps://" + ldap_server + ":" + str(ldap_port))
-        else:
-            if not ldap_port:
-                ldap_port = 389
-            con = ldap.initialize(
-                "ldap://" + ldap_server + ":" + str(ldap_port))
-        return con
-
-    def get_user_groups_from_ldap(username,
-                                  password=None,
-                                  base_dn=base_dn,
-                                  ldap_binddn=bind_dn,
-                                  ldap_bindpw=bind_pw,
-                                  group_dn=group_dn,
-                                  group_name_attrib=group_name_attrib,
-                                  group_member_attrib=group_member_attrib,
-                                  group_filterstr=group_filterstr,
-                                  ldap_mode=mode):
-        """
-        Get all group names from ldap where the user is in
-        """
-        logger.info('[%s] Get user groups from ldap' % str(username))
-        #
-        # Get all group name where the user is in actually in ldap
-        # #########################################################
-        # Initialize ldap
-        if not group_dn:
-            group_dn = base_dn
-        con = init_ldap()
-        logger.debug('Username init: [%s]' % username)
-        if ldap_mode == 'ad':
-            #
-            # Get the AD username
-            # ####################
-            if '@' not in username:
-                domain = []
-                for x in base_dn.split(','):
-                    if "DC=" in x.upper():
-                        domain.append(x.split('=')[-1])
-                username = "%s@%s" % (username, '.'.join(domain))
-            username_bare = username.split("@")[0]
-            con.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
-            # In cases where ForestDnsZones and DomainDnsZones are found,
-            # result will look like the following:
-            # ['ldap://ForestDnsZones.domain.com/DC=ForestDnsZones,
-            #     DC=domain,DC=com']
-            if ldap_binddn:
-                # need to search directory with an admin account 1st
-                con.simple_bind_s(ldap_binddn, ldap_bindpw)
-                logger.debug('Ldap bind connect...')
-            else:
-                # credentials should be in the form of [email protected]
-                con.simple_bind_s(username, password)
-                logger.debug('Ldap username connect...')
-            # We have to use the full string
-            username = con.search_ext_s(base_dn, ldap.SCOPE_SUBTREE,
-                                        "(&(sAMAccountName=%s)(%s))" %
-                            (ldap.filter.escape_filter_chars(username_bare),
-                            filterstr), ["cn"])[0][0]
-        else:
-            if ldap_binddn:
-                # need to search directory with an bind_dn account 1st
-                con.simple_bind_s(ldap_binddn, ldap_bindpw)
-            else:
-                # bind as anonymous
-                con.simple_bind_s('', '')
-                
-        # if username is None, return empty list
-        if username is None:
-            return list()
-        # search for groups where user is in
-        filter = '(&(%s=%s)(%s))' % (ldap.filter.escape_filter_chars(
-                                                            group_member_attrib
-                                                            ),
-                                     ldap.filter.escape_filter_chars(username),
-                                     group_filterstr)
-        group_search_result = con.search_s(group_dn,
-                                           ldap.SCOPE_SUBTREE,
-                                           filter, [group_name_attrib])
-        ldap_groups_of_the_user = list()
-        for group_row in group_search_result:
-            group = group_row[1]
-            if type(group) == dict and group.has_key(group_name_attrib):
-                ldap_groups_of_the_user.extend(group[group_name_attrib])
-
-        con.unbind()
-        logger.debug('User groups: %s' % ldap_groups_of_the_user)
-        return list(ldap_groups_of_the_user)
-
-    if filterstr[0] == '(' and filterstr[-1] == ')':  # rfc4515 syntax
-        filterstr = filterstr[1:-1]  # parens added again where used
-    return ldap_auth_aux

+ 0 - 51
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/linkedin_account.py

@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-This file is part of web2py Web Framework (Copyrighted, 2007-2009).
-Developed by Massimo Di Pierro <[email protected]>.
-License: GPL v2
-
-Thanks to Hans Donner <[email protected]> for GaeGoogleAccount.
-"""
-
-from gluon.http import HTTP
-try:
-    import linkedin
-except ImportError:
-    raise HTTP(400, "linkedin module not found")
-
-
-class LinkedInAccount(object):
-    """
-    Login will be done via Google's Appengine login object, instead of web2py's
-    login form.
-
-    Include in your model (eg db.py)::
-
-        from gluon.contrib.login_methods.linkedin_account import LinkedInAccount
-        auth.settings.login_form=LinkedInAccount(request,KEY,SECRET,RETURN_URL)
-
-    """
-
-    def __init__(self, request, key, secret, return_url):
-        self.request = request
-        self.api = linkedin.LinkedIn(key, secret, return_url)
-        self.token = result = self.api.requestToken()
-
-    def login_url(self, next="/"):
-        return self.api.getAuthorizeURL(self.token)
-
-    def logout_url(self, next="/"):
-        return ''
-
-    def get_user(self):
-        result = self.request.vars.verifier and self.api.accessToken(
-            verifier=self.request.vars.verifier)
-        if result:
-            profile = self.api.GetProfile()
-            profile = self.api.GetProfile(
-                profile).public_url = "http://www.linkedin.com/in/ozgurv"
-            return dict(first_name=profile.first_name,
-                        last_name=profile.last_name,
-                        username=profile.id)

+ 0 - 97
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/loginradius_account.py

@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-"""
-   LoginRadius Authentication for web2py
-   Developed by Nathan Freeze (Copyright © 2013)
-   Email <[email protected]>
-
-   This file contains code to allow using loginradius.com
-   authentication services with web2py
-"""
-
-import os
-from gluon import *
-from gluon.storage import Storage
-from gluon.contrib.simplejson import JSONDecodeError
-from gluon.tools import fetch
-import gluon.contrib.simplejson as json
-
-
-class LoginRadiusAccount(object):
-    """
-    from gluon.contrib.login_methods.loginradius_account import LoginRadiusAccount
-    auth.settings.actions_disabled=['register','change_password',
-        'request_reset_password']
-    auth.settings.login_form = LoginRadiusAccount(request,
-              api_key="...",
-              api_secret="...",
-              url = "http://localhost:8000/%s/default/user/login" % request.application)
-    """
-
-    def __init__(self, request, api_key="", api_secret="",
-                 url="", on_login_failure=None):
-
-        self.request = request
-        self.api_key = api_key
-        self.api_secret = api_secret
-        self.url = url
-        self.auth_base_url = "https://hub.loginradius.com/UserProfile.ashx/"
-        self.profile = None
-        self.on_login_failure = on_login_failure
-        self.mappings = Storage()
-
-        def defaultmapping(profile):
-            first_name = profile.get('FirstName')
-            last_name = profile.get('LastName')
-            email = profile.get('Email', [{}])[0].get('Value')
-            reg_id = profile.get('ID', '')
-            username = profile.get('ProfileName', email)
-
-            return dict(registration_id=reg_id, username=username, email=email,
-                        first_name=first_name, last_name=last_name)
-
-        self.mappings.default = defaultmapping
-
-    def get_user(self):
-        request = self.request
-        user = None
-        if request.vars.token:
-            try:
-                auth_url = self.auth_base_url + self.api_secret + "/" + request.vars.token
-                json_data = fetch(auth_url, headers={'User-Agent': "LoginRadius - Python - SDK"})
-                self.profile = json.loads(json_data)
-                provider = self.profile['Provider']
-                mapping = self.mappings.get(provider, self.mappings['default'])
-                user = mapping(self.profile)
-            except (JSONDecodeError, KeyError):
-                pass
-            if user is None and self.on_login_failure:
-                redirect(self.on_login_failure)
-        return user
-
-    def login_form(self):
-        loginradius_url = "https://hub.loginradius.com/include/js/LoginRadius.js"
-        loginradius_lib = SCRIPT(_src=loginradius_url, _type='text/javascript')
-        container = DIV(_id="interfacecontainerdiv", _class='interfacecontainerdiv')
-        widget = SCRIPT("""var options={}; options.login=true;
-        LoginRadius_SocialLogin.util.ready(function () {
-        $ui = LoginRadius_SocialLogin.lr_login_settings;
-        $ui.interfacesize = "";$ui.apikey = "%s";
-        $ui.callback="%s"; $ui.lrinterfacecontainer ="interfacecontainerdiv";
-        LoginRadius_SocialLogin.init(options); });""" % (self.api_key, self.url))
-        form = DIV(container, loginradius_lib, widget)
-        return form
-
-
-def use_loginradius(auth, filename='private/loginradius.key', **kwargs):
-    path = os.path.join(current.request.folder, filename)
-    if os.path.exists(path):
-        request = current.request
-        domain, public_key, private_key = open(path, 'r').read().strip().split(':')
-        url = URL('default', 'user', args='login', scheme=True)
-        auth.settings.actions_disabled = \
-            ['register', 'change_password', 'request_reset_password']
-        auth.settings.login_form = LoginRadiusAccount(
-            request, api_key=public_key, api_secret=private_key,
-            url=url, **kwargs)

+ 0 - 115
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/loginza.py

@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-   Loginza.ru authentication for web2py
-   Developed by Vladimir Dronnikov (Copyright © 2011)
-   Email <[email protected]>
-"""
-
-import urllib
-from gluon.html import *
-from gluon.tools import fetch
-from gluon.storage import Storage
-import gluon.contrib.simplejson as json
-
-
-class Loginza(object):
-
-    """
-    from gluon.contrib.login_methods.loginza import Loginza
-    auth.settings.login_form = Loginza(request,
-        url = "http://localhost:8000/%s/default/user/login" % request.application)
-    """
-
-    def __init__(self,
-                 request,
-                 url="",
-                 embed=True,
-                 auth_url="http://loginza.ru/api/authinfo",
-                 language="en",
-                 prompt="loginza",
-                 on_login_failure=None,
-                 ):
-
-        self.request = request
-        self.token_url = url
-        self.embed = embed
-        self.auth_url = auth_url
-        self.language = language
-        self.prompt = prompt
-        self.profile = None
-        self.on_login_failure = on_login_failure
-        self.mappings = Storage()
-
-        # TODO: profile.photo is the URL to the picture
-        # Howto download and store it locally?
-        # FIXME: what if email is unique=True
-
-        self.mappings["http://twitter.com/"] = lambda profile:\
-            dict(registration_id=profile.get("identity", ""),
-                 username=profile.get("nickname", ""),
-                 email=profile.get("email", ""),
-                 last_name=profile.get("name", "").get("full_name", ""),
-                 #avatar = profile.get("photo",""),
-                 )
-        self.mappings["https://www.google.com/accounts/o8/ud"] = lambda profile:\
-            dict(registration_id=profile.get("identity", ""),
-                 username=profile.get("name", "").get("full_name", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("name", "").get("first_name", ""),
-                 last_name=profile.get("name", "").get("last_name", ""),
-                 #avatar = profile.get("photo",""),
-                 )
-        self.mappings["http://vkontakte.ru/"] = lambda profile:\
-            dict(registration_id=profile.get("identity", ""),
-                 username=profile.get("name", "").get("full_name", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("name", "").get("first_name", ""),
-                 last_name=profile.get("name", "").get("last_name", ""),
-                 #avatar = profile.get("photo",""),
-                 )
-        self.mappings.default = lambda profile:\
-            dict(registration_id=profile.get("identity", ""),
-                 username=profile.get("name", "").get("full_name"),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("name", "").get("first_name", ""),
-                 last_name=profile.get("name", "").get("last_name", ""),
-                 #avatar = profile.get("photo",""),
-                 )
-
-    def get_user(self):
-        request = self.request
-        if request.vars.token:
-            user = Storage()
-            data = urllib.urlencode(dict(token=request.vars.token))
-            auth_info_json = fetch(self.auth_url + '?' + data)
-            #print auth_info_json
-            auth_info = json.loads(auth_info_json)
-            if auth_info["identity"] is not None:
-                self.profile = auth_info
-                provider = self.profile["provider"]
-                user = self.mappings.get(
-                    provider, self.mappings.default)(self.profile)
-                #user["password"] = ???
-                #user["avatar"] = ???
-                return user
-            elif self.on_login_failure:
-                redirect(self.on_login_failure)
-        return None
-
-    def login_form(self):
-        request = self.request
-        args = request.args
-        LOGINZA_URL = "https://loginza.ru/api/widget?lang=%s&token_url=%s&overlay=loginza"
-        if self.embed:
-            form = IFRAME(_src=LOGINZA_URL % (self.language, self.token_url),
-                          _scrolling="no",
-                          _frameborder="no",
-                          _style="width:359px;height:300px;")
-        else:
-            form = DIV(
-                A(self.prompt, _href=LOGINZA_URL % (
-                    self.language, self.token_url), _class="loginza"),
-                SCRIPT(_src="https://s3-eu-west-1.amazonaws.com/s1.loginza.ru/js/widget.js", _type="text/javascript"))
-        return form

+ 0 - 111
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/motp_auth.py

@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-
-import time
-from hashlib import md5
-from gluon.dal import DAL
-
-
-def motp_auth(db=DAL('sqlite://storage.sqlite'),
-              time_offset=60):
-
-    """
-    motp allows you to login with a one time password(OTP) generated on a motp client,
-    motp clients are available for practically all platforms.
-    to know more about OTP visit http://en.wikipedia.org/wiki/One-time_password
-    to know more visit http://motp.sourceforge.net
-
-
-    Written by Madhukar R Pai ([email protected])
-    License : MIT or GPL v2
-
-    thanks and credits to the web2py community
-
-    to use motp_auth:
-    motp_auth.py has to be located in gluon/contrib/login_methods/ folder
-    first auth_user has to have 2 extra fields - motp_secret and motp_pin
-    for that define auth like shown below:
-
-    ## after auth = Auth(db)
-    db.define_table(
-        auth.settings.table_user_name,
-        Field('first_name', length=128, default=''),
-        Field('last_name', length=128, default=''),
-        Field('email', length=128, default='', unique=True), # required
-        Field('password', 'password', length=512,            # required
-              readable=False, label='Password'),
-        Field('motp_secret',length=512,default='',
-              label='MOTP Seceret'),
-        Field('motp_pin',length=128,default='',
-              label='MOTP PIN'),
-        Field('registration_key', length=512,                # required
-              writable=False, readable=False, default=''),
-        Field('reset_password_key', length=512,              # required
-              writable=False, readable=False, default=''),
-        Field('registration_id', length=512,                 # required
-              writable=False, readable=False, default=''))
-
-    ##validators
-    custom_auth_table = db[auth.settings.table_user_name]
-        # get the custom_auth_table
-    custom_auth_table.first_name.requires = \
-      IS_NOT_EMPTY(error_message=auth.messages.is_empty)
-    custom_auth_table.last_name.requires = \
-      IS_NOT_EMPTY(error_message=auth.messages.is_empty)
-    custom_auth_table.password.requires = CRYPT()
-    custom_auth_table.email.requires = [
-      IS_EMAIL(error_message=auth.messages.invalid_email),
-      IS_NOT_IN_DB(db, custom_auth_table.email)]
-
-    auth.settings.table_user = custom_auth_table # tell auth to use custom_auth_table
-    ## before auth.define_tables()
-
-    ##after that:
-
-    from gluon.contrib.login_methods.motp_auth import motp_auth
-    auth.settings.login_methods.append(motp_auth(db=db))
-
-    ##Instructions for using MOTP
-    - after configuring motp for web2py, Install a MOTP client on your phone (android,IOS, java, windows phone, etc)
-    - initialize the motp client (to reset a motp secret type in #**#),
-      During user creation enter the secret generated during initialization into the motp_secret field in auth_user and
-      similarly enter a pre-decided pin into the motp_pin
-    - done.. to login, just generate a fresh OTP by typing in the pin and use the OTP as password
-
-    ###To Dos###
-    - both motp_secret and pin are stored in plain text! need to have some way of encrypting
-    - web2py stores the password in db on successful login (should not happen)
-    - maybe some utility or page to check the otp would be useful
-    - as of now user field is hardcoded to email. Some way of selecting user table and user field.
-    """
-
-    def verify_otp(otp, pin, secret, offset=60):
-        epoch_time = int(time.time())
-        time_start = int(str(epoch_time - offset)[:-1])
-        time_end = int(str(epoch_time + offset)[:-1])
-        for t in range(time_start - 1, time_end + 1):
-            to_hash = str(t) + secret + pin
-            hash = md5(to_hash).hexdigest()[:6]
-            if otp == hash:
-                return True
-        return False
-
-    def motp_auth_aux(email,
-                      password,
-                      db=db,
-                      offset=time_offset):
-        if db:
-            user_data = db(db.auth_user.email == email).select().first()
-            if user_data:
-                if user_data['motp_secret'] and user_data['motp_pin']:
-                    motp_secret = user_data['motp_secret']
-                    motp_pin = user_data['motp_pin']
-                    otp_check = verify_otp(
-                        password, motp_pin, motp_secret, offset=offset)
-                    if otp_check:
-                        return True
-                    else:
-                        return False
-                else:
-                    return False
-        return False
-    return motp_auth_aux

+ 0 - 184
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/oauth10a_account.py

@@ -1,184 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-Written by Michele Comitini <[email protected]>
-License: LGPL v3
-
-Adds support for  OAuth1.0a authentication to web2py.
-
-Dependencies:
- - python-oauth2 (http://github.com/simplegeo/python-oauth2)
-
-"""
-
-import oauth2 as oauth
-import cgi
-
-from urllib import urlencode
-
-from gluon import current
-
-class OAuthAccount(object):
-    """
-    Login will be done via   OAuth Framework, instead of web2py's
-    login form.
-
-    Include in your model (eg db.py)::
-        # define the auth_table before call to auth.define_tables()
-        auth_table = db.define_table(
-           auth.settings.table_user_name,
-           Field('first_name', length=128, default=""),
-           Field('last_name', length=128, default=""),
-           Field('username', length=128, default="", unique=True),
-           Field('password', 'password', length=256,
-           readable=False, label='Password'),
-           Field('registration_key', length=128, default= "",
-           writable=False, readable=False))
-
-        auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username)
-        .
-        .
-        .
-        auth.define_tables()
-        .
-        .
-        .
-
-        CLIENT_ID=\"<put your fb application id here>\"
-        CLIENT_SECRET=\"<put your fb application secret here>\"
-        AUTH_URL="..."
-        TOKEN_URL="..."
-        ACCESS_TOKEN_URL="..."
-        from gluon.contrib.login_methods.oauth10a_account import OAuthAccount
-        auth.settings.login_form=OAuthAccount(globals(
-            ),CLIENT_ID,CLIENT_SECRET, AUTH_URL, TOKEN_URL, ACCESS_TOKEN_URL)
-
-    """
-
-    def __redirect_uri(self, next=None):
-        """Build the uri used by the authenticating server to redirect
-        the client back to the page originating the auth request.
-        Appends the _next action to the generated url so the flows continues.
-        """
-        r = self.request
-        http_host = r.env.http_host
-
-        url_scheme = r.env.wsgi_url_scheme
-        if next:
-            path_info = next
-        else:
-            path_info = r.env.path_info
-        uri = '%s://%s%s' % (url_scheme, http_host, path_info)
-        if r.get_vars and not next:
-            uri += '?' + urlencode(r.get_vars)
-        return uri
-
-    def accessToken(self):
-        """Return the access token generated by the authenticating server.
-
-        If token is already in the session that one will be used.
-        Otherwise the token is fetched from the auth server.
-
-        """
-
-        if self.session.access_token:
-            # return the token (TODO: does it expire?)
-
-            return self.session.access_token
-        if self.session.request_token:
-            # Exchange the request token with an authorization token.
-            token = self.session.request_token
-            self.session.request_token = None
-
-            # Build an authorized client
-            # OAuth1.0a put the verifier!
-            token.set_verifier(self.request.vars.oauth_verifier)
-            client = oauth.Client(self.consumer, token)
-
-            resp, content = client.request(self.access_token_url, "POST")
-            if str(resp['status']) != '200':
-                self.session.request_token = None
-                self.globals['redirect'](self.globals[
-                                         'URL'](f='user', args='logout'))
-
-            self.session.access_token = oauth.Token.from_string(content)
-
-            return self.session.access_token
-
-        self.session.access_token = None
-        return None
-
-    def __init__(self, g, client_id, client_secret, auth_url, token_url, access_token_url, socket_timeout=60):
-        self.globals = g
-        self.client_id = client_id
-        self.client_secret = client_secret
-        self.code = None
-        self.request = current.request
-        self.session = current.session
-        self.auth_url = auth_url
-        self.token_url = token_url
-        self.access_token_url = access_token_url
-        self.socket_timeout = socket_timeout
-
-        # consumer init
-        self.consumer = oauth.Consumer(self.client_id, self.client_secret)
-
-    def login_url(self, next="/"):
-        self.__oauth_login(next)
-        return next
-
-    def logout_url(self, next="/"):
-        self.session.request_token = None
-        self.session.access_token = None
-        return next
-
-    def get_user(self):
-        '''Get user data.
-
-        Since OAuth does not specify what a user
-        is, this function must be implemented for the specific
-        provider.
-        '''
-        raise NotImplementedError("Must override get_user()")
-
-    def __oauth_login(self, next):
-        '''This method redirects the user to the authenticating form
-        on authentication server if the authentication code
-        and the authentication token are not available to the
-        application yet.
-
-        Once the authentication code has been received this method is
-        called to set the access token into the session by calling
-        accessToken()
-        '''
-
-        if not self.accessToken():
-            # setup the client
-            client = oauth.Client(self.consumer, None, timeout=self.socket_timeout)
-            # Get a request token.
-            # oauth_callback *is REQUIRED* for OAuth1.0a
-            # putting it in the body seems to work.
-            callback_url = self.__redirect_uri(next)
-            data = urlencode(dict(oauth_callback=callback_url))
-            resp, content = client.request(self.token_url, "POST", body=data)
-            if resp['status'] != '200':
-                self.session.request_token = None
-                self.globals['redirect'](self.globals[
-                                         'URL'](f='user', args='logout'))
-
-            # Store the request token in session.
-            request_token = self.session.request_token = oauth.Token.from_string(content)
-
-            # Redirect the user to the authentication URL and pass the callback url.
-            data = urlencode(dict(oauth_token=request_token.key,
-                                  oauth_callback=callback_url))
-            auth_request_url = self.auth_url + '?' + data
-
-            HTTP = self.globals['HTTP']
-
-            raise HTTP(302,
-                       "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
-                       Location=auth_request_url)
-
-        return None

+ 0 - 291
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/oauth20_account.py

@@ -1,291 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-Written by Michele Comitini <[email protected]>
-License: LGPL v3
-
-Adds support for  OAuth 2.0 authentication to web2py.
-
-OAuth 2.0 spec: http://tools.ietf.org/html/rfc6749
-
-"""
-
-import time
-import cgi
-import urllib2
-
-from urllib import urlencode
-from gluon import current, redirect, HTTP
-
-import json
-
-class OAuthAccount(object):
-    """
-    Login will be done via   OAuth Framework, instead of web2py's
-    login form.
-
-    You need to override the get_user method to match your auth provider needs.
-    Example for facebook in your model (eg db.py)::
-        # define the auth_table before call to auth.define_tables()
-        auth_table = db.define_table(
-           auth.settings.table_user_name,
-           Field('first_name', length=128, default=""),
-           Field('last_name', length=128, default=""),
-           Field('username', length=128, default="", unique=True),
-           Field('password', 'password', length=256,
-           readable=False, label='Password'),
-           Field('registration_key', length=128, default= "",
-           writable=False, readable=False))
-
-        auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username)
-        auth.define_tables()
-
-        CLIENT_ID=\"<put your fb application id here>\"
-        CLIENT_SECRET=\"<put your fb application secret here>\"
-        AUTH_URL="http://..."
-        TOKEN_URL="http://..."
-        # remember to download and install facebook GraphAPI module in your app
-        from facebook import GraphAPI, GraphAPIError
-        from gluon.contrib.login_methods.oauth20_account import OAuthAccount
-        class FaceBookAccount(OAuthAccount):
-            '''OAuth impl for FaceBook'''
-            AUTH_URL="https://graph.facebook.com/oauth/authorize"
-            TOKEN_URL="https://graph.facebook.com/oauth/access_token"
-
-            def __init__(self):
-               OAuthAccount.__init__(self,
-                                     client_id=CLIENT_ID,
-                                     client_secret=CLIENT_SECRET,
-                                     auth_url=self.AUTH_URL,
-                                     token_url=self.TOKEN_URL,
-                                     scope='user_photos,friends_photos')
-               self.graph = None
-
-            def get_user(self):
-               '''
-                Returns the user using the Graph API.
-               '''
-
-               if not self.accessToken():
-                  return None
-
-               if not self.graph:
-                  self.graph = GraphAPI((self.accessToken()))
-
-               user = None
-               try:
-                   user = self.graph.get_object("me")
-               except GraphAPIError, e:
-                   self.session.token = None
-                   self.graph = None
-
-
-               if user:
-                   return dict(first_name = user['first_name'],
-                               last_name = user['last_name'],
-                               username = user['id'])
-
-
-               auth.settings.actions_disabled=['register',
-                   'change_password','request_reset_password','profile']
-               auth.settings.login_form=FaceBookAccount()
-
-Any optional arg in the constructor will be passed asis to remote
-server for requests.  It can be used for the optional"scope" parameters for Facebook.
-
-    """
-    def __redirect_uri(self, next=None):
-        """
-        Build the uri used by the authenticating server to redirect
-        the client back to the page originating the auth request.
-        Appends the _next action to the generated url so the flows continues.
-        """
-
-        r = current.request
-        http_host = r.env.http_host
-
-        if r.env.https == 'on':
-            url_scheme = 'https'
-        else:
-            url_scheme = r.env.wsgi_url_scheme
-        if next:
-            path_info = next
-        else:
-            path_info = r.env.path_info
-        uri = '%s://%s%s' % (url_scheme, http_host, path_info)
-        if r.get_vars and not next:
-            uri += '?' + urlencode(r.get_vars)
-        return uri
-
-
-    def __build_url_opener(self, uri):
-        """
-        Build the url opener for managing HTTP Basic Athentication
-        """
-        # Create an OpenerDirector with support
-        # for Basic HTTP Authentication...
-        password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
-        password_mgr.add_password(realm=None,
-                                  uri=uri,
-                                  user=self.client_id,
-                                  passwd=self.client_secret)
-        handler = urllib2.HTTPBasicAuthHandler(password_mgr)
-        opener = urllib2.build_opener(handler)
-        return opener
-
-    def accessToken(self):
-        """
-        Return the access token generated by the authenticating server.
-
-        If token is already in the session that one will be used.
-        Otherwise the token is fetched from the auth server.
-
-        """
-        if current.session.token and 'expires' in current.session.token:
-            expires = current.session.token['expires']
-            # reuse token until expiration
-            if expires == 0 or expires > time.time():
-                        return current.session.token['access_token']
-
-        code = current.request.vars.code
-
-        if code:
-            data = dict(client_id=self.client_id,
-                        client_secret=self.client_secret,
-                        redirect_uri=current.session.redirect_uri,
-                        code=code,
-                        grant_type='authorization_code'
-                        )
-
-            open_url = None
-            opener = self.__build_url_opener(self.token_url)
-            try:
-                open_url = opener.open(self.token_url, urlencode(data), self.socket_timeout)
-            except urllib2.HTTPError, e:
-                tmp = e.read()
-                raise Exception(tmp)
-            finally:
-                if current.session.code:
-                    del current.session.code  # throw it away
-
-            if open_url:
-                try:
-                    data = open_url.read()
-                    resp_type = open_url.info().gettype()
-                    # try json style first
-                    if not resp_type or resp_type[:16] == 'application/json':
-                        try:
-                            tokendata = json.loads(data)
-                            current.session.token = tokendata
-                        except Exception, e:
-                            raise Exception("Cannot parse oauth server response %s %s" % (data, e))
-                    else: # try facebook style first with x-www-form-encoded
-                        tokendata = cgi.parse_qs(data)
-                        current.session.token = \
-                          dict([(k, v[-1]) for k, v in tokendata.items()])
-                    if not tokendata: # parsing failed?
-                        raise Exception("Cannot parse oauth server response %s" % data)
-                    # set expiration absolute time try to avoid broken
-                    # implementations where "expires_in" becomes "expires"
-                    if 'expires_in' in current.session.token:
-                        exps = 'expires_in'
-                    elif 'expires' in current.session.token:
-                        exps = 'expires'
-                    else:
-                        exps = None
-                    current.session.token['expires'] = exps and \
-                        int(current.session.token[exps]) + \
-                        time.time()
-                finally:
-                    opener.close()
-                return current.session.token['access_token']
-
-        current.session.token = None
-        return None
-
-    def __init__(self, g=None,
-                 client_id=None, client_secret=None,
-                 auth_url=None, token_url=None, socket_timeout=60, **args):
-        """
-        first argument is unused. Here only for legacy reasons.
-        """
-        if [client_id, client_secret, auth_url, token_url].count(None) > 0:
-            raise RuntimeError("""Following args are mandatory:
-            client_id,
-            client_secret,
-            auth_url,
-            token_url.
-            """)
-        self.client_id = client_id
-        self.client_secret = client_secret
-        self.auth_url = auth_url
-        self.token_url = token_url
-        self.args = args
-        self.socket_timeout = socket_timeout
-
-    def login_url(self, next="/"):
-        self.__oauth_login(next)
-        return next
-
-    def logout_url(self, next="/"):
-        del current.session.token
-        return next
-
-    def get_user(self):
-        """
-        Override this method by sublcassing the class.
-
-        """
-        if not current.session.token:
-            return None
-        return dict(first_name='Pinco',
-                    last_name='Pallino',
-                    username='pincopallino')
-        raise NotImplementedError("Must override get_user()")
-
-        # Following code is never executed.  It can be used as example
-        # for overriding in subclasses.
-        if not self.accessToken():
-            return None
-
-        if not self.graph:
-            self.graph = GraphAPI((self.accessToken()))
-
-        user = None
-        try:
-            user = self.graph.get_object("me")
-        except GraphAPIError:
-            current.session.token = None
-            self.graph = None
-
-        if user:
-            return dict(first_name=user['first_name'],
-                        last_name=user['last_name'],
-                        username=user['id'])
-
-    def __oauth_login(self, next):
-        """
-        This method redirects the user to the authenticating form
-        on authentication server if the authentication code
-        and the authentication token are not available to the
-        application yet.
-
-        Once the authentication code has been received this method is
-        called to set the access token into the session by calling
-        accessToken()
-        """
-
-        token = self.accessToken()
-        if not token:
-            current.session.redirect_uri = self.__redirect_uri(next)
-            data = dict(redirect_uri=current.session.redirect_uri,
-                        response_type='code',
-                        client_id=self.client_id)
-            if self.args:
-                data.update(self.args)
-            auth_request_url = self.auth_url + "?" + urlencode(data)
-            raise HTTP(302,
-                       "You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
-                       Location=auth_request_url)
-        return

+ 0 - 107
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/oneall_account.py

@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-"""
-   Oneall Authentication for web2py
-   Developed by Nathan Freeze (Copyright © 2013)
-   Email <[email protected]>
-
-   This file contains code to allow using onall.com
-   authentication services with web2py
-"""
-
-import os
-import base64
-from gluon import *
-from gluon.storage import Storage
-from gluon.contrib.simplejson import JSONDecodeError
-from gluon.tools import fetch
-import gluon.contrib.simplejson as json
-
-class OneallAccount(object):
-
-    """
-    from gluon.contrib.login_methods.oneall_account import OneallAccount
-    auth.settings.actions_disabled=['register','change_password',
-        'request_reset_password']
-    auth.settings.login_form = OneallAccount(request,
-              public_key="...",
-              private_key="...",
-              domain="...",
-              url = "http://localhost:8000/%s/default/user/login" % request.application)
-    """
-
-    def __init__(self, request, public_key="", private_key="",  domain="",
-                       url=None, providers=None, on_login_failure=None):
-
-        self.request = request
-        self.public_key = public_key
-        self.private_key = private_key
-        self.url = url
-        self.domain = domain
-        self.profile = None
-        self.on_login_failure = on_login_failure
-        self.providers = providers or ["facebook", "google", "yahoo", "openid"]
-
-        self.mappings = Storage()
-        def defaultmapping(profile):
-            name = profile.get('name',{})
-            dname = name.get('formatted',profile.get('displayName'))
-            email=profile.get('emails', [{}])[0].get('value')
-            reg_id=profile.get('identity_token','')
-            username=profile.get('preferredUsername',email)
-            first_name=name.get('givenName', dname.split(' ')[0])
-            last_name=profile.get('familyName',dname.split(' ')[1])
-            return dict(registration_id=reg_id,username=username,email=email,
-                        first_name=first_name,last_name=last_name)
-        self.mappings.default = defaultmapping
-
-    def get_user(self):
-        request = self.request
-        user = None
-        if request.vars.connection_token:
-            auth_url = "https://%s.api.oneall.com/connections/%s.json"  % \
-                       (self.domain, request.vars.connection_token)
-            auth_pw = "%s:%s" % (self.public_key,self.private_key)
-            auth_pw = base64.b64encode(auth_pw)
-            headers = dict(Authorization="Basic %s" % auth_pw)
-            try:
-                auth_info_json = fetch(auth_url,headers=headers)
-                auth_info = json.loads(auth_info_json)
-                data = auth_info['response']['result']['data']
-                if data['plugin']['key'] == 'social_login':
-                    if data['plugin']['data']['status'] == 'success':
-                        userdata = data['user']
-                        self.profile = userdata['identity']
-                        source = self.profile['source']['key']
-                        mapping = self.mappings.get(source,self.mappings['default'])
-                        user = mapping(self.profile)
-            except (JSONDecodeError, KeyError):
-                pass
-            if user is None and self.on_login_failure:
-                    redirect(self.on_login_failure)
-        return user
-
-    def login_form(self):
-        scheme = self.request.env.wsgi_url_scheme
-        oneall_url = scheme + "://%s.api.oneall.com/socialize/library.js"  % self.domain
-        oneall_lib = SCRIPT(_src=oneall_url,_type='text/javascript')
-        container = DIV(_id="oa_social_login_container")
-        widget = SCRIPT('oneall.api.plugins.social_login.build("oa_social_login_container",',
-                        '{providers : %s,' % self.providers,
-                        'callback_uri: "%s"});' % self.url,
-                _type="text/javascript")
-        form = DIV(oneall_lib,container,widget)
-        return form
-
-def use_oneall(auth, filename='private/oneall.key', **kwargs):
-    path = os.path.join(current.request.folder, filename)
-    if os.path.exists(path):
-        request = current.request
-        domain, public_key, private_key = open(path, 'r').read().strip().split(':')
-        url = URL('default', 'user', args='login', scheme=True)
-        auth.settings.actions_disabled =\
-        ['register', 'change_password', 'request_reset_password']
-        auth.settings.login_form = OneallAccount(
-            request, public_key=public_key,private_key=private_key,
-            domain=domain, url=url, **kwargs)

+ 0 - 653
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/openid_auth.py

@@ -1,653 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-"""
-    OpenID authentication for web2py
-
-    Allowed using OpenID login together with web2py built-in login.
-
-    By default, to support OpenID login, put this in your db.py
-
-    >>> from gluon.contrib.login_methods.openid_auth import OpenIDAuth
-    >>> auth.settings.login_form = OpenIDAuth(auth)
-
-    To show OpenID list in user profile, you can add the following code
-    before the end of function user() of your_app/controllers/default.py
-
-    +     if (request.args and request.args(0) == "profile"):
-    +         form = DIV(form, openid_login_form.list_user_openids())
-        return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration)
-
-    More detail in the description of the class OpenIDAuth.
-
-    Requirements:
-        python-openid version 2.2.5 or later
-
-    Reference:
-        * w2p openID
-          http://w2popenid.appspot.com/init/default/wiki/w2popenid
-        * RPX and web2py auth module
-          http://www.web2pyslices.com/main/slices/take_slice/28
-        * built-in file: gluon/contrib/login_methods/rpx_account.py
-        * built-in file: gluon/tools.py (Auth class)
-"""
-import time
-from datetime import datetime, timedelta
-
-from gluon import *
-from gluon.storage import Storage, Messages
-
-try:
-    import openid.consumer.consumer
-    from openid.association import Association
-    from openid.store.interface import OpenIDStore
-    from openid.extensions.sreg import SRegRequest, SRegResponse
-    from openid.store import nonce
-    from openid.consumer.discover import DiscoveryFailure
-except ImportError, err:
-    raise ImportError("OpenIDAuth requires python-openid package")
-
-DEFAULT = lambda: None
-
-
-class OpenIDAuth(object):
-    """
-    OpenIDAuth
-
-    It supports the logout_url, implementing the get_user and login_form
-    for cas usage of gluon.tools.Auth.
-
-    It also uses the ExtendedLoginForm to allow the OpenIDAuth login_methods
-    combined with the standard logon/register procedure.
-
-    It uses OpenID Consumer when render the form and begins the OpenID
-    authentication.
-
-    Example: (put these code after auth.define_tables() in your models.)
-
-    auth = Auth(globals(), db)                # authentication/authorization
-    ...
-    auth.define_tables()                      # creates all needed tables
-    ...
-
-    #include in your model after auth has been defined
-    from gluon.contrib.login_methods.openid_auth import OpenIDAuth
-    openid_login_form = OpenIDAuth(request, auth, db)
-
-    from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm
-    extended_login_form = ExtendedLoginForm(request, auth, openid_login_form,
-                                            signals=['oid','janrain_nonce'])
-
-    auth.settings.login_form = extended_login_form
-    """
-
-    def __init__(self, auth):
-        self.auth = auth
-        self.db = auth.db
-
-        request = current.request
-        self.nextvar = '_next'
-        self.realm = 'http://%s' % request.env.http_host
-        self.login_url = URL(r=request, f='user', args=['login'])
-        self.return_to_url = self.realm + self.login_url
-
-        self.table_alt_logins_name = "alt_logins"
-        if not auth.settings.table_user:
-            raise
-        self.table_user = self.auth.settings.table_user
-        self.openid_expiration = 15  # minutes
-
-        self.messages = self._define_messages()
-
-        if not self.table_alt_logins_name in self.db.tables:
-            self._define_alt_login_table()
-
-    def _define_messages(self):
-        messages = Messages(current.T)
-        messages.label_alt_login_username = 'Sign-in with OpenID: '
-        messages.label_add_alt_login_username = 'Add a new OpenID: '
-        messages.submit_button = 'Sign in'
-        messages.submit_button_add = 'Add'
-        messages.a_delete = 'Delete'
-        messages.comment_openid_signin = 'What is OpenID?'
-        messages.comment_openid_help_title = 'Start using your OpenID'
-        messages.comment_openid_help_url = 'http://openid.net/get-an-openid/start-using-your-openid/'
-        messages.openid_fail_discover = 'Failed to discover OpenID service. Check your OpenID or "More about OpenID"?'
-        messages.flash_openid_expired = 'OpenID expired. Please login or authenticate OpenID again. Sorry for the inconvenient.'
-        messages.flash_openid_associated = 'OpenID associated'
-        messages.flash_associate_openid = 'Please login or register an account for this OpenID.'
-        messages.p_openid_not_registered = "This Open ID haven't be registered. " \
-            + "Please login to associate with it or register an account for it."
-        messages.flash_openid_authenticated = 'OpenID authenticated successfully.'
-        messages.flash_openid_fail_authentication = 'OpenID authentication failed. (Error message: %s)'
-        messages.flash_openid_canceled = 'OpenID authentication canceled by user.'
-        messages.flash_openid_need_setup = 'OpenID authentication needs to be setup by the user with the provider first.'
-        messages.h_openid_login = 'OpenID Login'
-        messages.h_openid_list = 'OpenID List'
-        return messages
-
-    def _define_alt_login_table(self):
-        """
-        Define the OpenID login table.
-        Note: oidtype is what I used for our project.
-              We're going to support 'fackbook' and
-              'plurk' alternate login methods.
-              Otherwise it's always 'openid' and you
-              may not need it. This should be easy to changed.
-              (Just remove the field of "type" and remove the
-              "and db.alt_logins.oidtype == type_"
-              in _find_matched_openid function)
-        """
-        db = self.db
-        table = db.define_table(
-            self.table_alt_logins_name,
-            Field('username', length=512, default=''),
-            Field('oidtype', length=128, default='openid', readable=False),
-            Field('oiduser', self.table_user, readable=False),
-        )
-        table.username.requires = IS_NOT_IN_DB(db, table.username)
-        self.table_alt_logins = table
-
-    def logout_url(self, next):
-        """
-        Delete the w2popenid record in session as logout
-        """
-        if current.session.w2popenid:
-            del(current.session.w2popenid)
-        return next
-
-    def login_form(self):
-        """
-        Start to process the OpenID response if 'janrain_nonce' in request parameters
-        and not processed yet. Else return the OpenID form for login.
-        """
-        request = current.request
-        if 'janrain_nonce' in request.vars and not self._processed():
-            self._process_response()
-            return self.auth()
-        return self._form()
-
-    def get_user(self):
-        """
-        It supports the logout_url, implementing the get_user and login_form
-        for cas usage of gluon.tools.Auth.
-        """
-        request = current.request
-        args = request.args
-
-        if args[0] == 'logout':
-            return True  # Let logout_url got called
-
-        if current.session.w2popenid:
-            w2popenid = current.session.w2popenid
-            db = self.db
-            if (w2popenid.ok is True and w2popenid.oid):  # OpenID authenticated
-                if self._w2popenid_expired(w2popenid):
-                    del(current.session.w2popenid)
-                    flash = self.messages.flash_openid_expired
-                    current.session.warning = flash
-                    redirect(self.auth.settings.login_url)
-                oid = self._remove_protocol(w2popenid.oid)
-                alt_login = self._find_matched_openid(db, oid)
-
-                nextvar = self.nextvar
-                # This OpenID not in the database. If user logged in then add it
-                # into database, else ask user to login or register.
-                if not alt_login:
-                    if self.auth.is_logged_in():
-                        # TODO: ask first maybe
-                        self._associate_user_openid(self.auth.user, oid)
-                        if current.session.w2popenid:
-                            del(current.session.w2popenid)
-                        current.session.flash = self.messages.flash_openid_associated
-                        if nextvar in request.vars:
-                            redirect(request.vars[nextvar])
-                        redirect(self.auth.settings.login_next)
-
-                    if nextvar not in request.vars:
-                        # no next var, add it and do login again
-                        # so if user login or register can go back here to associate the OpenID
-                        redirect(URL(r=request,
-                                     args=['login'],
-                                     vars={nextvar: self.login_url}))
-                    self.login_form = self._form_with_notification()
-                    current.session.flash = self.messages.flash_associate_openid
-                    return None  # need to login or register to associate this openid
-
-                # Get existed OpenID user
-                user = db(
-                    self.table_user.id == alt_login.oiduser).select().first()
-                if user:
-                    if current.session.w2popenid:
-                        del(current.session.w2popenid)
-                if 'username' in self.table_user.fields():
-                    username = 'username'
-                elif 'email' in self.table_user.fields():
-                    username = 'email'
-                return {username: user[username]} if user else None  # login success (almost)
-
-        return None  # just start to login
-
-    def _find_matched_openid(self, db, oid, type_='openid'):
-        """
-        Get the matched OpenID for given
-        """
-        query = (
-            (db.alt_logins.username == oid) & (db.alt_logins.oidtype == type_))
-        alt_login = db(query).select().first()  # Get the OpenID record
-        return alt_login
-
-    def _associate_user_openid(self, user, oid):
-        """
-        Associate the user logged in with given OpenID
-        """
-        # print "[DB] %s authenticated" % oid
-        self.db.alt_logins.insert(username=oid, oiduser=user.id)
-
-    def _form_with_notification(self):
-        """
-        Render the form for normal login with a notice of OpenID authenticated
-        """
-        form = DIV()
-        # TODO: check when will happen
-        if self.auth.settings.login_form in (self.auth, self):
-            self.auth.settings.login_form = self.auth
-            form = DIV(self.auth())
-
-        register_note = DIV(P(self.messages.p_openid_not_registered))
-        form.components.append(register_note)
-        return lambda: form
-
-    def _remove_protocol(self, oid):
-        """
-        Remove https:// or http:// from oid url
-        """
-        protocol = 'https://'
-        if oid.startswith(protocol):
-            oid = oid[len(protocol):]
-            return oid
-        protocol = 'http://'
-        if oid.startswith(protocol):
-            oid = oid[len(protocol):]
-            return oid
-        return oid
-
-    def _init_consumerhelper(self):
-        """
-        Initialize the ConsumerHelper
-        """
-        if not hasattr(self, "consumerhelper"):
-            self.consumerhelper = ConsumerHelper(current.session,
-                                                 self.db)
-        return self.consumerhelper
-
-    def _form(self, style=None):
-        form = DIV(H3(self.messages.h_openid_login), self._login_form(style))
-        return form
-
-    def _login_form(self,
-                    openid_field_label=None,
-                    submit_button=None,
-                    _next=None,
-                    style=None):
-        """
-        Render the form for OpenID login
-        """
-        def warning_openid_fail(session):
-            session.warning = messages.openid_fail_discover
-
-        style = style or """
-background-attachment: scroll;
-background-repeat: no-repeat;
-background-image: url("http://wiki.openid.net/f/openid-16x16.gif");
-background-position: 0% 50%;
-background-color: transparent;
-padding-left: 18px;
-width: 400px;
-"""
-        style = style.replace("\n", "")
-
-        request = current.request
-        session = current.session
-        messages = self.messages
-        hidden_next_input = ""
-        if _next == 'profile':
-            profile_url = URL(r=request, f='user', args=['profile'])
-            hidden_next_input = INPUT(
-                _type="hidden", _name="_next", _value=profile_url)
-        form = FORM(
-            openid_field_label or self.messages.label_alt_login_username,
-            INPUT(_type="input", _name="oid",
-                  requires=IS_NOT_EMPTY(
-                  error_message=messages.openid_fail_discover),
-                  _style=style),
-            hidden_next_input,
-            INPUT(_type="submit",
-                  _value=submit_button or messages.submit_button),
-            " ",
-            A(messages.comment_openid_signin,
-              _href=messages.comment_openid_help_url,
-              _title=messages.comment_openid_help_title,
-              _class='openid-identifier',
-              _target="_blank"),
-            _action=self.login_url
-        )
-        if form.accepts(request.vars, session):
-            oid = request.vars.oid
-            consumerhelper = self._init_consumerhelper()
-            url = self.login_url
-            return_to_url = self.return_to_url
-            if not oid:
-                warning_openid_fail(session)
-                redirect(url)
-            try:
-                if '_next' in request.vars:
-                    return_to_url = self.return_to_url + \
-                        '?_next=' + request.vars._next
-                url = consumerhelper.begin(oid, self.realm, return_to_url)
-            except DiscoveryFailure:
-                warning_openid_fail(session)
-            redirect(url)
-        return form
-
-    def _processed(self):
-        """
-        Check if w2popenid authentication is processed.
-        Return True if processed else False.
-        """
-        processed = (hasattr(current.session, 'w2popenid') and
-                     current.session.w2popenid.ok is True)
-        return processed
-
-    def _set_w2popenid_expiration(self, w2popenid):
-        """
-        Set expiration for OpenID authentication.
-        """
-        w2popenid.expiration = datetime.now(
-        ) + timedelta(minutes=self.openid_expiration)
-
-    def _w2popenid_expired(self, w2popenid):
-        """
-        Check if w2popenid authentication is expired.
-        Return True if expired else False.
-        """
-        return (not w2popenid.expiration) or (datetime.now() > w2popenid.expiration)
-
-    def _process_response(self):
-        """
-        Process the OpenID by ConsumerHelper.
-        """
-        request = current.request
-        request_vars = request.vars
-        consumerhelper = self._init_consumerhelper()
-        process_status = consumerhelper.process_response(
-            request_vars, self.return_to_url)
-        if process_status == "success":
-            w2popenid = current.session.w2popenid
-            user_data = self.consumerhelper.sreg()
-            current.session.w2popenid.ok = True
-            self._set_w2popenid_expiration(w2popenid)
-            w2popenid.user_data = user_data
-            current.session.flash = self.messages.flash_openid_authenticated
-        elif process_status == "failure":
-            flash = self.messages.flash_openid_fail_authentication % consumerhelper.error_message
-            current.session.warning = flash
-        elif process_status == "cancel":
-            current.session.warning = self.messages.flash_openid_canceled
-        elif process_status == "setup_needed":
-            current.session.warning = self.messages.flash_openid_need_setup
-
-    def list_user_openids(self):
-        messages = self.messages
-        request = current.request
-        if 'delete_openid' in request.vars:
-            self.remove_openid(request.vars.delete_openid)
-
-        query = self.db.alt_logins.oiduser == self.auth.user.id
-        alt_logins = self.db(query).select()
-        l = []
-        for alt_login in alt_logins:
-            username = alt_login.username
-            delete_href = URL(r=request, f='user',
-                              args=['profile'],
-                              vars={'delete_openid': username})
-            delete_link = A(messages.a_delete, _href=delete_href)
-            l.append(LI(username, " ", delete_link))
-
-        profile_url = URL(r=request, f='user', args=['profile'])
-        #return_to_url = self.return_to_url + '?' + self.nextvar + '=' + profile_url
-        openid_list = DIV(H3(messages.h_openid_list), UL(l),
-                          self._login_form(
-                              _next='profile',
-                              submit_button=messages.submit_button_add,
-                              openid_field_label=messages.label_add_alt_login_username)
-                          )
-        return openid_list
-
-    def remove_openid(self, openid):
-        query = self.db.alt_logins.username == openid
-        self.db(query).delete()
-
-
-class ConsumerHelper(object):
-    """
-    ConsumerHelper knows the python-openid and
-    """
-
-    def __init__(self, session, db):
-        self.session = session
-        store = self._init_store(db)
-        self.consumer = openid.consumer.consumer.Consumer(session, store)
-
-    def _init_store(self, db):
-        """
-        Initialize Web2pyStore
-        """
-        if not hasattr(self, "store"):
-            store = Web2pyStore(db)
-            session = self.session
-            if 'w2popenid' not in session:
-                session.w2popenid = Storage()
-            self.store = store
-        return self.store
-
-    def begin(self, oid, realm, return_to_url):
-        """
-        Begin the OpenID authentication
-        """
-        w2popenid = self.session.w2popenid
-        w2popenid.oid = oid
-        auth_req = self.consumer.begin(oid)
-        auth_req.addExtension(SRegRequest(required=['email', 'nickname']))
-        url = auth_req.redirectURL(return_to=return_to_url, realm=realm)
-        return url
-
-    def process_response(self, request_vars, return_to_url):
-        """
-        Complete the process and
-        """
-        resp = self.consumer.complete(request_vars, return_to_url)
-        if resp:
-            if resp.status == openid.consumer.consumer.SUCCESS:
-                self.resp = resp
-                if hasattr(resp, "identity_url"):
-                    self.session.w2popenid.oid = resp.identity_url
-                return "success"
-            if resp.status == openid.consumer.consumer.FAILURE:
-                self.error_message = resp.message
-                return "failure"
-            if resp.status == openid.consumer.consumer.CANCEL:
-                return "cancel"
-            if resp.status == openid.consumer.consumer.SETUP_NEEDED:
-                return "setup_needed"
-        return "no resp"
-
-    def sreg(self):
-        """
-        Try to get OpenID Simple Registation
-        http://openid.net/specs/openid-simple-registration-extension-1_0.html
-        """
-        if self.resp:
-            resp = self.resp
-            sreg_resp = SRegResponse.fromSuccessResponse(resp)
-            return sreg_resp.data if sreg_resp else None
-        else:
-            return None
-
-
-class Web2pyStore(OpenIDStore):
-    """
-    Web2pyStore
-
-    This class implements the OpenIDStore interface. OpenID stores take care
-    of persisting nonces and associations. The Janrain Python OpenID library
-    comes with implementations for file and memory storage. Web2pyStore uses
-    the web2py db abstration layer. See the source code docs of OpenIDStore
-    for a comprehensive description of this interface.
-    """
-
-    def __init__(self, database):
-        self.database = database
-        self.table_oid_associations_name = 'oid_associations'
-        self.table_oid_nonces_name = 'oid_nonces'
-        self._initDB()
-
-    def _initDB(self):
-
-        if self.table_oid_associations_name not in self.database:
-            self.database.define_table(self.table_oid_associations_name,
-                                       Field('server_url',
-                                             'string', length=2047, required=True),
-                                       Field('handle',
-                                             'string', length=255, required=True),
-                                       Field('secret', 'blob', required=True),
-                                       Field('issued',
-                                             'integer', required=True),
-                                       Field('lifetime',
-                                             'integer', required=True),
-                                       Field('assoc_type',
-                                             'string', length=64, required=True)
-                                       )
-        if self.table_oid_nonces_name not in self.database:
-            self.database.define_table(self.table_oid_nonces_name,
-                                       Field('server_url',
-                                             'string', length=2047, required=True),
-                                       Field('itimestamp',
-                                             'integer', required=True),
-                                       Field('salt', 'string',
-                                             length=40, required=True)
-                                       )
-
-    def storeAssociation(self, server_url, association):
-        """
-        Store associations. If there already is one with the same
-        server_url and handle in the table replace it.
-        """
-
-        db = self.database
-        query = (db.oid_associations.server_url == server_url) & (
-            db.oid_associations.handle == association.handle)
-        db(query).delete()
-        db.oid_associations.insert(server_url=server_url,
-                                   handle=association.handle,
-                                   secret=association.secret,
-                                   issued=association.issued,
-                                   lifetime=association.lifetime,
-                                   assoc_type=association.assoc_type), 'insert ' * 10
-
-    def getAssociation(self, server_url, handle=None):
-        """
-        Return the association for server_url and handle. If handle is
-        not None return the latests associations for that server_url.
-        Return None if no association can be found.
-        """
-
-        db = self.database
-        query = (db.oid_associations.server_url == server_url)
-        if handle:
-            query &= (db.oid_associations.handle == handle)
-        rows = db(query).select(orderby=db.oid_associations.issued)
-        keep_assoc, _ = self._removeExpiredAssocations(rows)
-        if len(keep_assoc) == 0:
-            return None
-        else:
-            assoc = keep_assoc.pop(
-            )  # pop the last one as it should be the latest one
-            return Association(assoc['handle'],
-                               assoc['secret'],
-                               assoc['issued'],
-                               assoc['lifetime'],
-                               assoc['assoc_type'])
-
-    def removeAssociation(self, server_url, handle):
-        db = self.database
-        query = (db.oid_associations.server_url == server_url) & (
-            db.oid_associations.handle == handle)
-        return db(query).delete() is not None
-
-    def useNonce(self, server_url, timestamp, salt):
-        """
-        This method returns Falase if a nonce has been used before or its
-        timestamp is not current.
-        """
-
-        db = self.database
-        if abs(timestamp - time.time()) > nonce.SKEW:
-            return False
-        query = (db.oid_nonces.server_url == server_url) & (db.oid_nonces.itimestamp == timestamp) & (db.oid_nonces.salt == salt)
-        if db(query).count() > 0:
-            return False
-        else:
-            db.oid_nonces.insert(server_url=server_url,
-                                 itimestamp=timestamp,
-                                 salt=salt)
-            return True
-
-    def _removeExpiredAssocations(self, rows):
-        """
-        This helper function is not part of the interface. Given a list of
-        association rows it checks which associations have expired and
-        deletes them from the db. It returns a tuple of the form
-        ([valid_assoc], no_of_expired_assoc_deleted).
-        """
-
-        db = self.database
-        keep_assoc = []
-        remove_assoc = []
-        t1970 = time.time()
-        for r in rows:
-            if r['issued'] + r['lifetime'] < t1970:
-                remove_assoc.append(r)
-            else:
-                keep_assoc.append(r)
-        for r in remove_assoc:
-            del db.oid_associations[r['id']]
-        return (keep_assoc, len(remove_assoc))  # return tuple (list of valid associations, number of deleted associations)
-
-    def cleanupNonces(self):
-        """
-        Remove expired nonce entries from DB and return the number
-        of entries deleted.
-        """
-
-        db = self.database
-        query = (db.oid_nonces.itimestamp < time.time() - nonce.SKEW)
-        return db(query).delete()
-
-    def cleanupAssociations(self):
-        """
-        Remove expired associations from db and return the number
-        of entries deleted.
-        """
-
-        db = self.database
-        query = (db.oid_associations.id > 0)
-        return self._removeExpiredAssocations(db(query).select())[1]  # return number of assoc removed
-
-    def cleanup(self):
-        """
-        This method should be run periodically to free the db from
-        expired nonce and association entries.
-        """
-
-        return self.cleanupNonces(), self.cleanupAssociations()

+ 0 - 22
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/pam_auth.py

@@ -1,22 +0,0 @@
-from gluon.contrib.pam import authenticate
-
-
-def pam_auth():
-    """
-    to use pam_login:
-    from gluon.contrib.login_methods.pam_auth import pam_auth
-    auth.settings.login_methods.append(pam_auth())
-
-    or
-
-    auth.settings.actions_disabled=[
-       'register','change_password','request_reset_password']
-    auth.settings.login_methods=[pam_auth()]
-
-    The latter method will not store the user password in auth_user.
-    """
-
-    def pam_auth_aux(username, password):
-        return authenticate(username, password)
-
-    return pam_auth_aux

+ 0 - 134
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/rpx_account.py

@@ -1,134 +0,0 @@
-#!/usr/bin/env python
-# coding: utf8
-
-"""
-   RPX Authentication for web2py
-   Developed by Nathan Freeze (Copyright © 2009)
-   Email <[email protected]>
-   Modified by Massimo Di Pierro
-
-   This file contains code to allow using RPXNow.com (now Jainrain.com)
-   services with web2py
-"""
-
-import os
-import re
-import urllib
-from gluon import *
-from gluon.tools import fetch
-from gluon.storage import Storage
-import gluon.contrib.simplejson as json
-
-
-class RPXAccount(object):
-
-    """
-    from gluon.contrib.login_methods.rpx_account import RPXAccount
-    auth.settings.actions_disabled=['register','change_password',
-        'request_reset_password']
-    auth.settings.login_form = RPXAccount(request,
-              api_key="...",
-              domain="...",
-              url = "http://localhost:8000/%s/default/user/login" % request.application)
-    """
-
-    def __init__(self,
-                 request,
-                 api_key="",
-                 domain="",
-                 url="",
-                 embed=True,
-                 auth_url="https://rpxnow.com/api/v2/auth_info",
-                 language="en",
-                 prompt='rpx',
-                 on_login_failure=None,
-                 ):
-
-        self.request = request
-        self.api_key = api_key
-        self.embed = embed
-        self.auth_url = auth_url
-        self.domain = domain
-        self.token_url = url
-        self.language = language
-        self.profile = None
-        self.prompt = prompt
-        self.on_login_failure = on_login_failure
-        self.mappings = Storage()
-
-        dn = {'givenName': '', 'familyName': ''}
-        self.mappings.Facebook = lambda profile, dn=dn:\
-            dict(registration_id=profile.get("identifier", ""),
-                 username=profile.get("preferredUsername", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("name", dn).get("givenName", ""),
-                 last_name=profile.get("name", dn).get("familyName", ""))
-        self.mappings.Google = lambda profile, dn=dn:\
-            dict(registration_id=profile.get("identifier", ""),
-                 username=profile.get("preferredUsername", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("name", dn).get("givenName", ""),
-                 last_name=profile.get("name", dn).get("familyName", ""))
-        self.mappings.default = lambda profile:\
-            dict(registration_id=profile.get("identifier", ""),
-                 username=profile.get("preferredUsername", ""),
-                 email=profile.get("email", ""),
-                 first_name=profile.get("preferredUsername", ""),
-                 last_name='')
-
-    def get_user(self):
-        request = self.request
-        if request.vars.token:
-            user = Storage()
-            data = urllib.urlencode(
-                dict(apiKey=self.api_key, token=request.vars.token))
-            auth_info_json = fetch(self.auth_url + '?' + data)
-            auth_info = json.loads(auth_info_json)
-
-            if auth_info['stat'] == 'ok':
-                self.profile = auth_info['profile']
-                provider = re.sub('[^\w\-]', '', self.profile['providerName'])
-                user = self.mappings.get(
-                    provider, self.mappings.default)(self.profile)
-                return user
-            elif self.on_login_failure:
-                redirect(self.on_login_failure)
-        return None
-
-    def login_form(self):
-        request = self.request
-        args = request.args
-        if self.embed:
-            JANRAIN_URL = \
-                "https://%s.rpxnow.com/openid/embed?token_url=%s&language_preference=%s"
-            rpxform = IFRAME(
-                _src=JANRAIN_URL % (
-                    self.domain, self.token_url, self.language),
-                _scrolling="no",
-                _frameborder="no",
-                _style="width:400px;height:240px;")
-        else:
-            JANRAIN_URL = \
-                "https://%s.rpxnow.com/openid/v2/signin?token_url=%s"
-            rpxform = DIV(SCRIPT(_src="https://rpxnow.com/openid/v2/widget",
-                                 _type="text/javascript"),
-                          SCRIPT("RPXNOW.overlay = true;",
-                                 "RPXNOW.language_preference = '%s';" % self.language,
-                                 "RPXNOW.realm = '%s';" % self.domain,
-                                 "RPXNOW.token_url = '%s';" % self.token_url,
-                                 "RPXNOW.show();",
-                                 _type="text/javascript"))
-        return rpxform
-
-
-def use_janrain(auth, filename='private/janrain.key', **kwargs):
-    path = os.path.join(current.request.folder, filename)
-    if os.path.exists(path):
-        request = current.request
-        domain, key = open(path, 'r').read().strip().split(':')
-        host = current.request.env.http_host
-        url = URL('default', 'user', args='login', scheme=True)
-        auth.settings.actions_disabled = \
-            ['register', 'change_password', 'request_reset_password']
-        auth.settings.login_form = RPXAccount(
-            request, api_key=key, domain=domain, url=url, **kwargs)

+ 0 - 129
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/saml2_auth.py

@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-This file is part of web2py Web Framework (Copyrighted, 2007-2014).
-Developed by Massimo Di Pierro <[email protected]>.
-License: LGPL v3
-
-Login will be done via Web2py's CAS application, instead of web2py's
-login form.
-
-Include in your model (eg db.py)::
-
-    auth.define_tables(username=True)
-    from gluon.contrib.login_methods.saml2_auth import Saml2Auth
-    auth.settings.login_form=Saml2Auth(
-    config_file = os.path.join(request.folder,'private','sp_conf'),
-    maps=dict(
-        username=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
-        email=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
-        user_id=lambda v: v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0]))
-        
-you must have private/sp_conf.py, the pysaml2 sp configuration file
-"""
-
-from saml2 import BINDING_HTTP_REDIRECT
-from saml2.client import Saml2Client
-from gluon.utils import web2py_uuid
-from gluon import current, redirect, URL
-import os, types
-
-def obj2dict(obj, processed=None):
-    """                                                                        
-    converts any object into a dict, recursively                               
-    """
-    processed = processed if not processed is None else set()
-    if obj is None:
-        return None
-    if isinstance(obj,(int,long,str,unicode,float,bool)):
-        return obj
-    if id(obj) in processed:
-        return '<reference>'
-    processed.add(id(obj))
-    if isinstance(obj,(list,tuple)):
-        return [obj2dict(item,processed) for item in obj]
-    if not isinstance(obj, dict) and hasattr(obj,'__dict__'):
-        obj = obj.__dict__
-    else:
-        return repr(obj)
-    return dict((key,obj2dict(value,processed)) for key,value in obj.items()
-                if not key.startswith('_') and
-                not type(value) in (types.FunctionType,
-                                    types.LambdaType,
-                                    types.BuiltinFunctionType,
-                                    types.BuiltinMethodType))
-
-def saml2_handler(session, request, config_filename = None):
-    config_filename = config_filename or os.path.join(request.folder,'private','sp_conf')
-    client = Saml2Client(config_file = config_filename)
-    idps = client.metadata.with_descriptor("idpsso")
-    entityid = idps.keys()[0]
-    bindings = [BINDING_HTTP_REDIRECT]
-    binding, destination = client.pick_binding(
-        "single_sign_on_service", bindings, "idpsso", entity_id=entityid)
-    binding = BINDING_HTTP_REDIRECT 
-    if not request.vars.SAMLResponse:
-        req_id, req = client.create_authn_request(destination, binding=binding)
-        relay_state = web2py_uuid().replace('-','')
-        session.saml_outstanding_queries = {req_id: request.url}
-        session.saml_req_id = req_id
-        http_args = client.apply_binding(binding, str(req), destination,
-                                         relay_state=relay_state)
-        return {'url':dict(http_args["headers"])['Location']}
-    else:
-        relay_state = request.vars.RelayState
-        req_id = session.saml_req_id
-        unquoted_response = request.vars.SAMLResponse
-        res =  {}
-        try:
-            data = client.parse_authn_request_response(
-                unquoted_response, binding, session.saml_outstanding_queries)
-            res['response'] = data if data else {}
-        except Exception, e:
-            import traceback
-            res['error'] = traceback.format_exc()
-        return res
-    
-
-class Saml2Auth(object):
-
-    def __init__(self, config_file=None, maps=dict(
-            username=lambda v:v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
-            email=lambda v:v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
-            user_id=lambda v:v['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/upn'][0],
-            )):
-        self.config_file = config_file
-        self.maps = maps
-
-    def login_url(self, next="/"):
-        d = saml2_handler(current.session, current.request)
-        if 'url' in d:
-            redirect(d['url'])
-        elif 'error' in d:
-            current.session.flash = d['error']
-            redirect(URL('default','index'))
-        elif 'response' in d:            
-            # a['assertions'][0]['attribute_statement'][0]['attribute']
-            # is list of
-            # {'name': 'http://schemas.microsoft.com/ws/2008/06/identity/claims/windowsaccountname', 'name_format': None, 'text': None, 'friendly_name': None, 'attribute_value': [{'text': 'CAA\\dev-mdp', 'extension_attributes': "{'{http://www.w3.org/2001/XMLSchema-instance}type': 'xs:string'}", 'extension_elements': []}], 'extension_elements': [], 'extension_attributes': '{}'}
-            try:
-                attributes = d['response'].assertions[0].attribute_statement[0].attribute
-            except:
-                attributes = d['response'].assertion.attribute_statement[0].attribute
-            current.session.saml2_info = dict(
-                (a.name, [i.text for i in a.attribute_value]) for a in attributes)
-        return next
-
-    def logout_url(self, next="/"):
-        current.session.saml2_info = None
-        return next
-
-    def get_user(self):        
-        user = current.session.saml2_info
-        if user:
-            d = {'source': 'web2py saml2'}
-            for key in self.maps:
-                d[key] = self.maps[key](user)
-            return d
-        return None

+ 0 - 98
frameworks/Python/web2py/web2py/gluon/contrib/login_methods/x509_auth.py

@@ -1,98 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-Written by Michele Comitini <[email protected]>
-License: LGPL v3
-
-Adds support for x509 authentication.
-
-"""
-
-from gluon.globals import current
-from gluon.storage import Storage
-from gluon.http import HTTP, redirect
-
-#requires M2Crypto
-from M2Crypto import X509
-
-
-class X509Auth(object):
-    """
-    Login using x509 cert from client.
-
-    from gluon.contrib.login_methods.x509_auth import X509Account
-    auth.settings.actions_disabled=['register','change_password',
-                                    'request_reset_password','profile']
-    auth.settings.login_form = X509Auth()
-
-    """
-
-    def __init__(self):
-        self.request = current.request
-        self.ssl_client_raw_cert = self.request.env.ssl_client_raw_cert
-
-        # rebuild the certificate passed by the env
-        # this is double work, but it is the only way
-        # since we cannot access the web server ssl engine directly
-
-        if self.ssl_client_raw_cert:
-
-            x509 = X509.load_cert_string(
-                self.ssl_client_raw_cert, X509.FORMAT_PEM)
-            # extract it from the cert
-            self.serial = self.request.env.ssl_client_serial or (
-                '%x' % x509.get_serial_number()).upper()
-
-            subject = x509.get_subject()
-
-            # Reordering the subject map to a usable Storage map
-            # this allows us a cleaner syntax:
-            # cn = self.subject.cn
-            self.subject = Storage(filter(None,
-                                          map(lambda x:
-                                              (x, map(lambda y:
-                                                      y.get_data(
-                                                      ).as_text(),
-                                                      subject.get_entries_by_nid(subject.nid[x]))),
-                                              subject.nid.keys())))
-
-    def login_form(self, **args):
-        raise HTTP(403, 'Login not allowed. No valid x509 crentials')
-
-    def login_url(self, next="/"):
-        raise HTTP(403, 'Login not allowed. No valid x509 crentials')
-
-    def logout_url(self, next="/"):
-        return next
-
-    def get_user(self):
-        '''Returns the user info contained in the certificate.
-        '''
-
-        # We did not get the client cert?
-        if not self.ssl_client_raw_cert:
-            return None
-
-        # Try to reconstruct some useful info for web2py auth machinery
-
-        p = profile = dict()
-
-        username = p['username'] = reduce(lambda a, b: '%s | %s' % (
-            a, b), self.subject.CN or self.subject.commonName)
-        p['first_name'] = reduce(lambda a, b: '%s | %s' % (a, b),
-                                 self.subject.givenName or username)
-        p['last_name'] = reduce(
-            lambda a, b: '%s | %s' % (a, b), self.subject.surname)
-        p['email'] = reduce(lambda a, b: '%s | %s' % (
-            a, b), self.subject.Email or self.subject.emailAddress)
-
-        # IMPORTANT WE USE THE CERT SERIAL AS UNIQUE KEY FOR THE USER
-        p['registration_id'] = self.serial
-
-        # If the auth table has a field certificate it will be used to
-        # save a PEM encoded copy of the user certificate.
-
-        p['certificate'] = self.ssl_client_raw_cert
-
-        return profile

+ 0 - 1
frameworks/Python/web2py/web2py/gluon/contrib/markdown/LICENSE

@@ -1 +0,0 @@
-markdown2.py is released under MIT license.

+ 0 - 17
frameworks/Python/web2py/web2py/gluon/contrib/markdown/__init__.py

@@ -1,17 +0,0 @@
-from markdown2 import *
-from gluon.html import XML
-
-def WIKI(text, encoding="utf8", safe_mode='escape', html4tags=False, **attributes):
-    if not text:
-        test = ''
-    if attributes.has_key('extras'):
-        extras = attributes['extras']
-        del attributes['extras']
-    else:
-        extras=None
-    text = text.decode(encoding,'replace')
-
-    return XML(markdown(text,extras=extras,
-                        safe_mode=safe_mode, html4tags=html4tags)\
-                   .encode(encoding,'xmlcharrefreplace'),**attributes)
-

+ 0 - 2365
frameworks/Python/web2py/web2py/gluon/contrib/markdown/markdown2.py

@@ -1,2365 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2012 Trent Mick.
-# Copyright (c) 2007-2008 ActiveState Corp.
-# License: MIT (http://www.opensource.org/licenses/mit-license.php)
-
-from __future__ import generators
-
-r"""A fast and complete Python implementation of Markdown.
-
-[from http://daringfireball.net/projects/markdown/]
-> Markdown is a text-to-HTML filter; it translates an easy-to-read /
-> easy-to-write structured text format into HTML.  Markdown's text
-> format is most similar to that of plain text email, and supports
-> features such as headers, *emphasis*, code blocks, blockquotes, and
-> links.
->
-> Markdown's syntax is designed not as a generic markup language, but
-> specifically to serve as a front-end to (X)HTML. You can use span-level
-> HTML tags anywhere in a Markdown document, and you can use block level
-> HTML tags (like <div> and <table> as well).
-
-Module usage:
-
-    >>> import markdown2
-    >>> markdown2.markdown("*boo!*")  # or use `html = markdown_path(PATH)`
-    u'<p><em>boo!</em></p>\n'
-
-    >>> markdowner = Markdown()
-    >>> markdowner.convert("*boo!*")
-    u'<p><em>boo!</em></p>\n'
-    >>> markdowner.convert("**boom!**")
-    u'<p><strong>boom!</strong></p>\n'
-
-This implementation of Markdown implements the full "core" syntax plus a
-number of extras (e.g., code syntax coloring, footnotes) as described on
-<https://github.com/trentm/python-markdown2/wiki/Extras>.
-"""
-
-cmdln_desc = """A fast and complete Python implementation of Markdown, a
-text-to-HTML conversion tool for web writers.
-
-Supported extra syntax options (see -x|--extras option below and
-see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
-
-* code-friendly: Disable _ and __ for em and strong.
-* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
-* fenced-code-blocks: Allows a code block to not have to be indented
-  by fencing it with '```' on a line before and after. Based on
-  <http://github.github.com/github-flavored-markdown/> with support for
-  syntax highlighting.
-* footnotes: Support footnotes as in use on daringfireball.net and
-  implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
-* header-ids: Adds "id" attributes to headers. The id value is a slug of
-  the header text.
-* html-classes: Takes a dict mapping html tag names (lowercase) to a
-  string to use for a "class" tag attribute. Currently only supports
-  "pre" and "code" tags. Add an issue if you require this for other tags.
-* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
-  have markdown processing be done on its contents. Similar to
-  <http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
-  some limitations.
-* metadata: Extract metadata from a leading '---'-fenced block.
-  See <https://github.com/trentm/python-markdown2/issues/77> for details.
-* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
-  <http://en.wikipedia.org/wiki/Nofollow>.
-* pyshell: Treats unindented Python interactive shell sessions as <code>
-  blocks.
-* link-patterns: Auto-link given regex patterns in text (e.g. bug number
-  references, revision number references).
-* smarty-pants: Replaces ' and " with curly quotation marks or curly
-  apostrophes.  Replaces --, ---, ..., and . . . with en dashes, em dashes,
-  and ellipses.
-* toc: The returned HTML string gets a new "toc_html" attribute which is
-  a Table of Contents for the document. (experimental)
-* xml: Passes one-liner processing instructions and namespaced XML tags.
-* wiki-tables: Google Code Wiki-style tables. See
-  <http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
-"""
-
-# Dev Notes:
-# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
-#   not yet sure if there implications with this. Compare 'pydoc sre'
-#   and 'perldoc perlre'.
-
-__version_info__ = (2, 2, 4)
-__version__ = '.'.join(map(str, __version_info__))
-__author__ = "Trent Mick"
-
-import os
-import sys
-from pprint import pprint
-import re
-import logging
-try:
-    from hashlib import md5
-except ImportError:
-    from md5 import md5
-import optparse
-from random import random, randint
-import codecs
-
-
-#---- Python version compat
-
-try:
-    from urllib.parse import quote # python3
-except ImportError:
-    from urllib import quote # python2
-
-if sys.version_info[:2] < (2,4):
-    from sets import Set as set
-    def reversed(sequence):
-        for i in sequence[::-1]:
-            yield i
-
-# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
-if sys.version_info[0] <= 2:
-    py3 = False
-    try:
-        bytes
-    except NameError:
-        bytes = str
-    base_string_type = basestring
-elif sys.version_info[0] >= 3:
-    py3 = True
-    unicode = str
-    base_string_type = str
-
-
-
-#---- globals
-
-DEBUG = False
-log = logging.getLogger("markdown")
-
-DEFAULT_TAB_WIDTH = 4
-
-
-SECRET_SALT = bytes(randint(0, 1000000))
-def _hash_text(s):
-    return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
-
-# Table of hash values for escaped characters:
-g_escape_table = dict([(ch, _hash_text(ch))
-    for ch in '\\`*_{}[]()>#+-.!'])
-
-
-
-#---- exceptions
-
-class MarkdownError(Exception):
-    pass
-
-
-
-#---- public api
-
-def markdown_path(path, encoding="utf-8",
-                  html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
-                  safe_mode=None, extras=None, link_patterns=None,
-                  use_file_vars=False):
-    fp = codecs.open(path, 'r', encoding)
-    text = fp.read()
-    fp.close()
-    return Markdown(html4tags=html4tags, tab_width=tab_width,
-                    safe_mode=safe_mode, extras=extras,
-                    link_patterns=link_patterns,
-                    use_file_vars=use_file_vars).convert(text)
-
-def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
-             safe_mode=None, extras=None, link_patterns=None,
-             use_file_vars=False):
-    return Markdown(html4tags=html4tags, tab_width=tab_width,
-                    safe_mode=safe_mode, extras=extras,
-                    link_patterns=link_patterns,
-                    use_file_vars=use_file_vars).convert(text)
-
-class Markdown(object):
-    # The dict of "extras" to enable in processing -- a mapping of
-    # extra name to argument for the extra. Most extras do not have an
-    # argument, in which case the value is None.
-    #
-    # This can be set via (a) subclassing and (b) the constructor
-    # "extras" argument.
-    extras = None
-
-    urls = None
-    titles = None
-    html_blocks = None
-    html_spans = None
-    html_removed_text = "[HTML_REMOVED]"  # for compat with markdown.py
-
-    # Used to track when we're inside an ordered or unordered list
-    # (see _ProcessListItems() for details):
-    list_level = 0
-
-    _ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
-
-    def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
-                 extras=None, link_patterns=None, use_file_vars=False):
-        if html4tags:
-            self.empty_element_suffix = ">"
-        else:
-            self.empty_element_suffix = " />"
-        self.tab_width = tab_width
-
-        # For compatibility with earlier markdown2.py and with
-        # markdown.py's safe_mode being a boolean,
-        #   safe_mode == True -> "replace"
-        if safe_mode is True:
-            self.safe_mode = "replace"
-        else:
-            self.safe_mode = safe_mode
-
-        # Massaging and building the "extras" info.
-        if self.extras is None:
-            self.extras = {}
-        elif not isinstance(self.extras, dict):
-            self.extras = dict([(e, None) for e in self.extras])
-        if extras:
-            if not isinstance(extras, dict):
-                extras = dict([(e, None) for e in extras])
-            self.extras.update(extras)
-        assert isinstance(self.extras, dict)
-        if "toc" in self.extras and not "header-ids" in self.extras:
-            self.extras["header-ids"] = None   # "toc" implies "header-ids"
-        self._instance_extras = self.extras.copy()
-
-        self.link_patterns = link_patterns
-        self.use_file_vars = use_file_vars
-        self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
-
-        self._escape_table = g_escape_table.copy()
-        if "smarty-pants" in self.extras:
-            self._escape_table['"'] = _hash_text('"')
-            self._escape_table["'"] = _hash_text("'")
-
-    def reset(self):
-        self.urls = {}
-        self.titles = {}
-        self.html_blocks = {}
-        self.html_spans = {}
-        self.list_level = 0
-        self.extras = self._instance_extras.copy()
-        if "footnotes" in self.extras:
-            self.footnotes = {}
-            self.footnote_ids = []
-        if "header-ids" in self.extras:
-            self._count_from_header_id = {} # no `defaultdict` in Python 2.4
-        if "metadata" in self.extras:
-            self.metadata = {}
-
-    # Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
-    # should only be used in <a> tags with an "href" attribute.
-    _a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
-
-    def convert(self, text):
-        """Convert the given text."""
-        # Main function. The order in which other subs are called here is
-        # essential. Link and image substitutions need to happen before
-        # _EscapeSpecialChars(), so that any *'s or _'s in the <a>
-        # and <img> tags get encoded.
-
-        # Clear the global hashes. If we don't clear these, you get conflicts
-        # from other articles when generating a page which contains more than
-        # one article (e.g. an index page that shows the N most recent
-        # articles):
-        self.reset()
-
-        if not isinstance(text, unicode):
-            #TODO: perhaps shouldn't presume UTF-8 for string input?
-            text = unicode(text, 'utf-8')
-
-        if self.use_file_vars:
-            # Look for emacs-style file variable hints.
-            emacs_vars = self._get_emacs_vars(text)
-            if "markdown-extras" in emacs_vars:
-                splitter = re.compile("[ ,]+")
-                for e in splitter.split(emacs_vars["markdown-extras"]):
-                    if '=' in e:
-                        ename, earg = e.split('=', 1)
-                        try:
-                            earg = int(earg)
-                        except ValueError:
-                            pass
-                    else:
-                        ename, earg = e, None
-                    self.extras[ename] = earg
-
-        # Standardize line endings:
-        text = re.sub("\r\n|\r", "\n", text)
-
-        # Make sure $text ends with a couple of newlines:
-        text += "\n\n"
-
-        # Convert all tabs to spaces.
-        text = self._detab(text)
-
-        # Strip any lines consisting only of spaces and tabs.
-        # This makes subsequent regexen easier to write, because we can
-        # match consecutive blank lines with /\n+/ instead of something
-        # contorted like /[ \t]*\n+/ .
-        text = self._ws_only_line_re.sub("", text)
-
-        # strip metadata from head and extract
-        if "metadata" in self.extras:
-            text = self._extract_metadata(text)
-
-        text = self.preprocess(text)
-
-        if "fenced-code-blocks" in self.extras and not self.safe_mode:
-            text = self._do_fenced_code_blocks(text)
-
-        if self.safe_mode:
-            text = self._hash_html_spans(text)
-
-        # Turn block-level HTML blocks into hash entries
-        text = self._hash_html_blocks(text, raw=True)
-
-        if "fenced-code-blocks" in self.extras and self.safe_mode:
-            text = self._do_fenced_code_blocks(text)
-
-        # Strip link definitions, store in hashes.
-        if "footnotes" in self.extras:
-            # Must do footnotes first because an unlucky footnote defn
-            # looks like a link defn:
-            #   [^4]: this "looks like a link defn"
-            text = self._strip_footnote_definitions(text)
-        text = self._strip_link_definitions(text)
-
-        text = self._run_block_gamut(text)
-
-        if "footnotes" in self.extras:
-            text = self._add_footnotes(text)
-
-        text = self.postprocess(text)
-
-        text = self._unescape_special_chars(text)
-
-        if self.safe_mode:
-            text = self._unhash_html_spans(text)
-
-        if "nofollow" in self.extras:
-            text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
-
-        text += "\n"
-
-        rv = UnicodeWithAttrs(text)
-        if "toc" in self.extras:
-            rv._toc = self._toc
-        if "metadata" in self.extras:
-            rv.metadata = self.metadata
-        return rv
-
-    def postprocess(self, text):
-        """A hook for subclasses to do some postprocessing of the html, if
-        desired. This is called before unescaping of special chars and
-        unhashing of raw HTML spans.
-        """
-        return text
-
-    def preprocess(self, text):
-        """A hook for subclasses to do some preprocessing of the Markdown, if
-        desired. This is called after basic formatting of the text, but prior
-        to any extras, safe mode, etc. processing.
-        """
-        return text
-
-    # Is metadata if the content starts with '---'-fenced `key: value`
-    # pairs. E.g. (indented for presentation):
-    #   ---
-    #   foo: bar
-    #   another-var: blah blah
-    #   ---
-    _metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
-
-    def _extract_metadata(self, text):
-        # fast test
-        if not text.startswith("---"):
-            return text
-        match = self._metadata_pat.match(text)
-        if not match:
-            return text
-
-        tail = text[len(match.group(0)):]
-        metadata_str = match.group(1).strip()
-        for line in metadata_str.split('\n'):
-            key, value = line.split(':', 1)
-            self.metadata[key.strip()] = value.strip()
-
-        return tail
-
-
-    _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
-    # This regular expression is intended to match blocks like this:
-    #    PREFIX Local Variables: SUFFIX
-    #    PREFIX mode: Tcl SUFFIX
-    #    PREFIX End: SUFFIX
-    # Some notes:
-    # - "[ \t]" is used instead of "\s" to specifically exclude newlines
-    # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
-    #   not like anything other than Unix-style line terminators.
-    _emacs_local_vars_pat = re.compile(r"""^
-        (?P<prefix>(?:[^\r\n|\n|\r])*?)
-        [\ \t]*Local\ Variables:[\ \t]*
-        (?P<suffix>.*?)(?:\r\n|\n|\r)
-        (?P<content>.*?\1End:)
-        """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
-
-    def _get_emacs_vars(self, text):
-        """Return a dictionary of emacs-style local variables.
-
-        Parsing is done loosely according to this spec (and according to
-        some in-practice deviations from this):
-        http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
-        """
-        emacs_vars = {}
-        SIZE = pow(2, 13) # 8kB
-
-        # Search near the start for a '-*-'-style one-liner of variables.
-        head = text[:SIZE]
-        if "-*-" in head:
-            match = self._emacs_oneliner_vars_pat.search(head)
-            if match:
-                emacs_vars_str = match.group(1)
-                assert '\n' not in emacs_vars_str
-                emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
-                                  if s.strip()]
-                if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
-                    # While not in the spec, this form is allowed by emacs:
-                    #   -*- Tcl -*-
-                    # where the implied "variable" is "mode". This form
-                    # is only allowed if there are no other variables.
-                    emacs_vars["mode"] = emacs_var_strs[0].strip()
-                else:
-                    for emacs_var_str in emacs_var_strs:
-                        try:
-                            variable, value = emacs_var_str.strip().split(':', 1)
-                        except ValueError:
-                            log.debug("emacs variables error: malformed -*- "
-                                      "line: %r", emacs_var_str)
-                            continue
-                        # Lowercase the variable name because Emacs allows "Mode"
-                        # or "mode" or "MoDe", etc.
-                        emacs_vars[variable.lower()] = value.strip()
-
-        tail = text[-SIZE:]
-        if "Local Variables" in tail:
-            match = self._emacs_local_vars_pat.search(tail)
-            if match:
-                prefix = match.group("prefix")
-                suffix = match.group("suffix")
-                lines = match.group("content").splitlines(0)
-                #print "prefix=%r, suffix=%r, content=%r, lines: %s"\
-                #      % (prefix, suffix, match.group("content"), lines)
-
-                # Validate the Local Variables block: proper prefix and suffix
-                # usage.
-                for i, line in enumerate(lines):
-                    if not line.startswith(prefix):
-                        log.debug("emacs variables error: line '%s' "
-                                  "does not use proper prefix '%s'"
-                                  % (line, prefix))
-                        return {}
-                    # Don't validate suffix on last line. Emacs doesn't care,
-                    # neither should we.
-                    if i != len(lines)-1 and not line.endswith(suffix):
-                        log.debug("emacs variables error: line '%s' "
-                                  "does not use proper suffix '%s'"
-                                  % (line, suffix))
-                        return {}
-
-                # Parse out one emacs var per line.
-                continued_for = None
-                for line in lines[:-1]: # no var on the last line ("PREFIX End:")
-                    if prefix: line = line[len(prefix):] # strip prefix
-                    if suffix: line = line[:-len(suffix)] # strip suffix
-                    line = line.strip()
-                    if continued_for:
-                        variable = continued_for
-                        if line.endswith('\\'):
-                            line = line[:-1].rstrip()
-                        else:
-                            continued_for = None
-                        emacs_vars[variable] += ' ' + line
-                    else:
-                        try:
-                            variable, value = line.split(':', 1)
-                        except ValueError:
-                            log.debug("local variables error: missing colon "
-                                      "in local variables entry: '%s'" % line)
-                            continue
-                        # Do NOT lowercase the variable name, because Emacs only
-                        # allows "mode" (and not "Mode", "MoDe", etc.) in this block.
-                        value = value.strip()
-                        if value.endswith('\\'):
-                            value = value[:-1].rstrip()
-                            continued_for = variable
-                        else:
-                            continued_for = None
-                        emacs_vars[variable] = value
-
-        # Unquote values.
-        for var, val in list(emacs_vars.items()):
-            if len(val) > 1 and (val.startswith('"') and val.endswith('"')
-               or val.startswith('"') and val.endswith('"')):
-                emacs_vars[var] = val[1:-1]
-
-        return emacs_vars
-
-    # Cribbed from a post by Bart Lateur:
-    # <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
-    _detab_re = re.compile(r'(.*?)\t', re.M)
-    def _detab_sub(self, match):
-        g1 = match.group(1)
-        return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
-    def _detab(self, text):
-        r"""Remove (leading?) tabs from a file.
-
-            >>> m = Markdown()
-            >>> m._detab("\tfoo")
-            '    foo'
-            >>> m._detab("  \tfoo")
-            '    foo'
-            >>> m._detab("\t  foo")
-            '      foo'
-            >>> m._detab("  foo")
-            '  foo'
-            >>> m._detab("  foo\n\tbar\tblam")
-            '  foo\n    bar blam'
-        """
-        if '\t' not in text:
-            return text
-        return self._detab_re.subn(self._detab_sub, text)[0]
-
-    # I broke out the html5 tags here and add them to _block_tags_a and
-    # _block_tags_b.  This way html5 tags are easy to keep track of.
-    _html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
-
-    _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
-    _block_tags_a += _html5tags
-
-    _strict_tag_block_re = re.compile(r"""
-        (                       # save in \1
-            ^                   # start of line  (with re.M)
-            <(%s)               # start tag = \2
-            \b                  # word break
-            (.*\n)*?            # any number of lines, minimally matching
-            </\2>               # the matching end tag
-            [ \t]*              # trailing spaces/tabs
-            (?=\n+|\Z)          # followed by a newline or end of document
-        )
-        """ % _block_tags_a,
-        re.X | re.M)
-
-    _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
-    _block_tags_b += _html5tags
-
-    _liberal_tag_block_re = re.compile(r"""
-        (                       # save in \1
-            ^                   # start of line  (with re.M)
-            <(%s)               # start tag = \2
-            \b                  # word break
-            (.*\n)*?            # any number of lines, minimally matching
-            .*</\2>             # the matching end tag
-            [ \t]*              # trailing spaces/tabs
-            (?=\n+|\Z)          # followed by a newline or end of document
-        )
-        """ % _block_tags_b,
-        re.X | re.M)
-
-    _html_markdown_attr_re = re.compile(
-        r'''\s+markdown=("1"|'1')''')
-    def _hash_html_block_sub(self, match, raw=False):
-        html = match.group(1)
-        if raw and self.safe_mode:
-            html = self._sanitize_html(html)
-        elif 'markdown-in-html' in self.extras and 'markdown=' in html:
-            first_line = html.split('\n', 1)[0]
-            m = self._html_markdown_attr_re.search(first_line)
-            if m:
-                lines = html.split('\n')
-                middle = '\n'.join(lines[1:-1])
-                last_line = lines[-1]
-                first_line = first_line[:m.start()] + first_line[m.end():]
-                f_key = _hash_text(first_line)
-                self.html_blocks[f_key] = first_line
-                l_key = _hash_text(last_line)
-                self.html_blocks[l_key] = last_line
-                return ''.join(["\n\n", f_key,
-                    "\n\n", middle, "\n\n",
-                    l_key, "\n\n"])
-        key = _hash_text(html)
-        self.html_blocks[key] = html
-        return "\n\n" + key + "\n\n"
-
-    def _hash_html_blocks(self, text, raw=False):
-        """Hashify HTML blocks
-
-        We only want to do this for block-level HTML tags, such as headers,
-        lists, and tables. That's because we still want to wrap <p>s around
-        "paragraphs" that are wrapped in non-block-level tags, such as anchors,
-        phrase emphasis, and spans. The list of tags we're looking for is
-        hard-coded.
-
-        @param raw {boolean} indicates if these are raw HTML blocks in
-            the original source. It makes a difference in "safe" mode.
-        """
-        if '<' not in text:
-            return text
-
-        # Pass `raw` value into our calls to self._hash_html_block_sub.
-        hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
-
-        # First, look for nested blocks, e.g.:
-        #   <div>
-        #       <div>
-        #       tags for inner block must be indented.
-        #       </div>
-        #   </div>
-        #
-        # The outermost tags must start at the left margin for this to match, and
-        # the inner nested divs must be indented.
-        # We need to do this before the next, more liberal match, because the next
-        # match will start at the first `<div>` and stop at the first `</div>`.
-        text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
-
-        # Now match more liberally, simply from `\n<tag>` to `</tag>\n`
-        text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
-
-        # Special case just for <hr />. It was easier to make a special
-        # case than to make the other regex more complicated.
-        if "<hr" in text:
-            _hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
-            text = _hr_tag_re.sub(hash_html_block_sub, text)
-
-        # Special case for standalone HTML comments:
-        if "<!--" in text:
-            start = 0
-            while True:
-                # Delimiters for next comment block.
-                try:
-                    start_idx = text.index("<!--", start)
-                except ValueError:
-                    break
-                try:
-                    end_idx = text.index("-->", start_idx) + 3
-                except ValueError:
-                    break
-
-                # Start position for next comment block search.
-                start = end_idx
-
-                # Validate whitespace before comment.
-                if start_idx:
-                    # - Up to `tab_width - 1` spaces before start_idx.
-                    for i in range(self.tab_width - 1):
-                        if text[start_idx - 1] != ' ':
-                            break
-                        start_idx -= 1
-                        if start_idx == 0:
-                            break
-                    # - Must be preceded by 2 newlines or hit the start of
-                    #   the document.
-                    if start_idx == 0:
-                        pass
-                    elif start_idx == 1 and text[0] == '\n':
-                        start_idx = 0  # to match minute detail of Markdown.pl regex
-                    elif text[start_idx-2:start_idx] == '\n\n':
-                        pass
-                    else:
-                        break
-
-                # Validate whitespace after comment.
-                # - Any number of spaces and tabs.
-                while end_idx < len(text):
-                    if text[end_idx] not in ' \t':
-                        break
-                    end_idx += 1
-                # - Must be following by 2 newlines or hit end of text.
-                if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
-                    continue
-
-                # Escape and hash (must match `_hash_html_block_sub`).
-                html = text[start_idx:end_idx]
-                if raw and self.safe_mode:
-                    html = self._sanitize_html(html)
-                key = _hash_text(html)
-                self.html_blocks[key] = html
-                text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
-
-        if "xml" in self.extras:
-            # Treat XML processing instructions and namespaced one-liner
-            # tags as if they were block HTML tags. E.g., if standalone
-            # (i.e. are their own paragraph), the following do not get
-            # wrapped in a <p> tag:
-            #    <?foo bar?>
-            #
-            #    <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
-            _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
-            text = _xml_oneliner_re.sub(hash_html_block_sub, text)
-
-        return text
-
-    def _strip_link_definitions(self, text):
-        # Strips link definitions from text, stores the URLs and titles in
-        # hash references.
-        less_than_tab = self.tab_width - 1
-
-        # Link defs are in the form:
-        #   [id]: url "optional title"
-        _link_def_re = re.compile(r"""
-            ^[ ]{0,%d}\[(.+)\]: # id = \1
-              [ \t]*
-              \n?               # maybe *one* newline
-              [ \t]*
-            <?(.+?)>?           # url = \2
-              [ \t]*
-            (?:
-                \n?             # maybe one newline
-                [ \t]*
-                (?<=\s)         # lookbehind for whitespace
-                ['"(]
-                ([^\n]*)        # title = \3
-                ['")]
-                [ \t]*
-            )?  # title is optional
-            (?:\n+|\Z)
-            """ % less_than_tab, re.X | re.M | re.U)
-        return _link_def_re.sub(self._extract_link_def_sub, text)
-
-    def _extract_link_def_sub(self, match):
-        id, url, title = match.groups()
-        key = id.lower()    # Link IDs are case-insensitive
-        self.urls[key] = self._encode_amps_and_angles(url)
-        if title:
-            self.titles[key] = title
-        return ""
-
-    def _extract_footnote_def_sub(self, match):
-        id, text = match.groups()
-        text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
-        normed_id = re.sub(r'\W', '-', id)
-        # Ensure footnote text ends with a couple newlines (for some
-        # block gamut matches).
-        self.footnotes[normed_id] = text + "\n\n"
-        return ""
-
-    def _strip_footnote_definitions(self, text):
-        """A footnote definition looks like this:
-
-            [^note-id]: Text of the note.
-
-                May include one or more indented paragraphs.
-
-        Where,
-        - The 'note-id' can be pretty much anything, though typically it
-          is the number of the footnote.
-        - The first paragraph may start on the next line, like so:
-
-            [^note-id]:
-                Text of the note.
-        """
-        less_than_tab = self.tab_width - 1
-        footnote_def_re = re.compile(r'''
-            ^[ ]{0,%d}\[\^(.+)\]:   # id = \1
-            [ \t]*
-            (                       # footnote text = \2
-              # First line need not start with the spaces.
-              (?:\s*.*\n+)
-              (?:
-                (?:[ ]{%d} | \t)  # Subsequent lines must be indented.
-                .*\n+
-              )*
-            )
-            # Lookahead for non-space at line-start, or end of doc.
-            (?:(?=^[ ]{0,%d}\S)|\Z)
-            ''' % (less_than_tab, self.tab_width, self.tab_width),
-            re.X | re.M)
-        return footnote_def_re.sub(self._extract_footnote_def_sub, text)
-
-    _hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
-
-    def _run_block_gamut(self, text):
-        # These are all the transformations that form block-level
-        # tags like paragraphs, headers, and list items.
-
-        if "fenced-code-blocks" in self.extras:
-            text = self._do_fenced_code_blocks(text)
-
-        text = self._do_headers(text)
-
-        # Do Horizontal Rules:
-        # On the number of spaces in horizontal rules: The spec is fuzzy: "If
-        # you wish, you may use spaces between the hyphens or asterisks."
-        # Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
-        # hr chars to one or two. We'll reproduce that limit here.
-        hr = "\n<hr"+self.empty_element_suffix+"\n"
-        text = re.sub(self._hr_re, hr, text)
-
-        text = self._do_lists(text)
-
-        if "pyshell" in self.extras:
-            text = self._prepare_pyshell_blocks(text)
-        if "wiki-tables" in self.extras:
-            text = self._do_wiki_tables(text)
-
-        text = self._do_code_blocks(text)
-
-        text = self._do_block_quotes(text)
-
-        # We already ran _HashHTMLBlocks() before, in Markdown(), but that
-        # was to escape raw HTML in the original Markdown source. This time,
-        # we're escaping the markup we've just created, so that we don't wrap
-        # <p> tags around block-level tags.
-        text = self._hash_html_blocks(text)
-
-        text = self._form_paragraphs(text)
-
-        return text
-
-    def _pyshell_block_sub(self, match):
-        lines = match.group(0).splitlines(0)
-        _dedentlines(lines)
-        indent = ' ' * self.tab_width
-        s = ('\n' # separate from possible cuddled paragraph
-             + indent + ('\n'+indent).join(lines)
-             + '\n\n')
-        return s
-
-    def _prepare_pyshell_blocks(self, text):
-        """Ensure that Python interactive shell sessions are put in
-        code blocks -- even if not properly indented.
-        """
-        if ">>>" not in text:
-            return text
-
-        less_than_tab = self.tab_width - 1
-        _pyshell_block_re = re.compile(r"""
-            ^([ ]{0,%d})>>>[ ].*\n   # first line
-            ^(\1.*\S+.*\n)*         # any number of subsequent lines
-            ^\n                     # ends with a blank line
-            """ % less_than_tab, re.M | re.X)
-
-        return _pyshell_block_re.sub(self._pyshell_block_sub, text)
-
-    def _wiki_table_sub(self, match):
-        ttext = match.group(0).strip()
-        #print 'wiki table: %r' % match.group(0)
-        rows = []
-        for line in ttext.splitlines(0):
-            line = line.strip()[2:-2].strip()
-            row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
-            rows.append(row)
-        #pprint(rows)
-        hlines = ['<table>', '<tbody>']
-        for row in rows:
-            hrow = ['<tr>']
-            for cell in row:
-                hrow.append('<td>')
-                hrow.append(self._run_span_gamut(cell))
-                hrow.append('</td>')
-            hrow.append('</tr>')
-            hlines.append(''.join(hrow))
-        hlines += ['</tbody>', '</table>']
-        return '\n'.join(hlines) + '\n'
-
-    def _do_wiki_tables(self, text):
-        # Optimization.
-        if "||" not in text:
-            return text
-
-        less_than_tab = self.tab_width - 1
-        wiki_table_re = re.compile(r'''
-            (?:(?<=\n\n)|\A\n?)            # leading blank line
-            ^([ ]{0,%d})\|\|.+?\|\|[ ]*\n  # first line
-            (^\1\|\|.+?\|\|\n)*        # any number of subsequent lines
-            ''' % less_than_tab, re.M | re.X)
-        return wiki_table_re.sub(self._wiki_table_sub, text)
-
-    def _run_span_gamut(self, text):
-        # These are all the transformations that occur *within* block-level
-        # tags like paragraphs, headers, and list items.
-
-        text = self._do_code_spans(text)
-
-        text = self._escape_special_chars(text)
-
-        # Process anchor and image tags.
-        text = self._do_links(text)
-
-        # Make links out of things like `<http://example.com/>`
-        # Must come after _do_links(), because you can use < and >
-        # delimiters in inline links like [this](<url>).
-        text = self._do_auto_links(text)
-
-        if "link-patterns" in self.extras:
-            text = self._do_link_patterns(text)
-
-        text = self._encode_amps_and_angles(text)
-
-        text = self._do_italics_and_bold(text)
-
-        if "smarty-pants" in self.extras:
-            text = self._do_smart_punctuation(text)
-
-        # Do hard breaks:
-        if "break-on-newline" in self.extras:
-            text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
-        else:
-            text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
-
-        return text
-
-    # "Sorta" because auto-links are identified as "tag" tokens.
-    _sorta_html_tokenize_re = re.compile(r"""
-        (
-            # tag
-            </?
-            (?:\w+)                                     # tag name
-            (?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))*  # attributes
-            \s*/?>
-            |
-            # auto-link (e.g., <http://www.activestate.com/>)
-            <\w+[^>]*>
-            |
-            <!--.*?-->      # comment
-            |
-            <\?.*?\?>       # processing instruction
-        )
-        """, re.X)
-
-    def _escape_special_chars(self, text):
-        # Python markdown note: the HTML tokenization here differs from
-        # that in Markdown.pl, hence the behaviour for subtle cases can
-        # differ (I believe the tokenizer here does a better job because
-        # it isn't susceptible to unmatched '<' and '>' in HTML tags).
-        # Note, however, that '>' is not allowed in an auto-link URL
-        # here.
-        escaped = []
-        is_html_markup = False
-        for token in self._sorta_html_tokenize_re.split(text):
-            if is_html_markup:
-                # Within tags/HTML-comments/auto-links, encode * and _
-                # so they don't conflict with their use in Markdown for
-                # italics and strong.  We're replacing each such
-                # character with its corresponding MD5 checksum value;
-                # this is likely overkill, but it should prevent us from
-                # colliding with the escape values by accident.
-                escaped.append(token.replace('*', self._escape_table['*'])
-                                    .replace('_', self._escape_table['_']))
-            else:
-                escaped.append(self._encode_backslash_escapes(token))
-            is_html_markup = not is_html_markup
-        return ''.join(escaped)
-
-    def _hash_html_spans(self, text):
-        # Used for safe_mode.
-
-        def _is_auto_link(s):
-            if ':' in s and self._auto_link_re.match(s):
-                return True
-            elif '@' in s and self._auto_email_link_re.match(s):
-                return True
-            return False
-
-        tokens = []
-        is_html_markup = False
-        for token in self._sorta_html_tokenize_re.split(text):
-            if is_html_markup and not _is_auto_link(token):
-                sanitized = self._sanitize_html(token)
-                key = _hash_text(sanitized)
-                self.html_spans[key] = sanitized
-                tokens.append(key)
-            else:
-                tokens.append(token)
-            is_html_markup = not is_html_markup
-        return ''.join(tokens)
-
-    def _unhash_html_spans(self, text):
-        for key, sanitized in list(self.html_spans.items()):
-            text = text.replace(key, sanitized)
-        return text
-
-    def _sanitize_html(self, s):
-        if self.safe_mode == "replace":
-            return self.html_removed_text
-        elif self.safe_mode == "escape":
-            replacements = [
-                ('&', '&amp;'),
-                ('<', '&lt;'),
-                ('>', '&gt;'),
-            ]
-            for before, after in replacements:
-                s = s.replace(before, after)
-            return s
-        else:
-            raise MarkdownError("invalid value for 'safe_mode': %r (must be "
-                                "'escape' or 'replace')" % self.safe_mode)
-
-    _inline_link_title = re.compile(r'''
-            (                   # \1
-              [ \t]+
-              (['"])            # quote char = \2
-              (?P<title>.*?)
-              \2
-            )?                  # title is optional
-          \)$
-        ''', re.X | re.S)
-    _tail_of_reference_link_re = re.compile(r'''
-          # Match tail of: [text][id]
-          [ ]?          # one optional space
-          (?:\n[ ]*)?   # one optional newline followed by spaces
-          \[
-            (?P<id>.*?)
-          \]
-        ''', re.X | re.S)
-
-    _whitespace = re.compile(r'\s*')
-
-    _strip_anglebrackets = re.compile(r'<(.*)>.*')
-
-    def _find_non_whitespace(self, text, start):
-        """Returns the index of the first non-whitespace character in text
-        after (and including) start
-        """
-        match = self._whitespace.match(text, start)
-        return match.end()
-
-    def _find_balanced(self, text, start, open_c, close_c):
-        """Returns the index where the open_c and close_c characters balance
-        out - the same number of open_c and close_c are encountered - or the
-        end of string if it's reached before the balance point is found.
-        """
-        i = start
-        l = len(text)
-        count = 1
-        while count > 0 and i < l:
-            if text[i] == open_c:
-                count += 1
-            elif text[i] == close_c:
-                count -= 1
-            i += 1
-        return i
-
-    def _extract_url_and_title(self, text, start):
-        """Extracts the url and (optional) title from the tail of a link"""
-        # text[start] equals the opening parenthesis
-        idx = self._find_non_whitespace(text, start+1)
-        if idx == len(text):
-            return None, None, None
-        end_idx = idx
-        has_anglebrackets = text[idx] == "<"
-        if has_anglebrackets:
-            end_idx = self._find_balanced(text, end_idx+1, "<", ">")
-        end_idx = self._find_balanced(text, end_idx, "(", ")")
-        match = self._inline_link_title.search(text, idx, end_idx)
-        if not match:
-            return None, None, None
-        url, title = text[idx:match.start()], match.group("title")
-        if has_anglebrackets:
-            url = self._strip_anglebrackets.sub(r'\1', url)
-        return url, title, end_idx
-
-    def _do_links(self, text):
-        """Turn Markdown link shortcuts into XHTML <a> and <img> tags.
-
-        This is a combination of Markdown.pl's _DoAnchors() and
-        _DoImages(). They are done together because that simplified the
-        approach. It was necessary to use a different approach than
-        Markdown.pl because of the lack of atomic matching support in
-        Python's regex engine used in $g_nested_brackets.
-        """
-        MAX_LINK_TEXT_SENTINEL = 3000  # markdown2 issue 24
-
-        # `anchor_allowed_pos` is used to support img links inside
-        # anchors, but not anchors inside anchors. An anchor's start
-        # pos must be `>= anchor_allowed_pos`.
-        anchor_allowed_pos = 0
-
-        curr_pos = 0
-        while True: # Handle the next link.
-            # The next '[' is the start of:
-            # - an inline anchor:   [text](url "title")
-            # - a reference anchor: [text][id]
-            # - an inline img:      ![text](url "title")
-            # - a reference img:    ![text][id]
-            # - a footnote ref:     [^id]
-            #   (Only if 'footnotes' extra enabled)
-            # - a footnote defn:    [^id]: ...
-            #   (Only if 'footnotes' extra enabled) These have already
-            #   been stripped in _strip_footnote_definitions() so no
-            #   need to watch for them.
-            # - a link definition:  [id]: url "title"
-            #   These have already been stripped in
-            #   _strip_link_definitions() so no need to watch for them.
-            # - not markup:         [...anything else...
-            try:
-                start_idx = text.index('[', curr_pos)
-            except ValueError:
-                break
-            text_length = len(text)
-
-            # Find the matching closing ']'.
-            # Markdown.pl allows *matching* brackets in link text so we
-            # will here too. Markdown.pl *doesn't* currently allow
-            # matching brackets in img alt text -- we'll differ in that
-            # regard.
-            bracket_depth = 0
-            for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
-                                            text_length)):
-                ch = text[p]
-                if ch == ']':
-                    bracket_depth -= 1
-                    if bracket_depth < 0:
-                        break
-                elif ch == '[':
-                    bracket_depth += 1
-            else:
-                # Closing bracket not found within sentinel length.
-                # This isn't markup.
-                curr_pos = start_idx + 1
-                continue
-            link_text = text[start_idx+1:p]
-
-            # Possibly a footnote ref?
-            if "footnotes" in self.extras and link_text.startswith("^"):
-                normed_id = re.sub(r'\W', '-', link_text[1:])
-                if normed_id in self.footnotes:
-                    self.footnote_ids.append(normed_id)
-                    result = '<sup class="footnote-ref" id="fnref-%s">' \
-                             '<a href="#fn-%s">%s</a></sup>' \
-                             % (normed_id, normed_id, len(self.footnote_ids))
-                    text = text[:start_idx] + result + text[p+1:]
-                else:
-                    # This id isn't defined, leave the markup alone.
-                    curr_pos = p+1
-                continue
-
-            # Now determine what this is by the remainder.
-            p += 1
-            if p == text_length:
-                return text
-
-            # Inline anchor or img?
-            if text[p] == '(': # attempt at perf improvement
-                url, title, url_end_idx = self._extract_url_and_title(text, p)
-                if url is not None:
-                    # Handle an inline anchor or img.
-                    is_img = start_idx > 0 and text[start_idx-1] == "!"
-                    if is_img:
-                        start_idx -= 1
-
-                    # We've got to encode these to avoid conflicting
-                    # with italics/bold.
-                    url = url.replace('*', self._escape_table['*']) \
-                             .replace('_', self._escape_table['_'])
-                    if title:
-                        title_str = ' title="%s"' % (
-                            _xml_escape_attr(title)
-                                .replace('*', self._escape_table['*'])
-                                .replace('_', self._escape_table['_']))
-                    else:
-                        title_str = ''
-                    if is_img:
-                        img_class_str = self._html_class_str_from_tag("img")
-                        result = '<img src="%s" alt="%s"%s%s%s' \
-                            % (url.replace('"', '&quot;'),
-                               _xml_escape_attr(link_text),
-                               title_str, img_class_str, self.empty_element_suffix)
-                        if "smarty-pants" in self.extras:
-                            result = result.replace('"', self._escape_table['"'])
-                        curr_pos = start_idx + len(result)
-                        text = text[:start_idx] + result + text[url_end_idx:]
-                    elif start_idx >= anchor_allowed_pos:
-                        result_head = '<a href="%s"%s>' % (url, title_str)
-                        result = '%s%s</a>' % (result_head, link_text)
-                        if "smarty-pants" in self.extras:
-                            result = result.replace('"', self._escape_table['"'])
-                        # <img> allowed from curr_pos on, <a> from
-                        # anchor_allowed_pos on.
-                        curr_pos = start_idx + len(result_head)
-                        anchor_allowed_pos = start_idx + len(result)
-                        text = text[:start_idx] + result + text[url_end_idx:]
-                    else:
-                        # Anchor not allowed here.
-                        curr_pos = start_idx + 1
-                    continue
-
-            # Reference anchor or img?
-            else:
-                match = self._tail_of_reference_link_re.match(text, p)
-                if match:
-                    # Handle a reference-style anchor or img.
-                    is_img = start_idx > 0 and text[start_idx-1] == "!"
-                    if is_img:
-                        start_idx -= 1
-                    link_id = match.group("id").lower()
-                    if not link_id:
-                        link_id = link_text.lower()  # for links like [this][]
-                    if link_id in self.urls:
-                        url = self.urls[link_id]
-                        # We've got to encode these to avoid conflicting
-                        # with italics/bold.
-                        url = url.replace('*', self._escape_table['*']) \
-                                 .replace('_', self._escape_table['_'])
-                        title = self.titles.get(link_id)
-                        if title:
-                            before = title
-                            title = _xml_escape_attr(title) \
-                                .replace('*', self._escape_table['*']) \
-                                .replace('_', self._escape_table['_'])
-                            title_str = ' title="%s"' % title
-                        else:
-                            title_str = ''
-                        if is_img:
-                            img_class_str = self._html_class_str_from_tag("img")
-                            result = '<img src="%s" alt="%s"%s%s%s' \
-                                % (url.replace('"', '&quot;'),
-                                   link_text.replace('"', '&quot;'),
-                                   title_str, img_class_str, self.empty_element_suffix)
-                            if "smarty-pants" in self.extras:
-                                result = result.replace('"', self._escape_table['"'])
-                            curr_pos = start_idx + len(result)
-                            text = text[:start_idx] + result + text[match.end():]
-                        elif start_idx >= anchor_allowed_pos:
-                            result = '<a href="%s"%s>%s</a>' \
-                                % (url, title_str, link_text)
-                            result_head = '<a href="%s"%s>' % (url, title_str)
-                            result = '%s%s</a>' % (result_head, link_text)
-                            if "smarty-pants" in self.extras:
-                                result = result.replace('"', self._escape_table['"'])
-                            # <img> allowed from curr_pos on, <a> from
-                            # anchor_allowed_pos on.
-                            curr_pos = start_idx + len(result_head)
-                            anchor_allowed_pos = start_idx + len(result)
-                            text = text[:start_idx] + result + text[match.end():]
-                        else:
-                            # Anchor not allowed here.
-                            curr_pos = start_idx + 1
-                    else:
-                        # This id isn't defined, leave the markup alone.
-                        curr_pos = match.end()
-                    continue
-
-            # Otherwise, it isn't markup.
-            curr_pos = start_idx + 1
-
-        return text
-
-    def header_id_from_text(self, text, prefix, n):
-        """Generate a header id attribute value from the given header
-        HTML content.
-
-        This is only called if the "header-ids" extra is enabled.
-        Subclasses may override this for different header ids.
-
-        @param text {str} The text of the header tag
-        @param prefix {str} The requested prefix for header ids. This is the
-            value of the "header-ids" extra key, if any. Otherwise, None.
-        @param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
-        @returns {str} The value for the header tag's "id" attribute. Return
-            None to not have an id attribute and to exclude this header from
-            the TOC (if the "toc" extra is specified).
-        """
-        header_id = _slugify(text)
-        if prefix and isinstance(prefix, base_string_type):
-            header_id = prefix + '-' + header_id
-        if header_id in self._count_from_header_id:
-            self._count_from_header_id[header_id] += 1
-            header_id += '-%s' % self._count_from_header_id[header_id]
-        else:
-            self._count_from_header_id[header_id] = 1
-        return header_id
-
-    _toc = None
-    def _toc_add_entry(self, level, id, name):
-        if self._toc is None:
-            self._toc = []
-        self._toc.append((level, id, self._unescape_special_chars(name)))
-
-    _h_re_base = r'''
-        (^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
-        |
-        (^(\#{1,6})  # \1 = string of #'s
-        [ \t]%s
-        (.+?)       # \2 = Header text
-        [ \t]*
-        (?<!\\)     # ensure not an escaped trailing '#'
-        \#*         # optional closing #'s (not counted)
-        \n+
-        )
-        '''
-
-    _h_re = re.compile(_h_re_base % '*', re.X | re.M)
-    _h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
-
-    def _h_sub(self, match):
-        if match.group(1) is not None:
-            # Setext header
-            n = {"=": 1, "-": 2}[match.group(3)[0]]
-            header_group = match.group(2)
-        else:
-            # atx header
-            n = len(match.group(5))
-            header_group = match.group(6)
-
-        demote_headers = self.extras.get("demote-headers")
-        if demote_headers:
-            n = min(n + demote_headers, 6)
-        header_id_attr = ""
-        if "header-ids" in self.extras:
-            header_id = self.header_id_from_text(header_group,
-                self.extras["header-ids"], n)
-            if header_id:
-                header_id_attr = ' id="%s"' % header_id
-        html = self._run_span_gamut(header_group)
-        if "toc" in self.extras and header_id:
-            self._toc_add_entry(n, header_id, html)
-        return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
-
-    def _do_headers(self, text):
-        # Setext-style headers:
-        #     Header 1
-        #     ========
-        #
-        #     Header 2
-        #     --------
-
-        # atx-style headers:
-        #   # Header 1
-        #   ## Header 2
-        #   ## Header 2 with closing hashes ##
-        #   ...
-        #   ###### Header 6
-
-        if 'tag-friendly' in self.extras:
-            return self._h_re_tag_friendly.sub(self._h_sub, text)
-        return self._h_re.sub(self._h_sub, text)
-
-    _marker_ul_chars  = '*+-'
-    _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
-    _marker_ul = '(?:[%s])' % _marker_ul_chars
-    _marker_ol = r'(?:\d+\.)'
-
-    def _list_sub(self, match):
-        lst = match.group(1)
-        lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
-        result = self._process_list_items(lst)
-        if self.list_level:
-            return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
-        else:
-            return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
-
-    def _do_lists(self, text):
-        # Form HTML ordered (numbered) and unordered (bulleted) lists.
-
-        # Iterate over each *non-overlapping* list match.
-        pos = 0
-        while True:
-            # Find the *first* hit for either list style (ul or ol). We
-            # match ul and ol separately to avoid adjacent lists of different
-            # types running into each other (see issue #16).
-            hits = []
-            for marker_pat in (self._marker_ul, self._marker_ol):
-                less_than_tab = self.tab_width - 1
-                whole_list = r'''
-                    (                   # \1 = whole list
-                      (                 # \2
-                        [ ]{0,%d}
-                        (%s)            # \3 = first list item marker
-                        [ \t]+
-                        (?!\ *\3\ )     # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
-                      )
-                      (?:.+?)
-                      (                 # \4
-                          \Z
-                        |
-                          \n{2,}
-                          (?=\S)
-                          (?!           # Negative lookahead for another list item marker
-                            [ \t]*
-                            %s[ \t]+
-                          )
-                      )
-                    )
-                ''' % (less_than_tab, marker_pat, marker_pat)
-                if self.list_level:  # sub-list
-                    list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
-                else:
-                    list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
-                                         re.X | re.M | re.S)
-                match = list_re.search(text, pos)
-                if match:
-                    hits.append((match.start(), match))
-            if not hits:
-                break
-            hits.sort()
-            match = hits[0][1]
-            start, end = match.span()
-            middle = self._list_sub(match)
-            text = text[:start] + middle + text[end:]
-            pos = start + len(middle) # start pos for next attempted match
-
-        return text
-
-    _list_item_re = re.compile(r'''
-        (\n)?                   # leading line = \1
-        (^[ \t]*)               # leading whitespace = \2
-        (?P<marker>%s) [ \t]+   # list marker = \3
-        ((?:.+?)                # list item text = \4
-         (\n{1,2}))             # eols = \5
-        (?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
-        ''' % (_marker_any, _marker_any),
-        re.M | re.X | re.S)
-
-    _last_li_endswith_two_eols = False
-    def _list_item_sub(self, match):
-        item = match.group(4)
-        leading_line = match.group(1)
-        leading_space = match.group(2)
-        if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
-            item = self._run_block_gamut(self._outdent(item))
-        else:
-            # Recursion for sub-lists:
-            item = self._do_lists(self._outdent(item))
-            if item.endswith('\n'):
-                item = item[:-1]
-            item = self._run_span_gamut(item)
-        self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
-        return "<li>%s</li>\n" % item
-
-    def _process_list_items(self, list_str):
-        # Process the contents of a single ordered or unordered list,
-        # splitting it into individual list items.
-
-        # The $g_list_level global keeps track of when we're inside a list.
-        # Each time we enter a list, we increment it; when we leave a list,
-        # we decrement. If it's zero, we're not in a list anymore.
-        #
-        # We do this because when we're not inside a list, we want to treat
-        # something like this:
-        #
-        #       I recommend upgrading to version
-        #       8. Oops, now this line is treated
-        #       as a sub-list.
-        #
-        # As a single paragraph, despite the fact that the second line starts
-        # with a digit-period-space sequence.
-        #
-        # Whereas when we're inside a list (or sub-list), that line will be
-        # treated as the start of a sub-list. What a kludge, huh? This is
-        # an aspect of Markdown's syntax that's hard to parse perfectly
-        # without resorting to mind-reading. Perhaps the solution is to
-        # change the syntax rules such that sub-lists must start with a
-        # starting cardinal number; e.g. "1." or "a.".
-        self.list_level += 1
-        self._last_li_endswith_two_eols = False
-        list_str = list_str.rstrip('\n') + '\n'
-        list_str = self._list_item_re.sub(self._list_item_sub, list_str)
-        self.list_level -= 1
-        return list_str
-
-    def _get_pygments_lexer(self, lexer_name):
-        try:
-            from pygments import lexers, util
-        except ImportError:
-            return None
-        try:
-            return lexers.get_lexer_by_name(lexer_name)
-        except util.ClassNotFound:
-            return None
-
-    def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
-        import pygments
-        import pygments.formatters
-
-        class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
-            def _wrap_code(self, inner):
-                """A function for use in a Pygments Formatter which
-                wraps in <code> tags.
-                """
-                yield 0, "<code>"
-                for tup in inner:
-                    yield tup
-                yield 0, "</code>"
-
-            def wrap(self, source, outfile):
-                """Return the source with a code, pre, and div."""
-                return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
-
-        formatter_opts.setdefault("cssclass", "codehilite")
-        formatter = HtmlCodeFormatter(**formatter_opts)
-        return pygments.highlight(codeblock, lexer, formatter)
-
-    def _code_block_sub(self, match, is_fenced_code_block=False):
-        lexer_name = None
-        if is_fenced_code_block:
-            lexer_name = match.group(1)
-            if lexer_name:
-                formatter_opts = self.extras['fenced-code-blocks'] or {}
-            codeblock = match.group(2)
-            codeblock = codeblock[:-1]  # drop one trailing newline
-        else:
-            codeblock = match.group(1)
-            codeblock = self._outdent(codeblock)
-            codeblock = self._detab(codeblock)
-            codeblock = codeblock.lstrip('\n')  # trim leading newlines
-            codeblock = codeblock.rstrip()      # trim trailing whitespace
-
-            # Note: "code-color" extra is DEPRECATED.
-            if "code-color" in self.extras and codeblock.startswith(":::"):
-                lexer_name, rest = codeblock.split('\n', 1)
-                lexer_name = lexer_name[3:].strip()
-                codeblock = rest.lstrip("\n")   # Remove lexer declaration line.
-                formatter_opts = self.extras['code-color'] or {}
-
-        if lexer_name:
-            def unhash_code( codeblock ):
-                for key, sanitized in list(self.html_spans.items()):
-                    codeblock = codeblock.replace(key, sanitized)
-                replacements = [
-                    ("&amp;", "&"),
-                    ("&lt;", "<"),
-                    ("&gt;", ">")
-                ]
-                for old, new in replacements:
-                    codeblock = codeblock.replace(old, new)
-                return codeblock
-            lexer = self._get_pygments_lexer(lexer_name)
-            if lexer:
-                codeblock = unhash_code( codeblock )
-                colored = self._color_with_pygments(codeblock, lexer,
-                                                    **formatter_opts)
-                return "\n\n%s\n\n" % colored
-
-        codeblock = self._encode_code(codeblock)
-        pre_class_str = self._html_class_str_from_tag("pre")
-        code_class_str = self._html_class_str_from_tag("code")
-        return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
-            pre_class_str, code_class_str, codeblock)
-
-    def _html_class_str_from_tag(self, tag):
-        """Get the appropriate ' class="..."' string (note the leading
-        space), if any, for the given tag.
-        """
-        if "html-classes" not in self.extras:
-            return ""
-        try:
-            html_classes_from_tag = self.extras["html-classes"]
-        except TypeError:
-            return ""
-        else:
-            if tag in html_classes_from_tag:
-                return ' class="%s"' % html_classes_from_tag[tag]
-        return ""
-
-    def _do_code_blocks(self, text):
-        """Process Markdown `<pre><code>` blocks."""
-        code_block_re = re.compile(r'''
-            (?:\n\n|\A\n?)
-            (               # $1 = the code block -- one or more lines, starting with a space/tab
-              (?:
-                (?:[ ]{%d} | \t)  # Lines must start with a tab or a tab-width of spaces
-                .*\n+
-              )+
-            )
-            ((?=^[ ]{0,%d}\S)|\Z)   # Lookahead for non-space at line-start, or end of doc
-            # Lookahead to make sure this block isn't already in a code block.
-            # Needed when syntax highlighting is being used.
-            (?![^<]*\</code\>)
-            ''' % (self.tab_width, self.tab_width),
-            re.M | re.X)
-        return code_block_re.sub(self._code_block_sub, text)
-
-    _fenced_code_block_re = re.compile(r'''
-        (?:\n\n|\A\n?)
-        ^```([\w+-]+)?[ \t]*\n      # opening fence, $1 = optional lang
-        (.*?)                       # $2 = code block content
-        ^```[ \t]*\n                # closing fence
-        ''', re.M | re.X | re.S)
-
-    def _fenced_code_block_sub(self, match):
-        return self._code_block_sub(match, is_fenced_code_block=True);
-
-    def _do_fenced_code_blocks(self, text):
-        """Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
-        return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
-
-    # Rules for a code span:
-    # - backslash escapes are not interpreted in a code span
-    # - to include one or or a run of more backticks the delimiters must
-    #   be a longer run of backticks
-    # - cannot start or end a code span with a backtick; pad with a
-    #   space and that space will be removed in the emitted HTML
-    # See `test/tm-cases/escapes.text` for a number of edge-case
-    # examples.
-    _code_span_re = re.compile(r'''
-            (?<!\\)
-            (`+)        # \1 = Opening run of `
-            (?!`)       # See Note A test/tm-cases/escapes.text
-            (.+?)       # \2 = The code block
-            (?<!`)
-            \1          # Matching closer
-            (?!`)
-        ''', re.X | re.S)
-
-    def _code_span_sub(self, match):
-        c = match.group(2).strip(" \t")
-        c = self._encode_code(c)
-        return "<code>%s</code>" % c
-
-    def _do_code_spans(self, text):
-        #   *   Backtick quotes are used for <code></code> spans.
-        #
-        #   *   You can use multiple backticks as the delimiters if you want to
-        #       include literal backticks in the code span. So, this input:
-        #
-        #         Just type ``foo `bar` baz`` at the prompt.
-        #
-        #       Will translate to:
-        #
-        #         <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
-        #
-        #       There's no arbitrary limit to the number of backticks you
-        #       can use as delimters. If you need three consecutive backticks
-        #       in your code, use four for delimiters, etc.
-        #
-        #   *   You can use spaces to get literal backticks at the edges:
-        #
-        #         ... type `` `bar` `` ...
-        #
-        #       Turns to:
-        #
-        #         ... type <code>`bar`</code> ...
-        return self._code_span_re.sub(self._code_span_sub, text)
-
-    def _encode_code(self, text):
-        """Encode/escape certain characters inside Markdown code runs.
-        The point is that in code, these characters are literals,
-        and lose their special Markdown meanings.
-        """
-        replacements = [
-            # Encode all ampersands; HTML entities are not
-            # entities within a Markdown code span.
-            ('&', '&amp;'),
-            # Do the angle bracket song and dance:
-            ('<', '&lt;'),
-            ('>', '&gt;'),
-        ]
-        for before, after in replacements:
-            text = text.replace(before, after)
-        hashed = _hash_text(text)
-        self._escape_table[text] = hashed
-        return hashed
-
-    _strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
-    _em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
-    _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
-    _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
-    def _do_italics_and_bold(self, text):
-        # <strong> must go first:
-        if "code-friendly" in self.extras:
-            text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
-            text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
-        else:
-            text = self._strong_re.sub(r"<strong>\2</strong>", text)
-            text = self._em_re.sub(r"<em>\2</em>", text)
-        return text
-
-    # "smarty-pants" extra: Very liberal in interpreting a single prime as an
-    # apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
-    # "twixt" can be written without an initial apostrophe. This is fine because
-    # using scare quotes (single quotation marks) is rare.
-    _apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
-    _contractions = ["tis", "twas", "twer", "neath", "o", "n",
-        "round", "bout", "twixt", "nuff", "fraid", "sup"]
-    def _do_smart_contractions(self, text):
-        text = self._apostrophe_year_re.sub(r"&#8217;\1", text)
-        for c in self._contractions:
-            text = text.replace("'%s" % c, "&#8217;%s" % c)
-            text = text.replace("'%s" % c.capitalize(),
-                "&#8217;%s" % c.capitalize())
-        return text
-
-    # Substitute double-quotes before single-quotes.
-    _opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
-    _opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
-    _closing_single_quote_re = re.compile(r"(?<=\S)'")
-    _closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
-    def _do_smart_punctuation(self, text):
-        """Fancifies 'single quotes', "double quotes", and apostrophes.
-        Converts --, ---, and ... into en dashes, em dashes, and ellipses.
-
-        Inspiration is: <http://daringfireball.net/projects/smartypants/>
-        See "test/tm-cases/smarty_pants.text" for a full discussion of the
-        support here and
-        <http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
-        discussion of some diversion from the original SmartyPants.
-        """
-        if "'" in text: # guard for perf
-            text = self._do_smart_contractions(text)
-            text = self._opening_single_quote_re.sub("&#8216;", text)
-            text = self._closing_single_quote_re.sub("&#8217;", text)
-
-        if '"' in text: # guard for perf
-            text = self._opening_double_quote_re.sub("&#8220;", text)
-            text = self._closing_double_quote_re.sub("&#8221;", text)
-
-        text = text.replace("---", "&#8212;")
-        text = text.replace("--", "&#8211;")
-        text = text.replace("...", "&#8230;")
-        text = text.replace(" . . . ", "&#8230;")
-        text = text.replace(". . .", "&#8230;")
-        return text
-
-    _block_quote_re = re.compile(r'''
-        (                           # Wrap whole match in \1
-          (
-            ^[ \t]*>[ \t]?          # '>' at the start of a line
-              .+\n                  # rest of the first line
-            (.+\n)*                 # subsequent consecutive lines
-            \n*                     # blanks
-          )+
-        )
-        ''', re.M | re.X)
-    _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M);
-
-    _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
-    def _dedent_two_spaces_sub(self, match):
-        return re.sub(r'(?m)^  ', '', match.group(1))
-
-    def _block_quote_sub(self, match):
-        bq = match.group(1)
-        bq = self._bq_one_level_re.sub('', bq)  # trim one level of quoting
-        bq = self._ws_only_line_re.sub('', bq)  # trim whitespace-only lines
-        bq = self._run_block_gamut(bq)          # recurse
-
-        bq = re.sub('(?m)^', '  ', bq)
-        # These leading spaces screw with <pre> content, so we need to fix that:
-        bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
-
-        return "<blockquote>\n%s\n</blockquote>\n\n" % bq
-
-    def _do_block_quotes(self, text):
-        if '>' not in text:
-            return text
-        return self._block_quote_re.sub(self._block_quote_sub, text)
-
-    def _form_paragraphs(self, text):
-        # Strip leading and trailing lines:
-        text = text.strip('\n')
-
-        # Wrap <p> tags.
-        grafs = []
-        for i, graf in enumerate(re.split(r"\n{2,}", text)):
-            if graf in self.html_blocks:
-                # Unhashify HTML blocks
-                grafs.append(self.html_blocks[graf])
-            else:
-                cuddled_list = None
-                if "cuddled-lists" in self.extras:
-                    # Need to put back trailing '\n' for `_list_item_re`
-                    # match at the end of the paragraph.
-                    li = self._list_item_re.search(graf + '\n')
-                    # Two of the same list marker in this paragraph: a likely
-                    # candidate for a list cuddled to preceding paragraph
-                    # text (issue 33). Note the `[-1]` is a quick way to
-                    # consider numeric bullets (e.g. "1." and "2.") to be
-                    # equal.
-                    if (li and len(li.group(2)) <= 3 and li.group("next_marker")
-                        and li.group("marker")[-1] == li.group("next_marker")[-1]):
-                        start = li.start()
-                        cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
-                        assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
-                        graf = graf[:start]
-
-                # Wrap <p> tags.
-                graf = self._run_span_gamut(graf)
-                grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
-
-                if cuddled_list:
-                    grafs.append(cuddled_list)
-
-        return "\n\n".join(grafs)
-
-    def _add_footnotes(self, text):
-        if self.footnotes:
-            footer = [
-                '<div class="footnotes">',
-                '<hr' + self.empty_element_suffix,
-                '<ol>',
-            ]
-            for i, id in enumerate(self.footnote_ids):
-                if i != 0:
-                    footer.append('')
-                footer.append('<li id="fn-%s">' % id)
-                footer.append(self._run_block_gamut(self.footnotes[id]))
-                backlink = ('<a href="#fnref-%s" '
-                    'class="footnoteBackLink" '
-                    'title="Jump back to footnote %d in the text.">'
-                    '&#8617;</a>' % (id, i+1))
-                if footer[-1].endswith("</p>"):
-                    footer[-1] = footer[-1][:-len("</p>")] \
-                        + '&#160;' + backlink + "</p>"
-                else:
-                    footer.append("\n<p>%s</p>" % backlink)
-                footer.append('</li>')
-            footer.append('</ol>')
-            footer.append('</div>')
-            return text + '\n\n' + '\n'.join(footer)
-        else:
-            return text
-
-    # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
-    #   http://bumppo.net/projects/amputator/
-    _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
-    _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
-    _naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
-
-    def _encode_amps_and_angles(self, text):
-        # Smart processing for ampersands and angle brackets that need
-        # to be encoded.
-        text = self._ampersand_re.sub('&amp;', text)
-
-        # Encode naked <'s
-        text = self._naked_lt_re.sub('&lt;', text)
-
-        # Encode naked >'s
-        # Note: Other markdown implementations (e.g. Markdown.pl, PHP
-        # Markdown) don't do this.
-        text = self._naked_gt_re.sub('&gt;', text)
-        return text
-
-    def _encode_backslash_escapes(self, text):
-        for ch, escape in list(self._escape_table.items()):
-            text = text.replace("\\"+ch, escape)
-        return text
-
-    _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
-    def _auto_link_sub(self, match):
-        g1 = match.group(1)
-        return '<a href="%s">%s</a>' % (g1, g1)
-
-    _auto_email_link_re = re.compile(r"""
-          <
-           (?:mailto:)?
-          (
-              [-.\w]+
-              \@
-              [-\w]+(\.[-\w]+)*\.[a-z]+
-          )
-          >
-        """, re.I | re.X | re.U)
-    def _auto_email_link_sub(self, match):
-        return self._encode_email_address(
-            self._unescape_special_chars(match.group(1)))
-
-    def _do_auto_links(self, text):
-        text = self._auto_link_re.sub(self._auto_link_sub, text)
-        text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
-        return text
-
-    def _encode_email_address(self, addr):
-        #  Input: an email address, e.g. "[email protected]"
-        #
-        #  Output: the email address as a mailto link, with each character
-        #      of the address encoded as either a decimal or hex entity, in
-        #      the hopes of foiling most address harvesting spam bots. E.g.:
-        #
-        #    <a href="&#x6D;&#97;&#105;&#108;&#x74;&#111;:&#102;&#111;&#111;&#64;&#101;
-        #       x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;">&#102;&#111;&#111;
-        #       &#64;&#101;x&#x61;&#109;&#x70;&#108;&#x65;&#x2E;&#99;&#111;&#109;</a>
-        #
-        #  Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
-        #  mailing list: <http://tinyurl.com/yu7ue>
-        chars = [_xml_encode_email_char_at_random(ch)
-                 for ch in "mailto:" + addr]
-        # Strip the mailto: from the visible part.
-        addr = '<a href="%s">%s</a>' \
-               % (''.join(chars), ''.join(chars[7:]))
-        return addr
-
-    def _do_link_patterns(self, text):
-        """Caveat emptor: there isn't much guarding against link
-        patterns being formed inside other standard Markdown links, e.g.
-        inside a [link def][like this].
-
-        Dev Notes: *Could* consider prefixing regexes with a negative
-        lookbehind assertion to attempt to guard against this.
-        """
-        link_from_hash = {}
-        for regex, repl in self.link_patterns:
-            replacements = []
-            for match in regex.finditer(text):
-                if hasattr(repl, "__call__"):
-                    href = repl(match)
-                else:
-                    href = match.expand(repl)
-                replacements.append((match.span(), href))
-            for (start, end), href in reversed(replacements):
-                escaped_href = (
-                    href.replace('"', '&quot;')  # b/c of attr quote
-                        # To avoid markdown <em> and <strong>:
-                        .replace('*', self._escape_table['*'])
-                        .replace('_', self._escape_table['_']))
-                link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
-                hash = _hash_text(link)
-                link_from_hash[hash] = link
-                text = text[:start] + hash + text[end:]
-        for hash, link in list(link_from_hash.items()):
-            text = text.replace(hash, link)
-        return text
-
-    def _unescape_special_chars(self, text):
-        # Swap back in all the special characters we've hidden.
-        for ch, hash in list(self._escape_table.items()):
-            text = text.replace(hash, ch)
-        return text
-
-    def _outdent(self, text):
-        # Remove one level of line-leading tabs or spaces
-        return self._outdent_re.sub('', text)
-
-
-class MarkdownWithExtras(Markdown):
-    """A markdowner class that enables most extras:
-
-    - footnotes
-    - code-color (only has effect if 'pygments' Python module on path)
-
-    These are not included:
-    - pyshell (specific to Python-related documenting)
-    - code-friendly (because it *disables* part of the syntax)
-    - link-patterns (because you need to specify some actual
-      link-patterns anyway)
-    """
-    extras = ["footnotes", "code-color"]
-
-
-#---- internal support functions
-
-class UnicodeWithAttrs(unicode):
-    """A subclass of unicode used for the return value of conversion to
-    possibly attach some attributes. E.g. the "toc_html" attribute when
-    the "toc" extra is used.
-    """
-    metadata = None
-    _toc = None
-    def toc_html(self):
-        """Return the HTML for the current TOC.
-
-        This expects the `_toc` attribute to have been set on this instance.
-        """
-        if self._toc is None:
-            return None
-
-        def indent():
-            return '  ' * (len(h_stack) - 1)
-        lines = []
-        h_stack = [0]   # stack of header-level numbers
-        for level, id, name in self._toc:
-            if level > h_stack[-1]:
-                lines.append("%s<ul>" % indent())
-                h_stack.append(level)
-            elif level == h_stack[-1]:
-                lines[-1] += "</li>"
-            else:
-                while level < h_stack[-1]:
-                    h_stack.pop()
-                    if not lines[-1].endswith("</li>"):
-                        lines[-1] += "</li>"
-                    lines.append("%s</ul></li>" % indent())
-            lines.append('%s<li><a href="#%s">%s</a>' % (
-                indent(), id, name))
-        while len(h_stack) > 1:
-            h_stack.pop()
-            if not lines[-1].endswith("</li>"):
-                lines[-1] += "</li>"
-            lines.append("%s</ul>" % indent())
-        return '\n'.join(lines) + '\n'
-    toc_html = property(toc_html)
-
-## {{{ http://code.activestate.com/recipes/577257/ (r1)
-_slugify_strip_re = re.compile(r'[^\w\s-]')
-_slugify_hyphenate_re = re.compile(r'[-\s]+')
-def _slugify(value):
-    """
-    Normalizes string, converts to lowercase, removes non-alpha characters,
-    and converts spaces to hyphens.
-
-    From Django's "django/template/defaultfilters.py".
-    """
-    import unicodedata
-    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
-    value = _slugify_strip_re.sub('', value).strip().lower()
-    return _slugify_hyphenate_re.sub('-', value)
-## end of http://code.activestate.com/recipes/577257/ }}}
-
-
-# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
-def _curry(*args, **kwargs):
-    function, args = args[0], args[1:]
-    def result(*rest, **kwrest):
-        combined = kwargs.copy()
-        combined.update(kwrest)
-        return function(*args + rest, **combined)
-    return result
-
-# Recipe: regex_from_encoded_pattern (1.0)
-def _regex_from_encoded_pattern(s):
-    """'foo'    -> re.compile(re.escape('foo'))
-       '/foo/'  -> re.compile('foo')
-       '/foo/i' -> re.compile('foo', re.I)
-    """
-    if s.startswith('/') and s.rfind('/') != 0:
-        # Parse it: /PATTERN/FLAGS
-        idx = s.rfind('/')
-        pattern, flags_str = s[1:idx], s[idx+1:]
-        flag_from_char = {
-            "i": re.IGNORECASE,
-            "l": re.LOCALE,
-            "s": re.DOTALL,
-            "m": re.MULTILINE,
-            "u": re.UNICODE,
-        }
-        flags = 0
-        for char in flags_str:
-            try:
-                flags |= flag_from_char[char]
-            except KeyError:
-                raise ValueError("unsupported regex flag: '%s' in '%s' "
-                                 "(must be one of '%s')"
-                                 % (char, s, ''.join(list(flag_from_char.keys()))))
-        return re.compile(s[1:idx], flags)
-    else: # not an encoded regex
-        return re.compile(re.escape(s))
-
-# Recipe: dedent (0.1.2)
-def _dedentlines(lines, tabsize=8, skip_first_line=False):
-    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
-
-        "lines" is a list of lines to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-
-    Same as dedent() except operates on a sequence of lines. Note: the
-    lines list is modified **in-place**.
-    """
-    DEBUG = False
-    if DEBUG:
-        print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
-              % (tabsize, skip_first_line))
-    indents = []
-    margin = None
-    for i, line in enumerate(lines):
-        if i == 0 and skip_first_line: continue
-        indent = 0
-        for ch in line:
-            if ch == ' ':
-                indent += 1
-            elif ch == '\t':
-                indent += tabsize - (indent % tabsize)
-            elif ch in '\r\n':
-                continue # skip all-whitespace lines
-            else:
-                break
-        else:
-            continue # skip all-whitespace lines
-        if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
-        if margin is None:
-            margin = indent
-        else:
-            margin = min(margin, indent)
-    if DEBUG: print("dedent: margin=%r" % margin)
-
-    if margin is not None and margin > 0:
-        for i, line in enumerate(lines):
-            if i == 0 and skip_first_line: continue
-            removed = 0
-            for j, ch in enumerate(line):
-                if ch == ' ':
-                    removed += 1
-                elif ch == '\t':
-                    removed += tabsize - (removed % tabsize)
-                elif ch in '\r\n':
-                    if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
-                    lines[i] = lines[i][j:]
-                    break
-                else:
-                    raise ValueError("unexpected non-whitespace char %r in "
-                                     "line %r while removing %d-space margin"
-                                     % (ch, line, margin))
-                if DEBUG:
-                    print("dedent: %r: %r -> removed %d/%d"\
-                          % (line, ch, removed, margin))
-                if removed == margin:
-                    lines[i] = lines[i][j+1:]
-                    break
-                elif removed > margin:
-                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
-                    break
-            else:
-                if removed:
-                    lines[i] = lines[i][removed:]
-    return lines
-
-def _dedent(text, tabsize=8, skip_first_line=False):
-    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
-
-        "text" is the text to dedent.
-        "tabsize" is the tab width to use for indent width calculations.
-        "skip_first_line" is a boolean indicating if the first line should
-            be skipped for calculating the indent width and for dedenting.
-            This is sometimes useful for docstrings and similar.
-
-    textwrap.dedent(s), but don't expand tabs to spaces
-    """
-    lines = text.splitlines(1)
-    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
-    return ''.join(lines)
-
-
-class _memoized(object):
-   """Decorator that caches a function's return value each time it is called.
-   If called later with the same arguments, the cached value is returned, and
-   not re-evaluated.
-
-   http://wiki.python.org/moin/PythonDecoratorLibrary
-   """
-   def __init__(self, func):
-      self.func = func
-      self.cache = {}
-   def __call__(self, *args):
-      try:
-         return self.cache[args]
-      except KeyError:
-         self.cache[args] = value = self.func(*args)
-         return value
-      except TypeError:
-         # uncachable -- for instance, passing a list as an argument.
-         # Better to not cache than to blow up entirely.
-         return self.func(*args)
-   def __repr__(self):
-      """Return the function's docstring."""
-      return self.func.__doc__
-
-
-def _xml_oneliner_re_from_tab_width(tab_width):
-    """Standalone XML processing instruction regex."""
-    return re.compile(r"""
-        (?:
-            (?<=\n\n)       # Starting after a blank line
-            |               # or
-            \A\n?           # the beginning of the doc
-        )
-        (                           # save in $1
-            [ ]{0,%d}
-            (?:
-                <\?\w+\b\s+.*?\?>   # XML processing instruction
-                |
-                <\w+:\w+\b\s+.*?/>  # namespaced single tag
-            )
-            [ \t]*
-            (?=\n{2,}|\Z)       # followed by a blank line or end of document
-        )
-        """ % (tab_width - 1), re.X)
-_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
-
-def _hr_tag_re_from_tab_width(tab_width):
-     return re.compile(r"""
-        (?:
-            (?<=\n\n)       # Starting after a blank line
-            |               # or
-            \A\n?           # the beginning of the doc
-        )
-        (                       # save in \1
-            [ ]{0,%d}
-            <(hr)               # start tag = \2
-            \b                  # word break
-            ([^<>])*?           #
-            /?>                 # the matching end tag
-            [ \t]*
-            (?=\n{2,}|\Z)       # followed by a blank line or end of document
-        )
-        """ % (tab_width - 1), re.X)
-_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
-
-
-def _xml_escape_attr(attr, skip_single_quote=True):
-    """Escape the given string for use in an HTML/XML tag attribute.
-
-    By default this doesn't bother with escaping `'` to `&#39;`, presuming that
-    the tag attribute is surrounded by double quotes.
-    """
-    escaped = (attr
-        .replace('&', '&amp;')
-        .replace('"', '&quot;')
-        .replace('<', '&lt;')
-        .replace('>', '&gt;'))
-    if not skip_single_quote:
-        escaped = escaped.replace("'", "&#39;")
-    return escaped
-
-
-def _xml_encode_email_char_at_random(ch):
-    r = random()
-    # Roughly 10% raw, 45% hex, 45% dec.
-    # '@' *must* be encoded. I [John Gruber] insist.
-    # Issue 26: '_' must be encoded.
-    if r > 0.9 and ch not in "@_":
-        return ch
-    elif r < 0.45:
-        # The [1:] is to drop leading '0': 0x63 -> x63
-        return '&#%s;' % hex(ord(ch))[1:]
-    else:
-        return '&#%s;' % ord(ch)
-
-
-
-#---- mainline
-
-class _NoReflowFormatter(optparse.IndentedHelpFormatter):
-    """An optparse formatter that does NOT reflow the description."""
-    def format_description(self, description):
-        return description or ""
-
-def _test():
-    import doctest
-    doctest.testmod()
-
-def main(argv=None):
-    if argv is None:
-        argv = sys.argv
-    if not logging.root.handlers:
-        logging.basicConfig()
-
-    usage = "usage: %prog [PATHS...]"
-    version = "%prog "+__version__
-    parser = optparse.OptionParser(prog="markdown2", usage=usage,
-        version=version, description=cmdln_desc,
-        formatter=_NoReflowFormatter())
-    parser.add_option("-v", "--verbose", dest="log_level",
-                      action="store_const", const=logging.DEBUG,
-                      help="more verbose output")
-    parser.add_option("--encoding",
-                      help="specify encoding of text content")
-    parser.add_option("--html4tags", action="store_true", default=False,
-                      help="use HTML 4 style for empty element tags")
-    parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
-                      help="sanitize literal HTML: 'escape' escapes "
-                           "HTML meta chars, 'replace' replaces with an "
-                           "[HTML_REMOVED] note")
-    parser.add_option("-x", "--extras", action="append",
-                      help="Turn on specific extra features (not part of "
-                           "the core Markdown spec). See above.")
-    parser.add_option("--use-file-vars",
-                      help="Look for and use Emacs-style 'markdown-extras' "
-                           "file var to turn on extras. See "
-                           "<https://github.com/trentm/python-markdown2/wiki/Extras>")
-    parser.add_option("--link-patterns-file",
-                      help="path to a link pattern file")
-    parser.add_option("--self-test", action="store_true",
-                      help="run internal self-tests (some doctests)")
-    parser.add_option("--compare", action="store_true",
-                      help="run against Markdown.pl as well (for testing)")
-    parser.set_defaults(log_level=logging.INFO, compare=False,
-                        encoding="utf-8", safe_mode=None, use_file_vars=False)
-    opts, paths = parser.parse_args()
-    log.setLevel(opts.log_level)
-
-    if opts.self_test:
-        return _test()
-
-    if opts.extras:
-        extras = {}
-        for s in opts.extras:
-            splitter = re.compile("[,;: ]+")
-            for e in splitter.split(s):
-                if '=' in e:
-                    ename, earg = e.split('=', 1)
-                    try:
-                        earg = int(earg)
-                    except ValueError:
-                        pass
-                else:
-                    ename, earg = e, None
-                extras[ename] = earg
-    else:
-        extras = None
-
-    if opts.link_patterns_file:
-        link_patterns = []
-        f = open(opts.link_patterns_file)
-        try:
-            for i, line in enumerate(f.readlines()):
-                if not line.strip(): continue
-                if line.lstrip().startswith("#"): continue
-                try:
-                    pat, href = line.rstrip().rsplit(None, 1)
-                except ValueError:
-                    raise MarkdownError("%s:%d: invalid link pattern line: %r"
-                                        % (opts.link_patterns_file, i+1, line))
-                link_patterns.append(
-                    (_regex_from_encoded_pattern(pat), href))
-        finally:
-            f.close()
-    else:
-        link_patterns = None
-
-    from os.path import join, dirname, abspath, exists
-    markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
-                       "Markdown.pl")
-    if not paths:
-        paths = ['-']
-    for path in paths:
-        if path == '-':
-            text = sys.stdin.read()
-        else:
-            fp = codecs.open(path, 'r', opts.encoding)
-            text = fp.read()
-            fp.close()
-        if opts.compare:
-            from subprocess import Popen, PIPE
-            print("==== Markdown.pl ====")
-            p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
-            p.stdin.write(text.encode('utf-8'))
-            p.stdin.close()
-            perl_html = p.stdout.read().decode('utf-8')
-            if py3:
-                sys.stdout.write(perl_html)
-            else:
-                sys.stdout.write(perl_html.encode(
-                    sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
-            print("==== markdown2.py ====")
-        html = markdown(text,
-            html4tags=opts.html4tags,
-            safe_mode=opts.safe_mode,
-            extras=extras, link_patterns=link_patterns,
-            use_file_vars=opts.use_file_vars)
-        if py3:
-            sys.stdout.write(html)
-        else:
-            sys.stdout.write(html.encode(
-                sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
-        if extras and "toc" in extras:
-            log.debug("toc_html: " +
-                html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
-        if opts.compare:
-            test_dir = join(dirname(dirname(abspath(__file__))), "test")
-            if exists(join(test_dir, "test_markdown2.py")):
-                sys.path.insert(0, test_dir)
-                from test_markdown2 import norm_html_from_html
-                norm_html = norm_html_from_html(html)
-                norm_perl_html = norm_html_from_html(perl_html)
-            else:
-                norm_html = html
-                norm_perl_html = perl_html
-            print("==== match? %r ====" % (norm_perl_html == norm_html))
-
-
-if __name__ == "__main__":
-    sys.exit( main(sys.argv) )

+ 0 - 2
frameworks/Python/web2py/web2py/gluon/contrib/markmin/__init__.py

@@ -1,2 +0,0 @@
-
-

文件差異過大導致無法顯示
+ 0 - 27
frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin.html


二進制
frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin.pdf


+ 0 - 1505
frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin2html.py

@@ -1,1505 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# created by Massimo Di Pierro
-# recreated by Vladyslav Kozlovskyy
-# license MIT/BSD/GPL
-import re
-import urllib
-from cgi import escape
-from string import maketrans
-try:
-   from ast import parse as ast_parse
-   import ast
-except ImportError: # python 2.5
-    from compiler import parse
-    import compiler.ast as ast
-
-"""
-TODO: next version should use MathJax
-
-<script type="text/javascript" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js">
-MathJax.Hub.Config({
- extensions: ["tex2jax.js","TeX/AMSmath.js","TeX/AMSsymbols.js"],
- jax: ["input/TeX", "output/HTML-CSS"],
- tex2jax: {
-     inlineMath: [ ['$','$'], ["\\(","\\)"] ],
-     displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
- },
- "HTML-CSS": { availableFonts: ["TeX"] }
-});
-</script>
-"""
-
-__all__ = ['render', 'markmin2html', 'markmin_escape']
-
-__doc__ = """
-# Markmin markup language
-
-## About
-
-This is a new markup language that we call markmin designed to produce high quality scientific papers and books and also put them online. We provide serializers for html, latex and pdf. It is implemented in the ``markmin2html`` function in the ``markmin2html.py``.
-
-Example of usage:
-
-``
-m = "Hello **world** [[link http://web2py.com]]"
-from markmin2html import markmin2html
-print markmin2html(m)
-from markmin2latex import markmin2latex
-print markmin2latex(m)
-from markmin2pdf import markmin2pdf # requires pdflatex
-print markmin2pdf(m)
-``
-====================
-# This is a test block
-  with new features:
-This is a blockquote with
-a list with tables in it:
------------
-  This is a paragraph before list.
-  You can continue paragraph on the
-  next lines.
-
-  This is an ordered list with tables:
-  + Item 1
-  + Item 2
-  + --------
-    aa|bb|cc
-    11|22|33
-    --------:tableclass1[tableid1]
-  + Item 4
-    -----------
-     T1| T2| t3
-    ===========
-    aaa|bbb|ccc
-    ddd|fff|ggg
-    123|0  |5.0
-    -----------:tableclass1
------------:blockquoteclass[blockquoteid]
-
-This this a new paragraph
-with a followed table.
-Table has header, footer, sections,
-odd and even rows:
--------------------------------
-**Title 1**|**Title 2**|**Title 3**
-==============================
-data 1     | data 2    |  2.00
-data 3     |data4(long)| 23.00
-           |data 5     | 33.50
-==============================
-New section|New data   |  5.00
-data 1     |data2(long)|100.45
-           |data 3     | 12.50
-data 4     | data 5    |   .33
-data 6     |data7(long)|  8.01
-           |data 8     |   514
-==============================
-Total:     | 9 items   |698,79
-------------------------------:tableclass1[tableid2]
-
-## Multilevel
-   lists
-
-Now lists can be multilevel:
-
-+ Ordered item 1 on level 1.
-  You can continue item text on
-  next strings
-
-. paragraph in an item
-
-++. Ordered item 1 of sublevel 2 with
-    a paragraph (paragraph can start
-    with point after plus or minus
-    characters, e.g. **++.** or **--.**)
-
-++. This is another item. But with 3 paragraphs,
-    blockquote and sublists:
-
-.. This is the second paragraph in the item. You
-   can add paragraphs to an item, using point
-   notation, where first characters in the string
-   are sequence of points with space between
-   them and another string. For example, this
-   paragraph (in sublevel 2) starts with two points:
-   ``.. This is the second paragraph...``
-
-.. ----------
-     ### this is a blockquote in a list
-
-     You can use blockquote with headers, paragraphs,
-     tables and lists in it:
-
-     Tables can have or have not header and footer.
-     This table is defined without any header
-     and footer in it:
-     ---------------------
-     red  |fox     | 0
-     blue |dolphin | 1000
-     green|leaf    | 10000
-     ---------------------
-   ----------
-
-.. This is yet another paragraph in the item.
-
---- This is an item of unordered list **(sublevel 3)**
---- This is the second item of the unordered list ''(sublevel 3)''
-
-++++++ This is a single item of ordered list in sublevel 6
-.... and this is a paragraph in sublevel 4
----. This is a new item with paragraph in sublevel 3.
-++++ Start ordered list in sublevel 4 with code block: ``
-line 1
-  line 2
-     line 3
-``
-++++. Yet another item with code block (we need to indent \`\` to add code block as part of item):
- ``
-  line 1
-line 2
-  line 3
-``
- This item finishes with this paragraph.
-
-... Item in sublevel 3 can be continued with paragraphs.
-
-... ``
-  this is another
-code block
-    in the
-  sublevel 3 item
-``
-
-+++ The last item in sublevel 3
-.. This is a continuous paragraph for item 2 in sublevel 2.
-   You can use such structure to create difficult structured
-   documents.
-
-++ item 3 in sublevel 2
--- item 1 in sublevel 2 (new unordered list)
--- item 2 in sublevel 2
--- item 3 in sublevel 2
-
-++ item 1 in sublevel 2 (new ordered list)
-++ item 2 in sublevel 2
-++ item 3 in sublevle 2
-
-+ item 2 in level 1
-+ item 3 in level 1
-- new unordered list (item 1 in level 1)
-- level 2 in level 1
-
-- level 3 in level 1
-- level 4 in level 1
-## This is the last section of the test
-
-Single paragraph with '----' in it will be turned into separator:
-
------------
-
-And this is the last paragraph in
-the test. Be happy!
-
-====================
-
-## Why?
-
-We wanted a markup language with the following requirements:
-- less than 300 lines of functional code
-- easy to read
-- secure
-- support table, ul, ol, code
-- support html5 video and audio elements (html serialization only)
-- can align images and resize them
-- can specify class for tables, blockquotes and code elements
-- can add anchors
-- does not use _ for markup (since it creates odd behavior)
-- automatically links urls
-- fast
-- easy to extend
-- supports latex and pdf including references
-- allows to describe the markup in the markup (this document is generated from markmin syntax)
-
-(results depend on text but in average for text ~100K markmin is 30% faster than markdown, for text ~10K it is 10x faster)
-
-The [[web2py book http://www.lulu.com/product/paperback/web2py-%283rd-edition%29/12822827]] published by lulu, for example, was entirely generated with markmin2pdf from the online [[web2py wiki http://www.web2py.com/book]]
-
-## Download
-
-- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2html.py
-- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2latex.py
-- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2pdf.py
-
-markmin2html.py and markmin2latex.py are single files and have no web2py dependence. Their license is BSD.
-
-## Examples
-
-### Bold, italic, code and links
-
-------------------------------------------------------------------------------
-**SOURCE**                                    | **OUTPUT**
-==============================================================================
-``# title``                                   | **title**
-``## section``                                | **section**
-``### subsection``                            | **subsection**
-``**bold**``                                  | **bold**
-``''italic''``                                | ''italic''
-``~~strikeout~~``                             | ~~strikeout~~
-``!`!`verbatim`!`!``                          | ``verbatim``
-``\`\`color with **bold**\`\`:red``           | ``color with **bold**``:red
-``\`\`many colors\`\`:color[blue:#ffff00]``   | ``many colors``:color[blue:#ffff00]
-``http://google.com``                         | http://google.com
-``[[**click** me #myanchor]]``                | [[**click** me #myanchor]]
-``[[click me [extra info] #myanchor popup]]`` | [[click me [extra info] #myanchor popup]]
--------------------------------------------------------------------------------
-
-### More on links
-
-The format is always ``[[title link]]`` or ``[[title [extra] link]]``. Notice you can nest bold, italic, strikeout and code inside the link ``title``.
-
-### Anchors [[myanchor]]
-
-You can place an anchor anywhere in the text using the syntax ``[[name]]`` where ''name'' is the name of the anchor.
-You can then link the anchor with [[link #myanchor]], i.e. ``[[link #myanchor]]`` or [[link with an extra info [extra info] #myanchor]], i.e.
-``[[link with an extra info [extra info] #myanchor]]``.
-
-### Images
-
-[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]
-This paragraph has an image aligned to the right with a width of 200px. Its is placed using the code
-
-``[[alt-string for the image [the image title] http://www.web2py.com/examples/static/web2py_logo.png right 200px]]``.
-
-### Unordered Lists
-
-``
-- Dog
-- Cat
-- Mouse
-``
-
-is rendered as
-- Dog
-- Cat
-- Mouse
-
-Two new lines between items break the list in two lists.
-
-### Ordered Lists
-
-``
-+ Dog
-+ Cat
-+ Mouse
-``
-
-is rendered as
-+ Dog
-+ Cat
-+ Mouse
-
-
-### Multilevel Lists
-
-``
-+ Dogs
- -- red
- -- brown
- -- black
-+ Cats
- -- fluffy
- -- smooth
- -- bald
-+ Mice
- -- small
- -- big
- -- huge
-``
-
-is rendered as
-+ Dogs
- -- red
- -- brown
- -- black
-+ Cats
- -- fluffy
- -- smooth
- -- bald
-+ Mice
- -- small
- -- big
- -- huge
-
-
-### Tables (with optional header and/or footer)
-
-Something like this
-``
------------------
-**A**|**B**|**C**
-=================
-  0  |  0  |  X
-  0  |  X  |  0
-  X  |  0  |  0
-=================
-**D**|**F**|**G**
------------------:abc[id]
-``
-is a table and is rendered as
------------------
-**A**|**B**|**C**
-=================
-0 | 0 | X
-0 | X | 0
-X | 0 | 0
-=================
-**D**|**F**|**G**
------------------:abc[id]
-Four or more dashes delimit the table and | separates the columns.
-The ``:abc``, ``:id[abc_1]`` or ``:abc[abc_1]`` at the end sets the class and/or id for the table and it is optional.
-
-### Blockquote
-
-A table with a single cell is rendered as a blockquote:
-
------
-Hello world
------
-
-Blockquote can contain headers, paragraphs, lists and tables:
-
-``
------
-  This is a paragraph in a blockquote
-
-  + item 1
-  + item 2
-  -- item 2.1
-  -- item 2.2
-  + item 3
-
-  ---------
-  0 | 0 | X
-  0 | X | 0
-  X | 0 | 0
-  ---------:tableclass1
------
-``
-
-is rendered as:
------
-  This is a paragraph in a blockquote
-
-  + item 1
-  + item 2
-  -- item 2.1
-  -- item 2.2
-  + item 3
-
-  ---------
-  0 | 0 | X
-  0 | X | 0
-  X | 0 | 0
-  ---------:tableclass1
------
-
-
-### Code, ``<code>``, escaping and extra stuff
-
-``
-def test():
-    return "this is Python code"
-``:python
-
-Optionally a ` inside a ``!`!`...`!`!`` block can be inserted escaped with !`!.
-
-**NOTE:** You can escape markmin constructions (\\'\\',\`\`,\*\*,\~\~,\[,\{,\]\},\$,\@) with '\\\\' character:
- so \\\\`\\\\` can replace !`!`! escape string
-
-The ``:python`` after the markup is also optional. If present, by default, it is used to set the class of the <code> block.
-The behavior can be overridden by passing an argument ``extra`` to the ``render`` function. For example:
-
-``
-markmin2html("!`!!`!aaa!`!!`!:custom",
-             extra=dict(custom=lambda text: 'x'+text+'x'))
-``:python
-
-generates
-
-``'xaaax'``:python
-
-(the ``!`!`...`!`!:custom`` block is rendered by the ``custom=lambda`` function passed to ``render``).
-
-### Line breaks
-
-``[[NEWLINE]]`` tag is used to break lines:
-``
-#### Multiline [[NEWLINE]]
-   title
-paragraph [[NEWLINE]]
-with breaks[[NEWLINE]]in it
-``
-generates:
-
-#### Multiline [[NEWLINE]]
-   title
-paragraph [[NEWLINE]]
-with breaks[[NEWLINE]]in it
-
-
-### Html5 support
-
-Markmin also supports the <video> and <audio> html5 tags using the notation:
-``
-[[message link video]]
-[[message link audio]]
-
-[[message [title] link video]]
-[[message [title] link audio]]
-``
-where ``message`` will be shown in browsers without HTML5 video/audio tags support.
-
-### Latex and other extensions
-
-Formulas can be embedded into HTML with ''\$\$``formula``\$\$''.
-You can use Google charts to render the formula:
-
-``
-LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'
-markmin2html(text,{'latex':lambda code: LATEX % urllib.quote(code)})
-``
-
-### Code with syntax highlighting
-
-This requires a syntax highlighting tool, such as the web2py CODE helper.
-
-``
-extra={'code_cpp':lambda text: CODE(text,language='cpp').xml(),
-       'code_java':lambda text: CODE(text,language='java').xml(),
-       'code_python':lambda text: CODE(text,language='python').xml(),
-       'code_html':lambda text: CODE(text,language='html').xml()}
-``
-or simple:
-``
-extra={'code':lambda text,lang='python': CODE(text,language=lang).xml()}
-``
-``
-markmin2html(text,extra=extra)
-``
-
-Code can now be marked up as in this example:
-``
-!`!`
-<html><body>example</body></html>
-!`!`:code_html
-``
-OR
-``
-!`!`
-<html><body>example</body></html>
-!`!`:code[html]
-``
-
-### Citations and References
-
-Citations are treated as internal links in html and proper citations in latex if there is a final section called "References". Items like
-
-``
-- [[key]] value
-``
-
-in the References will be translated into Latex
-
-``
-\\bibitem{key} value
-``
-
-Here is an example of usage:
-
-``
-As shown in Ref.!`!`mdipierro`!`!:cite
-
-## References
-
-- [[mdipierro]] web2py Manual, 3rd Edition, lulu.com
-``
-
-### Caveats
-
-``<ul/>``, ``<ol/>``, ``<code/>``, ``<table/>``, ``<blockquote/>``, ``<h1/>``, ..., ``<h6/>`` do not have ``<p>...</p>`` around them.
-
-"""
-html_colors=['aqua', 'black', 'blue', 'fuchsia', 'gray', 'green',
-             'lime', 'maroon', 'navy', 'olive', 'purple', 'red',
-             'silver', 'teal', 'white', 'yellow']
-
-META = '\x06'
-LINK = '\x07'
-DISABLED_META = '\x08'
-LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" />'
-regex_URL=re.compile(r'@/(?P<a>\w*)/(?P<c>\w*)/(?P<f>\w*(\.\w+)?)(/(?P<args>[\w\.\-/]+))?')
-regex_env2=re.compile(r'@\{(?P<a>[\w\-\.]+?)(\:(?P<b>.*?))?\}')
-regex_expand_meta = re.compile('('+META+'|'+DISABLED_META+'|````)')
-regex_dd=re.compile(r'\$\$(?P<latex>.*?)\$\$')
-regex_code = re.compile('('+META+'|'+DISABLED_META+r'|````)|(``(?P<t>.+?)``(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[^\]]*)\])?)?)',re.S)
-regex_strong=re.compile(r'\*\*(?P<t>[^\s*]+( +[^\s*]+)*)\*\*')
-regex_del=re.compile(r'~~(?P<t>[^\s*]+( +[^\s*]+)*)~~')
-regex_em=re.compile(r"''(?P<t>([^\s']| |'(?!'))+)''")
-regex_num=re.compile(r"^\s*[+-]?((\d+(\.\d*)?)|\.\d+)([eE][+-]?[0-9]+)?\s*$")
-regex_list=re.compile('^(?:(?:(#{1,6})|(?:(\.+|\++|\-+)(\.)?))\s*)?(.*)$')
-regex_bq_headline=re.compile('^(?:(\.+|\++|\-+)(\.)?\s+)?(-{3}-*)$')
-regex_tq=re.compile('^(-{3}-*)(?::(?P<c>[a-zA-Z][_a-zA-Z\-\d]*)(?:\[(?P<p>[a-zA-Z][_a-zA-Z\-\d]*)\])?)?$')
-regex_proto = re.compile(r'(?<!["\w>/=])(?P<p>\w+):(?P<k>\w+://[\w\d\-+=?%&/:.]+)', re.M)
-regex_auto = re.compile(r'(?<!["\w>/=])(?P<k>\w+://[\w\d\-+_=?%&/:.,;#]+\w|[\w\-.]+@[\w\-.]+)',re.M)
-regex_link=re.compile(r'('+LINK+r')|\[\[(?P<s>.+?)\]\]',re.S)
-regex_link_level2=re.compile(r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?(?:\s+(?P<p>popup))?\s*$',re.S)
-regex_media_level2=re.compile(r'^(?P<t>\S.*?)?(?:\s+\[(?P<a>.+?)\])?(?:\s+(?P<k>\S+))?\s+(?P<p>img|IMG|left|right|center|video|audio|blockleft|blockright)(?:\s+(?P<w>\d+px))?\s*$',re.S)
-
-regex_markmin_escape = re.compile(r"(\\*)(['`:*~\\[\]{}@\$+\-.#\n])")
-regex_backslash = re.compile(r"\\(['`:*~\\[\]{}@\$+\-.#\n])")
-ttab_in  = maketrans("'`:*~\\[]{}@$+-.#\n", '\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05')
-ttab_out = maketrans('\x0b\x0c\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x05',"'`:*~\\[]{}@$+-.#\n")
-regex_quote = re.compile('(?P<name>\w+?)\s*\=\s*')
-
-def make_dict(b):
-    return '{%s}' % regex_quote.sub("'\g<name>':",b)
-    
-def safe_eval(node_or_string, env):
-    """
-    Safely evaluate an expression node or a string containing a Python
-    expression.  The string or node provided may only consist of the following
-    Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
-    and None.
-    """
-    _safe_names = {'None': None, 'True': True, 'False': False}
-    _safe_names.update(env)
-    if isinstance(node_or_string, basestring):
-        node_or_string = ast_parse(node_or_string, mode='eval')
-    if isinstance(node_or_string, ast.Expression):
-        node_or_string = node_or_string.body
-    def _convert(node):
-        if isinstance(node, ast.Str):
-            return node.s
-        elif isinstance(node, ast.Num):
-            return node.n
-        elif isinstance(node, ast.Tuple):
-            return tuple(map(_convert, node.elts))
-        elif isinstance(node, ast.List):
-            return list(map(_convert, node.elts))
-        elif isinstance(node, ast.Dict):
-            return dict((_convert(k), _convert(v)) for k, v
-                        in zip(node.keys, node.values))
-        elif isinstance(node, ast.Name):
-            if node.id in _safe_names:
-                return _safe_names[node.id]
-        elif isinstance(node, ast.BinOp) and \
-             isinstance(node.op, (Add, Sub)) and \
-             isinstance(node.right, Num) and \
-             isinstance(node.right.n, complex) and \
-             isinstance(node.left, Num) and \
-             isinstance(node.left.n, (int, long, float)):
-            left = node.left.n
-            right = node.right.n
-            if isinstance(node.op, Add):
-                return left + right
-            else:
-                return left - right
-        raise ValueError('malformed string')
-    return _convert(node_or_string)
-
-def markmin_escape(text):
-    """ insert \\ before markmin control characters: '`:*~[]{}@$ """
-    return regex_markmin_escape.sub(
-       lambda m: '\\'+m.group(0).replace('\\','\\\\'), text)
-
-def replace_autolinks(text,autolinks):
-    return regex_auto.sub(lambda m: autolinks(m.group('k')), text)
-
-def replace_at_urls(text,url):
-    # this is experimental @{function/args}
-    def u1(match,url=url):
-        a,c,f,args = match.group('a','c','f','args')
-        return url(a=a or None,c=c or None,f = f or None,
-                   args=(args or '').split('/'), scheme=True, host=True)
-    return regex_URL.sub(u1,text)
-
-def replace_components(text,env):
-    # not perfect but acceptable
-    def u2(match, env=env):
-        f = env.get(match.group('a'), match.group(0))
-        if callable(f):
-            b = match.group('b')
-            try:
-                b = safe_eval(make_dict(b),env)
-            except:
-                pass
-            try:
-                f = f(**b) if isinstance(b,dict) else f(b)
-            except Exception, e:
-                f = 'ERROR: %s' % e
-            return str(f)
-    text = regex_env2.sub(u2, text)
-    return text
-
-def autolinks_simple(url):
-    """
-    it automatically converts the url to link,
-    image, video or audio tag
-    """
-    u_url=url.lower()
-    if '@' in url and not '://' in url:
-        return '<a href="mailto:%s">%s</a>' % (url, url)
-    elif u_url.endswith(('.jpg','.jpeg','.gif','.png')):
-        return '<img src="%s" controls />' % url
-    elif u_url.endswith(('.mp4','.mpeg','.mov','.ogv')):
-        return '<video src="%s" controls></video>' % url
-    elif u_url.endswith(('.mp3','.wav','.ogg')):
-        return '<audio src="%s" controls></audio>' % url
-    return '<a href="%s">%s</a>' % (url,url)
-
-def protolinks_simple(proto, url):
-    """
-    it converts url to html-string using appropriate proto-prefix:
-    Uses for construction "proto:url", e.g.:
-        "iframe:http://www.example.com/path" will call protolinks()
-        with parameters:
-            proto="iframe"
-            url="http://www.example.com/path"
-    """
-    if proto in ('iframe','embed'): #== 'iframe':
-        return '<iframe src="%s" frameborder="0" allowfullscreen></iframe>'%url
-    #elif proto == 'embed':  # NOTE: embed is a synonym to iframe now
-    #    return '<a href="%s" class="%sembed">%s></a>'%(url,class_prefix,url)
-    elif proto == 'qr':
-        return '<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=%s&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />'%url
-    return proto+':'+url
-
-def email_simple(email):
-   return '<a href="mailto:%s">%s</a>' % (email, email)
-
-def render(text,
-           extra={},
-           allowed={},
-           sep='p',
-           URL=None,
-           environment=None,
-           latex='google',
-           autolinks='default',
-           protolinks='default',
-           class_prefix='',
-           id_prefix='markmin_',
-           pretty_print=False):
-    """
-    Arguments:
-    - text is the text to be processed
-    - extra is a dict like extra=dict(custom=lambda value: value) that process custom code
-      as in " ``this is custom code``:custom "
-    - allowed is a dictionary of list of allowed classes like
-      allowed = dict(code=('python','cpp','java'))
-    - sep can be 'p' to separate text in <p>...</p>
-      or can be 'br' to separate text using <br />
-    - URL -
-    - environment is a dictionary of environment variables (can be accessed with @{variable}
-    - latex -
-    - autolinks is a function to convert auto urls to html-code (default is autolinks(url) )
-    - protolinks is a function to convert proto-urls (e.g."proto:url") to html-code
-      (default is protolinks(proto,url))
-    - class_prefix is a prefix for ALL classes in markmin text. E.g. if class_prefix='my_'
-      then for ``test``:cls class will be changed to "my_cls" (default value is '')
-    - id_prefix is prefix for ALL ids in markmin text (default value is 'markmin_'). E.g.:
-        -- [[id]] will be converted to <span class="anchor" id="markmin_id"></span>
-        -- [[link #id]] will be converted to <a href="#markmin_id">link</a>
-        -- ``test``:cls[id] will be converted to <code class="cls" id="markmin_id">test</code>
-
-    >>> render('this is\\n# a section\\n\\nparagraph')
-    '<p>this is</p><h1>a section</h1><p>paragraph</p>'
-    >>> render('this is\\n## a subsection\\n\\nparagraph')
-    '<p>this is</p><h2>a subsection</h2><p>paragraph</p>'
-    >>> render('this is\\n### a subsubsection\\n\\nparagraph')
-    '<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>'
-    >>> render('**hello world**')
-    '<p><strong>hello world</strong></p>'
-    >>> render('``hello world``')
-    '<code>hello world</code>'
-    >>> render('``hello world``:python')
-    '<code class="python">hello world</code>'
-    >>> render('``\\nhello\\nworld\\n``:python')
-    '<pre><code class="python">hello\\nworld</code></pre>'
-    >>> render('``hello world``:python[test_id]')
-    '<code class="python" id="markmin_test_id">hello world</code>'
-    >>> render('``hello world``:id[test_id]')
-    '<code id="markmin_test_id">hello world</code>'
-    >>> render('``\\nhello\\nworld\\n``:python[test_id]')
-    '<pre><code class="python" id="markmin_test_id">hello\\nworld</code></pre>'
-    >>> render('``\\nhello\\nworld\\n``:id[test_id]')
-    '<pre><code id="markmin_test_id">hello\\nworld</code></pre>'
-    >>> render("''hello world''")
-    '<p><em>hello world</em></p>'
-    >>> render('** hello** **world**')
-    '<p>** hello** <strong>world</strong></p>'
-
-    >>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another')
-    '<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>'
-
-    >>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another')
-    '<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>'
-
-    >>> render("----\\na | b\\nc | d\\n----\\n")
-    '<table><tbody><tr class="first"><td>a</td><td>b</td></tr><tr class="even"><td>c</td><td>d</td></tr></tbody></table>'
-
-    >>> render("----\\nhello world\\n----\\n")
-    '<blockquote>hello world</blockquote>'
-
-    >>> render('[[myanchor]]')
-    '<p><span class="anchor" id="markmin_myanchor"></span></p>'
-
-    >>> render('[[ http://example.com]]')
-    '<p><a href="http://example.com">http://example.com</a></p>'
-
-    >>> render('[[bookmark [http://example.com] ]]')
-    '<p><span class="anchor" id="markmin_bookmark"><a href="http://example.com">http://example.com</a></span></p>'
-
-    >>> render('[[this is a link http://example.com]]')
-    '<p><a href="http://example.com">this is a link</a></p>'
-
-    >>> render('[[this is an image http://example.com left]]')
-    '<p><img src="http://example.com" alt="this is an image" style="float:left" /></p>'
-
-    >>> render('[[this is an image http://example.com left 200px]]')
-    '<p><img src="http://example.com" alt="this is an image" style="float:left;width:200px" /></p>'
-
-    >>> render("[[Your browser doesn't support <video> HTML5 tag http://example.com video]]")
-    '<p><video controls="controls"><source src="http://example.com" />Your browser doesn\\'t support &lt;video&gt; HTML5 tag</video></p>'
-
-    >>> render("[[Your browser doesn't support <audio> HTML5 tag http://example.com audio]]")
-    '<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support &lt;audio&gt; HTML5 tag</audio></p>'
-
-    >>> render("[[Your\\nbrowser\\ndoesn't\\nsupport\\n<audio> HTML5 tag http://exam\\\\\\nple.com\\naudio]]")
-    '<p><audio controls="controls"><source src="http://example.com" />Your browser doesn\\'t support &lt;audio&gt; HTML5 tag</audio></p>'
-
-    >>> render('[[this is a **link** http://example.com]]')
-    '<p><a href="http://example.com">this is a <strong>link</strong></a></p>'
-
-    >>> render("``aaa``:custom", extra=dict(custom=lambda text: 'x'+text+'x'))
-    'xaaax'
-
-    >>> print render(r"$$\int_a^b sin(x)dx$$")
-    <img src="http://chart.apis.google.com/chart?cht=tx&chl=%5Cint_a%5Eb%20sin%28x%29dx" />
-
-    >>> markmin2html(r"use backslash: \[\[[[mess\[[ag\]]e link]]\]]")
-    '<p>use backslash: [[<a href="link">mess[[ag]]e</a>]]</p>'
-
-    >>> markmin2html("backslash instead of exclamation sign: \``probe``")
-    '<p>backslash instead of exclamation sign: ``probe``</p>'
-
-    >>> render(r"simple image: [[\[[this is an image\]] http://example.com IMG]]!!!")
-    '<p>simple image: <img src="http://example.com" alt="[[this is an image]]" />!!!</p>'
-
-    >>> render(r"simple link no anchor with popup: [[ http://example.com popup]]")
-    '<p>simple link no anchor with popup: <a href="http://example.com" target="_blank">http://example.com</a></p>'
-
-    >>> render("auto-url: http://example.com")
-    '<p>auto-url: <a href="http://example.com">http://example.com</a></p>'
-
-    >>> render("auto-image: (http://example.com/image.jpeg)")
-    '<p>auto-image: (<img src="http://example.com/image.jpeg" controls />)</p>'
-
-    >>> render("qr: (qr:http://example.com/image.jpeg)")
-    '<p>qr: (<img style="width:100px" src="http://chart.apis.google.com/chart?cht=qr&chs=100x100&chl=http://example.com/image.jpeg&choe=UTF-8&chld=H" alt="QR Code" title="QR Code" />)</p>'
-
-    >>> render("embed: (embed:http://example.com/page)")
-    '<p>embed: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
-
-    >>> render("iframe: (iframe:http://example.com/page)")
-    '<p>iframe: (<iframe src="http://example.com/page" frameborder="0" allowfullscreen></iframe>)</p>'
-
-    >>> render("title1: [[test message [simple \[test\] title] http://example.com ]] test")
-    '<p>title1: <a href="http://example.com" title="simple [test] title">test message</a> test</p>'
-
-    >>> render("title2: \[\[[[test message [simple title] http://example.com popup]]\]]")
-    '<p>title2: [[<a href="http://example.com" title="simple title" target="_blank">test message</a>]]</p>'
-
-    >>> render("title3: [[ [link w/o anchor but with title] http://www.example.com ]]")
-    '<p>title3: <a href="http://www.example.com" title="link w/o anchor but with title">http://www.example.com</a></p>'
-
-    >>> render("title4: [[ [simple title] http://www.example.com popup]]")
-    '<p>title4: <a href="http://www.example.com" title="simple title" target="_blank">http://www.example.com</a></p>'
-
-    >>> render("title5: [[test message [simple title] http://example.com IMG]]")
-    '<p>title5: <img src="http://example.com" alt="test message" title="simple title" /></p>'
-
-    >>> render("title6: [[[test message w/o title] http://example.com IMG]]")
-    '<p>title6: <img src="http://example.com" alt="[test message w/o title]" /></p>'
-
-    >>> render("title7: [[[this is not a title] [this is a title] http://example.com IMG]]")
-    '<p>title7: <img src="http://example.com" alt="[this is not a title]" title="this is a title" /></p>'
-
-    >>> render("title8: [[test message [title] http://example.com center]]")
-    '<p>title8: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" /></p></p>'
-
-    >>> render("title9: [[test message [title] http://example.com left]]")
-    '<p>title9: <img src="http://example.com" alt="test message" title="title" style="float:left" /></p>'
-
-    >>> render("title10: [[test message [title] http://example.com right 100px]]")
-    '<p>title10: <img src="http://example.com" alt="test message" title="title" style="float:right;width:100px" /></p>'
-
-    >>> render("title11: [[test message [title] http://example.com center 200px]]")
-    '<p>title11: <p style="text-align:center"><img src="http://example.com" alt="test message" title="title" style="width:200px" /></p></p>'
-
-    >>> render(r"\\[[probe]]")
-    '<p>[[probe]]</p>'
-
-    >>> render(r"\\\\[[probe]]")
-    '<p>\\\\<span class="anchor" id="markmin_probe"></span></p>'
-
-    >>> render(r"\\\\\\[[probe]]")
-    '<p>\\\\[[probe]]</p>'
-
-    >>> render(r"\\\\\\\\[[probe]]")
-    '<p>\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
-
-    >>> render(r"\\\\\\\\\[[probe]]")
-    '<p>\\\\\\\\[[probe]]</p>'
-
-    >>> render(r"\\\\\\\\\\\[[probe]]")
-    '<p>\\\\\\\\\\\\<span class="anchor" id="markmin_probe"></span></p>'
-
-    >>> render("``[[ [\\[[probe\]\\]] URL\\[x\\]]]``:red[dummy_params]")
-    '<span style="color: red"><a href="URL[x]" title="[[probe]]">URL[x]</a></span>'
-
-    >>> render("the \\**text**")
-    '<p>the **text**</p>'
-
-    >>> render("the \\``text``")
-    '<p>the ``text``</p>'
-
-    >>> render("the \\\\''text''")
-    "<p>the ''text''</p>"
-
-    >>> render("the [[link [**with** ``<b>title</b>``:red] http://www.example.com]]")
-    '<p>the <a href="http://www.example.com" title="**with** ``&lt;b&gt;title&lt;/b&gt;``:red">link</a></p>'
-
-    >>> render("the [[link \\[**without** ``<b>title</b>``:red\\] http://www.example.com]]")
-    '<p>the <a href="http://www.example.com">link [<strong>without</strong> <span style="color: red">&lt;b&gt;title&lt;/b&gt;</span>]</a></p>'
-
-    >>> render("aaa-META-``code``:text[]-LINK-[[link http://www.example.com]]-LINK-[[image http://www.picture.com img]]-end")
-    '<p>aaa-META-<code class="text">code</code>-LINK-<a href="http://www.example.com">link</a>-LINK-<img src="http://www.picture.com" alt="image" />-end</p>'
-
-    >>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a>]]")
-    '<p><a href="&lt;a&gt;text3&lt;/a&gt;" title="&lt;a&gt;test2&lt;/a&gt;">&lt;a&gt;test&lt;/a&gt;</a></p>'
-
-    >>> render("[[<a>test</a> [<a>test2</a>] <a>text3</a> IMG]]")
-    '<p><img src="&lt;a&gt;text3&lt;/a&gt;" alt="&lt;a&gt;test&lt;/a&gt;" title="&lt;a&gt;test2&lt;/a&gt;" /></p>'
-
-    >>> render("**bold** ''italic'' ~~strikeout~~")
-    '<p><strong>bold</strong> <em>italic</em> <del>strikeout</del></p>'
-
-    >>> render("this is ``a red on yellow text``:c[#FF0000:#FFFF00]")
-    '<p>this is <span style="color: #FF0000;background-color: #FFFF00;">a red on yellow text</span></p>'
-
-    >>> render("this is ``a text with yellow background``:c[:yellow]")
-    '<p>this is <span style="background-color: yellow;">a text with yellow background</span></p>'
-
-    >>> render("this is ``a colored text (RoyalBlue)``:color[rgb(65,105,225)]")
-    '<p>this is <span style="color: rgb(65,105,225);">a colored text (RoyalBlue)</span></p>'
-
-    >>> render("this is ``a green text``:color[green:]")
-    '<p>this is <span style="color: green;">a green text</span></p>'
-
-    >>> render("**@{probe:1}**", environment=dict(probe=lambda t:"test %s" % t))
-    '<p><strong>test 1</strong></p>'
-
-    >>> render("**@{probe:t=a}**", environment=dict(probe=lambda t:"test %s" % t, a=1))
-    '<p><strong>test 1</strong></p>'
-
-    >>> render('[[id1 [span **messag** in ''markmin''] ]] ... [[**link** to id [link\\\'s title] #mark1]]')
-    '<p><span class="anchor" id="markmin_id1">span <strong>messag</strong> in markmin</span> ... <a href="#markmin_mark1" title="link\\\'s title"><strong>link</strong> to id</a></p>'
-
-    >>> render('# Multiline[[NEWLINE]]\\n title\\nParagraph[[NEWLINE]]\\nwith breaks[[NEWLINE]]\\nin it')
-    '<h1>Multiline<br /> title</h1><p>Paragraph<br /> with breaks<br /> in it</p>'
-
-    >>> render("anchor with name 'NEWLINE': [[NEWLINE [ ] ]]")
-    '<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE"></span></p>'
-
-    >>> render("anchor with name 'NEWLINE': [[NEWLINE [newline] ]]")
-    '<p>anchor with name \\'NEWLINE\\': <span class="anchor" id="markmin_NEWLINE">newline</span></p>'
-    """
-    if autolinks=="default": autolinks = autolinks_simple
-    if protolinks=="default": protolinks = protolinks_simple
-    pp='\n' if pretty_print else ''
-    if isinstance(text,unicode):
-        text = text.encode('utf8')
-    text = str(text or '')
-    text = regex_backslash.sub(lambda m: m.group(1).translate(ttab_in), text)
-    text = text.replace('\x05','').replace('\r\n', '\n') # concatenate strings separeted by \\n
-
-    if URL is not None:
-        text = replace_at_urls(text,URL)
-
-    if latex == 'google':
-        text = regex_dd.sub('``\g<latex>``:latex ', text)
-
-    #############################################################
-    # replace all blocks marked with ``...``:class[id] with META
-    # store them into segments they will be treated as code
-    #############################################################
-    segments = []
-    def mark_code(m):
-        g = m.group(0)
-        if g in (META, DISABLED_META ):
-            segments.append((None, None, None, g))
-            return m.group()
-        elif g == '````':
-            segments.append((None, None, None, ''))
-            return m.group()
-        else:
-            c = m.group('c') or ''
-            p = m.group('p') or ''
-            if 'code' in allowed and not c in allowed['code']: c = ''
-            code = m.group('t').replace('!`!','`')
-            segments.append((code, c, p, m.group(0)))
-        return META
-    text = regex_code.sub(mark_code, text)
-
-    #############################################################
-    # replace all blocks marked with [[...]] with LINK
-    # store them into links they will be treated as link
-    #############################################################
-    links = []
-    def mark_link(m):
-        links.append( None if m.group() == LINK
-                         else m.group('s') )
-        return LINK
-    text = regex_link.sub(mark_link, text)
-    text = escape(text)
-
-    if protolinks:
-        text = regex_proto.sub(lambda m: protolinks(*m.group('p','k')), text)
-
-    if autolinks:
-        text = replace_autolinks(text,autolinks)
-
-    #############################################################
-    # normalize spaces
-    #############################################################
-    strings=text.split('\n')
-
-    def parse_title(t, s): #out, lev, etags, tag, s):
-        hlevel=str(len(t))
-        out.extend(etags[::-1])
-        out.append("<h%s>%s"%(hlevel,s))
-        etags[:]=["</h%s>%s"%(hlevel,pp)]
-        lev=0
-        ltags[:]=[]
-        tlev[:]=[]
-        return (lev, 'h')
-
-    def parse_list(t, p, s, tag, lev, mtag, lineno):
-        lent=len(t)
-        if lent<lev: # current item level < previous item level
-            while ltags[-1]>lent:
-                ltags.pop()
-                out.append(etags.pop())
-            lev=lent
-            tlev[lev:]=[]
-
-        if lent>lev: # current item level > previous item level
-            if lev==0: # previous line is not a list (paragraph or title)
-                out.extend(etags[::-1])
-                ltags[:]=[]
-                tlev[:]=[]
-                etags[:]=[]
-            if pend and mtag == '.': # paragraph in a list:
-                out.append(etags.pop())
-                ltags.pop()
-            for i in xrange(lent-lev):
-                out.append('<'+tag+'>'+pp)
-                etags.append('</'+tag+'>'+pp)
-                lev+=1
-                ltags.append(lev)
-                tlev.append(tag)
-        elif lent == lev:
-            if tlev[-1] != tag:
-                # type of list is changed (ul<=>ol):
-                for i in xrange(ltags.count(lent)):
-                    ltags.pop()
-                    out.append(etags.pop())
-                tlev[-1]=tag
-                out.append('<'+tag+'>'+pp)
-                etags.append('</'+tag+'>'+pp)
-                ltags.append(lev)
-            else:
-                if ltags.count(lev)>1:
-                    out.append(etags.pop())
-                    ltags.pop()
-        mtag='l'
-        out.append('<li>')
-        etags.append('</li>'+pp)
-        ltags.append(lev)
-        if s[:1] == '-':
-            (s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
-        if p and mtag=='l':
-            (lev,mtag,lineno)=parse_point(t, s, lev, '', lineno)
-        else:
-            out.append(s)
-
-        return (lev, mtag, lineno)
-
-    def parse_point(t, s, lev, mtag, lineno):
-        """ paragraphs in lists """
-        lent=len(t)
-        if lent>lev:
-            return parse_list(t, '.', s, 'ul', lev, mtag, lineno)
-        elif lent<lev:
-            while ltags[-1]>lent:
-                ltags.pop()
-                out.append(etags.pop())
-            lev=lent
-            tlev[lev:]=[]
-            mtag=''
-        elif lent==lev:
-            if pend and mtag == '.':
-                out.append(etags.pop())
-                ltags.pop()
-        if br and mtag in ('l','.'):
-            out.append(br)
-        if s == META:
-           mtag = ''
-        else:
-            mtag = '.'
-            if s[:1] == '-':
-               (s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
-            if mtag == '.':
-                out.append(pbeg)
-                if pend:
-                    etags.append(pend)
-                    ltags.append(lev)
-        out.append(s)
-        return (lev, mtag, lineno)
-
-    def parse_table_or_blockquote(s, mtag, lineno):
-        # check next line. If next line :
-        # - is empty -> this is an <hr /> tag
-        # - consists '|' -> table
-        # - consists other characters -> blockquote
-        if (lineno+1 >= strings_len or
-            not(s.count('-') == len(s) and len(s)>3)):
-           return (s, mtag, lineno)
-
-        lineno+=1
-        s = strings[lineno].strip()
-        if s:
-            if '|' in s:
-                # table
-                tout=[]
-                thead=[]
-                tbody=[]
-                rownum=0
-                t_id = ''
-                t_cls = ''
-
-                # parse table:
-                while lineno < strings_len:
-                    s = strings[lineno].strip()
-                    if s[:1] == '=':
-                        # header or footer
-                        if s.count('=')==len(s) and len(s)>3:  
-                            if not thead: # if thead list is empty:
-                                thead = tout
-                            else:
-                                tbody.extend(tout)
-                            tout = []
-                            rownum=0
-                            lineno+=1
-                            continue
-
-                    m = regex_tq.match(s)
-                    if m:
-                        t_cls = m.group('c') or ''
-                        t_id = m.group('p') or ''
-                        break
-
-                    if rownum % 2:
-                       tr = '<tr class="even">'
-                    else:
-                       tr = '<tr class="first">' if rownum == 0 else '<tr>'
-                    tout.append(tr + ''.join(['<td%s>%s</td>' % (
-                                    ' class="num"' 
-                                    if regex_num.match(f) else '',
-                                    f.strip()
-                                    ) for f in s.split('|')])+'</tr>'+pp)
-                    rownum+=1
-                    lineno+=1
-
-                t_cls = ' class="%s%s"'%(class_prefix, t_cls) \
-                    if t_cls and t_cls != 'id' else ''
-                t_id  = ' id="%s%s"'%(id_prefix, t_id) if t_id else ''
-                s = ''
-                if thead:
-                    s += '<thead>'+pp+''.join([l for l in thead])+'</thead>'+pp
-                if not tbody: # tbody strings are in tout list
-                    tbody = tout
-                    tout = []
-                if tbody: # if tbody list is not empty:
-                    s += '<tbody>'+pp+''.join([l for l in tbody])+'</tbody>'+pp
-                if tout: # tfoot is not empty:
-                    s += '<tfoot>'+pp+''.join([l for l in tout])+'</tfoot>'+pp
-                s = '<table%s%s>%s%s</table>%s' % (t_cls, t_id, pp, s, pp)
-                mtag='t'
-            else:
-                # parse blockquote:
-                bq_begin=lineno
-                t_mode = False # embedded table
-                t_cls = ''
-                t_id = ''
-
-                # search blockquote closing line:
-                while lineno < strings_len:
-                    s = strings[lineno].strip()
-                    if not t_mode:
-                        m = regex_tq.match(s)
-                        if m:
-                            if (lineno+1 == strings_len or 
-                                '|' not in strings[lineno+1]):
-                               t_cls = m.group('c') or ''
-                               t_id = m.group('p') or ''
-                               break
-
-                        if regex_bq_headline.match(s):
-                            if (lineno+1 < strings_len and 
-                                strings[lineno+1].strip()):
-                                    t_mode = True
-                            lineno+=1
-                            continue
-                    elif regex_tq.match(s):
-                        t_mode=False
-                        lineno+=1
-                        continue
-
-                    lineno+=1
-
-                t_cls = ' class="%s%s"'%(class_prefix,t_cls) \
-                    if t_cls and t_cls != 'id' else ''
-                t_id  = ' id="%s%s"'%(id_prefix,t_id) \
-                    if t_id else ''
-                
-                s = '<blockquote%s%s>%s</blockquote>%s' \
-                         % (t_cls,
-                            t_id,
-                            '\n'.join(strings[bq_begin:lineno]),pp)
-                mtag='q'
-        else:
-            s = '<hr />'
-            lineno-=1
-            mtag='q'
-        return (s, 'q', lineno)
-
-    if sep == 'p':
-      pbeg = "<p>"
-      pend = "</p>"+pp
-      br = ''
-    else:
-      pbeg = pend = ''
-      br = "<br />"+pp if sep=='br' else ''
-
-    lev = 0    # nesting level of lists
-    c0 = ''    # first character of current line
-    out = []   # list of processed lines
-    etags = [] # trailing tags
-    ltags = [] # level# correspondent to trailing tag
-    tlev = []  # list of tags for each level ('ul' or 'ol')
-    mtag = ''  # marked tag (~last tag) ('l','.','h','p','t'). Used to set <br/>
-               # and to avoid <p></p> around tables and blockquotes
-    lineno = 0
-    strings_len = len(strings)
-    while lineno < strings_len:
-        s0 = strings[lineno][:1]
-        s = strings[lineno].strip()
-        """ #     +     -     .             ---------------------
-            ##    ++    --    ..   -------  field | field | field  <-title
-            ###   +++   ---   ...  quote    =====================
-            ####  ++++  ----  .... -------  field | field | field  <-body
-            ##### +++++ ----- .....         ---------------------:class[id]
-        """
-        pc0=c0 # first character of previous line
-        c0=s[:1]
-        if c0: # for non empty strings
-            if c0 in "#+-.": # first character is one of: # + - .
-                (t1,t2,p,ss) = regex_list.findall(s)[0]
-                # t1 - tag ("###")
-                # t2 - tag ("+++", "---", "...")
-                # p - paragraph point ('.')->for "++." or "--."
-                # ss - other part of string
-                if t1 or t2:
-                    # headers and lists:
-                    if c0 == '#': # headers
-                        (lev, mtag) = parse_title(t1, ss)
-                        lineno+=1
-                        continue
-                    elif c0 == '+': # ordered list
-                        (lev, mtag, lineno)= parse_list(t2, p, ss, 'ol', lev, mtag, lineno)
-                        lineno+=1
-                        continue
-                    elif c0 == '-': # unordered list, table or blockquote
-                        if p or ss:
-                            (lev, mtag, lineno) = parse_list(t2, p, ss, 'ul', lev, mtag, lineno)
-                            lineno+=1
-                            continue
-                        else:
-                            (s, mtag, lineno) = parse_table_or_blockquote(s, mtag, lineno)
-                    elif lev>0: # and c0 == '.' # paragraph in lists
-                        (lev, mtag, lineno) = parse_point(t2, ss, lev, mtag, lineno)
-                        lineno+=1
-                        continue
-
-            if lev == 0 and (mtag == 'q' or s == META):
-                # new paragraph
-                pc0=''
-
-            if pc0 == '' or (mtag != 'p' and s0 not in (' ','\t')):
-                # paragraph
-                out.extend(etags[::-1])
-                etags=[]
-                ltags=[]
-                tlev=[]
-                lev=0
-                if br and mtag == 'p': out.append(br)
-                if mtag != 'q' and s != META:
-                   if pend: etags=[pend]
-                   out.append(pbeg)
-                   mtag = 'p'
-                else:
-                   mtag = ''
-                out.append(s)
-            else:
-                if lev>0 and mtag=='.' and s == META:
-                    out.append(etags.pop())
-                    ltags.pop()
-                    out.append(s)
-                    mtag = ''
-                else:
-                    out.append(' '+s)
-        lineno+=1
-    out.extend(etags[::-1])
-    text = ''.join(out)
-
-    #############################################################
-    # do strong,em,del
-    #############################################################
-    text = regex_strong.sub('<strong>\g<t></strong>', text)
-    text = regex_del.sub('<del>\g<t></del>', text)
-    text = regex_em.sub('<em>\g<t></em>', text)
-
-    #############################################################
-    # deal with images, videos, audios and links
-    #############################################################
-    def sub_media(m):
-        t,a,k,p,w = m.group('t','a','k','p','w')
-        if not k:
-            return m.group(0)
-        k = escape(k)
-        t = t or ''
-        style = 'width:%s' % w if w else ''
-        title = ' title="%s"' % escape(a).replace(META, DISABLED_META) if a else ''
-        p_begin = p_end = ''
-        if p == 'center':
-            p_begin = '<p style="text-align:center">'
-            p_end = '</p>'+pp
-        elif p == 'blockleft':
-            p_begin = '<p style="text-align:left">'
-            p_end = '</p>'+pp
-        elif p == 'blockright':
-            p_begin = '<p style="text-align:right">'
-            p_end = '</p>'+pp
-        elif p in ('left','right'):
-            style = ('float:%s' % p)+(';%s' % style if style else '')
-        if t and regex_auto.match(t):
-            p_begin = p_begin + '<a href="%s">' % t
-            p_end = '</a>' + p_end
-            t = ''
-        if style:
-            style = ' style="%s"' % style
-        if p in ('video','audio'):
-            t = render(t, {}, {}, 'br', URL, environment, latex,
-                       autolinks, protolinks, class_prefix, id_prefix, pretty_print)
-            return '<%(p)s controls="controls"%(title)s%(style)s><source src="%(k)s" />%(t)s</%(p)s>' \
-                    % dict(p=p, title=title, style=style, k=k, t=t)
-        alt = ' alt="%s"'%escape(t).replace(META, DISABLED_META) if t else ''
-        return '%(begin)s<img src="%(k)s"%(alt)s%(title)s%(style)s />%(end)s' \
-                % dict(begin=p_begin, k=k, alt=alt, title=title, style=style, end=p_end)
-
-    def sub_link(m):
-        t,a,k,p = m.group('t','a','k','p')
-        if not k and not t:
-            return m.group(0)
-        t = t or ''
-        a = escape(a) if a else ''
-        if k:
-            if '#' in k and not ':' in k.split('#')[0]: 
-                # wikipage, not external url
-                k=k.replace('#','#'+id_prefix)
-            k = escape(k)
-            title = ' title="%s"' % a.replace(META, DISABLED_META) if a else ''
-            target = ' target="_blank"' if p == 'popup' else ''
-            t = render(t, {}, {}, 'br', URL, environment, latex, None,
-                       None, class_prefix, id_prefix, pretty_print) if t else k
-            return '<a href="%(k)s"%(title)s%(target)s>%(t)s</a>' \
-                   % dict(k=k, title=title, target=target, t=t)
-        if t == 'NEWLINE' and not a:
-            return '<br />'+pp
-        return '<span class="anchor" id="%s">%s</span>' % (
-            escape(id_prefix+t),
-            render(a, {},{},'br', URL,
-                   environment, latex, autolinks,
-                   protolinks, class_prefix,
-                   id_prefix, pretty_print))
-    
-    parts = text.split(LINK)
-    text = parts[0]
-    for i,s in enumerate(links):
-        if s == None:
-            html = LINK
-        else:
-            html = regex_media_level2.sub(sub_media, s)
-            if html == s:
-                html = regex_link_level2.sub(sub_link, html)
-            if html == s:
-                # return unprocessed string as a signal of an error
-                html = '[[%s]]'%s
-        text += html + parts[i+1]
-
-    #############################################################
-    # process all code text
-    #############################################################
-    def expand_meta(m):
-        code,b,p,s = segments.pop(0)
-        if code==None or m.group() == DISABLED_META:
-            return escape(s)
-        if b in extra:
-            if code[:1]=='\n': code=code[1:]
-            if code[-1:]=='\n': code=code[:-1]
-            if p:
-                return str(extra[b](code,p))
-            else:
-                return str(extra[b](code))
-        elif b=='cite':
-            return '['+','.join('<a href="#%s" class="%s">%s</a>' \
-                                   % (id_prefix+d,b,d) \
-                                   for d in escape(code).split(','))+']'
-        elif b=='latex':
-            return LATEX % urllib.quote(code)
-        elif b in html_colors:
-            return '<span style="color: %s">%s</span>' \
-                  % (b, render(code, {}, {}, 'br', URL, environment, latex,
-                      autolinks, protolinks, class_prefix, id_prefix, pretty_print))
-        elif b in ('c', 'color') and p:
-             c=p.split(':')
-             fg='color: %s;' % c[0] if c[0] else ''
-             bg='background-color: %s;' % c[1] if len(c)>1 and c[1] else ''
-             return '<span style="%s%s">%s</span>' \
-                 % (fg, bg, render(code, {}, {}, 'br', URL, environment, latex,
-                    autolinks, protolinks, class_prefix, id_prefix, pretty_print))
-        cls = ' class="%s%s"'%(class_prefix,b) if b and b != 'id' else ''
-        id  = ' id="%s%s"'%(id_prefix,escape(p)) if p else ''
-        beg=(code[:1]=='\n')
-        end=[None,-1][code[-1:]=='\n']
-        if beg and end:
-            return '<pre><code%s%s>%s</code></pre>%s' % (cls, id, escape(code[1:-1]), pp)
-        return '<code%s%s>%s</code>' % (cls, id, escape(code[beg:end]))
-    text = regex_expand_meta.sub(expand_meta, text)
-
-    if environment:
-        text = replace_components(text,environment)
-
-    return text.translate(ttab_out)
-
-
-def markmin2html(text, extra={}, allowed={}, sep='p',
-                 autolinks='default', protolinks='default',
-                 class_prefix='', id_prefix='markmin_', pretty_print=False):
-    return render(text, extra, allowed, sep,
-                  autolinks=autolinks, protolinks=protolinks,
-                  class_prefix=class_prefix, id_prefix=id_prefix,
-                  pretty_print=pretty_print)
-
-def run_doctests():
-    import doctest
-    doctest.testmod()
-
-if __name__ == '__main__':
-    import sys
-    import doctest
-    from textwrap import dedent
-
-    html=dedent("""
-         <!doctype html>
-         <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
-         <head>
-         <meta http-equiv="content-type" content="text/html; charset=utf-8" />
-         %(style)s
-         <title>%(title)s</title>
-         </head>
-         <body>
-         %(body)s
-         </body>
-         </html>""")[1:]
-
-    if sys.argv[1:2] == ['-h']:
-        style=dedent("""
-              <style>
-                blockquote { background-color: #FFFAAE; padding: 7px; }
-                table { border-collapse: collapse; }
-                thead td { border-bottom: 1px solid; }
-                tfoot td { border-top: 1px solid; }
-                .tableclass1 { background-color: lime; }
-                .tableclass1 thead { color: yellow; background-color: green; }
-                .tableclass1 tfoot { color: yellow; background-color: green; }
-                .tableclass1 .even td { background-color: #80FF7F; }
-                .tableclass1 .first td {border-top: 1px solid; }
-
-                td.num { text-align: right; }
-                pre { background-color: #E0E0E0; padding: 5px; }
-              </style>""")[1:]
-
-        print html % dict(title="Markmin markup language",
-                          style=style,
-                          body=markmin2html(__doc__, pretty_print=True))
-    elif sys.argv[1:2] == ['-t']:
-        from timeit import Timer
-        loops=1000
-        ts = Timer("markmin2html(__doc__)","from markmin2html import markmin2html")
-        print 'timeit "markmin2html(__doc__)":'
-        t = min([ts.timeit(loops) for i in range(3)])
-        print "%s loops, best of 3: %.3f ms per loop" % (loops, t/1000*loops)
-    elif len(sys.argv) > 1:
-        fargv = open(sys.argv[1],'r')
-        try:
-            markmin_text=fargv.read()
-
-            # embed css file from second parameter into html file
-            if len(sys.argv) > 2:
-                if sys.argv[2].startswith('@'):
-                    markmin_style = '<link rel="stylesheet" href="'+sys.argv[2][1:]+'"/>'
-                else:
-                    fargv2 = open(sys.argv[2],'r')
-                    try:
-                        markmin_style = "<style>\n" + fargv2.read() + "</style>"
-                    finally:
-                        fargv2.close()
-            else:
-                markmin_style = ""
-
-            print html % dict(title=sys.argv[1], style=markmin_style,
-                              body=markmin2html(markmin_text, pretty_print=True))
-        finally:
-            fargv.close()
-
-    else:
-        print "Usage: "+sys.argv[0]+" -h | -t | file.markmin [file.css|@path_to/css]"
-        print "where: -h  - print __doc__"
-        print "       -t  - timeit __doc__ (for testing purpuse only)"
-        print "       file.markmin  [file.css] - process file.markmin + built in file.css (optional)"
-        print "       file.markmin  [@path_to/css] - process file.markmin + link path_to/css (optional)"
-        run_doctests()
-        

+ 0 - 291
frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin2latex.py

@@ -1,291 +0,0 @@
-#!/usr/bin/env python
-# created my Massimo Di Pierro
-# license MIT/BSD/GPL
-import re
-import cgi
-import sys
-import doctest
-from optparse import OptionParser
-
-__all__ = ['render','markmin2latex']
-
-META = 'META'
-regex_newlines = re.compile('(\n\r)|(\r\n)')
-regex_dd=re.compile('\$\$(?P<latex>.*?)\$\$')
-regex_code = re.compile('('+META+')|(``(?P<t>.*?)``(:(?P<c>\w+))?)',re.S)
-regex_title = re.compile('^#{1} (?P<t>[^\n]+)',re.M)
-regex_maps = [
-    (re.compile('[ \t\r]+\n'),'\n'),
-    (re.compile('\*\*(?P<t>[^\s\*]+( +[^\s\*]+)*)\*\*'),'{\\\\bf \g<t>}'),
-    (re.compile("''(?P<t>[^\s']+( +[^\s']+)*)''"),'{\\it \g<t>}'),
-    (re.compile('^#{5,6}\s*(?P<t>[^\n]+)',re.M),'\n\n{\\\\bf \g<t>}\n'),
-    (re.compile('^#{4}\s*(?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsubsection{\g<t>}\n'),
-    (re.compile('^#{3}\s*(?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsection{\g<t>}\n'),
-    (re.compile('^#{2}\s*(?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\section{\g<t>}\n'),
-    (re.compile('^#{1}\s*(?P<t>[^\n]+)',re.M),''),
-    (re.compile('^\- +(?P<t>.*)',re.M),'\\\\begin{itemize}\n\\item \g<t>\n\\end{itemize}'),
-    (re.compile('^\+ +(?P<t>.*)',re.M),'\\\\begin{itemize}\n\\item \g<t>\n\\end{itemize}'),
-    (re.compile('\\\\end\{itemize\}\s+\\\\begin\{itemize\}'),'\n'),
-    (re.compile('\n\s+\n'),'\n\n')]
-regex_table = re.compile('^\-{4,}\n(?P<t>.*?)\n\-{4,}(:(?P<c>\w+))?\n',re.M|re.S)
-
-regex_anchor = re.compile('\[\[(?P<t>\S+)\]\]')
-regex_bibitem = re.compile('\-\s*\[\[(?P<t>\S+)\]\]')
-regex_image_width = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +(?P<p>left|right|center) +(?P<w>\d+px)\]\]')
-regex_image = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +(?P<p>left|right|center)\]\]')
-#regex_video = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +video\]\]')
-#regex_audio = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+) +audio\]\]')
-regex_link = re.compile('\[\[(?P<t>[^\]]*?) +(?P<k>\S+)\]\]')
-regex_auto = re.compile('(?<!["\w])(?P<k>\w+://[\w\.\-\?&%\:]+)',re.M)
-regex_commas = re.compile('[ ]+(?P<t>[,;\.])')
-regex_noindent = re.compile('\n\n(?P<t>[a-z])')
-#regex_quote_left = re.compile('"(?=\w)')
-#regex_quote_right = re.compile('(?=\w\.)"')
-
-def latex_escape(text,pound=True):
-    text=text.replace('\\','{\\textbackslash}')
-    for c in '^_&$%{}': text=text.replace(c,'\\'+c)
-    text=text.replace('\\{\\textbackslash\\}','{\\textbackslash}')
-    if pound: text=text.replace('#','\\#')
-    return text
-
-def render(text,
-           extra={},
-           allowed={},
-           sep='p',
-           image_mapper=lambda x:x,
-           chapters=False):
-    #############################################################
-    # replace all blocks marked with ``...``:class with META
-    # store them into segments they will be treated as code
-    #############################################################
-    text = str(text or '')
-    segments, i = [], 0
-    text = regex_dd.sub('``\g<latex>``:latex ',text)
-    text = regex_newlines.sub('\n',text)
-    while True:
-        item = regex_code.search(text,i)
-        if not item: break
-        if item.group()==META:
-            segments.append((None,None))
-            text = text[:item.start()]+META+text[item.end():]
-        else:
-            c = item.group('c') or ''
-            if 'code' in allowed and not c in allowed['code']: c = ''
-            code = item.group('t').replace('!`!','`')
-            segments.append((code,c))
-            text = text[:item.start()]+META+text[item.end():]
-        i=item.start()+3
-
-
-    #############################################################
-    # do h1,h2,h3,h4,h5,h6,b,i,ol,ul and normalize spaces
-    #############################################################
-
-    title = regex_title.search(text)
-    if not title: title='Title'
-    else: title=title.group('t')
-
-    text = latex_escape(text,pound=False)
-
-    texts = text.split('## References',1)
-    text = regex_anchor.sub('\\label{\g<t>}', texts[0])
-    if len(texts)==2:
-        text += '\n\\begin{thebibliography}{999}\n'
-        text += regex_bibitem.sub('\n\\\\bibitem{\g<t>}', texts[1])
-        text += '\n\\end{thebibliography}\n'
-
-    text = '\n'.join(t.strip() for t in text.split('\n'))
-    for regex, sub in regex_maps:
-        text = regex.sub(sub,text)
-    text=text.replace('#','\\#')
-    text=text.replace('`',"'")
-
-    #############################################################
-    # process tables and blockquotes
-    #############################################################
-    while True:
-        item = regex_table.search(text)
-        if not item: break
-        c = item.group('c') or ''
-        if 'table' in allowed and not c in allowed['table']: c = ''
-        content = item.group('t')
-        if ' | ' in content:
-            rows = content.replace('\n','\\\\\n').replace(' | ',' & ')
-            row0,row2 = rows.split('\\\\\n',1)
-            cols=row0.count(' & ')+1
-            cal='{'+''.join('l' for j in range(cols))+'}'
-            tabular = '\\begin{center}\n{\\begin{tabular}'+cal+'\\hline\n' + row0+'\\\\ \\hline\n'+row2 + ' \\\\ \\hline\n\\end{tabular}}\n\\end{center}'
-            if row2.count('\n')>20: tabular='\\newpage\n'+tabular
-            text = text[:item.start()] + tabular + text[item.end():]
-        else:
-            text = text[:item.start()] + '\\begin{quote}' + content + '\\end{quote}' + text[item.end():]
-
-    #############################################################
-    # deal with images, videos, audios and links
-    #############################################################
-
-    def sub(x):
-        f=image_mapper(x.group('k'))
-        if not f: return None
-        return '\n\\begin{center}\\includegraphics[width=8cm]{%s}\\end{center}\n' % (f)
-    text = regex_image_width.sub(sub,text)
-    text = regex_image.sub(sub,text)
-
-    text = regex_link.sub('{\\\\footnotesize\\href{\g<k>}{\g<t>}}', text)
-    text = regex_commas.sub('\g<t>',text)
-    text = regex_noindent.sub('\n\\\\noindent \g<t>',text)
-
-    ### fix paths in images
-    regex=re.compile('\\\\_\w*\.(eps|png|jpg|gif)')
-    while True:
-        match=regex.search(text)
-        if not match: break
-        text=text[:match.start()]+text[match.start()+1:]
-    #text = regex_quote_left.sub('``',text)
-    #text = regex_quote_right.sub("''",text)
-
-    if chapters:
-        text=text.replace(r'\section*{',r'\chapter*{')
-        text=text.replace(r'\section{',r'\chapter{')
-        text=text.replace(r'subsection{',r'section{')
-
-    #############################################################
-    # process all code text
-    #############################################################
-    parts = text.split(META)
-    text = parts[0]
-    authors = []
-    for i,(code,b) in enumerate(segments):
-        if code==None:
-            html = META
-        else:
-            if b=='hidden':
-                html=''
-            elif b=='author':
-                author = latex_escape(code.strip())
-                authors.append(author)
-                html=''
-            elif b=='inxx':
-                html='\inxx{%s}' % latex_escape(code)
-            elif b=='cite':
-                html='~\cite{%s}' % latex_escape(code.strip())
-            elif b=='ref':
-                html='~\ref{%s}' % latex_escape(code.strip())
-            elif b=='latex':
-                if '\n' in code:
-                    html='\n\\begin{equation}\n%s\n\\end{equation}\n' % code.strip()
-                else:
-                    html='$%s$' % code.strip()
-            elif b=='latex_eqnarray':
-                code=code.strip()
-                code='\\\\'.join(x.replace('=','&=&',1) for x in code.split('\\\\'))
-                html='\n\\begin{eqnarray}\n%s\n\\end{eqnarray}\n' % code
-            elif b.startswith('latex_'):
-                key=b[6:]
-                html='\\begin{%s}%s\\end{%s}' % (key,code,key)
-            elif b in extra:
-                if code[:1]=='\n': code=code[1:]
-                if code[-1:]=='\n': code=code[:-1]
-                html = extra[b](code)
-            elif code[:1]=='\n' or code[:-1]=='\n':
-                if code[:1]=='\n': code=code[1:]
-                if code[-1:]=='\n': code=code[:-1]
-                if code.startswith('<') or code.startswith('{{') or code.startswith('http'):
-                    html = '\\begin{lstlisting}[keywords={}]\n%s\n\\end{lstlisting}' % code
-                else:
-                    html = '\\begin{lstlisting}\n%s\n\\end{lstlisting}' % code
-            else:
-                if code[:1]=='\n': code=code[1:]
-                if code[-1:]=='\n': code=code[:-1]
-                html = '{\\ft %s}' % latex_escape(code)
-        try:
-            text = text+html+parts[i+1]
-        except:
-            text = text + '... WIKI PROCESSING ERROR ...'
-            break
-    text =  text.replace(' ~\\cite','~\\cite')
-    return text, title, authors
-
-WRAPPER = """
-\\documentclass[12pt]{article}
-\\usepackage{hyperref}
-\\usepackage{listings}
-\\usepackage{upquote}
-\\usepackage{color}
-\\usepackage{graphicx}
-\\usepackage{grffile}
-\\usepackage[utf8x]{inputenc}
-\\definecolor{lg}{rgb}{0.9,0.9,0.9}
-\\definecolor{dg}{rgb}{0.3,0.3,0.3}
-\\def\\ft{\\small\\tt}
-\\lstset{
-   basicstyle=\\footnotesize,
-   breaklines=true, basicstyle=\\ttfamily\\color{black}\\footnotesize,
-   keywordstyle=\\bf\\ttfamily,
-   commentstyle=\\it\\ttfamily,
-   stringstyle=\\color{dg}\\it\\ttfamily,
-   numbers=left, numberstyle=\\color{dg}\\tiny, stepnumber=1, numbersep=5pt,
-   backgroundcolor=\\color{lg}, tabsize=4, showspaces=false,
-   showstringspaces=false
-}
-\\title{%(title)s}
-\\author{%(author)s}
-\\begin{document}
-\\maketitle
-\\tableofcontents
-\\newpage
-%(body)s
-\\end{document}
-"""
-
-def markmin2latex(data, image_mapper=lambda x:x, extra={},
-                  wrapper=WRAPPER):
-    body, title, authors = render(data, extra=extra, image_mapper=image_mapper)
-    author = '\n\\and\n'.join(a.replace('\n','\\\\\n\\footnotesize ') for a in authors)
-    return wrapper % dict(title=title, author=author, body=body)
-
-if __name__ == '__main__':
-    parser = OptionParser()
-    parser.add_option("-i", "--info", dest="info",
-                      help="markmin help")
-    parser.add_option("-t", "--test", dest="test", action="store_true",
-                      default=False)
-    parser.add_option("-n", "--no_wrapper", dest="no_wrapper",
-                      action="store_true",default=False)
-    parser.add_option("-c", "--chapters", dest="chapters",action="store_true",
-                      default=False,help="switch section for chapter")
-    parser.add_option("-w", "--wrapper", dest="wrapper", default=False,
-                      help="latex file containing header and footer")
-
-    (options, args) = parser.parse_args()
-    if options.info:
-        import markmin2html
-        markmin2latex(markmin2html.__doc__)
-    elif options.test:
-        doctest.testmod()
-    else:
-        if options.wrapper:
-            fwrapper = open(options.wrapper,'rb')
-            try:
-                wrapper = fwrapper.read()
-            finally:
-                fwrapper.close()
-        elif options.no_wrapper:
-            wrapper  = '%(body)s'
-        else:
-            wrapper = WRAPPER
-        for f in args:
-            fargs = open(f,'r')
-            content_data = []
-            try:
-                content_data.append(fargs.read())
-            finally:
-                fargs.close()
-        content = '\n'.join(content_data)
-        output= markmin2latex(content,
-                              wrapper=wrapper,
-                              chapters=options.chapters)
-        print output
-
-

+ 0 - 130
frameworks/Python/web2py/web2py/gluon/contrib/markmin/markmin2pdf.py

@@ -1,130 +0,0 @@
-"""
-Created by Massimo Di Pierro
-License BSD
-"""
-
-import subprocess
-import os
-import os.path
-import re
-import sys
-from tempfile import mkstemp, mkdtemp, NamedTemporaryFile
-from markmin2latex import markmin2latex
-
-__all__ = ['markmin2pdf']
-
-def removeall(path):
-
-    ERROR_STR= """Error removing %(path)s, %(error)s """
-    def rmgeneric(path, __func__):
-        try:
-            __func__(path)
-        except OSError, (errno, strerror):
-            print ERROR_STR % {'path' : path, 'error': strerror }
-
-    files=[path]
-
-    while files:
-        file=files[0]
-        if os.path.isfile(file):
-            f=os.remove
-            rmgeneric(file, os.remove)
-            del files[0]
-        elif os.path.isdir(file):
-            nested = os.listdir(file)
-            if not nested:
-                rmgeneric(file, os.rmdir)
-                del files[0]
-            else:
-                files = [os.path.join(file,x) for x in nested] + files
-
-
-def latex2pdf(latex, pdflatex='pdflatex', passes=3):
-    """
-    calls pdflatex in a tempfolder
-
-    Arguments:
-
-    - pdflatex: path to the pdflatex command. Default is just 'pdflatex'.
-    - passes:   defines how often pdflates should be run in the texfile.
-    """
-
-    pdflatex=pdflatex
-    passes=passes
-    warnings=[]
-
-    # setup the envoriment
-    tmpdir = mkdtemp()
-    texfile = open(tmpdir+'/test.tex','wb')
-    texfile.write(latex)
-    texfile.seek(0)
-    texfile.close()
-    texfile = os.path.abspath(texfile.name)
-
-    # start doing some work
-    for i in range(0, passes):
-        logfd,logname = mkstemp()
-        outfile=os.fdopen(logfd)
-        try:
-            ret = subprocess.call([pdflatex,
-                                   '-interaction=nonstopmode',
-                                   '-output-format', 'pdf',
-                                   '-output-directory', tmpdir,
-                                   texfile],
-                                  cwd=os.path.dirname(texfile), stdout=outfile,
-                                  stderr=subprocess.PIPE)
-        finally:
-            outfile.close()
-        re_errors=re.compile('^\!(.*)$',re.M)
-        re_warnings=re.compile('^LaTeX Warning\:(.*)$',re.M)
-        flog = open(logname)
-        try:
-            loglines = flog.read()
-        finally:
-            flog.close()
-        errors=re_errors.findall(loglines)
-        warnings=re_warnings.findall(loglines)
-        os.unlink(logname)
-
-    pdffile=texfile.rsplit('.',1)[0]+'.pdf'
-    if os.path.isfile(pdffile):
-        fpdf = open(pdffile, 'rb')
-        try:
-            data = fpdf.read()
-        finally:
-            fpdf.close()
-    else:
-        data = None
-    removeall(tmpdir)
-    return data, warnings, errors
-
-
-def markmin2pdf(text, image_mapper=lambda x: None, extra={}):
-    return latex2pdf(markmin2latex(text,image_mapper=image_mapper, extra=extra))
-
-
-if __name__ == '__main__':
-    import sys
-    import doctest
-    import markmin2html
-    if sys.argv[1:2]==['-h']:
-        data, warnings, errors = markmin2pdf(markmin2html.__doc__)
-        if errors:
-            print 'ERRORS:'+'\n'.join(errors)
-            print 'WARNGINS:'+'\n'.join(warnings)
-        else:
-            print data
-    elif len(sys.argv)>1:
-        fargv = open(sys.argv[1],'rb')
-        try:
-            data, warnings, errors = markmin2pdf(fargv.read())
-        finally:
-            fargv.close()
-        if errors:
-            print 'ERRORS:'+'\n'.join(errors)
-            print 'WARNGINS:'+'\n'.join(warnings)
-        else:
-            print data
-    else:
-        doctest.testmod()
-

+ 0 - 362
frameworks/Python/web2py/web2py/gluon/contrib/memcache/ChangeLog

@@ -1,362 +0,0 @@
-Sun, 27 Nov 2011 18:15:32 -0700  Sean Reifschneider  <[email protected]>
-
-   * Bug #745633: Values of maximum size are not stored
-   API inconsistency, max value length was tested for <= while max KEY
-   length was <.  So I picked that keys and values *LONGER* than the
-   specified max value are what is used, and added documentation and tests
-   to that effect.  The test for max value tested that length plus 4, so
-   I've changed that to be that value plus 1.  Issue found by matt-quru.
-
-   * Bug #713488: Issues Invalid "delete" command.
-   Protocol has changed so that the "delete" operation no longer takes a
-   "time" argument.  It seems that some servers will refuse a "delete key
-   0" while others will accept it, but the official server will NOT accept
-   "delete key 1".  So I've changed it so that if no "time" argument is
-   specified, no time argument is sent to the server.
-
-   * Bug #713451: server.expect("END") needs to be in a finally block
-   Expect an "END" when the _recv_value() raises an exception.
-   Patch by Jay Farrimond.
-
-   * Bug: #741090: cas cache can grow unbounded.  Default now is that the
-   cache is not used, unless the "Client()" object is created with
-   "cache_cas=True".  In that case, you need to have your own cas clearing
-   code, a simple one would be to use Client().reset_cas() to completely
-   clear the cas_ids cache.  Problem pointed out by Shaun Cutts.
-
-   * Bug #728359: Make python-memcache work on memcache restarts.
-   Patch by Tarek Ziade', reviewed and further patches submitted by Hugo
-   Beauze'e-Luysse and Neganov Alexandr.
-
-   * Bug #798342: If memcached server sends unknown flag in response for
-   "get", results in:
-      "UnboundLocalError: local variable 'val' referenced before assignment"
-   Now returns "None" instead.  Patch by Sharoon Thomas
-
-Mon, 20 Dec 2010 19:14:17 -0700  Sean Reifschneider  <[email protected]>
-
-   * Bug #680359: useOldServerHashFunction() is broken.  It now correctly
-     switches back to the old memcache hash function.
-
-Thu, 16 Dec 2010 02:07:40 -0700  Sean Reifschneider  <[email protected]>
-
-   * Bug #471727: Changed the delete() code to explicitly check for both
-     NOT_FOUND and DELETED as the responses and return successful for both.
-     It also logs an error if one of these two responses is not found.
-     Also added a test to ensure that delete() works.
-
-   * When using set_multi and one value is too big, traceback
-     TypeError: 'int' object is unsubscriptable
-     Patch by Orjan Persson
-
-   * Fixing Bug #529855: Server host can now be bare host without ":<port>".
-     Fix proposed by Roger Binns.
-
-   * Fixing Bug #491164: Typo fix, "compession" -> "compRession".
-
-   * Fixing Bug #509712: "TypeError: 'NoneType' object is unsubscriptable"
-     Also fixed some other similar code to not have issues with that.
-
-   * Also related to 509712 and 628339: readline() now returns '' instead
-     of None when a server dies.  This should be safer.  Patch suggested by
-     Denis Otkidach.
-
-   * Fixing Bug #628339: Read from server sometimes fails.  Patch by Jeremy
-     Cowles.
-
-   * Fixing Bug #633553: Add stat arguments support to get_stats().  Patch
-     by Ryan Lane.
-
-   * Changing the license to the PSF License.
-
-   * Removing Evan's e-mail address at his request, changing authorship to
-     Sean.
-
-Sat, 28 Nov 2009 01:07:42 -0700  Sean Reifschneider  <[email protected]>
-
-   * Version 1.45
-
-   * Per-connection max server key length.  Patch by Nicolas Delaby
-
-   * Patches to make memcached more garbage-collectable.  Removes
-     "debugfunc" argument from _Host objects and changed to "debug"
-     boolean.  Patches by John McFarlane and Aryeh Katz.
-
-   * Switching to a cmemcache compatible hash function.  Implemented by
-     André Cru and Ludvig Ericson.  To switch back to the old style, use:
-
-        memcached.useOldServerHashFunction()
-
-   * Rejecting keys that have spaces in them.  Patch by Etienne Posthumus.
-
-   * Fixing exception raising syntax.  Patch by Samuel Stauffer.
-
-   * Optimizations in read code.  Patch by Samuel Stauffer.
-
-   * Changing classes to be newstyle.  Patch by Samuel Stauffer.
-
-   * Changed "has_key" to "in".  Patch by Samuel Stauffer.
-
-   * incr/decr were raising ValueError if the key did not exist, the
-     docstring said it returned none.  Patch by Chihiro Sakatoku.
-
-   * Adding cas method, submitted by Ben Gutierrez.
-
-   * Fix in the docstring for how to use the "set" method.  Found and fixed
-     by William McVey
-
-Thu, 02 Apr 2009 13:37:49 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.44
-
-   * Allowing spaces in the key.  (Patch provided by xmm on Launchpad)
-
-   * Detecting when the pickler needs a positional argument.  (Patch
-     provided by Brad Clements on Launchpad)
-
-   * Moving length check after the compression.  (Patch provided by user
-     Tom on Launchpad)
-
-   * Fixing arguments passed to the _Error if invalid read length.
-
-   * Fixing the representation of domain sockets.  (Patch provided by user
-     MTB on Launchpad)
-
-   * Changing a typo of dead_until.  (Patch provided by Shane R. Spencer)
-
-   * Providing better error messages (patch provided by Johan Euphrosine).
-
-   * Adding get_slabs() function to get stats.  (Patch provided
-     by Nick Verbeck)
-
-Sun, 01 Jun 2008 15:05:11 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.43
-
-   *  eliott reported a bug in the 1.42 related to the socket timeout code
-      causing a traceback due to the timeout value not being set.
-
-Sat, 31 May 2008 02:09:17 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.42
-
-   *  Paul Hummer set up a Launchpad project which I'm going to start using
-      to track patches and allow users to set up their own bzr branches and
-      manage marging in the upstream patches with their own.
-
-         https://launchpad.net/python-memcached
-
-   *  Patch from Jehiah Czebotar which does: Changing the calls to
-      mark_dead() to make them dereference tuples, reducing timeout on
-      sockets to 3 seconds, settable via setting Host._SOCKET_TIMEOUT.
-
-   *  Patches from Steve Schwarz for set_multi() to return the full set of
-      keys if all servers are down.  Previously would not report any keys.
-
-   *  Fix from Steve Schwarz delete_multi() argument "seconds" not being
-      correctly handled.  Changed it to "time" to match all other calls.
-
-   *  Patch from Peter Wilkinson to support using unix domain sockets.
-      He reports that tests succeed with with memcached daemons running,
-      the normal and a domain socket started via
-      "memcached -s memcached.socket".  I massaged it quite a bit.
-
-      To use domain sockets, use a connect string of "unix:/path/to/socket"
-      Note however that if you are using a host name of "unix", it will now
-      detect "unix:11211" as being a domain socket with the name "11211".
-      In this case, please use "inet:unix:11211".
-
-      Because of this, it is now preferred to use a connect string prefix
-      of "inet:" or "unix:".
-
-Tue, 29 Apr 2008 21:03:53 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.41
-
-   * Patch from Jehiah Czebotar to catch an additional server disconnect
-     situation.
-
-   * Patch from Andrey Petrov to add the "append" and "replace" commands.
-
-Tue, 18 Sep 2007 20:52:09 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.40
-
-   * Updated setup.py file that uses distutils provided by Kai Lautaportti.
-
-   * Prevent keys from containing ASCII character 127 as well, patch provided
-     by Philip Neustrom.
-
-   * Added ability to overload the persistent_load/id, patch provided by
-     Steve Schwarz.
-
-   * Fixed ability to pass (server_hash,key) in place of key in Client.set()
-     Reported by Alexander Klyuev.
-
-Tue, 14 Aug 2007 14:43:27 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.39
-
-   * Michael Krause reports the previous version doesn't work for
-     _val_to_store_info() calls because it's defined as a staticmethod.
-     Removing staticmethod decorator.  Also confirmed by Kai Lautaportti,
-     with suggested fix of removing staticmethod.
-
-Fri, 10 Aug 2007 17:50:13 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.38
-
-   * Matt McClanahan submitted a patch that allow add() to have a
-     min_compress_len argument.
-
-   * Steve Schwarz submitted a patch allowing user-defined picklers.
-
-   * Michael Krause suggested checking the return value to prevent an
-     exception from being raised in _set() when a value is too large to be
-     stored.
-
-Fri, 27 Jul 2007 01:55:48 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.37
-
-   * Fixing call from add() to _set() with parameter for min_compress_len.
-     Reported by Jeff Fisher.
-
-Thu, 07 Jun 2007 04:10:31 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.36
-
-   * Patch by Dave St.Germain to make the Client() class sub-class
-     threadlocal to help with multi-threading issues.  Only available in
-     Python 2.4 and above.
-
-   * Patch by James Robinson with:
-      1) new set_multi method.
-      2) factored out determining the flags, length, and value to store
-         from set() into method _val_to_store_info() for use by both set()
-         and set_multi().
-      3) send_cmds() method on host which doesn't apply the trailing '\r\n'
-         for use by set_multi.
-      4) check_key() extended a bit to allow for testing the prefix passed
-         to set_multi just once, not once per each key.
-      5) Patch also enables support for auto compression in set, set_multi,
-         and replace.
-
-   * Suggestion by Helge Tesdal, fixes in check_key for non-string keys.
-
-   * NOTE: On a farm of clients with multiple servers, all clients will
-     need to be upgraded to this version.  The next patch changes the
-     server hash.
-
-   * Philip Neustrom supplied a patch to change the server hash function to
-     binascii.crc32.  The original "hash()" call is not cross-platform, so
-     big and little endian systems accessing the same memcache may end up
-     hitting different servers.  Restore the old functionality by calling:
-     "memcached.serverHashFunction = hash" after importing memcache.
-
-   * Philip Neustrom points out that passing Unicode keys or values causes
-     problems because len(key) or len(value) is not equal to the number of
-     bytes that are required to store the key/value.  Philip provides a
-     patch which raises an exception in this case.  Raises
-     memcache.Client.MemcachedStringEncodingError exception in this case.
-
-   * NOTE: If you recompiled memcached to increase the default 1MB max
-     value size, you will need to call "memcached.MAX_SERVER_VALUE_LENGTH = N"
-     or memcached will not store values larger than the default 1MB.
-
-   * Philip Neustrom includes another patch which checks that the key
-     doesn't exceed the memcache server's max size.  If it does, the item
-     is silently not stored.
-
-   * Philip Neustrom added a bunch of sanity checks.
-
-   * Jehiah Czebotar provided a patch to make the add() and replace()
-     functions return 0 when the add or replace fails, similar to how set()
-     works.
-
-Sat, 16 Sep 2006 18:31:46 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.34
-
-   * In get_multi, if the recv loop reads 0 bytes, raising an EOFError.
-     Identified by Jim Baker.
-
-Tue, 05 Sep 2006 14:06:50 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.33
-
-   * Including patch from Yoshinori K. Okuji to read in larger chunks for
-   readline() calls.  This should dramatically improve performance under
-   some circumstances.
-
-Sun, 03 Sep 2006 14:02:03 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.32
-
-   * Including patch from Philip Neustrom which checks keys sent to the
-   server for length and bad characters.
-
-Sat, 20 May 2006 14:51:28 -0600  Sean Reifschneider  <[email protected]>
-
-   * Version 1.31
-
-   *  Rolled version 1.30 since the Danga folks are now listing this
-   version as the official version.  Removing the "tummy" from the version
-   number, and incrementing so that it's clear it's more recent than "1.2".
-
-   * Patch applied from Simon Forman for handling of weighted hosts.
-
-   * Added a little more meat to the README.
-
-Sat, 28 Jan 2006 15:59:50 -0700  Sean Reifschneider  <[email protected]>
-
-   * cludwin at socallocal suggested that the write-combining with
-   sendall() may not be beneficial.  After testing on both SMP and non-SMP
-   machines, I can't see a significant benefit to not doing the
-   write-combining, even on large strings.  The benefits of write-combining
-   on smaller strings seems to be significant on UP machines in tight loops.
-   Even on strings that are larger than 2MB, there seems to be no benefit to
-   splitting out the writes.
-
-Sun, 18 Sep 2005 18:56:31 -0600  Sean Reifschneider  <[email protected]>
-
-   * Changing a printf to debuglog and catching a pickle exception, patch
-   submitted by Justin Azoff.
-
-Thu, 14 Jul 2005 11:17:30 -0700  Sean Reifschneider  <[email protected]>
-
-   * Alex Stapleton found that the sendall call was slow for writing data
-   larger than several kilobytes.  I had him test a change to his patch,
-   which worked as well, but was simpler.  The code now does two sendall
-   calls, one for the data and one for the line termination, if the data is
-   larger than 100 bytes.
-
-Thu, 7 Apr 2005 14:45:44 -0700  Sean Reifschneider  <[email protected]>
-
-   * Incorporating some fixes to get_multi() from Bo Yang
-
-Mon, 13 Dec 2004 02:35:17 -0700  Sean Reifschneider  <[email protected]>
-
-   * Simplifying the readline() function and speeding it up ~25%.
-   * Fixing a bug in readline() if the server drops, mark_dead() was not
-      being properly called.
-
-Sun, 12 Dec 2004 18:56:33 -0700  Sean Reifschneider  <[email protected]>
-
-   * Adding "stats()" and "flush_all()" methods.
-
-Thu, 10 Aug 2003 12:17:50 -0700  Evan Martin  <[email protected]>
-
-   * Slightly more verbose self-test output.
-   * Fix mark_dead() to use proper classname.
-   * Make pooltest.py run from the test directory.
-
-Thu, 07 Aug 2003 16:32:32 -0700  Evan Martin  <[email protected]>
-
-   * Add incr, decr, and delete.
-   * Better Python (based on comments from Uriah Welcome).
-   * Docs, using epydoc.
-
-Thu, 07 Aug 2003 14:20:27 -0700  Evan Martin  <[email protected]>
-
-   * Initial prerelease.

+ 0 - 10
frameworks/Python/web2py/web2py/gluon/contrib/memcache/PKG-INFO

@@ -1,10 +0,0 @@
-Metadata-Version: 1.0
-Name: python-memcached
-Version: 1.48
-Summary: A Python memcached client library.
-Home-page: http://www.tummy.com/Community/software/python-memcached/
-Author: Sean Reifschneider
-Author-email: [email protected]
-License: Python Software Foundation License
-Description: A Python memcached client library.
-Platform: UNKNOWN

+ 0 - 8
frameworks/Python/web2py/web2py/gluon/contrib/memcache/README

@@ -1,8 +0,0 @@
-This software is a 100% Python interface to the memcached memory cache
-daemon.  It is the client side software which allows storing values in one
-or more, possibly remote, memcached servers.  Search google for memcached
-for more information.
-
-This package was originally written by Evan Martin of Danga.
-Please do not contact Evan about maintenance.
-Sean Reifschneider of tummy.com, ltd. has taken over maintenance of it.

+ 0 - 111
frameworks/Python/web2py/web2py/gluon/contrib/memcache/__init__.py

@@ -1,111 +0,0 @@
-from gluon.contrib.memcache.memcache import Client
-from gluon.cache import CacheAbstract
-import time
-
-"""
-examle of usage:
-
-cache.memcache = MemcacheClient(request,[127.0.0.1:11211],debug=true)
-"""
-
-import cPickle as pickle
-import thread
-from gluon import current
-
-DEFAULT_TIME_EXPIRE = 300 # seconds (must be the same as cache.ram)
-
-def MemcacheClient(*a, **b):
-    if not hasattr(current,'__memcache_client'):
-        current.__memcache_client = MemcacheClientObj(*a, **b)
-    return current.__memcache_client
-
-class MemcacheClientObj(Client):
-
-    meta_storage = {}
-    max_time_expire = 24*3600
-
-    def __init__(self, request, servers, debug=0, pickleProtocol=0,
-                 pickler=pickle.Pickler, unpickler=pickle.Unpickler,
-                 pload=None, pid=None,
-                 default_time_expire = DEFAULT_TIME_EXPIRE):
-        self.request=request
-        self.default_time_expire = default_time_expire
-        if request:
-            app = request.application
-        else:
-            app = ''
-        Client.__init__(self, servers, debug, pickleProtocol,
-                        pickler, unpickler, pload, pid)
-        if not app in self.meta_storage:
-            self.storage = self.meta_storage[app] = {
-                CacheAbstract.cache_stats_name: {
-                    'hit_total': 0,
-                    'misses': 0,
-                    }}
-        else:
-            self.storage = self.meta_storage[app]
-
-    def __call__(self, key, f, time_expire = 'default'):
-        if time_expire == 'default':
-            time_expire = self.default_time_expire
-        if time_expire == None:
-            time_expire = self.max_time_expire
-        # this must be commented because get and set are redefined
-        # key = self.__keyFormat__(key)
-        now = time.time() 
-        value = None
-        if f is None: # force deletion of value
-            self.delete(key)
-            return None
-        elif time_expire==0: # value forced expired
-            item = None # value to be computed
-        else:
-            item = self.get(key)
-            if item:
-                if not isinstance(item,(list,tuple)):
-                    value = item
-                elif (item[0] < now - time_expire): # value expired
-                    item = None # value to be computed
-                else:
-                    value = item[1]
-        if not item:
-            value = f()
-            self.set(key, (now,value), self.max_time_expire)
-        return value
-
-    def increment(self, key, value=1, time_expire='default'):
-        """ time_expire is ignored """
-        if time_expire == 'default':
-            time_expire = self.default_time_expire
-        newKey = self.__keyFormat__(key)
-        obj = Client.get(self, newKey)
-        if obj:
-            if isinstance(obj,(int,float,long)):
-                return Client.incr(self, newKey, value)
-            else:
-                value += obj[1]
-                Client.set(self,newKey,(time.time(),value),
-                           self.max_time_expire)
-                return value
-        else:
-            Client.set(self, newKey, value, self.max_time_expire)
-            return value
-
-    def set(self, key, value, time_expire='default'):
-        if time_expire == 'default':
-            time_expire = self.default_time_expire
-        newKey = self.__keyFormat__(key)
-        return Client.set(self, newKey, value, time_expire)
-
-    def get(self, key):
-        newKey = self.__keyFormat__(key)
-        return Client.get(self, newKey)
-
-    def delete(self, key):
-        newKey = self.__keyFormat__(key)
-        return Client.delete(self, newKey)
-
-    def __keyFormat__(self, key):
-        return '%s/%s' % (self.request.application, key.replace(' ', '_'))
-
-

+ 0 - 1578
frameworks/Python/web2py/web2py/gluon/contrib/memcache/memcache.py

@@ -1,1578 +0,0 @@
-#!/usr/bin/env python
-
-"""client module for memcached (memory cache daemon)
-
-Overview
-========
-
-See U{the MemCached homepage<http://www.danga.com/memcached>} for more
-about memcached.
-
-Usage summary
-=============
-
-This should give you a feel for how this module operates::
-
-    import memcache
-    mc = memcache.Client(['127.0.0.1:11211'], debug=0)
-
-    mc.set("some_key", "Some value")
-    value = mc.get("some_key")
-
-    mc.set("another_key", 3)
-    mc.delete("another_key")
-
-    mc.set("key", "1") # note that the key used for incr/decr must be
-                       # a string.
-    mc.incr("key")
-    mc.decr("key")
-
-The standard way to use memcache with a database is like this:
-
-    key = derive_key(obj)
-    obj = mc.get(key)
-    if not obj:
-        obj = backend_api.get(...)
-        mc.set(key, obj)
-
-    # we now have obj, and future passes through this code
-    # will use the object from the cache.
-
-Detailed Documentation
-======================
-
-More detailed documentation is available in the L{Client} class.
-
-"""
-
-from __future__ import print_function
-
-import binascii
-import os
-import pickle
-import re
-import socket
-import sys
-import threading
-import time
-import zlib
-
-import six
-
-
-def cmemcache_hash(key):
-    return (
-        (((binascii.crc32(key.encode('ascii')) & 0xffffffff)
-          >> 16) & 0x7fff) or 1)
-serverHashFunction = cmemcache_hash
-
-
-def useOldServerHashFunction():
-    """Use the old python-memcache server hash function."""
-    global serverHashFunction
-    serverHashFunction = binascii.crc32
-
-try:
-    from zlib import compress, decompress
-    _supports_compress = True
-except ImportError:
-    _supports_compress = False
-    # quickly define a decompress just in case we recv compressed data.
-
-    def decompress(val):
-        raise _Error(
-            "Received compressed data but I don't support "
-            "compression (import error)")
-
-from io import BytesIO
-try:
-    unicode
-except NameError:
-    _has_unicode = False
-else:
-    _has_unicode = True
-
-try:
-    _str_cls = basestring
-except NameError:
-    _str_cls = str
-
-valid_key_chars_re = re.compile('[\x21-\x7e\x80-\xff]+$')
-
-
-#  Original author: Evan Martin of Danga Interactive
-__author__ = "Sean Reifschneider <[email protected]>"
-__version__ = "1.53"
-__copyright__ = "Copyright (C) 2003 Danga Interactive"
-#  http://en.wikipedia.org/wiki/Python_Software_Foundation_License
-__license__ = "Python Software Foundation License"
-
-SERVER_MAX_KEY_LENGTH = 250
-# Storing values larger than 1MB requires recompiling memcached.  If
-# you do, this value can be changed by doing
-# "memcache.SERVER_MAX_VALUE_LENGTH = N" after importing this module.
-SERVER_MAX_VALUE_LENGTH = 1024 * 1024
-
-
-class _Error(Exception):
-    pass
-
-
-class _ConnectionDeadError(Exception):
-    pass
-
-
-_DEAD_RETRY = 30  # number of seconds before retrying a dead server.
-_SOCKET_TIMEOUT = 3  # number of seconds before sockets timeout.
-
-
-class Client(threading.local):
-    """Object representing a pool of memcache servers.
-
-    See L{memcache} for an overview.
-
-    In all cases where a key is used, the key can be either:
-        1. A simple hashable type (string, integer, etc.).
-        2. A tuple of C{(hashvalue, key)}.  This is useful if you want
-        to avoid making this module calculate a hash value.  You may
-        prefer, for example, to keep all of a given user's objects on
-        the same memcache server, so you could use the user's unique
-        id as the hash value.
-
-
-    @group Setup: __init__, set_servers, forget_dead_hosts,
-    disconnect_all, debuglog
-    @group Insertion: set, add, replace, set_multi
-    @group Retrieval: get, get_multi
-    @group Integers: incr, decr
-    @group Removal: delete, delete_multi
-    @sort: __init__, set_servers, forget_dead_hosts, disconnect_all,
-           debuglog,\ set, set_multi, add, replace, get, get_multi,
-           incr, decr, delete, delete_multi
-    """
-    _FLAG_PICKLE = 1 << 0
-    _FLAG_INTEGER = 1 << 1
-    _FLAG_LONG = 1 << 2
-    _FLAG_COMPRESSED = 1 << 3
-
-    _SERVER_RETRIES = 10  # how many times to try finding a free server.
-
-    # exceptions for Client
-    class MemcachedKeyError(Exception):
-        pass
-
-    class MemcachedKeyLengthError(MemcachedKeyError):
-        pass
-
-    class MemcachedKeyCharacterError(MemcachedKeyError):
-        pass
-
-    class MemcachedKeyNoneError(MemcachedKeyError):
-        pass
-
-    class MemcachedKeyTypeError(MemcachedKeyError):
-        pass
-
-    class MemcachedStringEncodingError(Exception):
-        pass
-
-    def __init__(self, servers, debug=0, pickleProtocol=0,
-                 pickler=pickle.Pickler, unpickler=pickle.Unpickler,
-                 pload=None, pid=None,
-                 server_max_key_length=None, server_max_value_length=None,
-                 dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT,
-                 cache_cas=False, flush_on_reconnect=0, check_keys=True):
-        """Create a new Client object with the given list of servers.
-
-        @param servers: C{servers} is passed to L{set_servers}.
-        @param debug: whether to display error messages when a server
-        can't be contacted.
-        @param pickleProtocol: number to mandate protocol used by
-        (c)Pickle.
-        @param pickler: optional override of default Pickler to allow
-        subclassing.
-        @param unpickler: optional override of default Unpickler to
-        allow subclassing.
-        @param pload: optional persistent_load function to call on
-        pickle loading.  Useful for cPickle since subclassing isn't
-        allowed.
-        @param pid: optional persistent_id function to call on pickle
-        storing.  Useful for cPickle since subclassing isn't allowed.
-        @param dead_retry: number of seconds before retrying a
-        blacklisted server. Default to 30 s.
-        @param socket_timeout: timeout in seconds for all calls to a
-        server. Defaults to 3 seconds.
-        @param cache_cas: (default False) If true, cas operations will
-        be cached.  WARNING: This cache is not expired internally, if
-        you have a long-running process you will need to expire it
-        manually via client.reset_cas(), or the cache can grow
-        unlimited.
-        @param server_max_key_length: (default SERVER_MAX_KEY_LENGTH)
-        Data that is larger than this will not be sent to the server.
-        @param server_max_value_length: (default
-        SERVER_MAX_VALUE_LENGTH) Data that is larger than this will
-        not be sent to the server.
-        @param flush_on_reconnect: optional flag which prevents a
-        scenario that can cause stale data to be read: If there's more
-        than one memcached server and the connection to one is
-        interrupted, keys that mapped to that server will get
-        reassigned to another. If the first server comes back, those
-        keys will map to it again. If it still has its data, get()s
-        can read stale data that was overwritten on another
-        server. This flag is off by default for backwards
-        compatibility.
-        @param check_keys: (default True) If True, the key is checked
-        to ensure it is the correct length and composed of the right
-        characters.
-        """
-        super(Client, self).__init__()
-        self.debug = debug
-        self.dead_retry = dead_retry
-        self.socket_timeout = socket_timeout
-        self.flush_on_reconnect = flush_on_reconnect
-        self.set_servers(servers)
-        self.stats = {}
-        self.cache_cas = cache_cas
-        self.reset_cas()
-        self.do_check_key = check_keys
-
-        # Allow users to modify pickling/unpickling behavior
-        self.pickleProtocol = pickleProtocol
-        self.pickler = pickler
-        self.unpickler = unpickler
-        self.persistent_load = pload
-        self.persistent_id = pid
-        self.server_max_key_length = server_max_key_length
-        if self.server_max_key_length is None:
-            self.server_max_key_length = SERVER_MAX_KEY_LENGTH
-        self.server_max_value_length = server_max_value_length
-        if self.server_max_value_length is None:
-            self.server_max_value_length = SERVER_MAX_VALUE_LENGTH
-
-        #  figure out the pickler style
-        file = BytesIO()
-        try:
-            pickler = self.pickler(file, protocol=self.pickleProtocol)
-            self.picklerIsKeyword = True
-        except TypeError:
-            self.picklerIsKeyword = False
-
-    def reset_cas(self):
-        """Reset the cas cache.
-
-        This is only used if the Client() object was created with
-        "cache_cas=True".  If used, this cache does not expire
-        internally, so it can grow unbounded if you do not clear it
-        yourself.
-        """
-        self.cas_ids = {}
-
-    def set_servers(self, servers):
-        """Set the pool of servers used by this client.
-
-        @param servers: an array of servers.
-        Servers can be passed in two forms:
-            1. Strings of the form C{"host:port"}, which implies a
-            default weight of 1.
-            2. Tuples of the form C{("host:port", weight)}, where
-            C{weight} is an integer weight value.
-
-        """
-        self.servers = [_Host(s, self.debug, dead_retry=self.dead_retry,
-                              socket_timeout=self.socket_timeout,
-                              flush_on_reconnect=self.flush_on_reconnect)
-                        for s in servers]
-        self._init_buckets()
-
-    def get_stats(self, stat_args=None):
-        """Get statistics from each of the servers.
-
-        @param stat_args: Additional arguments to pass to the memcache
-            "stats" command.
-
-        @return: A list of tuples ( server_identifier,
-            stats_dictionary ).  The dictionary contains a number of
-            name/value pairs specifying the name of the status field
-            and the string value associated with it.  The values are
-            not converted from strings.
-        """
-        data = []
-        for s in self.servers:
-            if not s.connect():
-                continue
-            if s.family == socket.AF_INET:
-                name = '%s:%s (%s)' % (s.ip, s.port, s.weight)
-            elif s.family == socket.AF_INET6:
-                name = '[%s]:%s (%s)' % (s.ip, s.port, s.weight)
-            else:
-                name = 'unix:%s (%s)' % (s.address, s.weight)
-            if not stat_args:
-                s.send_cmd('stats')
-            else:
-                s.send_cmd('stats ' + stat_args)
-            serverData = {}
-            data.append((name, serverData))
-            readline = s.readline
-            while 1:
-                line = readline()
-                if not line or line.strip() == 'END':
-                    break
-                stats = line.split(' ', 2)
-                serverData[stats[1]] = stats[2]
-
-        return(data)
-
-    def get_slabs(self):
-        data = []
-        for s in self.servers:
-            if not s.connect():
-                continue
-            if s.family == socket.AF_INET:
-                name = '%s:%s (%s)' % (s.ip, s.port, s.weight)
-            elif s.family == socket.AF_INET6:
-                name = '[%s]:%s (%s)' % (s.ip, s.port, s.weight)
-            else:
-                name = 'unix:%s (%s)' % (s.address, s.weight)
-            serverData = {}
-            data.append((name, serverData))
-            s.send_cmd('stats items')
-            readline = s.readline
-            while 1:
-                line = readline()
-                if not line or line.strip() == 'END':
-                    break
-                item = line.split(' ', 2)
-                # 0 = STAT, 1 = ITEM, 2 = Value
-                slab = item[1].split(':', 2)
-                # 0 = items, 1 = Slab #, 2 = Name
-                if slab[1] not in serverData:
-                    serverData[slab[1]] = {}
-                serverData[slab[1]][slab[2]] = item[2]
-        return data
-
-    def flush_all(self):
-        """Expire all data in memcache servers that are reachable."""
-        for s in self.servers:
-            if not s.connect():
-                continue
-            s.flush()
-
-    def debuglog(self, str):
-        if self.debug:
-            sys.stderr.write("MemCached: %s\n" % str)
-
-    def _statlog(self, func):
-        if func not in self.stats:
-            self.stats[func] = 1
-        else:
-            self.stats[func] += 1
-
-    def forget_dead_hosts(self):
-        """Reset every host in the pool to an "alive" state."""
-        for s in self.servers:
-            s.deaduntil = 0
-
-    def _init_buckets(self):
-        self.buckets = []
-        for server in self.servers:
-            for i in range(server.weight):
-                self.buckets.append(server)
-
-    def _get_server(self, key):
-        if isinstance(key, tuple):
-            serverhash, key = key
-        else:
-            serverhash = serverHashFunction(key)
-
-        if not self.buckets:
-            return None, None
-
-        for i in range(Client._SERVER_RETRIES):
-            server = self.buckets[serverhash % len(self.buckets)]
-            if server.connect():
-                # print("(using server %s)" % server,)
-                return server, key
-            serverhash = serverHashFunction(str(serverhash) + str(i))
-        return None, None
-
-    def disconnect_all(self):
-        for s in self.servers:
-            s.close_socket()
-
-    def delete_multi(self, keys, time=0, key_prefix=''):
-        """Delete multiple keys in the memcache doing just one query.
-
-        >>> notset_keys = mc.set_multi({'a1' : 'val1', 'a2' : 'val2'})
-        >>> mc.get_multi(['a1', 'a2']) == {'a1' : 'val1','a2' : 'val2'}
-        1
-        >>> mc.delete_multi(['key1', 'key2'])
-        1
-        >>> mc.get_multi(['key1', 'key2']) == {}
-        1
-
-        This method is recommended over iterated regular L{delete}s as
-        it reduces total latency, since your app doesn't have to wait
-        for each round-trip of L{delete} before sending the next one.
-
-        @param keys: An iterable of keys to clear
-        @param time: number of seconds any subsequent set / update
-        commands should fail. Defaults to 0 for no delay.
-        @param key_prefix: Optional string to prepend to each key when
-            sending to memcache.  See docs for L{get_multi} and
-            L{set_multi}.
-        @return: 1 if no failure in communication with any memcacheds.
-        @rtype: int
-        """
-
-        self._statlog('delete_multi')
-
-        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
-            keys, key_prefix)
-
-        # send out all requests on each server before reading anything
-        dead_servers = []
-
-        rc = 1
-        for server in six.iterkeys(server_keys):
-            bigcmd = []
-            write = bigcmd.append
-            if time is not None:
-                for key in server_keys[server]:  # These are mangled keys
-                    write("delete %s %d\r\n" % (key, time))
-            else:
-                for key in server_keys[server]:  # These are mangled keys
-                    write("delete %s\r\n" % key)
-            try:
-                server.send_cmds(''.join(bigcmd))
-            except socket.error as msg:
-                rc = 0
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-                dead_servers.append(server)
-
-        # if any servers died on the way, don't expect them to respond.
-        for server in dead_servers:
-            del server_keys[server]
-
-        for server, keys in six.iteritems(server_keys):
-            try:
-                for key in keys:
-                    server.expect("DELETED")
-            except socket.error as msg:
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-                rc = 0
-        return rc
-
-    def delete(self, key, time=0):
-        '''Deletes a key from the memcache.
-
-        @return: Nonzero on success.
-        @param time: number of seconds any subsequent set / update commands
-        should fail. Defaults to None for no delay.
-        @rtype: int
-        '''
-        return self._deletetouch(['DELETED', 'NOT_FOUND'], "delete", key, time)
-
-    def touch(self, key, time=0):
-        '''Updates the expiration time of a key in memcache.
-
-        @return: Nonzero on success.
-        @param time: Tells memcached the time which this value should
-            expire, either as a delta number of seconds, or an absolute
-            unix time-since-the-epoch value. See the memcached protocol
-            docs section "Storage Commands" for more info on <exptime>. We
-            default to 0 == cache forever.
-        @rtype: int
-        '''
-        return self._deletetouch(['TOUCHED'], "touch", key, time)
-
-    def _deletetouch(self, expected, cmd, key, time=0):
-        if self.do_check_key:
-            self.check_key(key)
-        server, key = self._get_server(key)
-        if not server:
-            return 0
-        self._statlog(cmd)
-        if time is not None and time != 0:
-            cmd = "%s %s %d" % (cmd, key, time)
-        else:
-            cmd = "%s %s" % (cmd, key)
-
-        try:
-            server.send_cmd(cmd)
-            line = server.readline()
-            if line and line.strip() in expected:
-                return 1
-            self.debuglog('%s expected %s, got: %r'
-                          % (cmd, ' or '.join(expected), line))
-        except socket.error as msg:
-            if isinstance(msg, tuple):
-                msg = msg[1]
-            server.mark_dead(msg)
-        return 0
-
-    def incr(self, key, delta=1):
-        """Increment value for C{key} by C{delta}
-
-        Sends a command to the server to atomically increment the
-        value for C{key} by C{delta}, or by 1 if C{delta} is
-        unspecified.  Returns None if C{key} doesn't exist on server,
-        otherwise it returns the new value after incrementing.
-
-        Note that the value for C{key} must already exist in the
-        memcache, and it must be the string representation of an
-        integer.
-
-        >>> mc.set("counter", "20")  # returns 1, indicating success
-        1
-        >>> mc.incr("counter")
-        21
-        >>> mc.incr("counter")
-        22
-
-        Overflow on server is not checked.  Be aware of values
-        approaching 2**32.  See L{decr}.
-
-        @param delta: Integer amount to increment by (should be zero
-        or greater).
-
-        @return: New value after incrementing.
-        @rtype: int
-        """
-        return self._incrdecr("incr", key, delta)
-
-    def decr(self, key, delta=1):
-        """Decrement value for C{key} by C{delta}
-
-        Like L{incr}, but decrements.  Unlike L{incr}, underflow is
-        checked and new values are capped at 0.  If server value is 1,
-        a decrement of 2 returns 0, not -1.
-
-        @param delta: Integer amount to decrement by (should be zero
-        or greater).
-
-        @return: New value after decrementing or None on error.
-        @rtype: int
-        """
-        return self._incrdecr("decr", key, delta)
-
-    def _incrdecr(self, cmd, key, delta):
-        if self.do_check_key:
-            self.check_key(key)
-        server, key = self._get_server(key)
-        if not server:
-            return None
-        self._statlog(cmd)
-        cmd = "%s %s %d" % (cmd, key, delta)
-        try:
-            server.send_cmd(cmd)
-            line = server.readline()
-            if line is None or line.strip() == 'NOT_FOUND':
-                return None
-            return int(line)
-        except socket.error as msg:
-            if isinstance(msg, tuple):
-                msg = msg[1]
-            server.mark_dead(msg)
-            return None
-
-    def add(self, key, val, time=0, min_compress_len=0):
-        '''Add new key with value.
-
-        Like L{set}, but only stores in memcache if the key doesn't
-        already exist.
-
-        @return: Nonzero on success.
-        @rtype: int
-        '''
-        return self._set("add", key, val, time, min_compress_len)
-
-    def append(self, key, val, time=0, min_compress_len=0):
-        '''Append the value to the end of the existing key's value.
-
-        Only stores in memcache if key already exists.
-        Also see L{prepend}.
-
-        @return: Nonzero on success.
-        @rtype: int
-        '''
-        return self._set("append", key, val, time, min_compress_len)
-
-    def prepend(self, key, val, time=0, min_compress_len=0):
-        '''Prepend the value to the beginning of the existing key's value.
-
-        Only stores in memcache if key already exists.
-        Also see L{append}.
-
-        @return: Nonzero on success.
-        @rtype: int
-        '''
-        return self._set("prepend", key, val, time, min_compress_len)
-
-    def replace(self, key, val, time=0, min_compress_len=0):
-        '''Replace existing key with value.
-
-        Like L{set}, but only stores in memcache if the key already exists.
-        The opposite of L{add}.
-
-        @return: Nonzero on success.
-        @rtype: int
-        '''
-        return self._set("replace", key, val, time, min_compress_len)
-
-    def set(self, key, val, time=0, min_compress_len=0):
-        '''Unconditionally sets a key to a given value in the memcache.
-
-        The C{key} can optionally be an tuple, with the first element
-        being the server hash value and the second being the key.  If
-        you want to avoid making this module calculate a hash value.
-        You may prefer, for example, to keep all of a given user's
-        objects on the same memcache server, so you could use the
-        user's unique id as the hash value.
-
-        @return: Nonzero on success.
-        @rtype: int
-
-        @param time: Tells memcached the time which this value should
-        expire, either as a delta number of seconds, or an absolute
-        unix time-since-the-epoch value. See the memcached protocol
-        docs section "Storage Commands" for more info on <exptime>. We
-        default to 0 == cache forever.
-
-        @param min_compress_len: The threshold length to kick in
-        auto-compression of the value using the zlib.compress()
-        routine. If the value being cached is a string, then the
-        length of the string is measured, else if the value is an
-        object, then the length of the pickle result is measured. If
-        the resulting attempt at compression yeilds a larger string
-        than the input, then it is discarded. For backwards
-        compatability, this parameter defaults to 0, indicating don't
-        ever try to compress.
-
-        '''
-        return self._set("set", key, val, time, min_compress_len)
-
-    def cas(self, key, val, time=0, min_compress_len=0):
-        '''Check and set (CAS)
-
-        Sets a key to a given value in the memcache if it hasn't been
-        altered since last fetched. (See L{gets}).
-
-        The C{key} can optionally be an tuple, with the first element
-        being the server hash value and the second being the key.  If
-        you want to avoid making this module calculate a hash value.
-        You may prefer, for example, to keep all of a given user's
-        objects on the same memcache server, so you could use the
-        user's unique id as the hash value.
-
-        @return: Nonzero on success.
-        @rtype: int
-
-        @param time: Tells memcached the time which this value should
-        expire, either as a delta number of seconds, or an absolute
-        unix time-since-the-epoch value. See the memcached protocol
-        docs section "Storage Commands" for more info on <exptime>. We
-        default to 0 == cache forever.
-
-        @param min_compress_len: The threshold length to kick in
-        auto-compression of the value using the zlib.compress()
-        routine. If the value being cached is a string, then the
-        length of the string is measured, else if the value is an
-        object, then the length of the pickle result is measured. If
-        the resulting attempt at compression yeilds a larger string
-        than the input, then it is discarded. For backwards
-        compatability, this parameter defaults to 0, indicating don't
-        ever try to compress.
-        '''
-        return self._set("cas", key, val, time, min_compress_len)
-
-    def _map_and_prefix_keys(self, key_iterable, key_prefix):
-        """Compute the mapping of server (_Host instance) -> list of keys to
-        stuff onto that server, as well as the mapping of prefixed key
-        -> original key.
-        """
-        # Check it just once ...
-        key_extra_len = len(key_prefix)
-        if key_prefix and self.do_check_key:
-            self.check_key(key_prefix)
-
-        # server (_Host) -> list of unprefixed server keys in mapping
-        server_keys = {}
-
-        prefixed_to_orig_key = {}
-        # build up a list for each server of all the keys we want.
-        for orig_key in key_iterable:
-            if isinstance(orig_key, tuple):
-                # Tuple of hashvalue, key ala _get_server(). Caller is
-                # essentially telling us what server to stuff this on.
-                # Ensure call to _get_server gets a Tuple as well.
-                str_orig_key = str(orig_key[1])
-
-                # Gotta pre-mangle key before hashing to a
-                # server. Returns the mangled key.
-                server, key = self._get_server(
-                    (orig_key[0], key_prefix + str_orig_key))
-            else:
-                # set_multi supports int / long keys.
-                str_orig_key = str(orig_key)
-                server, key = self._get_server(key_prefix + str_orig_key)
-
-            # Now check to make sure key length is proper ...
-            if self.do_check_key:
-                self.check_key(str_orig_key, key_extra_len=key_extra_len)
-
-            if not server:
-                continue
-
-            if server not in server_keys:
-                server_keys[server] = []
-            server_keys[server].append(key)
-            prefixed_to_orig_key[key] = orig_key
-
-        return (server_keys, prefixed_to_orig_key)
-
-    def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
-        '''Sets multiple keys in the memcache doing just one query.
-
-        >>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
-        >>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1',
-        ...                                    'key2' : 'val2'}
-        1
-
-
-        This method is recommended over regular L{set} as it lowers
-        the number of total packets flying around your network,
-        reducing total latency, since your app doesn't have to wait
-        for each round-trip of L{set} before sending the next one.
-
-        @param mapping: A dict of key/value pairs to set.
-
-        @param time: Tells memcached the time which this value should
-            expire, either as a delta number of seconds, or an
-            absolute unix time-since-the-epoch value. See the
-            memcached protocol docs section "Storage Commands" for
-            more info on <exptime>. We default to 0 == cache forever.
-
-        @param key_prefix: Optional string to prepend to each key when
-            sending to memcache. Allows you to efficiently stuff these
-            keys into a pseudo-namespace in memcache:
-
-            >>> notset_keys = mc.set_multi(
-            ...     {'key1' : 'val1', 'key2' : 'val2'},
-            ...     key_prefix='subspace_')
-            >>> len(notset_keys) == 0
-            True
-            >>> mc.get_multi(['subspace_key1',
-            ...               'subspace_key2']) == {'subspace_key1': 'val1',
-            ...                                     'subspace_key2' : 'val2'}
-            True
-
-            Causes key 'subspace_key1' and 'subspace_key2' to be
-            set. Useful in conjunction with a higher-level layer which
-            applies namespaces to data in memcache.  In this case, the
-            return result would be the list of notset original keys,
-            prefix not applied.
-
-        @param min_compress_len: The threshold length to kick in
-            auto-compression of the value using the zlib.compress()
-            routine. If the value being cached is a string, then the
-            length of the string is measured, else if the value is an
-            object, then the length of the pickle result is
-            measured. If the resulting attempt at compression yeilds a
-            larger string than the input, then it is discarded. For
-            backwards compatability, this parameter defaults to 0,
-            indicating don't ever try to compress.
-
-        @return: List of keys which failed to be stored [ memcache out
-           of memory, etc. ].
-
-        @rtype: list
-        '''
-        self._statlog('set_multi')
-
-        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
-            six.iterkeys(mapping), key_prefix)
-
-        # send out all requests on each server before reading anything
-        dead_servers = []
-        notstored = []  # original keys.
-
-        for server in six.iterkeys(server_keys):
-            bigcmd = []
-            write = bigcmd.append
-            try:
-                for key in server_keys[server]:  # These are mangled keys
-                    store_info = self._val_to_store_info(
-                        mapping[prefixed_to_orig_key[key]],
-                        min_compress_len)
-                    if store_info:
-                        msg = "set %s %d %d %d\r\n%s\r\n"
-                        write(msg % (key,
-                                     store_info[0],
-                                     time,
-                                     store_info[1],
-                                     store_info[2]))
-                    else:
-                        notstored.append(prefixed_to_orig_key[key])
-                server.send_cmds(''.join(bigcmd))
-            except socket.error as msg:
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-                dead_servers.append(server)
-
-        # if any servers died on the way, don't expect them to respond.
-        for server in dead_servers:
-            del server_keys[server]
-
-        #  short-circuit if there are no servers, just return all keys
-        if not server_keys:
-            return(mapping.keys())
-
-        for server, keys in six.iteritems(server_keys):
-            try:
-                for key in keys:
-                    if server.readline() == 'STORED':
-                        continue
-                    else:
-                        # un-mangle.
-                        notstored.append(prefixed_to_orig_key[key])
-            except (_Error, socket.error) as msg:
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-        return notstored
-
-    def _val_to_store_info(self, val, min_compress_len):
-        """Transform val to a storable representation.
-
-        Returns a tuple of the flags, the length of the new value, and
-        the new value itself.
-        """
-        flags = 0
-        if isinstance(val, str):
-            pass
-        elif isinstance(val, int):
-            flags |= Client._FLAG_INTEGER
-            val = "%d" % val
-            # force no attempt to compress this silly string.
-            min_compress_len = 0
-        elif isinstance(val, long):
-            flags |= Client._FLAG_LONG
-            val = "%d" % val
-            # force no attempt to compress this silly string.
-            min_compress_len = 0
-        else:
-            flags |= Client._FLAG_PICKLE
-            file = BytesIO()
-            if self.picklerIsKeyword:
-                pickler = self.pickler(file, protocol=self.pickleProtocol)
-            else:
-                pickler = self.pickler(file, self.pickleProtocol)
-            if self.persistent_id:
-                pickler.persistent_id = self.persistent_id
-            pickler.dump(val)
-            val = file.getvalue()
-
-        lv = len(val)
-        # We should try to compress if min_compress_len > 0 and we
-        # could import zlib and this string is longer than our min
-        # threshold.
-        if min_compress_len and lv > min_compress_len:
-            comp_val = zlib.compress(val)
-            # Only retain the result if the compression result is smaller
-            # than the original.
-            if len(comp_val) < lv:
-                flags |= Client._FLAG_COMPRESSED
-                val = comp_val
-
-        #  silently do not store if value length exceeds maximum
-        if (self.server_max_value_length != 0 and
-                len(val) > self.server_max_value_length):
-            return(0)
-
-        return (flags, len(val), val)
-
-    def _set(self, cmd, key, val, time, min_compress_len=0):
-        if self.do_check_key:
-            self.check_key(key)
-        server, key = self._get_server(key)
-        if not server:
-            return 0
-
-        def _unsafe_set():
-            self._statlog(cmd)
-
-            store_info = self._val_to_store_info(val, min_compress_len)
-            if not store_info:
-                return(0)
-
-            if cmd == 'cas':
-                if key not in self.cas_ids:
-                    return self._set('set', key, val, time, min_compress_len)
-                fullcmd = "%s %s %d %d %d %d\r\n%s" % (
-                    cmd, key, store_info[0], time, store_info[1],
-                    self.cas_ids[key], store_info[2])
-            else:
-                fullcmd = "%s %s %d %d %d\r\n%s" % (
-                    cmd, key, store_info[0],
-                    time, store_info[1], store_info[2]
-                )
-
-            try:
-                server.send_cmd(fullcmd)
-                return(server.expect("STORED", raise_exception=True)
-                       == "STORED")
-            except socket.error as msg:
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-            return 0
-
-        try:
-            return _unsafe_set()
-        except _ConnectionDeadError:
-            # retry once
-            try:
-                if server._get_socket():
-                    return _unsafe_set()
-            except (_ConnectionDeadError, socket.error) as msg:
-                server.mark_dead(msg)
-            return 0
-
-    def _get(self, cmd, key):
-        if self.do_check_key:
-            self.check_key(key)
-        server, key = self._get_server(key)
-        if not server:
-            return None
-
-        def _unsafe_get():
-            self._statlog(cmd)
-
-            try:
-                server.send_cmd("%s %s" % (cmd, key))
-                rkey = flags = rlen = cas_id = None
-
-                if cmd == 'gets':
-                    rkey, flags, rlen, cas_id, = self._expect_cas_value(
-                        server, raise_exception=True
-                    )
-                    if rkey and self.cache_cas:
-                        self.cas_ids[rkey] = cas_id
-                else:
-                    rkey, flags, rlen, = self._expectvalue(
-                        server, raise_exception=True
-                    )
-
-                if not rkey:
-                    return None
-                try:
-                    value = self._recv_value(server, flags, rlen)
-                finally:
-                    server.expect("END", raise_exception=True)
-            except (_Error, socket.error) as msg:
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-                return None
-
-            return value
-
-        try:
-            return _unsafe_get()
-        except _ConnectionDeadError:
-            # retry once
-            try:
-                if server.connect():
-                    return _unsafe_get()
-                return None
-            except (_ConnectionDeadError, socket.error) as msg:
-                server.mark_dead(msg)
-            return None
-
-    def get(self, key):
-        '''Retrieves a key from the memcache.
-
-        @return: The value or None.
-        '''
-        return self._get('get', key)
-
-    def gets(self, key):
-        '''Retrieves a key from the memcache. Used in conjunction with 'cas'.
-
-        @return: The value or None.
-        '''
-        return self._get('gets', key)
-
-    def get_multi(self, keys, key_prefix=''):
-        '''Retrieves multiple keys from the memcache doing just one query.
-
-        >>> success = mc.set("foo", "bar")
-        >>> success = mc.set("baz", 42)
-        >>> mc.get_multi(["foo", "baz", "foobar"]) == {
-        ...     "foo": "bar", "baz": 42
-        ... }
-        1
-        >>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
-        1
-
-        This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict
-        will just have unprefixed keys 'k1', 'k2'.
-
-        >>> mc.get_multi(['k1', 'k2', 'nonexist'],
-        ...              key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
-        1
-
-        get_mult [ and L{set_multi} ] can take str()-ables like ints /
-        longs as keys too. Such as your db pri key fields.  They're
-        rotored through str() before being passed off to memcache,
-        with or without the use of a key_prefix.  In this mode, the
-        key_prefix could be a table name, and the key itself a db
-        primary key number.
-
-        >>> mc.set_multi({42: 'douglass adams',
-        ...               46: 'and 2 just ahead of me'},
-        ...              key_prefix='numkeys_') == []
-        1
-        >>> mc.get_multi([46, 42], key_prefix='numkeys_') == {
-        ...     42: 'douglass adams',
-        ...     46: 'and 2 just ahead of me'
-        ... }
-        1
-
-        This method is recommended over regular L{get} as it lowers
-        the number of total packets flying around your network,
-        reducing total latency, since your app doesn't have to wait
-        for each round-trip of L{get} before sending the next one.
-
-        See also L{set_multi}.
-
-        @param keys: An array of keys.
-
-        @param key_prefix: A string to prefix each key when we
-        communicate with memcache.  Facilitates pseudo-namespaces
-        within memcache. Returned dictionary keys will not have this
-        prefix.
-
-        @return: A dictionary of key/value pairs that were
-        available. If key_prefix was provided, the keys in the retured
-        dictionary will not have it present.
-        '''
-
-        self._statlog('get_multi')
-
-        server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(
-            keys, key_prefix)
-
-        # send out all requests on each server before reading anything
-        dead_servers = []
-        for server in six.iterkeys(server_keys):
-            try:
-                server.send_cmd("get %s" % " ".join(server_keys[server]))
-            except socket.error as msg:
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-                dead_servers.append(server)
-
-        # if any servers died on the way, don't expect them to respond.
-        for server in dead_servers:
-            del server_keys[server]
-
-        retvals = {}
-        for server in six.iterkeys(server_keys):
-            try:
-                line = server.readline()
-                while line and line != 'END':
-                    rkey, flags, rlen = self._expectvalue(server, line)
-                    #  Bo Yang reports that this can sometimes be None
-                    if rkey is not None:
-                        val = self._recv_value(server, flags, rlen)
-                        # un-prefix returned key.
-                        retvals[prefixed_to_orig_key[rkey]] = val
-                    line = server.readline()
-            except (_Error, socket.error) as msg:
-                if isinstance(msg, tuple):
-                    msg = msg[1]
-                server.mark_dead(msg)
-        return retvals
-
-    def _expect_cas_value(self, server, line=None, raise_exception=False):
-        if not line:
-            line = server.readline(raise_exception)
-
-        if line and line[:5] == 'VALUE':
-            resp, rkey, flags, len, cas_id = line.split()
-            return (rkey, int(flags), int(len), int(cas_id))
-        else:
-            return (None, None, None, None)
-
-    def _expectvalue(self, server, line=None, raise_exception=False):
-        if not line:
-            line = server.readline(raise_exception)
-
-        if line and line[:5] == 'VALUE':
-            resp, rkey, flags, len = line.split()
-            flags = int(flags)
-            rlen = int(len)
-            return (rkey, flags, rlen)
-        else:
-            return (None, None, None)
-
-    def _recv_value(self, server, flags, rlen):
-        rlen += 2  # include \r\n
-        buf = server.recv(rlen)
-        if len(buf) != rlen:
-            raise _Error("received %d bytes when expecting %d"
-                         % (len(buf), rlen))
-
-        if len(buf) == rlen:
-            buf = buf[:-2]  # strip \r\n
-
-        if flags & Client._FLAG_COMPRESSED:
-            buf = zlib.decompress(buf)
-
-        if flags == 0 or flags == Client._FLAG_COMPRESSED:
-            # Either a bare string or a compressed string now decompressed...
-            val = buf
-        elif flags & Client._FLAG_INTEGER:
-            val = int(buf)
-        elif flags & Client._FLAG_LONG:
-            val = long(buf)
-        elif flags & Client._FLAG_PICKLE:
-            try:
-                file = BytesIO(buf)
-                unpickler = self.unpickler(file)
-                if self.persistent_load:
-                    unpickler.persistent_load = self.persistent_load
-                val = unpickler.load()
-            except Exception as e:
-                self.debuglog('Pickle error: %s\n' % e)
-                return None
-        else:
-            self.debuglog("unknown flags on get: %x\n" % flags)
-            raise ValueError('Unknown flags on get: %x' % flags)
-
-        return val
-
-    def check_key(self, key, key_extra_len=0):
-        """Checks sanity of key.
-
-            Fails if:
-
-            Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength).
-            Contains control characters  (Raises MemcachedKeyCharacterError).
-            Is not a string (Raises MemcachedStringEncodingError)
-            Is an unicode string (Raises MemcachedStringEncodingError)
-            Is not a string (Raises MemcachedKeyError)
-            Is None (Raises MemcachedKeyError)
-        """
-        if isinstance(key, tuple):
-            key = key[1]
-        if not key:
-            raise Client.MemcachedKeyNoneError("Key is None")
-
-        # Make sure we're not a specific unicode type, if we're old enough that
-        # it's a separate type.
-        if _has_unicode is True and isinstance(key, unicode):
-            raise Client.MemcachedStringEncodingError(
-                "Keys must be str()'s, not unicode.  Convert your unicode "
-                "strings using mystring.encode(charset)!")
-        if not isinstance(key, str):
-            raise Client.MemcachedKeyTypeError("Key must be str()'s")
-
-        if isinstance(key, _str_cls):
-            if (self.server_max_key_length != 0 and
-                    len(key) + key_extra_len > self.server_max_key_length):
-                raise Client.MemcachedKeyLengthError(
-                    "Key length is > %s" % self.server_max_key_length
-                )
-            if not valid_key_chars_re.match(key):
-                raise Client.MemcachedKeyCharacterError(
-                    "Control characters not allowed")
-
-
-class _Host(object):
-
-    def __init__(self, host, debug=0, dead_retry=_DEAD_RETRY,
-                 socket_timeout=_SOCKET_TIMEOUT, flush_on_reconnect=0):
-        self.dead_retry = dead_retry
-        self.socket_timeout = socket_timeout
-        self.debug = debug
-        self.flush_on_reconnect = flush_on_reconnect
-        if isinstance(host, tuple):
-            host, self.weight = host
-        else:
-            self.weight = 1
-
-        #  parse the connection string
-        m = re.match(r'^(?P<proto>unix):(?P<path>.*)$', host)
-        if not m:
-            m = re.match(r'^(?P<proto>inet6):'
-                         r'\[(?P<host>[^\[\]]+)\](:(?P<port>[0-9]+))?$', host)
-        if not m:
-            m = re.match(r'^(?P<proto>inet):'
-                         r'(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
-        if not m:
-            m = re.match(r'^(?P<host>[^:]+)(:(?P<port>[0-9]+))?$', host)
-        if not m:
-            raise ValueError('Unable to parse connection string: "%s"' % host)
-
-        hostData = m.groupdict()
-        if hostData.get('proto') == 'unix':
-            self.family = socket.AF_UNIX
-            self.address = hostData['path']
-        elif hostData.get('proto') == 'inet6':
-            self.family = socket.AF_INET6
-            self.ip = hostData['host']
-            self.port = int(hostData.get('port') or 11211)
-            self.address = (self.ip, self.port)
-        else:
-            self.family = socket.AF_INET
-            self.ip = hostData['host']
-            self.port = int(hostData.get('port') or 11211)
-            self.address = (self.ip, self.port)
-
-        self.deaduntil = 0
-        self.socket = None
-        self.flush_on_next_connect = 0
-
-        self.buffer = ''
-
-    def debuglog(self, str):
-        if self.debug:
-            sys.stderr.write("MemCached: %s\n" % str)
-
-    def _check_dead(self):
-        if self.deaduntil and self.deaduntil > time.time():
-            return 1
-        self.deaduntil = 0
-        return 0
-
-    def connect(self):
-        if self._get_socket():
-            return 1
-        return 0
-
-    def mark_dead(self, reason):
-        self.debuglog("MemCache: %s: %s.  Marking dead." % (self, reason))
-        self.deaduntil = time.time() + self.dead_retry
-        if self.flush_on_reconnect:
-            self.flush_on_next_connect = 1
-        self.close_socket()
-
-    def _get_socket(self):
-        if self._check_dead():
-            return None
-        if self.socket:
-            return self.socket
-        s = socket.socket(self.family, socket.SOCK_STREAM)
-        if hasattr(s, 'settimeout'):
-            s.settimeout(self.socket_timeout)
-        try:
-            s.connect(self.address)
-        except socket.timeout as msg:
-            self.mark_dead("connect: %s" % msg)
-            return None
-        except socket.error as msg:
-            if isinstance(msg, tuple):
-                msg = msg[1]
-            self.mark_dead("connect: %s" % msg)
-            return None
-        self.socket = s
-        self.buffer = ''
-        if self.flush_on_next_connect:
-            self.flush()
-            self.flush_on_next_connect = 0
-        return s
-
-    def close_socket(self):
-        if self.socket:
-            self.socket.close()
-            self.socket = None
-
-    def send_cmd(self, cmd):
-        self.socket.sendall(cmd + '\r\n')
-
-    def send_cmds(self, cmds):
-        """cmds already has trailing \r\n's applied."""
-        self.socket.sendall(cmds)
-
-    def readline(self, raise_exception=False):
-        """Read a line and return it.
-
-        If "raise_exception" is set, raise _ConnectionDeadError if the
-        read fails, otherwise return an empty string.
-        """
-        buf = self.buffer
-        if self.socket:
-            recv = self.socket.recv
-        else:
-            recv = lambda bufsize: ''
-
-        while True:
-            index = buf.find('\r\n')
-            if index >= 0:
-                break
-            data = recv(4096)
-            if not data:
-                # connection close, let's kill it and raise
-                self.mark_dead('connection closed in readline()')
-                if raise_exception:
-                    raise _ConnectionDeadError()
-                else:
-                    return ''
-
-            buf += data
-        self.buffer = buf[index + 2:]
-        return buf[:index]
-
-    def expect(self, text, raise_exception=False):
-        line = self.readline(raise_exception)
-        if line != text:
-            self.debuglog("while expecting '%s', got unexpected response '%s'"
-                          % (text, line))
-        return line
-
-    def recv(self, rlen):
-        self_socket_recv = self.socket.recv
-        buf = self.buffer
-        while len(buf) < rlen:
-            foo = self_socket_recv(max(rlen - len(buf), 4096))
-            buf += foo
-            if not foo:
-                raise _Error('Read %d bytes, expecting %d, '
-                             'read returned 0 length bytes' % (len(buf), rlen))
-        self.buffer = buf[rlen:]
-        return buf[:rlen]
-
-    def flush(self):
-        self.send_cmd('flush_all')
-        self.expect('OK')
-
-    def __str__(self):
-        d = ''
-        if self.deaduntil:
-            d = " (dead until %d)" % self.deaduntil
-
-        if self.family == socket.AF_INET:
-            return "inet:%s:%d%s" % (self.address[0], self.address[1], d)
-        elif self.family == socket.AF_INET6:
-            return "inet6:[%s]:%d%s" % (self.address[0], self.address[1], d)
-        else:
-            return "unix:%s%s" % (self.address, d)
-
-
-def _doctest():
-    import doctest
-    import memcache
-    servers = ["127.0.0.1:11211"]
-    mc = Client(servers, debug=1)
-    globs = {"mc": mc}
-    return doctest.testmod(memcache, globs=globs)
-
-if __name__ == "__main__":
-    failures = 0
-    print("Testing docstrings...")
-    _doctest()
-    print("Running tests:")
-    print()
-    serverList = [["127.0.0.1:11211"]]
-    if '--do-unix' in sys.argv:
-        serverList.append([os.path.join(os.getcwd(), 'memcached.socket')])
-
-    for servers in serverList:
-        mc = Client(servers, debug=1)
-
-        def to_s(val):
-            if not isinstance(val, _str_cls):
-                return "%s (%s)" % (val, type(val))
-            return "%s" % val
-
-        def test_setget(key, val):
-            global failures
-            print("Testing set/get {'%s': %s} ..."
-                  % (to_s(key), to_s(val)), end=" ")
-            mc.set(key, val)
-            newval = mc.get(key)
-            if newval == val:
-                print("OK")
-                return 1
-            else:
-                print("FAIL")
-                failures += 1
-                return 0
-
-        class FooStruct(object):
-
-            def __init__(self):
-                self.bar = "baz"
-
-            def __str__(self):
-                return "A FooStruct"
-
-            def __eq__(self, other):
-                if isinstance(other, FooStruct):
-                    return self.bar == other.bar
-                return 0
-
-        test_setget("a_string", "some random string")
-        test_setget("an_integer", 42)
-        if test_setget("long", long(1 << 30)):
-            print("Testing delete ...", end=" ")
-            if mc.delete("long"):
-                print("OK")
-            else:
-                print("FAIL")
-                failures += 1
-            print("Checking results of delete ...", end=" ")
-            if mc.get("long") is None:
-                print("OK")
-            else:
-                print("FAIL")
-                failures += 1
-        print("Testing get_multi ...",)
-        print(mc.get_multi(["a_string", "an_integer"]))
-
-        #  removed from the protocol
-        # if test_setget("timed_delete", 'foo'):
-        #     print "Testing timed delete ...",
-        #     if mc.delete("timed_delete", 1):
-        #         print("OK")
-        #     else:
-        #         print("FAIL")
-        #         failures += 1
-        #     print "Checking results of timed delete ..."
-        #     if mc.get("timed_delete") is None:
-        #         print("OK")
-        #     else:
-        #         print("FAIL")
-        #         failures += 1
-
-        print("Testing get(unknown value) ...", end=" ")
-        print(to_s(mc.get("unknown_value")))
-
-        f = FooStruct()
-        test_setget("foostruct", f)
-
-        print("Testing incr ...", end=" ")
-        x = mc.incr("an_integer", 1)
-        if x == 43:
-            print("OK")
-        else:
-            print("FAIL")
-            failures += 1
-
-        print("Testing decr ...", end=" ")
-        x = mc.decr("an_integer", 1)
-        if x == 42:
-            print("OK")
-        else:
-            print("FAIL")
-            failures += 1
-        sys.stdout.flush()
-
-        # sanity tests
-        print("Testing sending spaces...", end=" ")
-        sys.stdout.flush()
-        try:
-            x = mc.set("this has spaces", 1)
-        except Client.MemcachedKeyCharacterError as msg:
-            print("OK")
-        else:
-            print("FAIL")
-            failures += 1
-
-        print("Testing sending control characters...", end=" ")
-        try:
-            x = mc.set("this\x10has\x11control characters\x02", 1)
-        except Client.MemcachedKeyCharacterError as msg:
-            print("OK")
-        else:
-            print("FAIL")
-            failures += 1
-
-        print("Testing using insanely long key...", end=" ")
-        try:
-            x = mc.set('a'*SERVER_MAX_KEY_LENGTH, 1)
-        except Client.MemcachedKeyLengthError as msg:
-            print("FAIL")
-            failures += 1
-        else:
-            print("OK")
-        try:
-            x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'a', 1)
-        except Client.MemcachedKeyLengthError as msg:
-            print("OK")
-        else:
-            print("FAIL")
-            failures += 1
-
-        print("Testing sending a unicode-string key...", end=" ")
-        try:
-            x = mc.set(unicode('keyhere'), 1)
-        except Client.MemcachedStringEncodingError as msg:
-            print("OK", end=" ")
-        else:
-            print("FAIL", end=" ")
-            failures += 1
-        try:
-            x = mc.set((unicode('a')*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
-        except Client.MemcachedKeyError:
-            print("FAIL", end=" ")
-            failures += 1
-        else:
-            print("OK", end=" ")
-        s = pickle.loads('V\\u4f1a\np0\n.')
-        try:
-            x = mc.set((s * SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
-        except Client.MemcachedKeyLengthError:
-            print("OK")
-        else:
-            print("FAIL")
-            failures += 1
-
-        print("Testing using a value larger than the memcached value limit...")
-        print('NOTE: "MemCached: while expecting[...]" is normal...')
-        x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
-        if mc.get('keyhere') is None:
-            print("OK", end=" ")
-        else:
-            print("FAIL", end=" ")
-            failures += 1
-        x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
-        if mc.get('keyhere') is None:
-            print("OK")
-        else:
-            print("FAIL")
-            failures += 1
-
-        print("Testing set_multi() with no memcacheds running", end=" ")
-        mc.disconnect_all()
-        errors = mc.set_multi({'keyhere': 'a', 'keythere': 'b'})
-        if errors != []:
-            print("FAIL")
-            failures += 1
-        else:
-            print("OK")
-
-        print("Testing delete_multi() with no memcacheds running", end=" ")
-        mc.disconnect_all()
-        ret = mc.delete_multi({'keyhere': 'a', 'keythere': 'b'})
-        if ret != 1:
-            print("FAIL")
-            failures += 1
-        else:
-            print("OK")
-
-    if failures > 0:
-        print('*** THERE WERE FAILED TESTS')
-        sys.exit(1)
-    sys.exit(0)
-
-
-# vim: ts=4 sw=4 et :

+ 0 - 905
frameworks/Python/web2py/web2py/gluon/contrib/memdb.py

@@ -1,905 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-This file is part of web2py Web Framework (Copyrighted, 2007-2009).
-Developed by Massimo Di Pierro <[email protected]> and
-Robin B <[email protected]>.
-License: LGPLv3
-"""
-
-__all__ = ['MEMDB', 'Field']
-
-import re
-import sys
-import os
-import types
-import datetime
-import thread
-import cStringIO
-import csv
-import copy
-import gluon.validators as validators
-from gluon.utils import web2py_uuid
-from gluon.storage import Storage
-from gluon import SQLTABLE
-import random
-
-SQL_DIALECTS = {'memcache': {
-    'boolean': bool,
-    'string': unicode,
-    'text': unicode,
-    'password': unicode,
-    'blob': unicode,
-    'upload': unicode,
-    'integer': long,
-    'double': float,
-    'date': datetime.date,
-    'time': datetime.time,
-    'datetime': datetime.datetime,
-    'id': int,
-    'reference': int,
-    'lower': None,
-    'upper': None,
-    'is null': 'IS NULL',
-    'is not null': 'IS NOT NULL',
-    'extract': None,
-    'left join': None,
-}}
-
-
-def cleanup(text):
-    if re.compile('[^0-9a-zA-Z_]').findall(text):
-        raise SyntaxError('Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text)
-    return text
-
-
-def assert_filter_fields(*fields):
-    for field in fields:
-        if isinstance(field, (Field, Expression)) and field.type\
-                in ['text', 'blob']:
-            raise SyntaxError('AppEngine does not index by: %s'
-                              % field.type)
-
-
-def dateobj_to_datetime(object):
-
-    # convert dates,times to datetimes for AppEngine
-
-    if isinstance(object, datetime.date):
-        object = datetime.datetime(object.year, object.month,
-                                   object.day)
-    if isinstance(object, datetime.time):
-        object = datetime.datetime(
-            1970,
-            1,
-            1,
-            object.hour,
-            object.minute,
-            object.second,
-            object.microsecond,
-        )
-    return object
-
-
-def sqlhtml_validators(field_type, length):
-    v = {
-        'boolean': [],
-        'string': validators.IS_LENGTH(length),
-        'text': [],
-        'password': validators.IS_LENGTH(length),
-        'blob': [],
-        'upload': [],
-        'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100),
-        'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100),
-        'date': validators.IS_DATE(),
-        'time': validators.IS_TIME(),
-        'datetime': validators.IS_DATETIME(),
-        'reference': validators.IS_INT_IN_RANGE(0, 1e100),
-    }
-    try:
-        return v[field_type[:9]]
-    except KeyError:
-        return []
-
-
-class DALStorage(dict):
-
-    """
-    a dictionary that let you do d['a'] as well as d.a
-    """
-
-    def __getattr__(self, key):
-        return self[key]
-
-    def __setattr__(self, key, value):
-        if key in self:
-            raise SyntaxError(
-                'Object \'%s\'exists and cannot be redefined' % key)
-        self[key] = value
-
-    def __repr__(self):
-        return '<DALStorage ' + dict.__repr__(self) + '>'
-
-
-class SQLCallableList(list):
-
-    def __call__(self):
-        return copy.copy(self)
-
-
-class MEMDB(DALStorage):
-
-    """
-    an instance of this class represents a database connection
-
-    Example::
-
-       db=MEMDB(Client())
-       db.define_table('tablename',Field('fieldname1'),
-                                   Field('fieldname2'))
-    """
-
-    def __init__(self, client):
-        self._dbname = 'memdb'
-        self['_lastsql'] = ''
-        self.tables = SQLCallableList()
-        self._translator = SQL_DIALECTS['memcache']
-        self.client = client
-
-    def define_table(
-        self,
-        tablename,
-        *fields,
-        **args
-    ):
-        tablename = cleanup(tablename)
-        if tablename in dir(self) or tablename[0] == '_':
-            raise SyntaxError('invalid table name: %s' % tablename)
-        if not tablename in self.tables:
-            self.tables.append(tablename)
-        else:
-            raise SyntaxError('table already defined: %s' % tablename)
-        t = self[tablename] = Table(self, tablename, *fields)
-        t._create()
-        return t
-
-    def __call__(self, where=''):
-        return Set(self, where)
-
-
-class SQLALL(object):
-
-    def __init__(self, table):
-        self.table = table
-
-
-class Table(DALStorage):
-
-    """
-    an instance of this class represents a database table
-
-    Example::
-
-        db=MEMDB(Client())
-        db.define_table('users',Field('name'))
-        db.users.insert(name='me')
-    """
-
-    def __init__(
-        self,
-        db,
-        tablename,
-        *fields
-    ):
-        self._db = db
-        self._tablename = tablename
-        self.fields = SQLCallableList()
-        self._referenced_by = []
-        fields = list(fields)
-        fields.insert(0, Field('id', 'id'))
-        for field in fields:
-            self.fields.append(field.name)
-            self[field.name] = field
-            field._tablename = self._tablename
-            field._table = self
-            field._db = self._db
-        self.ALL = SQLALL(self)
-
-    def _create(self):
-        fields = []
-        myfields = {}
-        for k in self.fields:
-            field = self[k]
-            attr = {}
-            if not field.type[:9] in ['id', 'reference']:
-                if field.notnull:
-                    attr = dict(required=True)
-            if field.type[:2] == 'id':
-                continue
-            if field.type[:9] == 'reference':
-                referenced = field.type[10:].strip()
-                if not referenced:
-                    raise SyntaxError('Table %s: reference \'%s\' to nothing!' % (
-                        self._tablename, k))
-                if not referenced in self._db:
-                    raise SyntaxError(
-                        'Table: table %s does not exist' % referenced)
-                referee = self._db[referenced]
-                ftype = \
-                    self._db._translator[field.type[:9]](
-                        self._db[referenced]._tableobj)
-                if self._tablename in referee.fields:  # ## THIS IS OK
-                    raise SyntaxError('Field: table \'%s\' has same name as a field '
-                                      'in referenced table \'%s\'' % (
-                                          self._tablename, referenced))
-                self._db[referenced]._referenced_by.append((self._tablename,
-                                                            field.name))
-            elif not field.type in self._db._translator\
-                    or not self._db._translator[field.type]:
-                raise SyntaxError('Field: unknown field type %s' % field.type)
-        self._tableobj = self._db.client
-        return None
-
-    def create(self):
-
-        # nothing to do, here for backward compatility
-
-        pass
-
-    def drop(self):
-
-        # nothing to do, here for backward compatibility
-
-        self._db(self.id > 0).delete()
-
-
-    def insert(self, **fields):
-        # Checks 3 times that the id is new. 3 times is enough!
-        for i in range(3):
-            id = self._create_id()
-            if self.get(id) is None and self.update(id, **fields):
-                return long(id)
-        else:
-            raise RuntimeError("Too many ID conflicts")
-
-    def get(self, id):
-        val = self._tableobj.get(self._id_to_key(id))
-        if val:
-            return Storage(val)
-        else:
-            return None
-
-    def update(self, id, **fields):
-        for field in fields:
-            if not field in fields and self[field].default\
-                    is not None:
-                fields[field] = self[field].default
-            if field in fields:
-                fields[field] = obj_represent(fields[field],
-                                              self[field].type, self._db)
-        return self._tableobj.set(self._id_to_key(id), fields)
-
-    def delete(self, id):
-        return self._tableobj.delete(self._id_to_key(id))
-
-    def _id_to_key(self, id):
-        return '__memdb__/t/%s/k/%s' % (self._tablename, str(id))
-
-    def _create_id(self):
-        return long(web2py_uuid().replace('-',''),16)
-
-    def __str__(self):
-        return self._tablename
-
-    def __call__(self, id, **kwargs):
-        record = self.get(id)
-        if record is None:
-          return None
-        if kwargs and any(record[key]!=kwargs[key] for key in kwargs):
-            return None
-        return record
-
-class Expression(object):
-
-    def __init__(
-        self,
-        name,
-        type='string',
-        db=None,
-    ):
-        (self.name, self.type, self._db) = (name, type, db)
-
-    def __str__(self):
-        return self.name
-
-    def __or__(self, other):  # for use in sortby
-        assert_filter_fields(self, other)
-        return Expression(self.name + '|' + other.name, None, None)
-
-    def __invert__(self):
-        assert_filter_fields(self)
-        return Expression('-' + self.name, self.type, None)
-
-    # for use in Query
-
-    def __eq__(self, value):
-        return Query(self, '=', value)
-
-    def __ne__(self, value):
-        return Query(self, '!=', value)
-
-    def __lt__(self, value):
-        return Query(self, '<', value)
-
-    def __le__(self, value):
-        return Query(self, '<=', value)
-
-    def __gt__(self, value):
-        return Query(self, '>', value)
-
-    def __ge__(self, value):
-        return Query(self, '>=', value)
-
-    # def like(self,value): return Query(self,' LIKE ',value)
-    # def belongs(self,value): return Query(self,' IN ',value)
-    # for use in both Query and sortby
-
-    def __add__(self, other):
-        return Expression('%s+%s' % (self, other), 'float', None)
-
-    def __sub__(self, other):
-        return Expression('%s-%s' % (self, other), 'float', None)
-
-    def __mul__(self, other):
-        return Expression('%s*%s' % (self, other), 'float', None)
-
-    def __div__(self, other):
-        return Expression('%s/%s' % (self, other), 'float', None)
-
-
-class Field(Expression):
-
-    """
-    an instance of this class represents a database field
-
-    example::
-
-        a = Field(name, 'string', length=32, required=False,
-                     default=None, requires=IS_NOT_EMPTY(), notnull=False,
-                     unique=False, uploadfield=True)
-
-    to be used as argument of GQLDB.define_table
-
-    allowed field types:
-    string, boolean, integer, double, text, blob,
-    date, time, datetime, upload, password
-
-    strings must have a length or 512 by default.
-    fields should have a default or they will be required in SQLFORMs
-    the requires argument are used to validate the field input in SQLFORMs
-
-    """
-
-    def __init__(
-        self,
-        fieldname,
-        type='string',
-        length=None,
-        default=None,
-        required=False,
-        requires=sqlhtml_validators,
-        ondelete='CASCADE',
-        notnull=False,
-        unique=False,
-        uploadfield=True,
-    ):
-
-        self.name = cleanup(fieldname)
-        if fieldname in dir(Table) or fieldname[0] == '_':
-            raise SyntaxError('Field: invalid field name: %s' % fieldname)
-        if isinstance(type, Table):
-            type = 'reference ' + type._tablename
-        if not length:
-            length = 512
-        self.type = type  # 'string', 'integer'
-        self.length = length  # the length of the string
-        self.default = default  # default value for field
-        self.required = required  # is this field required
-        self.ondelete = ondelete.upper()  # this is for reference fields only
-        self.notnull = notnull
-        self.unique = unique
-        self.uploadfield = uploadfield
-        if requires == sqlhtml_validators:
-            requires = sqlhtml_validators(type, length)
-        elif requires is None:
-            requires = []
-        self.requires = requires  # list of validators
-
-    def formatter(self, value):
-        if value is None or not self.requires:
-            return value
-        if not isinstance(self.requires, (list, tuple)):
-            requires = [self.requires]
-        else:
-            requires = copy.copy(self.requires)
-        requires.reverse()
-        for item in requires:
-            if hasattr(item, 'formatter'):
-                value = item.formatter(value)
-        return value
-
-    def __str__(self):
-        return '%s.%s' % (self._tablename, self.name)
-
-
-MEMDB.Field = Field  # ## required by gluon/globals.py session.connect
-
-
-def obj_represent(object, fieldtype, db):
-    if object is not None:
-        if fieldtype == 'date' and not isinstance(object,
-                                                  datetime.date):
-            (y, m, d) = [int(x) for x in str(object).strip().split('-')]
-            object = datetime.date(y, m, d)
-        elif fieldtype == 'time' and not isinstance(object, datetime.time):
-            time_items = [int(x) for x in str(object).strip().split(':')[:3]]
-            if len(time_items) == 3:
-                (h, mi, s) = time_items
-            else:
-                (h, mi, s) = time_items + [0]
-            object = datetime.time(h, mi, s)
-        elif fieldtype == 'datetime' and not isinstance(object,
-                                                        datetime.datetime):
-            (y, m, d) = [int(x) for x in
-                         str(object)[:10].strip().split('-')]
-            time_items = [int(x) for x in
-                          str(object)[11:].strip().split(':')[:3]]
-            if len(time_items) == 3:
-                (h, mi, s) = time_items
-            else:
-                (h, mi, s) = time_items + [0]
-            object = datetime.datetime(
-                y,
-                m,
-                d,
-                h,
-                mi,
-                s,
-            )
-        elif fieldtype == 'integer' and not isinstance(object, long):
-            object = long(object)
-
-    return object
-
-
-class QueryException:
-
-    def __init__(self, **a):
-        self.__dict__ = a
-
-
-class Query(object):
-
-    """
-    A query object necessary to define a set.
-    It can be stored or can be passed to GQLDB.__call__() to obtain a Set
-
-    Example:
-    query=db.users.name=='Max'
-    set=db(query)
-    records=set.select()
-    """
-
-    def __init__(
-        self,
-        left,
-        op=None,
-        right=None,
-    ):
-        if isinstance(right, (Field, Expression)):
-            raise SyntaxError(
-                'Query: right side of filter must be a value or entity')
-        if isinstance(left, Field) and left.name == 'id':
-            if op == '=':
-                self.get_one = QueryException(
-                    tablename=left._tablename, id=long(right or 0))
-                return
-            else:
-                raise SyntaxError('only equality by id is supported')
-        raise SyntaxError('not supported')
-
-    def __str__(self):
-        return str(self.left)
-
-
-class Set(object):
-
-    """
-    As Set represents a set of records in the database,
-    the records are identified by the where=Query(...) object.
-    normally the Set is generated by GQLDB.__call__(Query(...))
-
-    given a set, for example
-       set=db(db.users.name=='Max')
-    you can:
-       set.update(db.users.name='Massimo')
-       set.delete() # all elements in the set
-       set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10))
-    and take subsets:
-       subset=set(db.users.id<5)
-    """
-
-    def __init__(self, db, where=None):
-        self._db = db
-        self._tables = []
-        self.filters = []
-        if hasattr(where, 'get_all'):
-            self.where = where
-            self._tables.insert(0, where.get_all)
-        elif hasattr(where, 'get_one') and isinstance(where.get_one,
-                                                      QueryException):
-            self.where = where.get_one
-        else:
-
-            # find out which tables are involved
-
-            if isinstance(where, Query):
-                self.filters = where.left
-            self.where = where
-            self._tables = [field._tablename for (field, op, val) in
-                            self.filters]
-
-    def __call__(self, where):
-        if isinstance(self.where, QueryException) or isinstance(where,
-                                                                QueryException):
-            raise SyntaxError('neither self.where nor where can be a QueryException instance')
-        if self.where:
-            return Set(self._db, self.where & where)
-        else:
-            return Set(self._db, where)
-
-    def _get_table_or_raise(self):
-        tablenames = list(set(self._tables))  # unique
-        if len(tablenames) < 1:
-            raise SyntaxError('Set: no tables selected')
-        if len(tablenames) > 1:
-            raise SyntaxError('Set: no join in appengine')
-        return self._db[tablenames[0]]._tableobj
-
-    def _getitem_exception(self):
-        (tablename, id) = (self.where.tablename, self.where.id)
-        fields = self._db[tablename].fields
-        self.colnames = ['%s.%s' % (tablename, t) for t in fields]
-        item = self._db[tablename].get(id)
-        return (item, fields, tablename, id)
-
-    def _select_except(self):
-        (item, fields, tablename, id) = self._getitem_exception()
-        if not item:
-            return []
-        new_item = []
-        for t in fields:
-            if t == 'id':
-                new_item.append(long(id))
-            else:
-                new_item.append(getattr(item, t))
-        r = [new_item]
-        return Rows(self._db, r, *self.colnames)
-
-    def select(self, *fields, **attributes):
-        """
-        Always returns a Rows object, even if it may be empty
-        """
-
-        if isinstance(self.where, QueryException):
-            return self._select_except()
-        else:
-            raise SyntaxError('select arguments not supported')
-
-    def count(self):
-        return len(self.select())
-
-    def delete(self):
-        if isinstance(self.where, QueryException):
-            (item, fields, tablename, id) = self._getitem_exception()
-            if not item:
-                return
-            self._db[tablename].delete(id)
-        else:
-            raise Exception('deletion not implemented')
-
-    def update(self, **update_fields):
-        if isinstance(self.where, QueryException):
-            (item, fields, tablename, id) = self._getitem_exception()
-            if not item:
-                return
-            for (key, value) in update_fields.items():
-                setattr(item, key, value)
-            self._db[tablename].update(id, **item)
-        else:
-            raise Exception('update not implemented')
-
-
-def update_record(
-    t,
-    s,
-    id,
-    a,
-):
-    item = s.get(id)
-    for (key, value) in a.items():
-        t[key] = value
-        setattr(item, key, value)
-    s.update(id, **item)
-
-
-class Rows(object):
-
-    """
-    A wrapper for the return value of a select. It basically represents a table.
-    It has an iterator and each row is represented as a dictionary.
-    """
-
-    # ## this class still needs some work to care for ID/OID
-
-    def __init__(
-        self,
-        db,
-        response,
-        *colnames
-    ):
-        self._db = db
-        self.colnames = colnames
-        self.response = response
-
-    def __len__(self):
-        return len(self.response)
-
-    def __getitem__(self, i):
-        if i >= len(self.response) or i < 0:
-            raise SyntaxError('Rows: no such row: %i' % i)
-        if len(self.response[0]) != len(self.colnames):
-            raise SyntaxError('Rows: internal error')
-        row = DALStorage()
-        for j in xrange(len(self.colnames)):
-            value = self.response[i][j]
-            if isinstance(value, unicode):
-                value = value.encode('utf-8')
-            packed = self.colnames[j].split('.')
-            try:
-                (tablename, fieldname) = packed
-            except:
-                if not '_extra' in row:
-                    row['_extra'] = DALStorage()
-                row['_extra'][self.colnames[j]] = value
-                continue
-            table = self._db[tablename]
-            field = table[fieldname]
-            if not tablename in row:
-                row[tablename] = DALStorage()
-            if field.type[:9] == 'reference':
-                referee = field.type[10:].strip()
-                rid = value
-                row[tablename][fieldname] = rid
-            elif field.type == 'boolean' and value is not None:
-
-                # row[tablename][fieldname]=Set(self._db[referee].id==rid)
-
-                if value == True or value == 'T':
-                    row[tablename][fieldname] = True
-                else:
-                    row[tablename][fieldname] = False
-            elif field.type == 'date' and value is not None\
-                    and not isinstance(value, datetime.date):
-                (y, m, d) = [int(x) for x in
-                             str(value).strip().split('-')]
-                row[tablename][fieldname] = datetime.date(y, m, d)
-            elif field.type == 'time' and value is not None\
-                    and not isinstance(value, datetime.time):
-                time_items = [int(x) for x in
-                              str(value).strip().split(':')[:3]]
-                if len(time_items) == 3:
-                    (h, mi, s) = time_items
-                else:
-                    (h, mi, s) = time_items + [0]
-                row[tablename][fieldname] = datetime.time(h, mi, s)
-            elif field.type == 'datetime' and value is not None\
-                    and not isinstance(value, datetime.datetime):
-                (y, m, d) = [int(x) for x in
-                             str(value)[:10].strip().split('-')]
-                time_items = [int(x) for x in
-                              str(value)[11:].strip().split(':')[:3]]
-                if len(time_items) == 3:
-                    (h, mi, s) = time_items
-                else:
-                    (h, mi, s) = time_items + [0]
-                row[tablename][fieldname] = datetime.datetime(
-                    y,
-                    m,
-                    d,
-                    h,
-                    mi,
-                    s,
-                )
-            else:
-                row[tablename][fieldname] = value
-            if fieldname == 'id':
-                id = row[tablename].id
-                row[tablename].update_record = lambda t = row[tablename], \
-                    s = self._db[tablename], id = id, **a: update_record(t,
-                                                                         s, id, a)
-                for (referee_table, referee_name) in \
-                        table._referenced_by:
-                    s = self._db[referee_table][referee_name]
-                    row[tablename][referee_table] = Set(self._db, s
-                                                        == id)
-        if len(row.keys()) == 1:
-            return row[row.keys()[0]]
-        return row
-
-    def __iter__(self):
-        """
-        iterator over records
-        """
-
-        for i in xrange(len(self)):
-            yield self[i]
-
-    def __str__(self):
-        """
-        serializes the table into a csv file
-        """
-
-        s = cStringIO.StringIO()
-        writer = csv.writer(s)
-        writer.writerow(self.colnames)
-        c = len(self.colnames)
-        for i in xrange(len(self)):
-            row = [self.response[i][j] for j in xrange(c)]
-            for k in xrange(c):
-                if isinstance(row[k], unicode):
-                    row[k] = row[k].encode('utf-8')
-            writer.writerow(row)
-        return s.getvalue()
-
-    def xml(self):
-        """
-        serializes the table using SQLTABLE (if present)
-        """
-
-        return SQLTABLE(self).xml()
-
-
-def test_all():
-    """
-    How to run from web2py dir:
-     export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH
-     python gluon/contrib/memdb.py
-
-    Setup the UTC timezone and database stubs
-
-    >>> import os
-    >>> os.environ['TZ'] = 'UTC'
-    >>> import time
-    >>> if hasattr(time, 'tzset'):
-    ...   time.tzset()
-    >>>
-    >>> from google.appengine.api import apiproxy_stub_map
-    >>> from google.appengine.api.memcache import memcache_stub
-    >>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
-    >>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub())
-
-        Create a table with all possible field types
-    >>> from google.appengine.api.memcache import Client
-    >>> db=MEMDB(Client())
-    >>> tmp=db.define_table('users',              Field('stringf','string',length=32,required=True),              Field('booleanf','boolean',default=False),              Field('passwordf','password',notnull=True),              Field('blobf','blob'),              Field('uploadf','upload'),              Field('integerf','integer',unique=True),              Field('doublef','double',unique=True,notnull=True),              Field('datef','date',default=datetime.date.today()),              Field('timef','time'),              Field('datetimef','datetime'),              migrate='test_user.table')
-
-   Insert a field
-
-    >>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A',                       uploadf=None, integerf=5,doublef=3.14,                       datef=datetime.date(2001,1,1),                       timef=datetime.time(12,30,15),                       datetimef=datetime.datetime(2002,2,2,12,30,15))
-    >>> user_id != None
-    True
-
-    Select all
-
-    # >>> all = db().select(db.users.ALL)
-
-    Drop the table
-
-    # >>> db.users.drop()
-
-    Select many entities
-
-    >>> tmp = db.define_table(\"posts\",              Field('body','text'),              Field('total','integer'),              Field('created_at','datetime'))
-    >>> many = 20   #2010 # more than 1000 single fetch limit (it can be slow)
-    >>> few = 5
-    >>> most = many - few
-    >>> 0 < few < most < many
-    True
-    >>> for i in range(many):
-    ...     f=db.posts.insert(body='',                total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i))
-    >>>
-
-    # test timezones
-    >>> class TZOffset(datetime.tzinfo):
-    ...   def __init__(self,offset=0):
-    ...     self.offset = offset
-    ...   def utcoffset(self, dt): return datetime.timedelta(hours=self.offset)
-    ...   def dst(self, dt): return datetime.timedelta(0)
-    ...   def tzname(self, dt): return 'UTC' + str(self.offset)
-    ...
-    >>> SERVER_OFFSET = -8
-    >>>
-    >>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201)
-    >>> post_id = db.posts.insert(created_at=stamp,body='body1')
-    >>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at
-    >>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset())
-    >>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET))
-    >>> stamp == naive_stamp
-    True
-    >>> utc_stamp == server_stamp
-    True
-    >>> rows = db(db.posts.id==post_id).select()
-    >>> len(rows) == 1
-    True
-    >>> rows[0].body == 'body1'
-    True
-    >>> db(db.posts.id==post_id).delete()
-    >>> rows = db(db.posts.id==post_id).select()
-    >>> len(rows) == 0
-    True
-
-    >>> id = db.posts.insert(total='0')   # coerce str to integer
-    >>> rows = db(db.posts.id==id).select()
-    >>> len(rows) == 1
-    True
-    >>> rows[0].total == 0
-    True
-
-    Examples of insert, select, update, delete
-
-    >>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table')
-    >>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22')
-    >>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21')
-    >>> me=db(db.person.id==person_id).select()[0] # test select
-    >>> me.name
-    'Massimo'
-    >>> db(db.person.id==person_id).update(name='massimo') # test update
-    >>> me = db(db.person.id==person_id).select()[0]
-    >>> me.name
-    'massimo'
-    >>> str(me.birth)
-    '1971-12-21'
-
-    # resave date to ensure it comes back the same
-    >>> me=db(db.person.id==person_id).update(birth=me.birth) # test update
-    >>> me = db(db.person.id==person_id).select()[0]
-    >>> me.birth
-    datetime.date(1971, 12, 21)
-    >>> db(db.person.id==marco_id).delete() # test delete
-    >>> len(db(db.person.id==marco_id).select())
-    0
-
-    Update a single record
-
-    >>> me.update_record(name=\"Max\")
-    >>> me.name
-    'Max'
-    >>> me = db(db.person.id == person_id).select()[0]
-    >>> me.name
-    'Max'
-
-    """
-
-SQLField = Field
-SQLTable = Table
-SQLXorable = Expression
-SQLQuery = Query
-SQLSet = Set
-SQLRows = Rows
-SQLStorage = DALStorage
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()

部分文件因文件數量過多而無法顯示