Merge in rpcproxy (WSGI-based)
author Julien Kerihuel <j.kerihuel@openchange.org>
Thu, 31 May 2012 10:04:43 +0000 (10:04 +0000)
committer Julien Kerihuel <j.kerihuel@openchange.org>
Thu, 31 May 2012 10:04:43 +0000 (10:04 +0000)
13 files changed:
mapiproxy/services/ocsmanager/ocsmanager/config/NTLMAuthHandler.py [new file with mode: 0644]
mapiproxy/services/ocsmanager/ocsmanager/config/middleware.py
mapiproxy/services/ocsmanager/ocsmanager/controllers/as.py
mapiproxy/services/ocsmanager/ocsmanager/model/OCSAuthenticator.py [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.conf [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.wsgi [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/NTLMAuthHandler.py [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/RPCProxyApplication.py [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/channels.py [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/fdunix.py [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/packets.py [new file with mode: 0644]
mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/utils.py [new file with mode: 0644]
pyopenchange/mapistore/freebusy_properties.c

diff --git a/mapiproxy/services/ocsmanager/ocsmanager/config/NTLMAuthHandler.py b/mapiproxy/services/ocsmanager/ocsmanager/config/NTLMAuthHandler.py
new file mode 100644 (file)
index 0000000..388f73d
--- /dev/null
@@ -0,0 +1,152 @@
+import httplib
+import socket
+import uuid
+
+from samba.gensec import Security
+from samba.auth import AuthContext
+
+
+__all__ = ['NTLMAuthHandler']
+
+COOKIE_NAME = "ocs-ntlm-auth"
+
+
+class NTLMAuthHandler(object):
+    """
+    HTTP/1.0 ``NTLM`` authentication middleware
+
+    Parameters: application -- the application object that is called only upon
+    successful authentication.
+
+    """
+
+    def __init__(self, application):
+        # TODO: client expiration and/or cleanup
+        self.client_status = {}
+        self.application = application
+
+    def _in_progress_response(self, start_response, ntlm_data=None, client_id=None):
+        status = "401 %s" % httplib.responses[401]
+        content = "More data needed..."
+        headers = [("Content-Type", "text/plain"),
+                   ("Content-Length", "%d" % len(content))]
+        if ntlm_data is None:
+            www_auth_value = "NTLM"
+        else:
+            enc_ntlm_data = ntlm_data.encode("base64")
+            www_auth_value = ("NTLM %s"
+                              % enc_ntlm_data.strip().replace("\n", ""))
+        if client_id is not None:
+            # MUST occur when ntlm_data is None, can still occur otherwise
+            headers.append(("Set-Cookie", "%s=%s" % (COOKIE_NAME, client_id)))
+
+        headers.append(("WWW-Authenticate", www_auth_value))
+        start_response(status, headers)
+
+        return [content]
+
+    def _failure_response(self, start_response, explanation=None):
+        status = "403 %s" % httplib.responses[403]
+        content = "Authentication failure"
+        if explanation is not None:
+            content = content + " (%s)" % explanation
+        headers = [("Content-Type", "text/plain"),
+                   ("Content-Length", "%d" % len(content))]
+        start_response(status, headers)
+
+        return [content]
+
+    def _get_cookies(self, env):
+        cookies = {}
+        if "HTTP_COOKIE" in env:
+            cookie_str = env["HTTP_COOKIE"]
+            for pair in cookie_str.split(";"):
+                (key, value) = pair.strip().split("=")
+                cookies[key] = value
+
+        return cookies
+
+    def __call__(self, env, start_response):
+        cookies = self._get_cookies(env)
+        if COOKIE_NAME in cookies:
+            client_id = cookies[COOKIE_NAME]
+        else:
+            client_id = None
+
+        # old model that only works with mod_wsgi:
+        # if "REMOTE_ADDR" in env and "REMOTE_PORT" in env:
+        #     client_id = "%(REMOTE_ADDR)s:%(REMOTE_PORT)s".format(env)
+
+        if client_id is None or client_id not in self.client_status:
+            # first stage
+            server = Security.start_server(auth_context=AuthContext())
+            server.start_mech_by_name("ntlmssp")
+            client_id = str(uuid.uuid4())
+
+            if "HTTP_AUTHORIZATION" in env:
+                # Outlook may directly have sent a NTLM payload
+                auth = env["HTTP_AUTHORIZATION"]
+                auth_msg = server.update(auth[5:].decode('base64'))
+                response = self._in_progress_response(start_response,
+                                                      auth_msg[1],
+                                                      client_id)
+                self.client_status[client_id] = {"stage": "stage1",
+                                                 "server": server}
+            else:
+                self.client_status[client_id] = {"stage": "stage0",
+                                                 "server": server}
+                response = self._in_progress_response(start_response, None, client_id)
+        else:
+            status_stage = self.client_status[client_id]["stage"]
+
+            if status_stage == "ok":
+                # client authenticated previously
+                response = self.application(env, start_response)
+            elif status_stage == "stage0":
+                # test whether client supports "NTLM"
+                if "HTTP_AUTHORIZATION" in env:
+                    auth = env["HTTP_AUTHORIZATION"]
+                    server = self.client_status[client_id]["server"]
+                    auth_msg = server.update(auth[5:].decode('base64'))
+                    response = self._in_progress_response(start_response,
+                                                          auth_msg[1])
+                    self.client_status[client_id]["stage"] = "stage1"
+                else:
+                    del(self.client_status[client_id])
+                    response = self._failure_response(start_response,
+                                                      "failure at '%s'"
+                                                      % status_stage)
+            elif status_stage == "stage1":
+                if "HTTP_AUTHORIZATION" in env:
+                    auth = env["HTTP_AUTHORIZATION"]
+                    server = self.client_status[client_id]["server"]
+                    try:
+                        auth_msg = server.update(auth[5:].decode('base64'))
+                    except RuntimeError: # a bit violent...
+                        auth_msg = (0,)
+
+                    if auth_msg[0] == 1:
+                        # authentication completed
+                        self.client_status[client_id]["stage"] = "ok"
+                        del(self.client_status[client_id]["server"])
+                        response = self.application(env, start_response)
+                    else:
+                        # we start over with the whole process
+
+                        server = Security.start_server(auth_context=AuthContext())
+                        server.start_mech_by_name("ntlmssp")
+                        self.client_status[client_id] = {"stage": "stage0",
+                                                         "server": server}
+                        response = self._in_progress_response(start_response)
+                else:
+                    del(self.client_status[client_id])
+                    response = self._failure_response(start_response,
+                                                      "failure at '%s'"
+                                                      % status_stage)
+            else:
+                raise RuntimeError("none shall pass!")
+
+        return response
+
+
+middleware = NTLMAuthHandler
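
For context, NTLMAuthHandler composes like any other WSGI wrapper: the inner
application is only reached once the NTLM exchange has completed. A minimal
sketch (the inner_app below is purely illustrative, not part of this commit):

    from NTLMAuthHandler import NTLMAuthHandler

    def inner_app(environ, start_response):
        # only reached by requests that have passed NTLM authentication
        body = "authenticated"
        start_response("200 OK", [("Content-Type", "text/plain"),
                                  ("Content-Length", str(len(body)))])
        return [body]

    application = NTLMAuthHandler(inner_app)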
diff --git a/mapiproxy/services/ocsmanager/ocsmanager/config/middleware.py b/mapiproxy/services/ocsmanager/ocsmanager/config/middleware.py
index f0b2323..3210695 100644 (file)
@@ -1,4 +1,5 @@
 """Pylons middleware initialization"""
+
 from beaker.middleware import SessionMiddleware
 from paste.cascade import Cascade
 from paste.registry import RegistryManager
@@ -10,6 +11,11 @@ from routes.middleware import RoutesMiddleware
 
 from ocsmanager.config.environment import load_environment
 
+# from paste.auth.basic import AuthBasicHandler
+# from ocsmanager.model.OCSAuthenticator import *
+
+from NTLMAuthHandler import NTLMAuthHandler
+
 def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
     """Create a Pylons WSGI application and return it
 
@@ -56,6 +62,10 @@ def make_app(global_conf, full_stack=True, static_files=True, **app_conf):
         else:
             app = StatusCodeRedirect(app, [400, 401, 403, 404, 417, 500])
 
+    # authenticator = OCSAuthenticator(config)
+    # app = AuthBasicHandler(app, "OCSManager", authenticator)
+    app = NTLMAuthHandler(app)
+
     # Establish the Registry for this application
     app = RegistryManager(app)
 
diff --git a/mapiproxy/services/ocsmanager/ocsmanager/controllers/as.py b/mapiproxy/services/ocsmanager/ocsmanager/controllers/as.py
index b71070e..29faad8 100644 (file)
@@ -318,7 +318,7 @@ class ExchangeService(ServiceBase):
     def _timezone_datetime(year, tz_time):
         # we round the dates to midnight since events are unlikely to start at
         # such an early time of day
-        return datetime.datetime(year, tz_time.Month, tz_time.DayOrder)
+        return datetime.datetime(year, tz_time.Month + 1, tz_time.DayOrder + 1)
 
     @staticmethod
     def _freebusy_date(timezone, utcdate):
@@ -344,10 +344,10 @@ class ExchangeService(ServiceBase):
         start = freebusy_view_options.TimeWindow.StartTime
         end = freebusy_view_options.TimeWindow.EndTime
 
-        a = time()
-        print "fetching freebusy"
+        a = time()
+        print "fetching freebusy"
         freebusy_props = cal_folder.fetch_freebusy_properties(start, end)
-        print "fetched freebusy: %f secs" % (time() - a)
+        print "fetched freebusy: %f secs" % (time() - a)
 
         fb_response = FreeBusyResponse()
         fb_response.ResponseMessage = ResponseMessageType()
diff --git a/mapiproxy/services/ocsmanager/ocsmanager/model/OCSAuthenticator.py b/mapiproxy/services/ocsmanager/ocsmanager/model/OCSAuthenticator.py
new file mode 100644 (file)
index 0000000..894f2db
--- /dev/null
@@ -0,0 +1,33 @@
+import ldap
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class OCSAuthenticator(object):
+    def __init__(self, config):
+        self.config = config
+
+    def __call__(self, environ, username, password):
+        result = False
+
+        config = self.config['ocsmanager']['rpcproxy'] 
+        # FIXME: we should perform an indirect bind, based on sAMAccountName
+        userdn = 'CN=%s,%s' % (username, config['ldap_basedn'])
+
+        try:
+            # FIXME: should be # 'ocsmanager' alone since we require auth from
+            # all actions
+            l = ldap.open(config['ldap_host'])
+            l.protocol_version = ldap.VERSION3
+            l.simple_bind(userdn, password)
+            l.result() # this will cause an exception in case of a failure
+            result = True
+        except ldap.LDAPError, e:
+            logger.info("authentication failure for '%s'", userdn)
+            logger.debug(e)
+        l.unbind()
+
+        return result
+
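OCSAuthenticator follows the paste.auth.basic callback convention: it is
invoked with (environ, username, password) and returns True when the LDAP
simple bind succeeds. A sketch of how it would be wired, mirroring the lines
left commented out in middleware.py ('config' is assumed to be the loaded
Pylons configuration):

    from paste.auth.basic import AuthBasicHandler
    from ocsmanager.model.OCSAuthenticator import OCSAuthenticator

    authenticator = OCSAuthenticator(config)
    # every request now requires HTTP Basic credentials checked against LDAP
    app = AuthBasicHandler(app, "OCSManager", authenticator)
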
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.conf b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.conf
new file mode 100644 (file)
index 0000000..2823939
--- /dev/null
@@ -0,0 +1,17 @@
+# LogLevel Info
+WSGIDaemonProcess rpcproxy threads=10 processes=2 inactivity-timeout=300
+# processes=4
+# maximum-requests=20
+WSGILazyInitialization On
+
+KeepAliveTimeout 120
+
+<Directory /home/wsourdeau/src/openchange/mapiproxy/services/ocsmanager/rpcproxy>
+  WSGIPassAuthorization On
+  WSGIProcessGroup rpcproxy
+</Directory>
+
+WSGIScriptAlias /rpc/rpcproxy.dll /home/wsourdeau/src/openchange/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.wsgi
+# WSGIScriptAlias /rpcwithcert/rpcproxy.dll /home/wsourdeau/src/openchange/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.wsgi
+# WSGIScriptAlias /rpc/rpcproxy.dll /home/wsourdeau/src/openchange/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.wsgi
+
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.wsgi b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy.wsgi
new file mode 100644 (file)
index 0000000..a9bfe16
--- /dev/null
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+#
+# rpcproxy.wsgi -- OpenChange RPC-over-HTTP implementation
+#
+# Copyright (C) 2012  Julien Kerihuel <j.kerihuel@openchange.org>
+#                     Wolfgang Sourdeau <wsourdeau@inverse.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#   
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#   
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sys
+
+sys.path.extend(("/usr/local/samba/lib/python2.7/site-packages",
+                 "/home/wsourdeau/src/openchange/mapiproxy/services/ocsmanager/rpcproxy"))
+
+from rpcproxy.NTLMAuthHandler import *
+from rpcproxy.RPCProxyApplication import *
+
+application = NTLMAuthHandler(RPCProxyApplication())
+# application = RPCProxyApplication()
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/NTLMAuthHandler.py b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/NTLMAuthHandler.py
new file mode 100644 (file)
index 0000000..66fe4e7
--- /dev/null
@@ -0,0 +1,223 @@
+# NTLMAuthHandler.py -- OpenChange RPC-over-HTTP implementation
+#
+# Copyright (C) 2012  Wolfgang Sourdeau <wsourdeau@inverse.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#   
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#   
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""This module provides the NTLMAuthHandler class, a WSGI middleware that
+enables authentication via RPC to Samba.
+
+"""
+
+import httplib
+from uuid import uuid4, UUID
+from socket import socket, _socketobject, SHUT_RDWR, AF_INET, AF_UNIX, \
+    SOCK_STREAM, MSG_WAITALL, error as socket_error
+from struct import pack, error as struct_error
+
+from packets import *
+
+
+COOKIE_NAME = "ocs-ntlm-auth"
+SAMBA_HOST = "localhost"
+SAMBA_PORT = 1024
+
+
+class NTLMAuthHandler(object):
+    """
+    HTTP/1.0 ``NTLM`` authentication middleware
+
+    Parameters: application -- the application object that is called only upon
+    successful authentication.
+
+    """
+
+    def __init__(self, application):
+        # TODO: client expiration and/or cleanup
+        self.client_status = {}
+        self.application = application
+
+    def _in_progress_response(self, start_response,
+                              ntlm_data=None, client_id=None):
+        status = "401 %s" % httplib.responses[401]
+        content = "More data needed..."
+        headers = [("Content-Type", "text/plain"),
+                   ("Content-Length", "%d" % len(content))]
+        if ntlm_data is None:
+            www_auth_value = "NTLM"
+        else:
+            enc_ntlm_data = ntlm_data.encode("base64")
+            www_auth_value = ("NTLM %s"
+                              % enc_ntlm_data.strip().replace("\n", ""))
+
+        if client_id is not None:
+            # MUST occur when ntlm_data is None, can still occur otherwise
+            headers.append(("Set-Cookie", "%s=%s" % (COOKIE_NAME, client_id)))
+
+        headers.append(("WWW-Authenticate", www_auth_value))
+        start_response(status, headers)
+
+        return [content]
+
+    def _get_cookies(self, env):
+        cookies = {}
+        if "HTTP_COOKIE" in env:
+            cookie_str = env["HTTP_COOKIE"]
+            for pair in cookie_str.split(";"):
+                (key, value) = pair.strip().split("=")
+                cookies[key] = value
+
+        return cookies
+
+    def _stage0(self, client_id, env, start_response):
+        # print >>sys.stderr, "* client auth stage0"
+
+        auth = env["HTTP_AUTHORIZATION"]
+        ntlm_payload = auth[5:].decode("base64")
+
+        # print >> sys.stderr, "connecting to host"
+        server = socket(AF_INET, SOCK_STREAM)
+        server.connect((SAMBA_HOST, SAMBA_PORT))
+        # print >> sys.stderr, "host: %s" % str(server.getsockname())
+
+        # print >> sys.stderr, "building bind packet"
+        packet = RPCBindOutPacket()
+        packet.ntlm_payload = ntlm_payload
+        
+        # print >> sys.stderr, "sending bind packet"
+        server.sendall(packet.make())
+
+        # print >> sys.stderr, "sent bind packet, receiving response"
+
+        packet = RPCPacket.from_file(server)
+        # print >> sys.stderr, "response parsed: %s" % packet.pretty_dump()
+
+        if isinstance(packet, RPCBindACKPacket):
+            # print >> sys.stderr, "ACK received"
+
+            client_id = str(uuid4())
+            self.client_status[client_id] = {"status": "challenged",
+                                             "server": server}
+
+            response = self._in_progress_response(start_response,
+                                                  packet.ntlm_payload,
+                                                  client_id)
+        else:
+            # print >> sys.stderr, "NAK received"
+            server.shutdown(SHUT_RDWR)
+            server.close()
+            response = self._in_progress_response(start_response)
+
+        return response
+
+    def _stage1(self, client_id, env, start_response):
+        # print >>sys.stderr, "* client auth stage1"
+
+        server = self.client_status[client_id]["server"]
+        # print >> sys.stderr, "host: %s" % str(server.getsockname())
+
+        auth = env["HTTP_AUTHORIZATION"]
+
+        ntlm_payload = auth[5:].decode("base64")
+
+        # print >> sys.stderr, "building auth_3 and ping packets"
+        packet = RPCAuth3OutPacket()
+        packet.ntlm_payload = ntlm_payload
+        server.sendall(packet.make())
+
+        # This is a hack:
+        # A ping at this stage will trigger a connection close
+        # from Samba and an error from Exchange. Since a successful
+        # authentication does not trigger a response from the server, this
+        # provides a simple way to ensure that it passed, without actually
+        # performing an RPC operation on the "mgmt" interface. The choice of
+        # "mgmt" was due to the fact that we want to keep this authenticator
+        # middleware to be reusable for other Samba services while "mgmt"
+        # seemes to be the only available interface from Samba outside of the
+        # ones provided by OpenChange.
+        packet = RPCPingOutPacket()
+        packet.call_id = 2
+        server.sendall(packet.make())
+        # print >> sys.stderr, "sent auth3 and ping packets, receiving response"
+
+        try:
+            packet = RPCPacket.from_file(server)
+            if isinstance(packet, RPCFaultPacket):
+                if packet.header["call_id"] == 2:
+                    # the Fault packet related to our Ping operation
+                    success = True
+                else:
+                    success = False
+            else:
+                raise ValueError("unexpected packet")
+        except socket_error:
+            # Samba closed the connection
+            success = True
+        except struct_error:
+            # Samba closed the connection
+            success = True
+
+        server.shutdown(SHUT_RDWR)
+        server.close()
+
+        if success:
+            del(self.client_status[client_id]["server"])
+
+            # authentication completed
+            self.client_status[client_id]["status"] = "ok"
+            response = self.application(env, start_response)
+        else:
+            # we start over with the whole process
+            del(self.client_status[client_id])
+            response = self._in_progress_response(start_response)
+
+        return response
+
+    def __call__(self, env, start_response):
+        # TODO: validate authorization payload
+
+        # print >>sys.stderr, "starting request: %d" % os.getpid()
+        # old model that only works with mod_wsgi:
+        # if "REMOTE_ADDR" in env and "REMOTE_PORT" in env:
+        #     client_id = "%(REMOTE_ADDR)s:%(REMOTE_PORT)s".format(env)
+
+        has_auth = "HTTP_AUTHORIZATION" in env
+
+        cookies = self._get_cookies(env)
+        if COOKIE_NAME in cookies:
+            client_id = cookies[COOKIE_NAME]
+        else:
+            client_id = None
+
+        # print >>sys.stderr, "client_id: %s (known: %s)" % (str(client_id), client_id in self.client_status)
+
+        if has_auth:
+            if client_id is None or client_id not in self.client_status:
+                # stage 0, where the cookie has not been set yet and where we
+                # know the NTLM payload is a NEGOTIATE message
+                response = self._stage0(client_id, env, start_response)
+            else:
+                # stage 1, where the client has already received the challenge
+                # from the server and is now sending an AUTH message
+                response = self._stage1(client_id, env, start_response)
+        else:
+            if client_id is None or client_id not in self.client_status:
+                # this client has never been seen
+                response = self._in_progress_response(start_response, None)
+            else:
+                # authenticated, where no NTLM payload is provided anymore
+                response = self.application(env, start_response)
+
+        return response
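
The verification trick used above can be read in isolation: the two NTLM legs
are relayed to Samba as a DCERPC bind and auth_3 on port 1024, and a ping on a
second call_id probes whether the security context was accepted. A condensed
sketch using the packet classes from packets.py; negotiate_blob and
authenticate_blob stand in for the NTLM messages taken from the client's
Authorization headers:

    from socket import socket, AF_INET, SOCK_STREAM
    from packets import RPCPacket, RPCBindOutPacket, RPCBindACKPacket, \
        RPCAuth3OutPacket, RPCPingOutPacket

    server = socket(AF_INET, SOCK_STREAM)
    server.connect(("localhost", 1024))     # SAMBA_HOST, SAMBA_PORT

    bind = RPCBindOutPacket()
    bind.ntlm_payload = negotiate_blob      # NTLM NEGOTIATE from the client
    server.sendall(bind.make())

    ack = RPCPacket.from_file(server)       # bind_ack carries the NTLM CHALLENGE
    assert isinstance(ack, RPCBindACKPacket)

    auth3 = RPCAuth3OutPacket()
    auth3.ntlm_payload = authenticate_blob  # NTLM AUTHENTICATE from the client
    server.sendall(auth3.make())

    ping = RPCPingOutPacket()
    ping.call_id = 2                        # a fault on call_id 2 (or a closed
    server.sendall(ping.make())             # connection) means the auth passed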
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/RPCProxyApplication.py b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/RPCProxyApplication.py
new file mode 100644 (file)
index 0000000..a73b7df
--- /dev/null
@@ -0,0 +1,73 @@
+# RPCProxyApplication.py -- OpenChange RPC-over-HTTP implementation
+#
+# Copyright (C) 2012  Wolfgang Sourdeau <wsourdeau@inverse.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#   
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#   
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import logging
+import sys
+
+from channels import RPCProxyInboundChannelHandler,\
+    RPCProxyOutboundChannelHandler
+
+
+class RPCProxyApplication(object):
+    def __init__(self):
+        print >>sys.stderr, "RPCProxy started"
+
+    def __call__(self, environ, start_response):
+        if "wsgi.errors" in environ:
+            log_stream = environ["wsgi.errors"]
+        else:
+            log_stream = sys.stderr
+
+        logHandler = logging.StreamHandler(log_stream)
+        fmter = logging.Formatter("[%(process)d] [%(levelname)s] %(message)s")
+        logHandler.setFormatter(fmter)
+
+        logger = logging.Logger("rpcproxy")
+        logger.setLevel(logging.INFO)
+        logger.addHandler(logHandler)
+        self.logger = logger
+
+        if "REQUEST_METHOD" in environ:
+            method = environ["REQUEST_METHOD"]
+            method_method = "_do_" + method
+            if hasattr(self, method_method):
+                method_method_method = getattr(self, method_method)
+                response = method_method_method(environ, start_response)
+            else:
+                response = self._unsupported_method(environ, start_response)
+        else:
+            response = self._unsupported_method(environ, start_response)
+
+        return response
+
+    @staticmethod
+    def _unsupported_method(environ, start_response):
+        msg = "Unsupported method"
+        start_response("501 Not Implemented", [("Content-Type", "text/plain"),
+                                               ("Content-length",
+                                                str(len(msg)))])
+
+        return [msg]
+
+    def _do_RPC_IN_DATA(self, environ, start_response):
+        handler = RPCProxyInboundChannelHandler(self.logger)
+        return handler.sequence(environ, start_response)
+
+    def _do_RPC_OUT_DATA(self, environ, start_response):
+        handler = RPCProxyOutboundChannelHandler(self.logger)
+        return handler.sequence(environ, start_response)
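
Since RPCProxyApplication only dispatches on REQUEST_METHOD (RPC_IN_DATA and
RPC_OUT_DATA), the dispatch logic can be poked at locally without Apache. A
rough sketch with the stdlib wsgiref server, for experimentation only; the
long-lived RPC_OUT_DATA responses really require the mod_wsgi deployment from
rpcproxy.conf:

    from wsgiref.simple_server import make_server

    from rpcproxy.NTLMAuthHandler import NTLMAuthHandler
    from rpcproxy.RPCProxyApplication import RPCProxyApplication

    # unsupported methods (GET, POST, ...) receive the 501 response above
    app = NTLMAuthHandler(RPCProxyApplication())
    make_server("127.0.0.1", 8080, app).serve_forever()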
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/channels.py b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/channels.py
new file mode 100644 (file)
index 0000000..f601b5a
--- /dev/null
@@ -0,0 +1,492 @@
+# channels.py -- OpenChange RPC-over-HTTP implementation
+#
+# Copyright (C) 2012  Julien Kerihuel <j.kerihuel@openchange.org>
+#                     Wolfgang Sourdeau <wsourdeau@inverse.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#   
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#   
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+from select import poll, POLLIN, POLLHUP
+from socket import socket, AF_INET, AF_UNIX, SOCK_STREAM, MSG_WAITALL, \
+    error as socket_error
+from struct import pack, unpack_from
+import sys
+from time import time, sleep
+from uuid import UUID
+
+# from rpcproxy.RPCH import RPCH, RTS_FLAG_ECHO
+from fdunix import send_socket, receive_socket
+from packets import RTS_CMD_CONNECTION_TIMEOUT, RTS_CMD_VERSION, \
+    RTS_CMD_RECEIVE_WINDOW_SIZE, RTS_CMD_CONNECTION_TIMEOUT, \
+    RTS_FLAG_ECHO, \
+    RPCPacket, RPCRTSPacket, RPCRTSOutPacket
+
+
+"""Documentation:
+
+1. "Connection Establishment" sequence (from RPCH.pdf, 3.2.1.5.3.1)
+
+  client -> IN request -> proxy in
+  # server -> legacy server response -> proxy in
+  # server -> legacy server response -> proxy out
+  client -> Out request -> proxy out
+  client -> A1 -> proxy out
+  client -> B1 -> proxy in
+  # proxy out -> A2 -> server
+  proxy out -> OUT channel response -> client
+  # proxy in -> B2 -> server
+  proxy out -> A3 -> client
+  # server -> C1 -> proxy out
+  # server -> B3 -> proxy in
+  proxy out -> C2 -> client
+
+2. internal unix socket protocols
+
+   Note: OUT proxy is always the server
+
+ * establishing virtual connection
+ OUT proxy listens on unix socket
+ IN proxy connects to OUT proxy
+ IN -> OUT: "IP"
+ IN -> OUT: in_window_size
+ IN -> OUT: in_conn_timeout
+ OUT -> IN: sends connection to OpenChange
+ (TODO: socket close at this point?)
+
+ * channel recycling (not used yet, hypothetical)
+ When new OUT conn arrives:
+ new OUT -> OUT: "OP"
+ OUT -> new OUT: OUT listening socket (fdunix)
+ OUT -> new OUT: IN socket (fdunix)
+ OUT -> new OUT: oc socket (fdunix)
+ close OUT socket locally
+"""
+
+
+# these IDs must have the same length
+INBOUND_PROXY_ID = "IP"
+OUTBOUND_PROXY_ID = "OP"
+SOCKETS_DIR = "/tmp/rpcproxy"
+OC_HOST = "127.0.0.1"
+
+class RPCProxyChannelHandler(object):
+    def __init__(self, logger):
+        self.logger = logger
+
+        self.client_socket = None # placeholder for wsgi.input
+
+        self.bytes_read = 0
+        self.bytes_written = 0
+        self.startup_time = time()
+
+        self.channel_cookie = None
+        self.connection_cookie = None
+
+    def handle_echo_request(self, environ, start_response):
+        self.logger.info("handling echo request")
+
+        packet = RPCRTSOutPacket()
+        packet.flags = RTS_FLAG_ECHO
+        data = packet.make()
+        self.bytes_written = self.bytes_written + packet.size
+
+        start_response("200 Success", [("Content-length", "%d" % packet.size),
+                                       ("Content-Type", "application/rpc")])
+
+        return [data]
+
+    def log_connection_stats(self):
+        self.logger.info("request took %f secs; %d bytes received; %d bytes sent"
+                         % ((time() - self.startup_time),
+                            self.bytes_read, self.bytes_written))
+
+
+class RPCProxyInboundChannelHandler(RPCProxyChannelHandler):
+    def __init__(self, logger):
+        RPCProxyChannelHandler.__init__(self, logger)
+        self.oc_conn = None
+        self.window_size = 0
+        self.conn_timeout = 0
+        self.client_keepalive = 0
+        self.association_group_id = None
+
+    def _receive_conn_b1(self):
+        # CONN/B1 RTS PDU (TODO: validation)
+        # receive the cookie
+        self.logger.info("IN: receiving CONN/B1")
+
+        packet = RPCPacket.from_file(self.client_socket, self.logger)
+        if not isinstance(packet, RPCRTSPacket):
+            raise Exception("Unexpected non-rts packet received for CONN/B1")
+        self.connection_cookie = str(UUID(bytes=packet.commands[1]["Cookie"]))
+        self.channel_cookie = str(UUID(bytes=packet.commands[2]["Cookie"]))
+        self.client_keepalive = packet.commands[4]["ClientKeepalive"]
+        self.association_group_id = str(UUID(bytes=packet.commands[5] \
+                                                 ["AssociationGroupId"]))
+        self.bytes_read = self.bytes_read + packet.size
+
+    def _connect_to_OUT_channel(self):
+        # FIXME: we might need to keep a persistent connection to the OUT
+        # channel
+
+        # connect as a client to the cookie unix socket
+        socket_name = os.path.join(SOCKETS_DIR, self.connection_cookie)
+        self.logger.info("IN: connecting to OUT via unix socket '%s'"
+                         % socket_name)
+        sock = socket(AF_UNIX, SOCK_STREAM)
+        connected = False
+        attempt = 0
+        while not connected:
+            try:
+                attempt = attempt + 1
+                sock.connect(socket_name)
+                connected = True
+            except socket_error:
+                self.logger.info("IN: handling socket.error: %s"
+                                 % str(sys.exc_info()))
+                if attempt < 10:
+                    self.logger.warn("IN: reattempting to connect to OUT"
+                                     " channel... (%d/10)" % attempt)
+                    sleep(1)
+
+        if connected:
+            self.logger.info("IN: connection succeeded")
+            self.logger.info("IN: sending window size and connection timeout")
+
+            # identify ourselves as the IN proxy
+            sock.sendall(INBOUND_PROXY_ID)
+
+            # send a window_size of 256 KiB (the maximum allowed)
+            # and a conn_timeout in milliseconds (the maximum allowed)
+            sock.sendall(pack("<ll", (256 * 1024), 14400000))
+
+            # recv oc socket
+            self.oc_conn = receive_socket(sock)
+
+            self.logger.info("IN: oc_conn received (fileno=%d)"
+                             % self.oc_conn.fileno())
+            sock.close()
+        else:
+            self.logger.error("too many failed attempts to establish a"
+                              " connection to OUT channel")
+
+        return connected
+
+    def _runloop(self):
+        self.logger.info("IN: runloop")
+
+        status = True
+        while status:
+            try:
+                oc_packet = RPCPacket.from_file(self.client_socket,
+                                                self.logger)
+                self.bytes_read = self.bytes_read + oc_packet.size
+
+                self.logger.info("IN: packet headers = "
+                                 + oc_packet.pretty_dump())
+
+                if isinstance(oc_packet, RPCRTSPacket):
+                    # or oc_packet.header["ptype"] == DCERPC_PKT_AUTH3):
+                    # we do not forward rts packets
+                    self.logger.info("IN: ignored RTS packet")
+                else:
+                    self.logger.info("IN: sending packet to OC")
+                    self.oc_conn.sendall(oc_packet.data)
+                    self.bytes_written = self.bytes_written + oc_packet.size
+            except IOError:
+                status = False
+                # exc = sys.exc_info()
+                self.logger.error("IN: client connection closed")
+                self._notify_OUT_channel()
+
+    def _notify_OUT_channel(self):
+        self.logger.info("IN: notifying OUT channel of shutdown")
+
+        socket_name = os.path.join(SOCKETS_DIR, self.connection_cookie)
+        self.logger.info("IN: connecting to OUT via unix socket '%s'"
+                         % socket_name)
+        sock = socket(AF_UNIX, SOCK_STREAM)
+        connected = False
+        attempt = 0
+        while not connected:
+            try:
+                attempt = attempt + 1
+                sock.connect(socket_name)
+                connected = True
+            except socket_error:
+                self.logger.info("IN: handling socket.error: %s"
+                                 % str(sys.exc_info()))
+                if attempt < 10:
+                    self.logger.warn("IN: reattempting to connect to OUT"
+                                     " channel... (%d/10)" % attempt)
+                    sleep(1)
+
+        if connected:
+            self.logger.info("IN: connection succeeded")
+            try:
+                sock.sendall(INBOUND_PROXY_ID + "q")
+                sock.close()
+            except:
+                # UNIX socket might already have been closed by OUT channel
+                pass
+        else:
+            self.logger.error("too many failed attempts to establish a"
+                              " connection to OUT channel")
+
+    def _terminate_oc_socket(self):
+        self.oc_conn.close()
+
+    def sequence(self, environ, start_response):
+        self.logger.info("IN: processing request")
+        if "REMOTE_PORT" in environ:
+            self.logger.info("IN: remote port = %s" % environ["REMOTE_PORT"])
+        # self.logger.info("IN: path: ' + self.path)
+
+        content_length = int(environ["CONTENT_LENGTH"])
+        self.logger.info("IN: request size is %d" % content_length)
+
+        # echo request
+        if content_length <= 0x10:
+            self.logger.info("IN: Exiting (1) from do_RPC_IN_DATA")
+            for data in self.handle_echo_request(environ, start_response):
+                yield data
+        elif content_length >= 128:
+            self.logger.info("IN: Processing IN channel request")
+
+            self.client_socket = environ["wsgi.input"]
+            self._receive_conn_b1()
+            connected = self._connect_to_OUT_channel()
+
+            if connected:
+                start_response("200 Success",
+                               [("Content-Type", "application/rpc"),
+                                ("Content-length", "0")])
+                self._runloop()
+
+            self._terminate_oc_socket()
+
+            self.log_connection_stats()
+            self.logger.info("IN: Exiting (2) from do_RPC_IN_DATA")
+            
+            # TODO: error handling
+            start_response("200 Success",
+                           [("Content-length", "0"),
+                            ("Content-Type", "application/rpc")])
+            yield ""
+        else:
+            raise Exception("This content-length is not handled")
+
+        # OLD CODE
+        # msg = "RPC_IN_DATA method"
+
+        # content_length = environ["CONTENT_LENGTH"]
+        # # echo request
+        # if content_length <= 10:
+        #     pass
+
+        # start_response("200 OK", [("Content-Type", "text/plain"),
+        #                           ("Content-length", "%s" % len(msg))])
+
+        # return [msg]
+
+class RPCProxyOutboundChannelHandler(RPCProxyChannelHandler):
+    def __init__(self, logger):
+        RPCProxyChannelHandler.__init__(self, logger)
+        self.unix_socket = None
+        self.oc_conn = None
+        self.in_window_size = 0
+        self.in_conn_timeout = 0
+
+    def _receive_conn_a1(self):
+        # receive the cookie
+        # TODO: validation of CONN/A1
+        self.logger.info("OUT: receiving CONN/A1")
+        packet = RPCPacket.from_file(self.client_socket, self.logger)
+        if not isinstance(packet, RPCRTSPacket):
+            raise Exception("Unexpected non-rts packet received for CONN/A1")
+        self.connection_cookie = str(UUID(bytes=packet.commands[1]["Cookie"]))
+        self.channel_cookie = str(UUID(bytes=packet.commands[2]["Cookie"]))
+
+    def _send_conn_a3(self):
+        self.logger.info("OUT: sending CONN/A3 to client")
+        # send the A3 response to the client
+        packet = RPCRTSOutPacket(self.logger)
+        # we set the minimum timeout value allowed; ideally this would come
+        # from the Apache configuration or from a config file
+        packet.add_command(RTS_CMD_CONNECTION_TIMEOUT, 120000)
+        self.bytes_written = self.bytes_written + packet.size
+
+        return packet.make()
+
+    def _send_conn_c2(self):
+        self.logger.info("OUT: sending CONN/C2 to client")
+        # send the C2 response to the client
+        packet = RPCRTSOutPacket(self.logger)
+        # we set the minimum timeout value allowed; ideally this would come
+        # from the Apache configuration or from a config file
+        packet.add_command(RTS_CMD_VERSION, 1)
+        packet.add_command(RTS_CMD_RECEIVE_WINDOW_SIZE, self.in_window_size)
+        packet.add_command(RTS_CMD_CONNECTION_TIMEOUT, self.in_conn_timeout)
+        self.bytes_written = self.bytes_written + packet.size
+
+        return packet.make()
+
+    def _setup_oc_socket(self):
+        # create IP connection to OpenChange
+        self.logger.info("OUT: connecting to OC_HOST:1024")
+        connected = False
+        while not connected:
+            try:
+                oc_conn = socket(AF_INET, SOCK_STREAM)
+                oc_conn.connect((OC_HOST, 1024))
+                connected = True
+            except socket_error:
+                self.logger.info("OUT: failure to connect, retrying...")
+                sleep(1)
+        self.logger.info("OUT: connection to OC succeeeded (fileno=%d)"
+                         % oc_conn.fileno())
+        self.oc_conn = oc_conn
+
+    def _setup_channel_socket(self):
+        # TODO: add code to create missing socket dir
+        # create the corresponding unix socket
+        socket_name = os.path.join(SOCKETS_DIR, self.connection_cookie)
+        self.logger.info("OUT: creating unix socket '%s'" % socket_name)
+        if os.access(socket_name, os.F_OK):
+            os.remove(socket_name)
+        sock = socket(AF_UNIX, SOCK_STREAM)
+        sock.bind(socket_name)
+        sock.listen(2)
+        self.unix_socket = sock
+
+    def _wait_IN_channel(self):
+        self.logger.info("OUT: waiting for connection from IN")
+        # wait for the IN channel to connect as a B1 should be occurring
+        # on the other side
+        in_sock = self.unix_socket.accept()[0]
+        data = in_sock.recv(2, MSG_WAITALL)
+        if data != INBOUND_PROXY_ID:
+            raise IOError("connection must be from IN proxy (1): /%s/"
+                          % data)
+
+        self.logger.info("OUT: receiving window size + conn_timeout")
+        # receive the WindowSize + ConnectionTimeout
+        (self.in_window_size, self.in_conn_timeout) = \
+            unpack_from("<ll", in_sock.recv(8, MSG_WAITALL))
+        # send OC socket
+        self.logger.info("OUT: sending OC socket to IN")
+        send_socket(in_sock, self.oc_conn)
+        in_sock.close()
+
+    def _runloop(self):
+        self.logger.info("OUT: runloop")
+
+        unix_fd = self.unix_socket.fileno()
+        oc_fd = self.oc_conn.fileno()
+
+        fd_pool = poll()
+        fd_pool.register(unix_fd, POLLIN)
+        fd_pool.register(oc_fd, POLLIN)
+
+        # Listen for data from the listener
+        status = True
+        while status:
+            for data in fd_pool.poll(1000):
+                fd, event_no = data
+                if fd == oc_fd:
+                    # self.logger.info("received event '%d' on oc socket"
+                    #                   % event_no)
+                    if event_no & POLLHUP > 0:
+                        # FIXME: notify IN channel?
+                        self.logger.info("OUT: connection closed from OC")
+                        status = False
+                    elif event_no & POLLIN > 0:
+                        oc_packet = RPCPacket.from_file(self.oc_conn,
+                                                        self.logger)
+                        self.logger.info("OUT: packet headers = "
+                                         + oc_packet.pretty_dump())
+                        if isinstance(oc_packet, RPCRTSPacket):
+                            raise Exception("Unexpected rts packet received")
+
+                        self.logger.info("OUT: sending data to client")
+                        self.bytes_read = self.bytes_read + oc_packet.size
+                        self.bytes_written = self.bytes_written + oc_packet.size
+                        yield oc_packet.data
+                        # else:
+                        # self.logger.info("ignored event '%d' on oc socket"
+                        #                  % event_no)
+                elif fd == unix_fd:
+                    self.logger.info("OUT: ignored event '%d' on unix socket"
+                                     % event_no)
+                    # FIXME: we should listen to what the IN channel has to say
+                    status = False
+                else:
+                    raise Exception("invalid poll event: %s" % str(data))
+            # write(oc_packet.header_data)
+            # write(oc_packet.data)
+            # self.logger.info("OUT: data sent to client")
+
+    def _terminate_sockets(self):
+        socket_name = os.path.join(SOCKETS_DIR, self.connection_cookie)
+        self.logger.info("OUT: removing and closing unix socket '%s'"
+                         % socket_name)
+        if os.access(socket_name, os.F_OK):
+            os.remove(socket_name)
+        self.unix_socket.close()
+        self.oc_conn.close()
+
+    def sequence(self, environ, start_response):
+        self.logger.info("OUT: processing request")
+        if "REMOTE_PORT" in environ:
+            self.logger.info("OUT: remote port = %s" % environ["REMOTE_PORT"])
+        # self.logger.info("OUT: path: ' + self.path)
+        content_length = int(environ["CONTENT_LENGTH"])
+        self.logger.info("OUT: request size is %d" % content_length)
+
+        if content_length <= 0x10:
+            # echo request
+            for data in self.handle_echo_request(environ, start_response):
+                yield data
+        elif content_length == 76:
+            self.logger.info("OUT: Processing nonreplacement Out channel"
+                             "request")
+
+            self.client_socket = environ["wsgi.input"]
+            self._receive_conn_a1()
+
+            # Content-length = 1 GiB
+            start_response("200 Success",
+                           [("Content-Type", "application/rpc"),
+                            ("Content-length", "%d" % (1024 ** 3))])
+
+            yield self._send_conn_a3()
+            self._setup_oc_socket()
+            self._setup_channel_socket()
+            self._wait_IN_channel()
+
+            yield self._send_conn_c2()
+            self.logger.info("OUT: total bytes sent yet: %d"
+                             % self.bytes_written)
+            for data in self._runloop():
+                yield data
+            self._terminate_sockets()
+        elif content_length == 120:
+            # Out channel request: replacement OUT channel
+            raise Exception("Replacement OUT channel request not handled")
+        else:
+            raise Exception("This content-length is not handled")
+
+        self.log_connection_stats()
+        self.logger.info("OUT: Exiting from do_RPC_OUT_DATA")
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/fdunix.py b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/fdunix.py
new file mode 100644 (file)
index 0000000..bdbeeed
--- /dev/null
@@ -0,0 +1,169 @@
+# fdunix.py -- OpenChange RPC-over-HTTP implementation
+#
+# Copyright (C) 2012  Wolfgang Sourdeau <wsourdeau@inverse.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#   
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#   
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+"""A module that provides functions to send and receive a filedescriptor over
+a unix socket.
+
+"""
+
+from ctypes import *
+from struct import pack_into, unpack_from
+from os import fdopen, O_RDONLY, O_WRONLY, O_RDWR
+from fcntl import fcntl, F_GETFL
+from socket import fromfd, _socketobject, AF_INET, SOCK_STREAM
+
+# definitions
+
+SOL_SOCKET = 1
+SCM_RIGHTS = 1
+
+c_socklen_t = c_uint32
+
+
+class CMSGHdr(Structure):
+    _fields_ = [("cmsg_len", c_size_t),
+                ("cmsg_level", c_int),
+                ("cmsg_type", c_int)]
+
+FDBuffer = (c_byte * sizeof(c_int))
+
+class CMSG(Structure):
+    # The cmsg_data must be an array of chars rather than a pointer to chars;
+    # we therefore diverge from the C struct because ctypes only accepts
+    # definitions of fixed-length arrays.
+    _fields_ = [("cmsg_hdr", CMSGHdr),
+                ("cmsg_data", FDBuffer)]
+
+
+class IOVec(Structure):
+    _fields_ = [("iov_base", c_char_p),
+                ("iov_len", c_size_t)]
+
+
+class MSGHdr(Structure):
+    _fields_ = [("msg_name", c_char_p),
+                ("msg_namelen", c_socklen_t),
+                ("msg_iov", POINTER(IOVec)),
+                ("msg_iovlen", c_size_t),
+                ("msg_control", POINTER(CMSG)),
+                ("msg_controllen", c_size_t),
+                ("msg_flags", c_int)]
+
+
+def CMSG_ALIGN(x):
+    return ((x + sizeof(c_size_t) - 1) & ~(sizeof(c_size_t) - 1))
+
+def CMSG_SPACE(x):
+    return CMSG_ALIGN(x) + CMSG_ALIGN(sizeof(CMSGHdr))
+
+def CMSG_LEN(x):
+    return CMSG_ALIGN(sizeof(CMSGHdr)) + x
+
+
+# symbols setup
+libc = CDLL("libc.so.6", use_errno=True)
+if libc is None:
+    raise RuntimeError("could not open C library")
+sendmsg = libc.sendmsg
+sendmsg.argtypes = (c_int, POINTER(MSGHdr), c_int)
+recvmsg = libc.recvmsg
+recvmsg.argtypes = (c_int, POINTER(MSGHdr), c_int)
+strerror = libc.strerror
+strerror.restype = c_char_p
+strerror.argtypes = (c_int,)
+errno = libc.errno
+
+
+def send_socket(socket, sobject):
+    """This function sends a filedescriptor.
+
+    socket: must be a unix socket
+    fd: must be a socket of a socket object
+
+    Returns True on success, False otherwise
+
+    """
+
+    if not isinstance(sobject, _socketobject):
+        raise TypeError("'sobject' must either be a file or a socket object")
+
+    iov = IOVec()
+    iov.iov_base = "A"
+    iov.iov_len = 1
+
+    cmsg = CMSG()
+    cmsg.cmsg_hdr.cmsg_len = CMSG_LEN(sizeof(c_int))
+    cmsg.cmsg_hdr.cmsg_level = SOL_SOCKET
+    cmsg.cmsg_hdr.cmsg_type = SCM_RIGHTS
+    pack_into("i", cmsg.cmsg_data, 0, sobject.fileno())
+
+    msgh = MSGHdr()
+    msgh.msg_name = None
+    msgh.msg_namelen = 0
+    msgh.msg_iov = (iov,)
+    msgh.msg_iovlen = 1
+    msgh.msg_control = pointer(cmsg)
+    msgh.msg_controllen = CMSG_SPACE(sizeof(c_int))
+    msgh.msg_flags = 0
+
+    rc = sendmsg(socket.fileno(), pointer(msgh), 0)
+    if rc == -1:
+        errno = get_errno()
+        raise OSError(errno, strerror(errno))
+
+    return True
+
+
+def receive_socket(socket):
+    """This function receives a socket object via a UNIX socket.
+
+    socket: must be a unix socket
+
+    Returns a socket object or None if the operation fails.
+
+    """
+
+    iov = IOVec()
+    iov.iov_base = "A"
+    iov.iov_len = 1
+
+    cmsg = CMSG()
+    cmsg.cmsg_hdr.cmsg_len = CMSG_LEN(sizeof(c_int))
+
+    msgh = MSGHdr()
+    msgh.msg_name = None
+    msgh.msg_namelen = 0
+    msgh.msg_iov = (iov,)
+    msgh.msg_iovlen = 1
+    msgh.msg_control = pointer(cmsg)
+    msgh.msg_controllen = CMSG_SPACE(sizeof(c_int))
+    msgh.msg_flags = 0
+
+    # rc = recvmsg(socket.fileno(), pointer(msgh), 0)
+    rc = recvmsg(socket.fileno(), pointer(msgh), 0)
+    if rc == -1:
+        errno = get_errno()
+        raise OSError(errno, strerror(errno))
+
+    (value,) = unpack_from("i", cmsg.cmsg_data)
+
+    # the 'mode' parameter should probably be passed as part of the message
+    (fd,) = unpack_from("i", cmsg.cmsg_data)
+    newfile = fromfd(fd, AF_INET, SOCK_STREAM)
+
+    return newfile
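
A quick way to convince oneself that the descriptor passing works is a
self-test over a socketpair; this is only an illustrative sketch, not part of
the module:

    from socket import socketpair, socket, AF_INET, SOCK_STREAM

    from fdunix import send_socket, receive_socket

    left, right = socketpair()            # AF_UNIX stream pair
    tcp_sock = socket(AF_INET, SOCK_STREAM)

    send_socket(left, tcp_sock)           # descriptor travels via SCM_RIGHTS
    received = receive_socket(right)      # new socket object over a duplicated fd
    print received.fileno() != tcp_sock.fileno()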
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/packets.py b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/packets.py
new file mode 100644 (file)
index 0000000..b57613b
--- /dev/null
@@ -0,0 +1,696 @@
+# packets.py -- OpenChange RPC-over-HTTP implementation
+#
+# Copyright (C) 2012  Wolfgang Sourdeau <wsourdeau@inverse.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#   
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#   
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import logging
+from socket import _socketobject, MSG_WAITALL
+from struct import pack, unpack_from
+from uuid import UUID
+
+
+PFC_FIRST_FRAG = 1
+PFC_LAST_FRAG = 2
+PFC_PENDING_CANCEL = 4
+PFC_SUPPORT_HEADER_SIGN = 4
+PFC_RESERVED_1 = 8
+PFC_CONC_MPX = 16
+PFC_DID_NOT_EXECUTE = 32
+PFC_MAYBE = 64
+PFC_OBJECT_UUID = 128
+PFC_FLAG_LABELS = ("PFC_FIRST_FRAG",
+                   "PFC_LAST_FRAG", 
+                   "PFC_(PENDING_CANCEL|SUPPORT_HEADER_SIGN)", 
+                   "PFC_RESERVED_1", 
+                   "PFC_CONC_MPX", 
+                   "PFC_DID_NOT_EXECUTE",
+                   "PFC_MAYBE", 
+                   "PFC_OBJECT_UUID")
+
+
+# taken from dcerpc.idl
+DCERPC_PKT_REQUEST = 0
+DCERPC_PKT_PING = 1
+DCERPC_PKT_RESPONSE = 2
+DCERPC_PKT_FAULT = 3
+DCERPC_PKT_WORKING = 4
+DCERPC_PKT_NOCALL = 5
+DCERPC_PKT_REJECT = 6
+DCERPC_PKT_ACK = 7
+DCERPC_PKT_CL_CANCEL = 8
+DCERPC_PKT_FACK = 9
+DCERPC_PKT_CANCEL_ACK = 10
+DCERPC_PKT_BIND = 11
+DCERPC_PKT_BIND_ACK = 12
+DCERPC_PKT_BIND_NAK = 13
+DCERPC_PKT_ALTER = 14
+DCERPC_PKT_ALTER_RESP = 15
+DCERPC_PKT_AUTH_3 = 16
+DCERPC_PKT_SHUTDOWN = 17
+DCERPC_PKT_CO_CANCEL = 18
+DCERPC_PKT_ORPHANED = 19
+DCERPC_PKT_RTS = 20
+DCERPC_PKG_LABELS = ("DCERPC_PKT_REQUEST",
+                     "DCERPC_PKT_PING",
+                     "DCERPC_PKT_RESPONSE",
+                     "DCERPC_PKT_FAULT",
+                     "DCERPC_PKT_WORKING",
+                     "DCERPC_PKT_NOCALL",
+                     "DCERPC_PKT_REJECT",
+                     "DCERPC_PKT_ACK",
+                     "DCERPC_PKT_CL_CANCEL",
+                     "DCERPC_PKT_FACK",
+                     "DCERPC_PKT_CANCEL_ACK",
+                     "DCERPC_PKT_BIND",
+                     "DCERPC_PKT_BIND_ACK",
+                     "DCERPC_PKT_BIND_NAK",
+                     "DCERPC_PKT_ALTER",
+                     "DCERPC_PKT_ALTER_RESP",
+                     "DCERPC_PKT_AUTH_3",
+                     "DCERPC_PKT_SHUTDOWN",
+                     "DCERPC_PKT_CO_CANCEL",
+                     "DCERPC_PKT_ORPHANED",
+                     "DCERPC_PKT_RTS")
+
+RTS_FLAG_NONE = 0
+RTS_FLAG_PING = 1
+RTS_FLAG_OTHER_CMD = 2
+RTS_FLAG_RECYCLE_CHANNEL = 4
+RTS_FLAG_IN_CHANNEL = 8
+RTS_FLAG_OUT_CHANNEL = 0x10
+RTS_FLAG_EOF = 0x20
+RTS_FLAG_ECHO = 0x40
+RTS_FLAG_LABELS = ("RTS_FLAG_PING",
+                   "RTS_FLAG_OTHER_CMD",
+                   "RTS_FLAG_RECYCLE_CHANNEL",
+                   "RTS_FLAG_IN_CHANNEL",
+                   "RTS_FLAG_OUT_CHANNEL",
+                   "RTS_FLAG_EOF",
+                   "RTS_FLAG_ECHO")
+
+RTS_CMD_RECEIVE_WINDOW_SIZE = 0
+RTS_CMD_FLOW_CONTROL_ACK = 1
+RTS_CMD_CONNECTION_TIMEOUT = 2
+RTS_CMD_COOKIE = 3
+RTS_CMD_CHANNEL_LIFETIME = 4
+RTS_CMD_CLIENT_KEEPALIVE = 5
+RTS_CMD_VERSION = 6
+RTS_CMD_EMPTY = 7
+RTS_CMD_PADDING = 8
+RTS_CMD_NEGATIVE_ANCE = 9
+RTS_CMD_ANCE = 10
+RTS_CMD_CLIENT_ADDRESS = 11
+RTS_CMD_ASSOCIATION_GROUP_ID = 12
+RTS_CMD_DESTINATION = 13
+RTS_CMD_PING_TRAFFIC_SENT_NOTIFY = 14
+
+RTS_CMD_SIZES = (8, 28, 8, 20, 8, 8, 8, 4, 8, 4, 4, 8, 20, 8, 8)
+RTS_CMD_DATA_LABELS = ("ReceiveWindowSize",
+                       "FlowControlAck",
+                       "ConnectionTimeout",
+                       "Cookie",
+                       "ChannelLifetime",
+                       "ClientKeepalive",
+                       "Version",
+                       "Empty",
+                       "Padding",
+                       "NegativeANCE",
+                       "ANCE",
+                       "ClientAddress",
+                       "AssociationGroupId",
+                       "Destination",
+                       "PingTrafficSentNotify")
+
+RPC_C_AUTHN_NONE = 0x0
+RPC_C_AUTHN_GSS_NEGOTIATE = 0x9 # SPNEGO
+RPC_C_AUTHN_WINNT = 0xa # NTLM
+RPC_C_AUTHN_GSS_SCHANNEL = 0xe # TLS
+RPC_C_AUTHN_GSS_KERBEROS = 0x10 # Kerberos
+RPC_C_AUTHN_NETLOGON = 0x44 # Netlogon
+RPC_C_AUTHN_DEFAULT = 0xff # (NTLM)
+
+RPC_C_AUTHN_LEVEL_DEFAULT = 0
+RPC_C_AUTHN_LEVEL_NONE = 1
+RPC_C_AUTHN_LEVEL_CONNECT = 2
+RPC_C_AUTHN_LEVEL_CALL = 3
+RPC_C_AUTHN_LEVEL_PKT = 4
+RPC_C_AUTHN_LEVEL_PKT_INTEGRITY = 5
+RPC_C_AUTHN_LEVEL_PKT_PRIVACY = 6
+
+
+class RTSParsingException(IOError):
+    """This exception occurs when a serious issue occurred while parsing an
+    RTS packet.
+
+    """
+
+    pass
+
+
+class RPCPacket(object):
+    def __init__(self, data, logger=None):
+        self.logger = logger
+
+        # BLOB level
+        self.data = data
+        self.size = 0
+
+        # parsed offset from the start of the "data" blob
+        self.offset = 0
+
+        # header is common to all PDU
+        self.header = None
+
+    @staticmethod
+    def from_file(input_file, logger=None):
+        """This static method acts as a constructor and returns an input
+        packet with the proper class, based on the packet headers.
+        The "input_file" parameter must either be a file or a sockect object.
+
+        """
+
+        if isinstance(input_file, _socketobject):
+            def read_file(count):
+                return input_file.recv(count, MSG_WAITALL)
+        elif hasattr(input_file, "read") and callable(input_file.read):
+            def read_file(count):
+                return input_file.read(count)
+        else:
+            raise ValueError("'input_file' must either be a socket object or"
+                             " provide a 'read' method")
+
+        fields = ("rpc_vers", "rpc_vers_minor", "ptype", "pfc_flags", "drep",
+                  "frag_length", "auth_length", "call_id")
+
+        header_data = read_file(16)
+        # TODO: value validation
+        values = unpack_from("<bbbblhhl", header_data)
+        if values[2] == DCERPC_PKT_FAULT:
+            packet_class = RPCFaultPacket
+        elif values[2] == DCERPC_PKT_BIND_ACK:
+            packet_class = RPCBindACKPacket
+        elif values[2] == DCERPC_PKT_BIND_NAK:
+            packet_class = RPCBindNAKPacket
+        elif values[2] == DCERPC_PKT_RTS:
+            packet_class = RPCRTSPacket
+        else:
+            packet_class = RPCPacket
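+        # frag_length (values[5]) covers the whole PDU, header included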
+        body_data = read_file(values[5] - 16)
+
+        packet = packet_class(header_data + body_data, logger)
+        packet.header = dict(zip(fields, values))
+        packet.offset = 16
+        packet.size = values[5]
+        packet.parse()
+
+        return packet
+
+    def parse(self):
+        pass
+
+    def pretty_dump(self):
+        (fields, values) = self.make_dump_output()
+
+        output = ["%s: %s" % (fields[pos], str(values[pos]))
+                  for pos in xrange(len(fields))]
+
+        return "; ".join(output)
+
+    def make_dump_output(self):
+        values = []
+
+        ptype = self.header["ptype"]
+        values.append(DCERPC_PKG_LABELS[ptype])
+
+        flags = self.header["pfc_flags"]
+        if flags == 0:
+            values.append("None")
+        else:
+            flag_values = []
+            for exp in xrange(7):
+                flag = 1 << exp
+                if flags & flag > 0:
+                    flag_values.append(PFC_FLAG_LABELS[exp])
+            values.append(", ".join(flag_values))
+
+        fields = ["ptype", "pfc_flags", "drep", "frag_length",
+                  "auth_length", "call_id"]
+        for field in fields[2:]:
+            values.append(self.header[field])
+
+        return (fields, values)
+
+
+
+# fault PDU (stub)
+class RPCFaultPacket(RPCPacket):
+    def __init__(self, data, logger=None):
+        RPCPacket.__init__(self, data, logger)
+
+
+# bind_ack PDU (incomplete)
+class RPCBindACKPacket(RPCPacket):
+    def __init__(self, data, logger=None):
+        RPCPacket.__init__(self, data, logger)
+        self.ntlm_payload = None
+
+    def parse(self):
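+        # the NTLM token is the last "auth_length" bytes of the PDU; the rest
+        # of the bind_ack body is not parsed here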
+        auth_offset = self.header["frag_length"] - self.header["auth_length"]
+        self.ntlm_payload = self.data[auth_offset:]
+
+
+# bind_nak PDU (stub)
+class RPCBindNAKPacket(RPCPacket):
+    def __init__(self, data, logger=None):
+        RPCPacket.__init__(self, data, logger)
+
+
+# FIXME: command parameters are either int32 values or binary blobs, both when
+# parsing and when producing
+class RPCRTSPacket(RPCPacket):
+    parsers = None
+
+    def __init__(self, data, logger=None):
+        RPCPacket.__init__(self, data, logger)
+
+        # RTS commands
+        self.commands = []
+
+    def parse(self):
+        fields = ("flags", "nbr_commands")
+        values = unpack_from("<hh", self.data, self.offset)
+        self.offset = self.offset + 4
+        self.header.update(zip(fields, values))
+
+        for counter in xrange(self.header["nbr_commands"]):
+            self._parse_command()
+
+        if (self.size != self.offset):
+            raise RTSParsingException("sizes do not match: expected = %d,"
+                                      " actual = %d"
+                                      % (self.size, self.offset))
+
+    def _parse_command(self):
+        (command_type,) = unpack_from("<l", self.data, self.offset)
+        if command_type < 0 or command_type > 14:
+            raise RTSParsingException("command type unknown: %d"
+                                      % command_type)
+        self.offset = self.offset + 4
+
+        command = {"type": command_type}
+        command_size = RTS_CMD_SIZES[command_type]
+        if command_size > 4:
+            data_size = command_size - 4
+            if command_type in self.parsers:
+                parser = self.parsers[command_type]
+                data_value = parser(self, data_size)
+            elif data_size == 4:
+                # commands with int32 values
+                (data_value,) = unpack_from("<l", self.data, self.offset)
+                self.offset = self.offset + 4
+            else:
+                raise RTSParsingException("command is badly handled: %d"
+                                          % command_type)
+
+            data_label = RTS_CMD_DATA_LABELS[command_type]
+            command[data_label] = data_value
+
+        self.commands.append(command)
+
+    def _parse_command_flow_control_ack(self, data_size):
+        data_blob = self.data[self.offset:self.offset+data_size]
+        self.offset = self.offset + data_size
+        # pass-through: the FlowControlAck body is returned as an opaque blob
+        return data_blob
+
+    def _parse_command_cookie(self, data_size):
+        data_blob = self.data[self.offset:self.offset+data_size]
+        self.offset = self.offset + data_size
+        # pass-through: the cookie is returned as an opaque 16-byte blob
+        return data_blob
+
+    def _parse_command_padding_data(self, data_size):
+        # the length of the padding bytes is specified in the
+        # ConformanceCount field
+        (count,) = unpack_from("<l", self.data, self.offset)
+        self.offset = self.offset + 4
+
+        data_blob = self.data[self.offset:self.offset+count]
+        self.offset = self.offset + count
+
+        return data_blob
+
+    def _parse_command_client_address(self, data_size):
+        (address_type,) = unpack_from("<l", self.data, self.offset)
+        self.offset = self.offset + 4
+
+        if address_type == 0: # ipv4
+            address_size = 4
+        elif address_type == 1: # ipv6
+            address_size = 16
+        else:
+            raise RTSParsingException("unknown client address type: %d"
+                                      % address_type)
+
+        data_blob = self.data[self.offset:self.offset+address_size]
+
+        # compute offset with padding, which is ignored
+        self.offset = self.offset + address_size + 12
+
+        return data_blob
+
+    def make_dump_output(self):
+        (fields, values) = RPCPacket.make_dump_output(self)
+        fields.extend(("flags", "nbr_commands"))
+
+        flags = self.header["flags"]
+        if flags == RTS_FLAG_NONE:
+            values.append("RTS_FLAG_NONE")
+        else:
+            flags_value = []
+            for exp in xrange(7):
+                flag = 1 << exp
+                if flags & flag > 0:
+                    flags_value.append(RTS_FLAG_LABELS[exp])
+            values.append(", ".join(flags_value))
+
+        values.append(self.header["nbr_commands"])
+
+        return (fields, values)
+
+
+# These are the parser methods for commands with a size > 4. They are
+# registered here because the "RPCRTSPacket" symbol is not accessible until
+# the class definition is complete.
+RPCRTSPacket.parsers = {RTS_CMD_FLOW_CONTROL_ACK: RPCRTSPacket._parse_command_flow_control_ack,
+                        RTS_CMD_COOKIE: RPCRTSPacket._parse_command_cookie,
+                        RTS_CMD_ASSOCIATION_GROUP_ID: RPCRTSPacket._parse_command_cookie,
+                        RTS_CMD_PADDING: RPCRTSPacket._parse_command_padding_data,
+                        RTS_CMD_CLIENT_ADDRESS: RPCRTSPacket._parse_command_client_address}
+
+
+
+### OUT packets
+
+# bind PDU (strict minimum required for NTLMSSP auth)
+class RPCBindOutPacket(object):
+    def __init__(self, logger=None):
+        self.logger = logger
+
+        self.size = 0
+        self.data = None
+
+        self.call_id = 1
+        self.ntlm_payload = None
+
+    def make(self):
+        if self.data is None:
+            self._make_packet_data()
+
+        return self.data
+
+    def _make_packet_data(self):
+        if self.ntlm_payload is None:
+            raise ValueError("'ntlm_payload' attribute must not be None")
+
+        ntlm_payload_size = len(self.ntlm_payload)
+        align_modulo = ntlm_payload_size % 4
+        if align_modulo > 0:
+            padding = (4 - align_modulo) * "\0"
+        else:
+            padding = ""
+        len_padding = len(padding)
+
+
+        # rfr: 1544f5e0-613c-11d1-93df-00c04fd7bd09, v1
+        # mgmt: afa8bd80-7d8a-11c9-bef4-08002b102989, v1
+        svc_guid = UUID('{afa8bd80-7d8a-11c9-bef4-08002b102989}')
+        iface_version_major = 1
+        iface_version_minor = 0
+
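+        # one presentation context: abstract syntax = the mgmt interface
+        # above, transfer syntax = NDR (the trailing 20 bytes encode
+        # 8a885d04-1ceb-11c9-9fe8-08002b104860, version 2)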
+        p_content_elem = ("\x01\x00\x00\x00\x00\x00\x01\x00"
+                          "%s%s"
+                          "\x04\x5d\x88\x8a\xeb\x1c\xc9\x11\x9f\xe8\x08\x00"
+                          "\x2b\x10\x48\x60\x02\x00\x00\x00"
+                          % (svc_guid.bytes_le,
+                             pack("<hh",
+                                  iface_version_major,
+                                  iface_version_minor)))
+        # p_content_elem = ("\x00\x00\x00\x00")
+        len_p_content_elem = len(p_content_elem)
+
+        header_data = pack("<bbbbbbbbhhl hhl %ds bbbbl" % len_p_content_elem,
+
+                           ## common headers:
+                           5, 0, # rpc_vers, rpc_vers_minor
+                           DCERPC_PKT_BIND, # ptype
+                           # pfc_flags:
+                           PFC_FIRST_FRAG
+                           | PFC_LAST_FRAG
+                           | PFC_SUPPORT_HEADER_SIGN,
+                           # | PFC_CONC_MPX,
+                           # drep: RPC spec chap14.htm (Data Representation Format Label)
+                           (1 << 4) | 0, 0, 0, 0,
+                           (32 + ntlm_payload_size + len_padding + len_p_content_elem), # frag_length
+                           ntlm_payload_size + len_padding, # auth_length
+                           self.call_id, # call_id
+
+                           ## bind specific:
+                           4088, 4088, # max_xmit/recv_frag
+                           0, # assoc_group_id
+
+                           # p_context_elem
+                           p_content_elem,
+
+                           # p_context_elem (flattened to int32):
+                           # 0,
+
+                           # sec_trailer:
+                           RPC_C_AUTHN_WINNT, # auth_verifier.auth_type
+                           # auth_verifier.auth_level:
+                           RPC_C_AUTHN_LEVEL_CONNECT,
+                           len_padding, # auth_verifier.auth_pad_length
+                           0, # auth_verifier.auth_reserved
+                           1 # auth_verifier.auth_context_id
+                           )
+        self.size = len(header_data) + ntlm_payload_size + len_padding
+        self.data = header_data + self.ntlm_payload + padding
+
+
+# auth_3 PDU
+class RPCAuth3OutPacket(object):
+    def __init__(self, logger=None):
+        self.logger = logger
+
+        self.size = 0
+        self.data = None
+
+        self.pfc_flags = PFC_FIRST_FRAG | PFC_LAST_FRAG
+        self.call_id = 1
+
+        self.ntlm_payload = None
+
+    def make(self):
+        if self.data is None:
+            self._make_packet_data()
+
+        return self.data
+
+    def _make_packet_data(self):
+        if self.ntlm_payload is None:
+            raise ValueError("'ntlm_payload' attribute must not be None")
+
+        ntlm_payload_size = len(self.ntlm_payload)
+        align_modulo = ntlm_payload_size % 4
+        if align_modulo > 0:
+            len_padding = (4 - align_modulo)
+        else:
+            len_padding = 0
+
+        header_data = pack("<bbbbbbbbhhl 4s bbbbl",
+                           5, 0, # rpc_vers, rpc_vers_minor
+                           DCERPC_PKT_AUTH_3, # ptype
+                           # pfc_flags
+                           self.pfc_flags,
+                           # drep: RPC spec chap14.htm (Data Representation Format Label)
+                           (1 << 4) | 0, 0, 0, 0,
+                           (28 + ntlm_payload_size + len_padding), # frag_length
+                           ntlm_payload_size + len_padding, # auth_length
+                           self.call_id, # call_id
+
+                           ## auth 3 specific:
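+                           # ("" is packed as "4s", i.e. 4 zero pad bytes)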
+                           "",
+
+                           # sec_trailer:
+                           RPC_C_AUTHN_WINNT, # auth_verifier.auth_type
+                           # auth_verifier.auth_level:
+                           RPC_C_AUTHN_LEVEL_CONNECT,
+                           len_padding, # auth_verifier.auth_pad_length
+                           0, # auth_verifier.auth_reserved
+                           1 # auth_verifier.auth_context_id
+                           )
+        self.size = len(header_data) + ntlm_payload_size + len_padding
+        self.data = header_data + self.ntlm_payload + len_padding * "\x00"
+
+
+# ping PDU
+class RPCPingOutPacket(object):
+    def __init__(self, logger=None):
+        self.logger = logger
+
+        self.size = 0
+        self.data = None
+
+        self.pfc_flags = PFC_FIRST_FRAG | PFC_LAST_FRAG
+        self.call_id = 1
+
+    def make(self):
+        if self.data is None:
+            self._make_packet_data()
+
+        return self.data
+
+    def _make_packet_data(self):
+        header_data = pack("<bbbbbbbbhhl",
+
+                           ## common headers:
+                           5, 0, # rpc_vers, rpc_vers_minor
+                           DCERPC_PKT_PING, # ptype
+                           # pfc_flags
+                           self.pfc_flags,
+                           # drep: RPC spec chap14.htm (Data Representation Format Label)
+                           (1 << 4) | 0, 0, 0, 0,
+                           16, # frag_length
+                           0, # auth_length
+                           self.call_id # call_id
+                           )
+        self.size = len(header_data)
+        self.data = header_data
+
+
+# rts PDU
+class RPCRTSOutPacket(object):
+    def __init__(self, logger=None):
+        self.logger = logger
+        self.size = 0
+
+        # RTS packets
+        self.flags = RTS_FLAG_NONE
+        self.command_data = []
+
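+    # Illustrative usage (not part of the original code):
+    #   pkt = RPCRTSOutPacket()
+    #   pkt.add_command(RTS_CMD_VERSION, 1)
+    #   blob = pkt.make()   # 28-byte RTS PDU carrying one Version command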
+    def make(self):
+        if self.command_data is None:
+            raise RTSParsingException("packet already returned")
+
+        self._make_header()
+
+        data = "".join(self.command_data)
+        data_size = len(data)
+
+        if (data_size != self.size):
+            raise RTSParsingException("sizes do not match: declared = %d,"
+                                      " actual = %d" % (self.size, data_size))
+        self.command_data = None
+
+        if self.logger is not None:
+            self.logger.info("returning packet: %s" % repr(data))
+
+        return data
+
+    def _make_header(self):
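+        # RTS header: 16-byte common header + flags (2 bytes) + command
+        # count (2 bytes) = 20 bytes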
+        header_data = pack("<bbbbbbbbhhlhh",
+                           5, 0, # rpc_vers, rpc_vers_minor
+                           DCERPC_PKT_RTS, # ptype
+                           PFC_FIRST_FRAG | PFC_LAST_FRAG, # pfc_flags
+                           # drep: RPC spec chap14.htm (Data Representation Format Label)
+                           (1 << 4) | 0, 0, 0, 0,
+                           (20 + self.size), # frag_length
+                           0, # auth_length
+                           0, # call_id
+                           self.flags,
+                           len(self.command_data))
+        self.command_data.insert(0, header_data)
+        self.size = self.size + 20
+
+    def add_command(self, command_type, *args):
+        if command_type < 0 or command_type > 14:
+            raise RTSParsingException("command type unknown: %d (%s)" %
+                                      (command_type, str(type(command_type))))
+
+        self.size = self.size + 4
+
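+        # every command starts with its 32-bit command type (the 4 bytes
+        # accounted for above)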
+        values = [pack("<l", command_type)]
+
+        command_size = RTS_CMD_SIZES[command_type]
+        if command_size > 4:
+            if command_type == RTS_CMD_FLOW_CONTROL_ACK:
+                data = self._make_command_flow_control_ack(args[0])
+            elif (command_type == RTS_CMD_COOKIE
+                  or command_type == RTS_CMD_ASSOCIATION_GROUP_ID):
+                data = self._make_command_cookie(args[0])
+            elif command_type == RTS_CMD_PADDING:
+                data = self._make_command_padding_data(args[0])
+            elif command_type == RTS_CMD_CLIENT_ADDRESS:
+                data = self._make_command_client_address(args[0])
+            else:
+                # command with int32 value
+                data = pack("<l", args[0])
+                self.size = self.size + 4
+            values.append(data)
+
+        self.command_data.append("".join(values))
+
+    def _make_command_flow_control_ack(self, data_blob):
+        # pass-through: the caller provides the 24-byte FlowControlAck body
+        len_data = len(data_blob)
+        if len_data != 24:
+            raise RTSParsingException("expected a length of %d bytes,"
+                                      " received %d" % (24, len_data))
+        self.size = self.size + len_data
+
+        return data_blob
+
+    def _make_command_cookie(self, data_blob):
+        # pass-through: the caller provides the 16-byte cookie blob
+        len_data = len(data_blob)
+        if len_data != 16:
+            raise RTSParsingException("expected a length of %d bytes,"
+                                      " received %d" % (16, len_data))
+        self.size = self.size + len_data
+
+        return data_blob
+
+    def _make_command_padding_data(self, data_blob):
+        len_data = len(data_blob)
+        data = pack("<l", len_data) + data_blob
+        self.size = self.size + 4 + len_data
+
+        return data
+
+    def _make_command_client_address(self, data_blob):
+        len_data = len(data_blob)
+        if len_data == 4:
+            address_type = 0 # ipv4
+        elif len_data == 16:
+            address_type = 1 # ipv6
+        else:
+            raise RTSParsingException("cannot deduce address type from data"
+                                      " length: %d" % len_data)
+
+        data = pack("<l", address_type) + data_blob + 12 * chr(0)
+        self.size = self.size + 4 + len_data + 12
+
+        return data
diff --git a/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/utils.py b/mapiproxy/services/ocsmanager/rpcproxy/rpcproxy/utils.py
new file mode 100644 (file)
index 0000000..2bf6914
--- /dev/null
@@ -0,0 +1,32 @@
+# utils.py -- OpenChange RPC-over-HTTP implementation
+#
+# Copyright (C) 2012  Wolfgang Sourdeau <wsourdeau@inverse.ca>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#   
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#   
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import json
+
+
+def prettify_dict(adict):
+    """This helper uses the json module to return a properly indented
+    text representation of a dict.
+
+    """
+
+    def _unhandled_objects(obj):
+        return str(obj)
+    lines = json.dumps(adict, default=_unhandled_objects,
+                       sort_keys=True, indent=4)
+    return "%s\n" % "\n".join([l.rstrip() for l in lines.splitlines()])
index c72f58e..5245893 100644 (file)
@@ -132,6 +132,7 @@ static PyObject *make_fb_tuple(struct mapistore_freebusy_properties *fb_props, s
        struct Binary_r *current_ranges;
        uint16_t *minutes_range_start;
        PyObject *tuple, *range_tuple;
+       char *tz;
 
        nbr_ranges = 0;
        for (i = 0; i < fb_props->nbr_months; i++) {
@@ -139,6 +140,10 @@ static PyObject *make_fb_tuple(struct mapistore_freebusy_properties *fb_props, s
                nbr_ranges += (current_ranges->cb / (2 * sizeof(uint16_t)));
        }
 
+       tz = getenv("TZ");
+       setenv("TZ", "", 1);
+       tzset();
+
        tuple = PyTuple_New(nbr_ranges);
        range_nbr = 0;
        for (i = 0; i < fb_props->nbr_months; i++) {
@@ -153,6 +158,14 @@ static PyObject *make_fb_tuple(struct mapistore_freebusy_properties *fb_props, s
                }
        }
 
+       if (tz) {
+               setenv("TZ", tz, 1);
+       }
+       else {
+               unsetenv("TZ");
+       }
+       tzset();
+
        return tuple;
 }