Diffstat (limited to 'libmproxy')
-rw-r--r--  libmproxy/cmdline.py             |   5
-rw-r--r--  libmproxy/console/common.py      |  13
-rw-r--r--  libmproxy/console/flowview.py    |   2
-rw-r--r--  libmproxy/contentviews.py        |   4
-rw-r--r--  libmproxy/controller.py          |   2
-rw-r--r--  libmproxy/dump.py                |   9
-rw-r--r--  libmproxy/flow_format_compat.py  |  14
-rw-r--r--  libmproxy/main.py                |  10
-rw-r--r--  libmproxy/models/connections.py  |  44
-rw-r--r--  libmproxy/models/flow.py         |  19
-rw-r--r--  libmproxy/models/http.py         | 166
-rw-r--r--  libmproxy/protocol/__init__.py   |   8
-rw-r--r--  libmproxy/protocol/base.py       |   2
-rw-r--r--  libmproxy/protocol/http.py       | 234
-rw-r--r--  libmproxy/protocol/http1.py      |  70
-rw-r--r--  libmproxy/protocol/http2.py      | 434
-rw-r--r--  libmproxy/protocol/tls.py        |   6
-rw-r--r--  libmproxy/proxy/config.py        |   3
-rw-r--r--  libmproxy/proxy/server.py        |   4
-rw-r--r--  libmproxy/stateobject.py         |  55
-rw-r--r--  libmproxy/version.py             |   2
-rw-r--r--  libmproxy/web/__init__.py        |   5
-rw-r--r--  libmproxy/web/app.py             |  35
23 files changed, 718 insertions, 428 deletions
diff --git a/libmproxy/cmdline.py b/libmproxy/cmdline.py
index 111ab145..d8b6000c 100644
--- a/libmproxy/cmdline.py
+++ b/libmproxy/cmdline.py
@@ -363,6 +363,11 @@ def proxy_options(parser):
help="Proxy service port."
)
http2 = group.add_mutually_exclusive_group()
+ # !!!
+ # Watch out: We raise a RuntimeError in libmproxy.proxy.config if http2 is enabled,
+ # but the OpenSSL version does not have ALPN support (which is the case for the default OpenSSL on Ubuntu 14.04).
+ # Do not simply set --http2 as enabled by default.
+ # !!!
http2.add_argument("--http2", action="store_true", dest="http2")
http2.add_argument("--no-http2", action="store_false", dest="http2",
help="Explicitly enable/disable experimental HTTP2 support. "
diff --git a/libmproxy/console/common.py b/libmproxy/console/common.py
index 0ada3e34..c29ffddc 100644
--- a/libmproxy/console/common.py
+++ b/libmproxy/console/common.py
@@ -130,7 +130,7 @@ else:
SYMBOL_MARK = "[m]"
-def raw_format_flow(f, focus, extended, padding):
+def raw_format_flow(f, focus, extended):
f = dict(f)
pile = []
req = []
@@ -160,8 +160,11 @@ def raw_format_flow(f, focus, extended, padding):
else:
uc = "title"
+ url = f["req_url"]
+ if f["req_http_version"] not in ("HTTP/1.0", "HTTP/1.1"):
+ url += " " + f["req_http_version"]
req.append(
- urwid.Text([(uc, f["req_url"])])
+ urwid.Text([(uc, url)])
)
pile.append(urwid.Columns(req, dividechars=1))
@@ -396,8 +399,7 @@ def ask_save_body(part, master, state, flow):
flowcache = utils.LRUCache(800)
-def format_flow(f, focus, extended=False, hostheader=False, padding=2,
- marked=False):
+def format_flow(f, focus, extended=False, hostheader=False, marked=False):
d = dict(
intercepted = f.intercepted,
acked = f.reply.acked,
@@ -406,6 +408,7 @@ def format_flow(f, focus, extended=False, hostheader=False, padding=2,
req_is_replay = f.request.is_replay,
req_method = f.request.method,
req_url = f.request.pretty_url if hostheader else f.request.url,
+ req_http_version = f.request.http_version,
err_msg = f.error.msg if f.error else None,
resp_code = f.response.status_code if f.response else None,
@@ -437,5 +440,5 @@ def format_flow(f, focus, extended=False, hostheader=False, padding=2,
d["resp_ctype"] = ""
return flowcache.get(
raw_format_flow,
- tuple(sorted(d.items())), focus, extended, padding
+ tuple(sorted(d.items())), focus, extended
)
diff --git a/libmproxy/console/flowview.py b/libmproxy/console/flowview.py
index 8102de55..d2b98b68 100644
--- a/libmproxy/console/flowview.py
+++ b/libmproxy/console/flowview.py
@@ -103,7 +103,6 @@ class FlowViewHeader(urwid.WidgetWrap):
f,
False,
extended=True,
- padding=0,
hostheader=self.master.showhost
)
signals.flow_change.connect(self.sig_flow_change)
@@ -114,7 +113,6 @@ class FlowViewHeader(urwid.WidgetWrap):
flow,
False,
extended=True,
- padding=0,
hostheader=self.master.showhost
)
diff --git a/libmproxy/contentviews.py b/libmproxy/contentviews.py
index 80955b0f..c0652c18 100644
--- a/libmproxy/contentviews.py
+++ b/libmproxy/contentviews.py
@@ -35,12 +35,12 @@ from .contrib.wbxml.ASCommandResponse import ASCommandResponse
try:
import pyamf
from pyamf import remoting, flex
-except ImportError: # pragma nocover
+except ImportError:  # pragma: no cover
pyamf = None
try:
import cssutils
-except ImportError: # pragma nocover
+except ImportError:  # pragma: no cover
cssutils = None
else:
cssutils.log.setLevel(logging.CRITICAL)
diff --git a/libmproxy/controller.py b/libmproxy/controller.py
index 712ab1d2..9a059856 100644
--- a/libmproxy/controller.py
+++ b/libmproxy/controller.py
@@ -56,7 +56,7 @@ class Channel:
try:
# The timeout is here so we can handle a should_exit event.
g = m.reply.q.get(timeout=0.5)
- except Queue.Empty: # pragma: nocover
+ except Queue.Empty: # pragma: no cover
continue
return g
diff --git a/libmproxy/dump.py b/libmproxy/dump.py
index 95be2d27..6dab2ddc 100644
--- a/libmproxy/dump.py
+++ b/libmproxy/dump.py
@@ -247,11 +247,16 @@ class DumpMaster(flow.FlowMaster):
url = flow.request.url
url = click.style(url, bold=True)
- line = "{stickycookie}{client} {method} {url}".format(
+ httpversion = ""
+ if flow.request.http_version not in ("HTTP/1.1", "HTTP/1.0"):
+ httpversion = " " + flow.request.http_version # We hide "normal" HTTP 1.
+
+ line = "{stickycookie}{client} {method} {url}{httpversion}".format(
stickycookie=stickycookie,
client=client,
method=method,
- url=url
+ url=url,
+ httpversion=httpversion
)
self.echo(line)
diff --git a/libmproxy/flow_format_compat.py b/libmproxy/flow_format_compat.py
index 2b99b805..5af9b762 100644
--- a/libmproxy/flow_format_compat.py
+++ b/libmproxy/flow_format_compat.py
@@ -21,9 +21,23 @@ def convert_014_015(data):
return data
+def convert_015_016(data):
+ for m in ("request", "response"):
+ if "body" in data[m]:
+ data[m]["content"] = data[m].pop("body")
+ if "httpversion" in data[m]:
+ data[m]["http_version"] = data[m].pop("httpversion")
+ if "msg" in data["response"]:
+ data["response"]["reason"] = data["response"].pop("msg")
+ data["request"].pop("form_out", None)
+ data["version"] = (0, 16)
+ return data
+
+
converters = {
(0, 13): convert_013_014,
(0, 14): convert_014_015,
+ (0, 15): convert_015_016,
}
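Note: convert_015_016 above only renames fields and bumps the version tuple. A rough usage sketch with a made-up, minimal 0.15 state (real flow states carry many more fields than shown here):

    from libmproxy.flow_format_compat import convert_015_016

    state_015 = {
        "version": (0, 15),
        "request": {"body": b"ping", "httpversion": b"HTTP/1.1", "form_out": "relative"},
        "response": {"body": b"pong", "httpversion": b"HTTP/1.1", "msg": b"OK"},
    }

    state_016 = convert_015_016(state_015)
    assert state_016["version"] == (0, 16)
    assert state_016["request"]["content"] == b"ping"         # "body" renamed to "content"
    assert state_016["request"]["http_version"] == b"HTTP/1.1"
    assert state_016["response"]["reason"] == b"OK"            # "msg" renamed to "reason"
    assert "form_out" not in state_016["request"]              # dropped entirely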
diff --git a/libmproxy/main.py b/libmproxy/main.py
index 655d573d..f6664924 100644
--- a/libmproxy/main.py
+++ b/libmproxy/main.py
@@ -37,7 +37,11 @@ def get_server(dummy_server, options):
sys.exit(1)
-def mitmproxy(args=None): # pragma: nocover
+def mitmproxy(args=None): # pragma: no cover
+ if os.name == "nt":
+ print("Error: mitmproxy's console interface is not supported on Windows. "
+ "You can run mitmdump or mitmweb instead.", file=sys.stderr)
+ sys.exit(1)
from . import console
check_pyopenssl_version()
@@ -68,7 +72,7 @@ def mitmproxy(args=None): # pragma: nocover
pass
-def mitmdump(args=None): # pragma: nocover
+def mitmdump(args=None): # pragma: no cover
from . import dump
check_pyopenssl_version()
@@ -103,7 +107,7 @@ def mitmdump(args=None): # pragma: nocover
pass
-def mitmweb(args=None): # pragma: nocover
+def mitmweb(args=None): # pragma: no cover
from . import web
check_pyopenssl_version()
diff --git a/libmproxy/models/connections.py b/libmproxy/models/connections.py
index a45e1629..d5920256 100644
--- a/libmproxy/models/connections.py
+++ b/libmproxy/models/connections.py
@@ -42,28 +42,14 @@ class ClientConnection(tcp.BaseHandler, stateobject.StateObject):
return self.ssl_established
_stateobject_attributes = dict(
+ address=tcp.Address,
+ clientcert=certutils.SSLCert,
ssl_established=bool,
timestamp_start=float,
timestamp_end=float,
timestamp_ssl_setup=float
)
- def get_state(self, short=False):
- d = super(ClientConnection, self).get_state(short)
- d.update(
- address=({
- "address": self.address(),
- "use_ipv6": self.address.use_ipv6} if self.address else {}),
- clientcert=self.cert.to_pem() if self.clientcert else None)
- return d
-
- def load_state(self, state):
- super(ClientConnection, self).load_state(state)
- self.address = tcp.Address(
- **state["address"]) if state["address"] else None
- self.clientcert = certutils.SSLCert.from_pem(
- state["clientcert"]) if state["clientcert"] else None
-
def copy(self):
return copy.copy(self)
@@ -76,7 +62,7 @@ class ClientConnection(tcp.BaseHandler, stateobject.StateObject):
@classmethod
def from_state(cls, state):
f = cls(None, tuple(), None)
- f.load_state(state)
+ f.set_state(state)
return f
def convert_to_ssl(self, *args, **kwargs):
@@ -130,33 +116,11 @@ class ServerConnection(tcp.TCPClient, stateobject.StateObject):
ssl_established=bool,
sni=str
)
- _stateobject_long_attributes = {"cert"}
-
- def get_state(self, short=False):
- d = super(ServerConnection, self).get_state(short)
- d.update(
- address=({"address": self.address(),
- "use_ipv6": self.address.use_ipv6} if self.address else {}),
- source_address=({"address": self.source_address(),
- "use_ipv6": self.source_address.use_ipv6} if self.source_address else None),
- cert=self.cert.to_pem() if self.cert else None
- )
- return d
-
- def load_state(self, state):
- super(ServerConnection, self).load_state(state)
-
- self.address = tcp.Address(
- **state["address"]) if state["address"] else None
- self.source_address = tcp.Address(
- **state["source_address"]) if state["source_address"] else None
- self.cert = certutils.SSLCert.from_pem(
- state["cert"]) if state["cert"] else None
@classmethod
def from_state(cls, state):
f = cls(tuple())
- f.load_state(state)
+ f.set_state(state)
return f
def copy(self):
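Note: address, clientcert and cert can move into _stateobject_attributes because the corresponding netlib types are expected to implement the Serializable protocol themselves (get_state/set_state/from_state), so the generic machinery in stateobject.py picks them up and the hand-written get_state/load_state overrides become redundant. A tiny sketch of that assumption for tcp.Address (API as assumed, not verified here):

    from netlib import tcp

    # Round-trip an Address through the Serializable protocol that the new
    # _stateobject_attributes entries above rely on.
    addr = tcp.Address(("example.com", 80), use_ipv6=False)
    assert tcp.Address.from_state(addr.get_state()).host == "example.com"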
diff --git a/libmproxy/models/flow.py b/libmproxy/models/flow.py
index b4e8cb88..10255dad 100644
--- a/libmproxy/models/flow.py
+++ b/libmproxy/models/flow.py
@@ -45,7 +45,7 @@ class Error(stateobject.StateObject):
# the default implementation assumes an empty constructor. Override
# accordingly.
f = cls(None)
- f.load_state(state)
+ f.set_state(state)
return f
def copy(self):
@@ -86,16 +86,19 @@ class Flow(stateobject.StateObject):
intercepted=bool
)
- def get_state(self, short=False):
- d = super(Flow, self).get_state(short)
+ def get_state(self):
+ d = super(Flow, self).get_state()
d.update(version=version.IVERSION)
if self._backup and self._backup != d:
- if short:
- d.update(modified=True)
- else:
- d.update(backup=self._backup)
+ d.update(backup=self._backup)
return d
+ def set_state(self, state):
+ state.pop("version")
+ if "backup" in state:
+ self._backup = state.pop("backup")
+ super(Flow, self).set_state(state)
+
def __eq__(self, other):
return self is other
@@ -133,7 +136,7 @@ class Flow(stateobject.StateObject):
Revert to the last backed up state.
"""
if self._backup:
- self.load_state(self._backup)
+ self.set_state(self._backup)
self._backup = None
def kill(self, master):
diff --git a/libmproxy/models/http.py b/libmproxy/models/http.py
index e07dff69..3c024e76 100644
--- a/libmproxy/models/http.py
+++ b/libmproxy/models/http.py
@@ -1,36 +1,20 @@
from __future__ import (absolute_import, print_function, division)
import Cookie
import copy
+import warnings
from email.utils import parsedate_tz, formatdate, mktime_tz
import time
from libmproxy import utils
from netlib import encoding
-from netlib.http import status_codes, Headers, Request, Response, CONTENT_MISSING, decoded
+from netlib.http import status_codes, Headers, Request, Response, decoded
from netlib.tcp import Address
-from .. import version, stateobject
+from .. import version
from .flow import Flow
-class MessageMixin(stateobject.StateObject):
- _stateobject_attributes = dict(
- http_version=bytes,
- headers=Headers,
- timestamp_start=float,
- timestamp_end=float
- )
- _stateobject_long_attributes = {"body"}
-
- def get_state(self, short=False):
- ret = super(MessageMixin, self).get_state(short)
- if short:
- if self.content:
- ret["contentLength"] = len(self.content)
- elif self.content == CONTENT_MISSING:
- ret["contentLength"] = None
- else:
- ret["contentLength"] = 0
- return ret
+
+class MessageMixin(object):
def get_decoded_content(self):
"""
@@ -43,33 +27,6 @@ class MessageMixin(stateobject.StateObject):
return self.content
return encoding.decode(ce, self.content)
- def decode(self):
- """
- Decodes body based on the current Content-Encoding header, then
- removes the header. If there is no Content-Encoding header, no
- action is taken.
-
- Returns True if decoding succeeded, False otherwise.
- """
- ce = self.headers.get("content-encoding")
- if not self.content or ce not in encoding.ENCODINGS:
- return False
- data = encoding.decode(ce, self.content)
- if data is None:
- return False
- self.content = data
- self.headers.pop("content-encoding", None)
- return True
-
- def encode(self, e):
- """
- Encodes body with the encoding e, where e is "gzip", "deflate"
- or "identity".
- """
- # FIXME: Error if there's an existing encoding header?
- self.content = encoding.encode(e, self.content)
- self.headers["content-encoding"] = e
-
def copy(self):
c = copy.copy(self)
if hasattr(self, "data"): # FIXME remove condition
@@ -86,10 +43,12 @@ class MessageMixin(stateobject.StateObject):
Returns the number of replacements made.
"""
- with decoded(self):
- self.content, count = utils.safe_subn(
- pattern, repl, self.content, *args, **kwargs
- )
+ count = 0
+ if self.content:
+ with decoded(self):
+ self.content, count = utils.safe_subn(
+ pattern, repl, self.content, *args, **kwargs
+ )
fields = []
for name, value in self.headers.fields:
name, c = utils.safe_subn(pattern, repl, name, *args, **kwargs)
@@ -161,6 +120,9 @@ class HTTPRequest(MessageMixin, Request):
timestamp_start=None,
timestamp_end=None,
form_out=None,
+ is_replay=False,
+ stickycookie=False,
+ stickyauth=False,
):
Request.__init__(
self,
@@ -179,51 +141,26 @@ class HTTPRequest(MessageMixin, Request):
self.form_out = form_out or first_line_format # FIXME remove
# Have this request's cookies been modified by sticky cookies or auth?
- self.stickycookie = False
- self.stickyauth = False
+ self.stickycookie = stickycookie
+ self.stickyauth = stickyauth
# Is this request replayed?
- self.is_replay = False
-
- _stateobject_attributes = MessageMixin._stateobject_attributes.copy()
- _stateobject_attributes.update(
- content=bytes,
- first_line_format=str,
- method=bytes,
- scheme=bytes,
- host=bytes,
- port=int,
- path=bytes,
- form_out=str,
- is_replay=bool
- )
-
- @classmethod
- def from_state(cls, state):
- f = cls(
- None,
- b"",
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None,
- None)
- f.load_state(state)
- return f
+ self.is_replay = is_replay
+
+ def get_state(self):
+ state = super(HTTPRequest, self).get_state()
+ state.update(
+ stickycookie = self.stickycookie,
+ stickyauth = self.stickyauth,
+ is_replay = self.is_replay,
+ )
+ return state
- @classmethod
- def from_protocol(
- self,
- protocol,
- *args,
- **kwargs
- ):
- req = protocol.read_request(*args, **kwargs)
- return self.wrap(req)
+ def set_state(self, state):
+ self.stickycookie = state.pop("stickycookie")
+ self.stickyauth = state.pop("stickyauth")
+ self.is_replay = state.pop("is_replay")
+ super(HTTPRequest, self).set_state(state)
@classmethod
def wrap(self, request):
@@ -241,10 +178,17 @@ class HTTPRequest(MessageMixin, Request):
timestamp_end=request.timestamp_end,
form_out=(request.form_out if hasattr(request, 'form_out') else None),
)
- if hasattr(request, 'stream_id'):
- req.stream_id = request.stream_id
return req
+ @property
+ def form_out(self):
+ warnings.warn(".form_out is deprecated, use .first_line_format instead.", DeprecationWarning)
+ return self.first_line_format
+
+ @form_out.setter
+ def form_out(self, value):
+ warnings.warn(".form_out is deprecated, use .first_line_format instead.", DeprecationWarning)
+
def __hash__(self):
return id(self)
@@ -297,6 +241,7 @@ class HTTPResponse(MessageMixin, Response):
content,
timestamp_start=None,
timestamp_end=None,
+ is_replay = False
):
Response.__init__(
self,
@@ -310,32 +255,9 @@ class HTTPResponse(MessageMixin, Response):
)
# Is this request replayed?
- self.is_replay = False
+ self.is_replay = is_replay
self.stream = False
- _stateobject_attributes = MessageMixin._stateobject_attributes.copy()
- _stateobject_attributes.update(
- body=bytes,
- status_code=int,
- msg=bytes
- )
-
- @classmethod
- def from_state(cls, state):
- f = cls(None, None, None, None, None)
- f.load_state(state)
- return f
-
- @classmethod
- def from_protocol(
- self,
- protocol,
- *args,
- **kwargs
- ):
- resp = protocol.read_response(*args, **kwargs)
- return self.wrap(resp)
-
@classmethod
def wrap(self, response):
resp = HTTPResponse(
@@ -347,8 +269,6 @@ class HTTPResponse(MessageMixin, Response):
timestamp_start=response.timestamp_start,
timestamp_end=response.timestamp_end,
)
- if hasattr(response, 'stream_id'):
- resp.stream_id = response.stream_id
return resp
def _refresh_cookie(self, c, delta):
@@ -448,7 +368,7 @@ class HTTPFlow(Flow):
@classmethod
def from_state(cls, state):
f = cls(None, None)
- f.load_state(state)
+ f.set_state(state)
return f
def __repr__(self):
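Note: with the form_out property above, reads are forwarded to first_line_format and writes are silently dropped; both emit a DeprecationWarning. A quick sketch (the positional constructor arguments mirror the HTTPRequest call in protocol/http2.py below and are otherwise arbitrary):

    import warnings
    from netlib.http import Headers
    from libmproxy.models import HTTPRequest

    req = HTTPRequest(
        "relative", "GET", "http", "example.com", 80, "/",
        b"HTTP/1.1", Headers(host="example.com"), b"",
    )
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert req.form_out == req.first_line_format   # read: alias for first_line_format
        req.form_out = "absolute"                       # write: warned about and ignored
        assert req.first_line_format == "relative"
    assert all(issubclass(w.category, DeprecationWarning) for w in caught)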
diff --git a/libmproxy/protocol/__init__.py b/libmproxy/protocol/__init__.py
index d46f16f5..ea958d06 100644
--- a/libmproxy/protocol/__init__.py
+++ b/libmproxy/protocol/__init__.py
@@ -27,15 +27,19 @@ as late as possible; this makes server replay without any outgoing connections p
from __future__ import (absolute_import, print_function, division)
from .base import Layer, ServerConnectionMixin, Kill
-from .http import Http1Layer, UpstreamConnectLayer, Http2Layer
from .tls import TlsLayer
from .tls import is_tls_record_magic
from .tls import TlsClientHello
+from .http import UpstreamConnectLayer
+from .http1 import Http1Layer
+from .http2 import Http2Layer
from .rawtcp import RawTCPLayer
__all__ = [
"Layer", "ServerConnectionMixin", "Kill",
- "Http1Layer", "UpstreamConnectLayer", "Http2Layer",
"TlsLayer", "is_tls_record_magic", "TlsClientHello",
+ "UpstreamConnectLayer",
+ "Http1Layer",
+ "Http2Layer",
"RawTCPLayer",
]
diff --git a/libmproxy/protocol/base.py b/libmproxy/protocol/base.py
index 4eb034c0..40fcaf65 100644
--- a/libmproxy/protocol/base.py
+++ b/libmproxy/protocol/base.py
@@ -14,7 +14,7 @@ class _LayerCodeCompletion(object):
Dummy class that provides type hinting in PyCharm, which simplifies development a lot.
"""
- def __init__(self, **mixin_args): # pragma: nocover
+ def __init__(self, **mixin_args): # pragma: no cover
super(_LayerCodeCompletion, self).__init__(**mixin_args)
if True:
return
diff --git a/libmproxy/protocol/http.py b/libmproxy/protocol/http.py
index 12d09e71..f3240b85 100644
--- a/libmproxy/protocol/http.py
+++ b/libmproxy/protocol/http.py
@@ -1,26 +1,30 @@
from __future__ import (absolute_import, print_function, division)
+
import sys
import traceback
-
import six
from netlib import tcp
from netlib.exceptions import HttpException, HttpReadDisconnect, NetlibException
-from netlib.http import http1, Headers
-from netlib.http import CONTENT_MISSING
-from netlib.tcp import Address
-from netlib.http.http2.connections import HTTP2Protocol
-from netlib.http.http2.frame import GoAwayFrame, PriorityFrame, WindowUpdateFrame
+from netlib.http import Headers, CONTENT_MISSING
+
+from h2.exceptions import H2Error
+
from .. import utils
from ..exceptions import HttpProtocolException, ProtocolException
from ..models import (
- HTTPFlow, HTTPRequest, HTTPResponse, make_error_response, make_connect_response, Error, expect_continue_response
+ HTTPFlow,
+ HTTPResponse,
+ make_error_response,
+ make_connect_response,
+ Error,
+ expect_continue_response
)
+
from .base import Layer, Kill
-class _HttpLayer(Layer):
- supports_streaming = False
+class _HttpTransmissionLayer(Layer):
def read_request(self):
raise NotImplementedError()
@@ -32,37 +36,18 @@ class _HttpLayer(Layer):
raise NotImplementedError()
def read_response(self, request):
- raise NotImplementedError()
-
- def send_response(self, response):
- raise NotImplementedError()
-
- def check_close_connection(self, flow):
- raise NotImplementedError()
-
-
-class _StreamingHttpLayer(_HttpLayer):
- supports_streaming = True
-
- def read_response_headers(self):
- raise NotImplementedError
-
- def read_response_body(self, request, response):
- raise NotImplementedError()
- yield "this is a generator" # pragma: no cover
-
- def read_response(self, request):
response = self.read_response_headers()
response.data.content = b"".join(
self.read_response_body(request, response)
)
return response
- def send_response_headers(self, response):
- raise NotImplementedError
+ def read_response_headers(self):
+ raise NotImplementedError()
- def send_response_body(self, response, chunks):
+ def read_response_body(self, request, response):
raise NotImplementedError()
+ yield "this is a generator" # pragma: no cover
def send_response(self, response):
if response.content == CONTENT_MISSING:
@@ -70,164 +55,14 @@ class _StreamingHttpLayer(_HttpLayer):
self.send_response_headers(response)
self.send_response_body(response, [response.content])
-
-class Http1Layer(_StreamingHttpLayer):
-
- def __init__(self, ctx, mode):
- super(Http1Layer, self).__init__(ctx)
- self.mode = mode
-
- def read_request(self):
- req = http1.read_request(self.client_conn.rfile, body_size_limit=self.config.body_size_limit)
- return HTTPRequest.wrap(req)
-
- def read_request_body(self, request):
- expected_size = http1.expected_http_body_size(request)
- return http1.read_body(self.client_conn.rfile, expected_size, self.config.body_size_limit)
-
- def send_request(self, request):
- self.server_conn.wfile.write(http1.assemble_request(request))
- self.server_conn.wfile.flush()
-
- def read_response_headers(self):
- resp = http1.read_response_head(self.server_conn.rfile)
- return HTTPResponse.wrap(resp)
-
- def read_response_body(self, request, response):
- expected_size = http1.expected_http_body_size(request, response)
- return http1.read_body(self.server_conn.rfile, expected_size, self.config.body_size_limit)
-
def send_response_headers(self, response):
- raw = http1.assemble_response_head(response)
- self.client_conn.wfile.write(raw)
- self.client_conn.wfile.flush()
+ raise NotImplementedError()
def send_response_body(self, response, chunks):
- for chunk in http1.assemble_body(response.headers, chunks):
- self.client_conn.wfile.write(chunk)
- self.client_conn.wfile.flush()
-
- def check_close_connection(self, flow):
- request_close = http1.connection_close(
- flow.request.http_version,
- flow.request.headers
- )
- response_close = http1.connection_close(
- flow.response.http_version,
- flow.response.headers
- )
- read_until_eof = http1.expected_http_body_size(flow.request, flow.response) == -1
- close_connection = request_close or response_close or read_until_eof
- if flow.request.form_in == "authority" and flow.response.status_code == 200:
- # Workaround for https://github.com/mitmproxy/mitmproxy/issues/313:
- # Charles Proxy sends a CONNECT response with HTTP/1.0
- # and no Content-Length header
-
- return False
- return close_connection
-
- def __call__(self):
- layer = HttpLayer(self, self.mode)
- layer()
-
-
-# TODO: The HTTP2 layer is missing multiplexing, which requires a major rewrite.
-class Http2Layer(_HttpLayer):
-
- def __init__(self, ctx, mode):
- super(Http2Layer, self).__init__(ctx)
- self.mode = mode
- self.client_protocol = HTTP2Protocol(self.client_conn, is_server=True,
- unhandled_frame_cb=self.handle_unexpected_frame_from_client)
- self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
- unhandled_frame_cb=self.handle_unexpected_frame_from_server)
-
- def read_request(self):
- request = HTTPRequest.from_protocol(
- self.client_protocol,
- body_size_limit=self.config.body_size_limit
- )
- self._stream_id = request.stream_id
- return request
-
- def send_request(self, message):
- # TODO: implement flow control and WINDOW_UPDATE frames
- self.server_conn.send(self.server_protocol.assemble(message))
-
- def read_response(self, request):
- return HTTPResponse.from_protocol(
- self.server_protocol,
- request_method=request.method,
- body_size_limit=self.config.body_size_limit,
- include_body=True,
- stream_id=self._stream_id
- )
-
- def send_response(self, message):
- # TODO: implement flow control to prevent client buffer filling up
- # maintain a send buffer size, and read WindowUpdateFrames from client to increase the send buffer
- self.client_conn.send(self.client_protocol.assemble(message))
+ raise NotImplementedError()
def check_close_connection(self, flow):
- # TODO: add a timer to disconnect after a 10 second timeout
- return False
-
- def connect(self):
- self.ctx.connect()
- self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
- unhandled_frame_cb=self.handle_unexpected_frame_from_server)
- self.server_protocol.perform_connection_preface()
-
- def set_server(self, *args, **kwargs):
- self.ctx.set_server(*args, **kwargs)
- self.server_protocol = HTTP2Protocol(self.server_conn, is_server=False,
- unhandled_frame_cb=self.handle_unexpected_frame_from_server)
- self.server_protocol.perform_connection_preface()
-
- def __call__(self):
- self.server_protocol.perform_connection_preface()
- layer = HttpLayer(self, self.mode)
- layer()
-
- # terminate the connection
- self.client_conn.send(GoAwayFrame().to_bytes())
-
- def handle_unexpected_frame_from_client(self, frame):
- if isinstance(frame, WindowUpdateFrame):
- # Clients are sending WindowUpdate frames depending on their flow control algorithm.
- # Since we cannot predict these frames, and we do not need to respond to them,
- # simply accept them, and hide them from the log.
- # Ideally we should keep track of our own flow control window and
- # stall transmission if the outgoing flow control buffer is full.
- return
- if isinstance(frame, PriorityFrame):
- # Clients are sending Priority frames depending on their implementation.
- # The RFC does not clearly state when or which priority preferences should be set.
- # Since we cannot predict these frames, and we do not need to respond to them,
- # simply accept them, and hide them from the log.
- # Ideally we should forward them to the server.
- return
- if isinstance(frame, GoAwayFrame):
- # Client wants to terminate the connection,
- # relay it to the server.
- self.server_conn.send(frame.to_bytes())
- return
- self.log("Unexpected HTTP2 frame from client: %s" % frame.human_readable(), "info")
-
- def handle_unexpected_frame_from_server(self, frame):
- if isinstance(frame, WindowUpdateFrame):
- # Servers are sending WindowUpdate frames depending on their flow control algorithm.
- # Since we cannot predict these frames, and we do not need to respond to them,
- # simply accept them, and hide them from the log.
- # Ideally we should keep track of our own flow control window and
- # stall transmission if the outgoing flow control buffer is full.
- return
- if isinstance(frame, GoAwayFrame):
- # Server wants to terminate the connection,
- # relay it to the client.
- self.client_conn.send(frame.to_bytes())
- return
- self.log("Unexpected HTTP2 frame from server: %s" % frame.human_readable(), "info")
+ raise NotImplementedError()
class ConnectServerConnection(object):
@@ -285,7 +120,7 @@ class UpstreamConnectLayer(Layer):
def set_server(self, address, server_tls=None, sni=None):
if self.ctx.server_conn:
self.ctx.disconnect()
- address = Address.wrap(address)
+ address = tcp.Address.wrap(address)
self.connect_request.host = address.host
self.connect_request.port = address.port
self.server_conn.address = address
@@ -400,7 +235,8 @@ class HttpLayer(Layer):
try:
response = make_error_response(code, message)
self.send_response(response)
- except NetlibException:
+ except (NetlibException, H2Error):
+ self.log(traceback.format_exc(), "debug")
pass
def change_upstream_proxy_server(self, address):
@@ -420,7 +256,7 @@ class HttpLayer(Layer):
layer()
def send_response_to_client(self, flow):
- if not (self.supports_streaming and flow.response.stream):
+ if not flow.response.stream:
# no streaming:
# we already received the full response from the server and can
# send it to the client straight away.
@@ -441,10 +277,7 @@ class HttpLayer(Layer):
def get_response_from_server(self, flow):
def get_response():
self.send_request(flow.request)
- if self.supports_streaming:
- flow.response = self.read_response_headers()
- else:
- flow.response = self.read_response(flow.request)
+ flow.response = self.read_response_headers()
try:
get_response()
@@ -474,15 +307,14 @@ class HttpLayer(Layer):
if flow == Kill:
raise Kill()
- if self.supports_streaming:
- if flow.response.stream:
- flow.response.data.content = CONTENT_MISSING
- else:
- flow.response.data.content = b"".join(self.read_response_body(
- flow.request,
- flow.response
- ))
- flow.response.timestamp_end = utils.timestamp()
+ if flow.response.stream:
+ flow.response.data.content = CONTENT_MISSING
+ else:
+ flow.response.data.content = b"".join(self.read_response_body(
+ flow.request,
+ flow.response
+ ))
+ flow.response.timestamp_end = utils.timestamp()
# no further manipulation of self.server_conn beyond this point
# we can safely set it as the final attribute value here.
diff --git a/libmproxy/protocol/http1.py b/libmproxy/protocol/http1.py
new file mode 100644
index 00000000..fc2cf07a
--- /dev/null
+++ b/libmproxy/protocol/http1.py
@@ -0,0 +1,70 @@
+from __future__ import (absolute_import, print_function, division)
+
+import six
+
+from netlib import tcp
+from netlib.http import http1
+
+from .http import _HttpTransmissionLayer, HttpLayer
+from .. import utils
+from ..models import HTTPRequest, HTTPResponse
+
+
+class Http1Layer(_HttpTransmissionLayer):
+
+ def __init__(self, ctx, mode):
+ super(Http1Layer, self).__init__(ctx)
+ self.mode = mode
+
+ def read_request(self):
+ req = http1.read_request(self.client_conn.rfile, body_size_limit=self.config.body_size_limit)
+ return HTTPRequest.wrap(req)
+
+ def read_request_body(self, request):
+ expected_size = http1.expected_http_body_size(request)
+ return http1.read_body(self.client_conn.rfile, expected_size, self.config.body_size_limit)
+
+ def send_request(self, request):
+ self.server_conn.wfile.write(http1.assemble_request(request))
+ self.server_conn.wfile.flush()
+
+ def read_response_headers(self):
+ resp = http1.read_response_head(self.server_conn.rfile)
+ return HTTPResponse.wrap(resp)
+
+ def read_response_body(self, request, response):
+ expected_size = http1.expected_http_body_size(request, response)
+ return http1.read_body(self.server_conn.rfile, expected_size, self.config.body_size_limit)
+
+ def send_response_headers(self, response):
+ raw = http1.assemble_response_head(response)
+ self.client_conn.wfile.write(raw)
+ self.client_conn.wfile.flush()
+
+ def send_response_body(self, response, chunks):
+ for chunk in http1.assemble_body(response.headers, chunks):
+ self.client_conn.wfile.write(chunk)
+ self.client_conn.wfile.flush()
+
+ def check_close_connection(self, flow):
+ request_close = http1.connection_close(
+ flow.request.http_version,
+ flow.request.headers
+ )
+ response_close = http1.connection_close(
+ flow.response.http_version,
+ flow.response.headers
+ )
+ read_until_eof = http1.expected_http_body_size(flow.request, flow.response) == -1
+ close_connection = request_close or response_close or read_until_eof
+ if flow.request.form_in == "authority" and flow.response.status_code == 200:
+ # Workaround for https://github.com/mitmproxy/mitmproxy/issues/313:
+ # Charles Proxy sends a CONNECT response with HTTP/1.0
+ # and no Content-Length header
+
+ return False
+ return close_connection
+
+ def __call__(self):
+ layer = HttpLayer(self, self.mode)
+ layer()
diff --git a/libmproxy/protocol/http2.py b/libmproxy/protocol/http2.py
new file mode 100644
index 00000000..04ff8bf6
--- /dev/null
+++ b/libmproxy/protocol/http2.py
@@ -0,0 +1,434 @@
+from __future__ import (absolute_import, print_function, division)
+
+import threading
+import time
+import Queue
+
+from netlib.tcp import ssl_read_select
+from netlib.exceptions import HttpException
+from netlib.http import Headers
+from netlib.utils import http2_read_raw_frame
+
+import hyperframe
+import h2
+from h2.connection import H2Connection
+from h2.events import *
+
+from .base import Layer
+from .http import _HttpTransmissionLayer, HttpLayer
+from .. import utils
+from ..models import HTTPRequest, HTTPResponse
+from ..exceptions import HttpProtocolException
+from ..exceptions import ProtocolException
+
+
+class SafeH2Connection(H2Connection):
+
+ def __init__(self, conn, *args, **kwargs):
+ super(SafeH2Connection, self).__init__(*args, **kwargs)
+ self.conn = conn
+ self.lock = threading.RLock()
+
+ def safe_close_connection(self, error_code):
+ with self.lock:
+ self.close_connection(error_code)
+ self.conn.send(self.data_to_send())
+
+ def safe_increment_flow_control(self, stream_id, length):
+ if length == 0:
+ return
+
+ with self.lock:
+ self.increment_flow_control_window(length)
+ self.conn.send(self.data_to_send())
+ with self.lock:
+ if stream_id in self.streams and not self.streams[stream_id].closed:
+ self.increment_flow_control_window(length, stream_id=stream_id)
+ self.conn.send(self.data_to_send())
+
+ def safe_reset_stream(self, stream_id, error_code):
+ with self.lock:
+ try:
+ self.reset_stream(stream_id, error_code)
+ except h2.exceptions.StreamClosedError:
+ # stream is already closed - good
+ pass
+ self.conn.send(self.data_to_send())
+
+ def safe_update_settings(self, new_settings):
+ with self.lock:
+ self.update_settings(new_settings)
+ self.conn.send(self.data_to_send())
+
+ def safe_send_headers(self, is_zombie, stream_id, headers):
+ with self.lock:
+ if is_zombie():
+ return
+ self.send_headers(stream_id, headers)
+ self.conn.send(self.data_to_send())
+
+ def safe_send_body(self, is_zombie, stream_id, chunks):
+ for chunk in chunks:
+ position = 0
+ while position < len(chunk):
+ self.lock.acquire()
+ if is_zombie():
+ self.lock.release()
+ return
+ max_outbound_frame_size = self.max_outbound_frame_size
+ frame_chunk = chunk[position:position + max_outbound_frame_size]
+ if self.local_flow_control_window(stream_id) < len(frame_chunk):
+ self.lock.release()
+ time.sleep(0)
+ continue
+ self.send_data(stream_id, frame_chunk)
+ self.conn.send(self.data_to_send())
+ self.lock.release()
+ position += max_outbound_frame_size
+ with self.lock:
+ if is_zombie():
+ return
+ self.end_stream(stream_id)
+ self.conn.send(self.data_to_send())
+
+
+class Http2Layer(Layer):
+
+ def __init__(self, ctx, mode):
+ super(Http2Layer, self).__init__(ctx)
+ self.mode = mode
+ self.streams = dict()
+ self.client_reset_streams = []
+ self.server_reset_streams = []
+ self.server_to_client_stream_ids = dict([(0, 0)])
+ self.client_conn.h2 = SafeH2Connection(self.client_conn, client_side=False)
+
+ # make sure that we only pass actual SSL.Connection objects in here,
+ # because otherwise ssl_read_select fails!
+ self.active_conns = [self.client_conn.connection]
+
+ def _initiate_server_conn(self):
+ self.server_conn.h2 = SafeH2Connection(self.server_conn, client_side=True)
+ self.server_conn.h2.initiate_connection()
+ self.server_conn.send(self.server_conn.h2.data_to_send())
+ self.active_conns.append(self.server_conn.connection)
+
+ def connect(self): # pragma: no cover
+ raise ValueError("CONNECT inside an HTTP2 stream is not supported.")
+ # self.ctx.connect()
+ # self.server_conn.connect()
+ # self._initiate_server_conn()
+
+ def set_server(self): # pragma: no cover
+ raise NotImplementedError("Cannot change server for HTTP2 connections.")
+
+ def disconnect(self): # pragma: no cover
+ raise NotImplementedError("Cannot dis- or reconnect in HTTP2 connections.")
+
+ def next_layer(self): # pragma: no cover
+ # WebSockets over HTTP/2?
+ # CONNECT for proxying?
+ raise NotImplementedError()
+
+ def _handle_event(self, event, source_conn, other_conn, is_server):
+ if hasattr(event, 'stream_id'):
+ if is_server and event.stream_id % 2 == 1:
+ eid = self.server_to_client_stream_ids[event.stream_id]
+ else:
+ eid = event.stream_id
+
+ if isinstance(event, RequestReceived):
+ headers = Headers([[str(k), str(v)] for k, v in event.headers])
+ self.streams[eid] = Http2SingleStreamLayer(self, eid, headers)
+ self.streams[eid].timestamp_start = time.time()
+ self.streams[eid].start()
+ elif isinstance(event, ResponseReceived):
+ headers = Headers([[str(k), str(v)] for k, v in event.headers])
+ self.streams[eid].queued_data_length = 0
+ self.streams[eid].timestamp_start = time.time()
+ self.streams[eid].response_headers = headers
+ self.streams[eid].response_arrived.set()
+ elif isinstance(event, DataReceived):
+ if self.config.body_size_limit and self.streams[eid].queued_data_length > self.config.body_size_limit:
+ raise HttpException("HTTP body too large. Limit is {}.".format(self.config.body_size_limit))
+ self.streams[eid].data_queue.put(event.data)
+ self.streams[eid].queued_data_length += len(event.data)
+ source_conn.h2.safe_increment_flow_control(event.stream_id, event.flow_controlled_length)
+ elif isinstance(event, StreamEnded):
+ self.streams[eid].timestamp_end = time.time()
+ self.streams[eid].data_finished.set()
+ elif isinstance(event, StreamReset):
+ self.streams[eid].zombie = time.time()
+ self.client_reset_streams.append(self.streams[eid].client_stream_id)
+ if self.streams[eid].server_stream_id:
+ self.server_reset_streams.append(self.streams[eid].server_stream_id)
+ if eid in self.streams and event.error_code == 0x8:
+ if is_server:
+ other_stream_id = self.streams[eid].client_stream_id
+ else:
+ other_stream_id = self.streams[eid].server_stream_id
+ if other_stream_id is not None:
+ other_conn.h2.safe_reset_stream(other_stream_id, event.error_code)
+ elif isinstance(event, RemoteSettingsChanged):
+ new_settings = dict([(id, cs.new_value) for (id, cs) in event.changed_settings.iteritems()])
+ other_conn.h2.safe_update_settings(new_settings)
+ elif isinstance(event, ConnectionTerminated):
+ # Do not immediately terminate the other connection.
+ # Some streams might be still sending data to the client.
+ return False
+ elif isinstance(event, PushedStreamReceived):
+ # pushed stream IDs should be unique and not dependent on race conditions
+ # only the parent stream id must be looked up first
+ parent_eid = self.server_to_client_stream_ids[event.parent_stream_id]
+ with self.client_conn.h2.lock:
+ self.client_conn.h2.push_stream(parent_eid, event.pushed_stream_id, event.headers)
+
+ headers = Headers([[str(k), str(v)] for k, v in event.headers])
+ headers['x-mitmproxy-pushed'] = 'true'
+ self.streams[event.pushed_stream_id] = Http2SingleStreamLayer(self, event.pushed_stream_id, headers)
+ self.streams[event.pushed_stream_id].timestamp_start = time.time()
+ self.streams[event.pushed_stream_id].pushed = True
+ self.streams[event.pushed_stream_id].parent_stream_id = parent_eid
+ self.streams[event.pushed_stream_id].timestamp_end = time.time()
+ self.streams[event.pushed_stream_id].request_data_finished.set()
+ self.streams[event.pushed_stream_id].start()
+ elif isinstance(event, TrailersReceived):
+ raise NotImplementedError()
+
+ return True
+
+ def _cleanup_streams(self):
+ death_time = time.time() - 10
+ for stream_id in self.streams.keys():
+ zombie = self.streams[stream_id].zombie
+ if zombie and zombie <= death_time:
+ self.streams.pop(stream_id, None)
+
+ def __call__(self):
+ if self.server_conn:
+ self._initiate_server_conn()
+
+ preamble = self.client_conn.rfile.read(24)
+ self.client_conn.h2.initiate_connection()
+ self.client_conn.h2.receive_data(preamble)
+ self.client_conn.send(self.client_conn.h2.data_to_send())
+
+ while True:
+ r = ssl_read_select(self.active_conns, 1)
+ for conn in r:
+ source_conn = self.client_conn if conn == self.client_conn.connection else self.server_conn
+ other_conn = self.server_conn if conn == self.client_conn.connection else self.client_conn
+ is_server = (conn == self.server_conn.connection)
+
+ with source_conn.h2.lock:
+ try:
+ raw_frame = b''.join(http2_read_raw_frame(source_conn.rfile))
+ except:
+ for stream in self.streams.values():
+ stream.zombie = time.time()
+ return
+
+
+ frame, _ = hyperframe.frame.Frame.parse_frame_header(raw_frame[:9])
+
+ if is_server:
+ list = self.server_reset_streams
+ else:
+ list = self.client_reset_streams
+ if frame.stream_id in list:
+ # this frame belongs to a reset stream - just ignore it
+ if isinstance(frame, hyperframe.frame.HeadersFrame) or isinstance(frame, hyperframe.frame.ContinuationFrame):
+ # we need to keep the hpack-decoder happy too
+ source_conn.h2.decoder.decode(raw_frame[9:])
+ continue
+
+ events = source_conn.h2.receive_data(raw_frame)
+ source_conn.send(source_conn.h2.data_to_send())
+
+ for event in events:
+ if not self._handle_event(event, source_conn, other_conn, is_server):
+ return
+
+ self._cleanup_streams()
+
+
+class Http2SingleStreamLayer(_HttpTransmissionLayer, threading.Thread):
+
+ def __init__(self, ctx, stream_id, request_headers):
+ super(Http2SingleStreamLayer, self).__init__(ctx)
+ self.zombie = None
+ self.client_stream_id = stream_id
+ self.server_stream_id = None
+ self.request_headers = request_headers
+ self.response_headers = None
+ self.pushed = False
+
+ self.request_data_queue = Queue.Queue()
+ self.request_queued_data_length = 0
+ self.request_data_finished = threading.Event()
+
+ self.response_arrived = threading.Event()
+ self.response_data_queue = Queue.Queue()
+ self.response_queued_data_length = 0
+ self.response_data_finished = threading.Event()
+
+ @property
+ def data_queue(self):
+ if self.response_arrived.is_set():
+ return self.response_data_queue
+ else:
+ return self.request_data_queue
+
+ @property
+ def queued_data_length(self):
+ if self.response_arrived.is_set():
+ return self.response_queued_data_length
+ else:
+ return self.request_queued_data_length
+
+ @property
+ def data_finished(self):
+ if self.response_arrived.is_set():
+ return self.response_data_finished
+ else:
+ return self.request_data_finished
+
+ @queued_data_length.setter
+ def queued_data_length(self, v):
+ if self.response_arrived.is_set():
+ self.response_queued_data_length = v
+ else:
+ self.request_queued_data_length = v
+
+ def is_zombie(self):
+ return self.zombie is not None
+
+ def read_request(self):
+ self.request_data_finished.wait()
+
+ authority = self.request_headers.get(':authority', '')
+ method = self.request_headers.get(':method', 'GET')
+ scheme = self.request_headers.get(':scheme', 'https')
+ path = self.request_headers.get(':path', '/')
+ host = None
+ port = None
+
+ if path == '*' or path.startswith("/"):
+ form_in = "relative"
+ elif method == 'CONNECT': # pragma: no cover
+ raise NotImplementedError("CONNECT over HTTP/2 is not implemented.")
+ else: # pragma: no cover
+ form_in = "absolute"
+ # FIXME: verify if path or :host contains what we need
+ scheme, host, port, _ = utils.parse_url(path)
+
+ if authority:
+ host, _, port = authority.partition(':')
+
+ if not host:
+ host = 'localhost'
+ if not port:
+ port = 443 if scheme == 'https' else 80
+ port = int(port)
+
+ data = []
+ while self.request_data_queue.qsize() > 0:
+ data.append(self.request_data_queue.get())
+ data = b"".join(data)
+
+ return HTTPRequest(
+ form_in,
+ method,
+ scheme,
+ host,
+ port,
+ path,
+ b"HTTP/2.0",
+ self.request_headers,
+ data,
+ timestamp_start=self.timestamp_start,
+ timestamp_end=self.timestamp_end,
+ )
+
+ def send_request(self, message):
+ if self.pushed:
+ # nothing to do here
+ return
+
+ with self.server_conn.h2.lock:
+ # We must not assign a stream id if we are already a zombie.
+ if self.zombie:
+ return
+
+ self.server_stream_id = self.server_conn.h2.get_next_available_stream_id()
+ self.server_to_client_stream_ids[self.server_stream_id] = self.client_stream_id
+
+ self.server_conn.h2.safe_send_headers(
+ self.is_zombie,
+ self.server_stream_id,
+ message.headers
+ )
+ self.server_conn.h2.safe_send_body(
+ self.is_zombie,
+ self.server_stream_id,
+ message.body
+ )
+
+ def read_response_headers(self):
+ self.response_arrived.wait()
+
+ status_code = int(self.response_headers.get(':status', 502))
+
+ return HTTPResponse(
+ http_version=b"HTTP/2.0",
+ status_code=status_code,
+ reason='',
+ headers=self.response_headers,
+ content=None,
+ timestamp_start=self.timestamp_start,
+ timestamp_end=self.timestamp_end,
+ )
+
+ def read_response_body(self, request, response):
+ while True:
+ try:
+ yield self.response_data_queue.get(timeout=1)
+ except Queue.Empty:
+ pass
+ if self.response_data_finished.is_set():
+ while self.response_data_queue.qsize() > 0:
+ yield self.response_data_queue.get()
+ return
+ if self.zombie:
+ return
+
+ def send_response_headers(self, response):
+ self.client_conn.h2.safe_send_headers(
+ self.is_zombie,
+ self.client_stream_id,
+ response.headers
+ )
+
+ def send_response_body(self, _response, chunks):
+ self.client_conn.h2.safe_send_body(
+ self.is_zombie,
+ self.client_stream_id,
+ chunks
+ )
+
+ def check_close_connection(self, flow):
+ # This layer only handles a single stream.
+ # RFC 7540 8.1: An HTTP request/response exchange fully consumes a single stream.
+ return True
+
+ def connect(self): # pragma: no cover
+ raise ValueError("CONNECT inside an HTTP2 stream is not supported.")
+
+ def set_server(self, *args, **kwargs): # pragma: no cover
+ # do not mess with the server connection - all streams share it.
+ pass
+
+ def run(self):
+ layer = HttpLayer(self, self.mode)
+ layer()
+ self.zombie = time.time()
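Note: all of the above is driven by h2's sans-io state machine, which always follows the same receive_data -> events -> data_to_send cycle that SafeH2Connection wraps in a lock. A stripped-down sketch of that cycle on the client-facing side, independent of the proxy machinery (the send callable stands in for the socket write, which is not shown):

    import h2.connection
    import h2.events

    conn = h2.connection.H2Connection(client_side=False)  # we act as the server towards the client
    conn.initiate_connection()
    # conn.data_to_send() now holds our SETTINGS preface; it must be written to the socket.

    def feed(raw_frame_bytes, send):
        # Push raw bytes in, let h2 parse them into events, then flush whatever h2 queued up.
        events = conn.receive_data(raw_frame_bytes)
        send(conn.data_to_send())
        for event in events:
            if isinstance(event, h2.events.RequestReceived):
                pass  # hand the stream off, roughly what Http2Layer._handle_event does above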
diff --git a/libmproxy/protocol/tls.py b/libmproxy/protocol/tls.py
index af1a6055..986eb964 100644
--- a/libmproxy/protocol/tls.py
+++ b/libmproxy/protocol/tls.py
@@ -349,7 +349,7 @@ class TlsLayer(Layer):
layer = self.ctx.next_layer(self)
layer()
- def __repr__(self):
+ def __repr__(self): # pragma: no cover
if self._client_tls and self._server_tls:
return "TlsLayer(client and server)"
elif self._client_tls:
@@ -560,5 +560,7 @@ class TlsLayer(Layer):
if self._sni_from_server_change:
sans.add(self._sni_from_server_change)
- sans.discard(host)
+ # Some applications don't consider the CN and expect the hostname to be in the SANs.
+ # For example, Thunderbird 38 will display a warning if the remote host appears only in the CN.
+ sans.add(host)
return self.config.certstore.get_cert(host, list(sans))
diff --git a/libmproxy/proxy/config.py b/libmproxy/proxy/config.py
index bf765d81..a635ab19 100644
--- a/libmproxy/proxy/config.py
+++ b/libmproxy/proxy/config.py
@@ -180,6 +180,9 @@ def process_proxy_options(parser, options):
parser.error("Certificate file does not exist: %s" % parts[1])
certs.append(parts)
+ if options.http2 and not tcp.HAS_ALPN:
+ raise RuntimeError("HTTP2 support requires OpenSSL 1.0.2 or above.")
+
return ProxyConfig(
host=options.addr,
port=options.port,
diff --git a/libmproxy/proxy/server.py b/libmproxy/proxy/server.py
index 750cb1a4..d208cff5 100644
--- a/libmproxy/proxy/server.py
+++ b/libmproxy/proxy/server.py
@@ -103,9 +103,9 @@ class ConnectionHandler(object):
return Socks5Proxy(root_context)
elif mode == "regular":
return HttpProxy(root_context)
- elif callable(mode): # pragma: nocover
+ elif callable(mode): # pragma: no cover
return mode(root_context)
- else: # pragma: nocover
+ else: # pragma: no cover
raise ValueError("Unknown proxy mode: %s" % mode)
def handle(self):
diff --git a/libmproxy/stateobject.py b/libmproxy/stateobject.py
index 52a8347f..9600ab09 100644
--- a/libmproxy/stateobject.py
+++ b/libmproxy/stateobject.py
@@ -1,52 +1,51 @@
from __future__ import absolute_import
+from netlib.utils import Serializable
-class StateObject(object):
-
+class StateObject(Serializable):
"""
- An object with serializable state.
+ An object with serializable state.
- State attributes can either be serializable types(str, tuple, bool, ...)
- or StateObject instances themselves.
+ State attributes can either be serializable types(str, tuple, bool, ...)
+ or StateObject instances themselves.
"""
- # An attribute-name -> class-or-type dict containing all attributes that
- # should be serialized. If the attribute is a class, it must implement the
- # StateObject protocol.
- _stateobject_attributes = None
- # A set() of attributes that should be ignored for short state
- _stateobject_long_attributes = frozenset([])
- def from_state(self, state):
- raise NotImplementedError()
+ _stateobject_attributes = None
+ """
+ An attribute-name -> class-or-type dict containing all attributes that
+ should be serialized. If the attribute is a class, it must implement the
+ Serializable protocol.
+ """
- def get_state(self, short=False):
+ def get_state(self):
"""
- Retrieve object state. If short is true, return an abbreviated
- format with long data elided.
+ Retrieve object state.
"""
state = {}
for attr, cls in self._stateobject_attributes.iteritems():
- if short and attr in self._stateobject_long_attributes:
- continue
val = getattr(self, attr)
if hasattr(val, "get_state"):
- state[attr] = val.get_state(short)
+ state[attr] = val.get_state()
else:
state[attr] = val
return state
- def load_state(self, state):
+ def set_state(self, state):
"""
- Load object state from data returned by a get_state call.
+ Load object state from data returned by a get_state call.
"""
+ state = state.copy()
for attr, cls in self._stateobject_attributes.iteritems():
- if state.get(attr, None) is None:
- setattr(self, attr, None)
+ if state.get(attr) is None:
+ setattr(self, attr, state.pop(attr))
else:
curr = getattr(self, attr)
- if hasattr(curr, "load_state"):
- curr.load_state(state[attr])
+ if hasattr(curr, "set_state"):
+ curr.set_state(state.pop(attr))
elif hasattr(cls, "from_state"):
- setattr(self, attr, cls.from_state(state[attr]))
- else:
- setattr(self, attr, cls(state[attr]))
+ obj = cls.from_state(state.pop(attr))
+ setattr(self, attr, obj)
+ else: # primitive types such as int, str, ...
+ setattr(self, attr, cls(state.pop(attr)))
+ if state:
+ raise RuntimeWarning("Unexpected state in set_state: {}".format(state))
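Note: with the Serializable-based protocol above, a subclass only declares _stateobject_attributes plus a from_state constructor and gets recursive get_state/set_state for free. A minimal sketch with a made-up class (TimingInfo does not exist in the code base):

    from libmproxy.stateobject import StateObject

    class TimingInfo(StateObject):
        _stateobject_attributes = dict(
            duration=float,
            verb=str,
        )

        def __init__(self, duration=None, verb=None):
            self.duration = duration
            self.verb = verb

        @classmethod
        def from_state(cls, state):
            obj = cls()
            obj.set_state(state)
            return obj

        def copy(self):
            return self.from_state(self.get_state())

    info = TimingInfo(0.25, "GET")
    assert TimingInfo.from_state(info.get_state()).duration == 0.25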
diff --git a/libmproxy/version.py b/libmproxy/version.py
index 03c2f256..25c56706 100644
--- a/libmproxy/version.py
+++ b/libmproxy/version.py
@@ -1,6 +1,6 @@
from __future__ import (absolute_import, print_function, division)
-IVERSION = (0, 15)
+IVERSION = (0, 16)
VERSION = ".".join(str(i) for i in IVERSION)
MINORVERSION = ".".join(str(i) for i in IVERSION[:2])
NAME = "mitmproxy"
diff --git a/libmproxy/web/__init__.py b/libmproxy/web/__init__.py
index 43fc993d..c48b3d09 100644
--- a/libmproxy/web/__init__.py
+++ b/libmproxy/web/__init__.py
@@ -2,6 +2,7 @@ from __future__ import absolute_import, print_function
import collections
import tornado.ioloop
import tornado.httpserver
+
from .. import controller, flow
from . import app
@@ -20,7 +21,7 @@ class WebFlowView(flow.FlowView):
app.ClientConnection.broadcast(
type="flows",
cmd="add",
- data=f.get_state(short=True)
+ data=app._strip_content(f.get_state())
)
def _update(self, f):
@@ -28,7 +29,7 @@ class WebFlowView(flow.FlowView):
app.ClientConnection.broadcast(
type="flows",
cmd="update",
- data=f.get_state(short=True)
+ data=app._strip_content(f.get_state())
)
def _remove(self, f):
diff --git a/libmproxy/web/app.py b/libmproxy/web/app.py
index 79f76013..958b8669 100644
--- a/libmproxy/web/app.py
+++ b/libmproxy/web/app.py
@@ -4,9 +4,38 @@ import tornado.web
import tornado.websocket
import logging
import json
+
+from netlib.http import CONTENT_MISSING
from .. import version, filt
+def _strip_content(flow_state):
+ """
+ Remove flow message content and cert to save transmission space.
+
+ Args:
+ flow_state: The original flow state. Modified in place and returned.
+ """
+ for attr in ("request", "response"):
+ if attr in flow_state:
+ message = flow_state[attr]
+ if message["content"]:
+ message["contentLength"] = len(message["content"])
+ elif message["content"] == CONTENT_MISSING:
+ message["contentLength"] = None
+ else:
+ message["contentLength"] = 0
+ del message["content"]
+
+ if "backup" in flow_state:
+ del flow_state["backup"]
+ flow_state["modified"] = True
+
+ flow_state.get("server_conn", {}).pop("cert", None)
+
+ return flow_state
+
+
class APIError(tornado.web.HTTPError):
pass
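Note: _strip_content replaces the old get_state(short=True) path; it mutates the state dict it is given and returns it. A rough sketch of its effect on a hypothetical, heavily reduced flow state:

    from libmproxy.web.app import _strip_content

    state = {
        "request": {"content": b"abcd"},
        "response": {"content": b""},
        "server_conn": {"cert": "-----BEGIN CERTIFICATE-----..."},
        "backup": {},
    }
    stripped = _strip_content(state)
    assert stripped["request"]["contentLength"] == 4 and "content" not in stripped["request"]
    assert stripped["response"]["contentLength"] == 0
    assert "cert" not in stripped["server_conn"]
    assert "backup" not in stripped and stripped["modified"] is True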
@@ -100,7 +129,7 @@ class Flows(RequestHandler):
def get(self):
self.write(dict(
- data=[f.get_state(short=True) for f in self.state.flows]
+ data=[_strip_content(f.get_state()) for f in self.state.flows]
))
@@ -141,7 +170,7 @@ class FlowHandler(RequestHandler):
elif k == "port":
request.port = int(v)
elif k == "headers":
- request.headers.load_state(v)
+ request.headers.set_state(v)
else:
print "Warning: Unknown update {}.{}: {}".format(a, k, v)
@@ -155,7 +184,7 @@ class FlowHandler(RequestHandler):
elif k == "http_version":
response.http_version = str(v)
elif k == "headers":
- response.headers.load_state(v)
+ response.headers.set_state(v)
else:
print "Warning: Unknown update {}.{}: {}".format(a, k, v)
else: