"""Parse (absolute and relative) URLs.

The urlparse module is based upon the following RFC specifications:

RFC 3986 (STD 66): "Uniform Resource Identifier (URI): Generic Syntax" by
T. Berners-Lee, R. Fielding, and L. Masinter, January 2005.

RFC 2732: "Format for Literal IPv6 Addresses in URL's" by R. Hinden,
B. Carpenter, and L. Masinter, December 1999.

RFC 2396: "Uniform Resource Identifiers (URI): Generic Syntax" by
T. Berners-Lee, R. Fielding, and L. Masinter, August 1998.

RFC 2368: "The mailto URL scheme" by P. Hoffman, L. Masinter, and
J. Zawinski, July 1998.

RFC 1808: "Relative Uniform Resource Locators" by R. Fielding,
UC Irvine, June 1995.

RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee,
L. Masinter, and M. McCahill, December 1994.

RFC 3986 is considered the current standard, and any future changes to the
urlparse module should conform to it.  The urlparse module is currently not
entirely compliant with this RFC: to cover de facto parsing scenarios and
for backward compatibility, some parsing quirks from older RFCs are
retained.  The test cases in test_urlparse.py provide a good indicator of
parsing behavior.
"""

__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
           "urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]

# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', '', 'sftp']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero', '']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')

MAX_CACHE_SIZE = 20
_parse_cache = {}

def clear_cache():
    """Clear the parse cache."""
    _parse_cache.clear()


class ResultMixin(object):
    """Shared methods for the parsed result objects."""

    @property
    def username(self):
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                userinfo = userinfo.split(":", 1)[0]
            return userinfo
        return None

    @property
    def password(self):
        netloc = self.netloc
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)[1]
        return None

    @property
    def hostname(self):
        netloc = self.netloc.split('@')[-1]
        if '[' in netloc and ']' in netloc:
            return netloc.split(']')[0][1:].lower()
        elif ':' in netloc:
            return netloc.split(':')[0].lower()
        elif netloc == '':
            return None
        else:
            return netloc.lower()

    @property
    def port(self):
        netloc = self.netloc.split('@')[-1].split(']')[-1]
        if ':' in netloc:
            port = netloc.split(':')[1]
            return int(port, 10)
        else:
            return None

from collections import namedtuple

class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunsplit(self)


class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):

    __slots__ = ()

    def geturl(self):
        return urlunparse(self)


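# Rough usage sketch for the result objects above (the URL is made up): both
# SplitResult and ParseResult expose the userinfo/host/port attributes from
# ResultMixin, and geturl() reassembles the URL.
#
#   >>> r = urlsplit('http://user:pwd@www.Example.com:8080/path')
#   >>> r.username, r.password, r.hostname, r.port
#   ('user', 'pwd', 'www.example.com', 8080)
#   >>> r.geturl()
#   'http://user:pwd@www.Example.com:8080/path'
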
def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    tuple = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = tuple
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    return ParseResult(scheme, netloc, url, params, query, fragment)

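# Illustrative example of the 6-tuple split (made-up URL; the params field is
# only separated out for schemes listed in uses_params):
#
#   >>> urlparse('http://www.example.com/doc;ver=2?page=3#top')
#   ParseResult(scheme='http', netloc='www.example.com', path='/doc',
#               params='ver=2', query='page=3', fragment='top')
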
def _splitparams(url):
    if '/' in url:
        i = url.find(';', url.rfind('/'))
        if i < 0:
            return url, ''
    else:
        i = url.find(';')
    return url[:i], url[i+1:]

def _splitnetloc(url, start=0):
    delim = len(url)   # position of end of domain part of url, default is end
    for c in '/?#':    # look for delimiters; the order is NOT important
        wdelim = url.find(c, start)        # find first of this delim
        if wdelim >= 0:                    # if found
            delim = min(delim, wdelim)     # use earliest delim position
    return url[start:delim], url[delim:]   # return (domain, rest)

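# Helper behaviour sketch (hypothetical input): the netloc runs from `start`
# up to the first '/', '?' or '#', and the remainder is returned unparsed.
#
#   >>> _splitnetloc('//www.example.com/index.html?q=1', 2)
#   ('www.example.com', '/index.html?q=1')
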
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    allow_fragments = bool(allow_fragments)
    key = url, scheme, allow_fragments, type(url), type(scheme)
    cached = _parse_cache.get(key, None)
    if cached:
        return cached
    if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
        clear_cache()
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        if url[:i] == 'http': # optimize the common case
            scheme = url[:i].lower()
            url = url[i+1:]
            if url[:2] == '//':
                netloc, url = _splitnetloc(url, 2)
                if (('[' in netloc and ']' not in netloc) or
                        (']' in netloc and '[' not in netloc)):
                    raise ValueError("Invalid IPv6 URL")
            if allow_fragments and '#' in url:
                url, fragment = url.split('#', 1)
            if '?' in url:
                url, query = url.split('?', 1)
            v = SplitResult(scheme, netloc, url, query, fragment)
            _parse_cache[key] = v
            return v
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i+1:]

    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and scheme in uses_fragment and '#' in url:
        url, fragment = url.split('#', 1)
    if scheme in uses_query and '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    _parse_cache[key] = v
    return v

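# Example split (made-up URL).  Unlike urlparse(), the ';parameters' part is
# left inside the path component:
#
#   >>> urlsplit('https://www.example.com/doc;ver=2?page=3#top')
#   SplitResult(scheme='https', netloc='www.example.com', path='/doc;ver=2',
#               query='page=3', fragment='top')
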
def urlunparse(data):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment = data
    if params:
        url = "%s;%s" % (url, params)
    return urlunsplit((scheme, netloc, url, query, fragment))

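# Reassembly sketch with a params field (all values are made up):
#
#   >>> urlunparse(('http', 'www.example.com', '/doc', 'ver=2', 'page=3', 'top'))
#   'http://www.example.com/doc;ver=2?page=3#top'
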
def urlunsplit(data):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment = data
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url

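# Reassembly sketch (made-up values); an empty query or fragment simply drops
# its delimiter:
#
#   >>> urlunsplit(('https', 'www.example.com', '/doc', 'page=3', ''))
#   'https://www.example.com/doc?page=3'
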
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)
    if scheme != bscheme or scheme not in uses_relative:
        return url
    if scheme in uses_netloc:
        if netloc:
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        netloc = bnetloc
    if path[:1] == '/':
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    if not path:
        path = bpath
        if not params:
            params = bparams
        else:
            path = path[:-1]
            return urlunparse((scheme, netloc, path,
                               params, query, fragment))
        if not query:
            query = bquery
        return urlunparse((scheme, netloc, path,
                           params, query, fragment))
    segments = bpath.split('/')[:-1] + path.split('/')
    # XXX The stuff below is bogus in various ways...
    if segments[-1] == '.':
        segments[-1] = ''
    while '.' in segments:
        segments.remove('.')
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if (segments[i] == '..'
                and segments[i-1] not in ('', '..')):
                del segments[i-1:i+1]
                break
            i = i+1
        else:
            break
    if segments == ['', '..']:
        segments[-1] = ''
    elif len(segments) >= 2 and segments[-1] == '..':
        segments[-2:] = ['']
    return urlunparse((scheme, netloc, '/'.join(segments),
                       params, query, fragment))

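# Relative-resolution sketch (hypothetical URLs): the last path segment of the
# base is replaced, and '.'/'..' segments are collapsed:
#
#   >>> urljoin('http://www.example.com/docs/intro.html', 'tutorial.html')
#   'http://www.example.com/docs/tutorial.html'
#   >>> urljoin('http://www.example.com/docs/intro.html', '../images/logo.gif')
#   'http://www.example.com/images/logo.gif'
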
def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
        return defrag, frag
    else:
        return url, ''

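# Quick sketch of the (url, fragment) pair returned (made-up URL):
#
#   >>> urldefrag('http://www.example.com/doc#section-2')
#   ('http://www.example.com/doc', 'section-2')
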
# unquote function for parse_qs and parse_qsl
# Cannot use directly from urllib as it would create a circular reference
# because urllib uses urlparse methods (urljoin).  If you update this function,
# update it also in urllib.  This code duplication does not exist in Python 3.

_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a+b, chr(int(a+b, 16)))
                 for a in _hexdig for b in _hexdig)

def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            s += '%' + item
        except UnicodeDecodeError:
            s += unichr(int(item[:2], 16)) + item[2:]
    return s

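# Sketch of the expected behaviour: valid %XX escapes are decoded, while
# malformed escapes are passed through untouched.
#
#   >>> unquote('abc%20def%2Cghi')
#   'abc def,ghi'
#   >>> unquote('100%zz')
#   '100%zz'
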
def parse_qs(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

        Arguments:

        qs: URL-encoded query string to be parsed

        keep_blank_values: flag indicating whether blank values in
            URL encoded queries should be treated as blank strings.
            A true value indicates that blanks should be retained as
            blank strings.  The default false value indicates that
            blank values are to be ignored and treated as if they were
            not included.

        strict_parsing: flag indicating what to do with parsing errors.
            If false (the default), errors are silently ignored.
            If true, errors raise a ValueError exception.
    """
    dict = {}
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        if name in dict:
            dict[name].append(value)
        else:
            dict[name] = [value]
    return dict

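# Sketch (made-up query string): repeated names are accumulated into a list,
# and blank values are dropped unless keep_blank_values is true.
#
#   >>> parse_qs('a=1&a=2&b=')
#   {'a': ['1', '2']}
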
def parse_qsl(qs, keep_blank_values=0, strict_parsing=0):
    """Parse a query given as a string argument.

    Arguments:

    qs: URL-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        URL encoded queries should be treated as blank strings.  A
        true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    Returns a list, as G-d intended.
    """
    pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
    r = []
    for name_value in pairs:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError, "bad query field: %r" % (name_value,)
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = unquote(nv[0].replace('+', ' '))
            value = unquote(nv[1].replace('+', ' '))
            r.append((name, value))

    return r
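
# Sketch (made-up query string): parse_qsl keeps the pairs in order and
# decodes '+' and %XX escapes in both names and values.
#
#   >>> parse_qsl('a=1&b=two+words&c=%7E')
#   [('a', '1'), ('b', 'two words'), ('c', '~')]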