download.py 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913
  1. from __future__ import absolute_import
  2. import cgi
  3. import email.utils
  4. import hashlib
  5. import getpass
  6. import json
  7. import logging
  8. import mimetypes
  9. import os
  10. import platform
  11. import re
  12. import shutil
  13. import sys
  14. import tempfile
  15. from pip._vendor.six.moves.urllib import parse as urllib_parse
  16. from pip._vendor.six.moves.urllib import request as urllib_request
  17. import pip
  18. from pip.exceptions import InstallationError, HashMismatch
  19. from pip.models import PyPI
  20. from pip.utils import (splitext, rmtree, format_size, display_path,
  21. backup_dir, ask_path_exists, unpack_file,
  22. call_subprocess)
  23. from pip.utils.filesystem import check_path_owner
  24. from pip.utils.logging import indent_log
  25. from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
  26. from pip.locations import write_delete_marker_file
  27. from pip.vcs import vcs
  28. from pip._vendor import requests, six
  29. from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
  30. from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
  31. from pip._vendor.requests.models import Response
  32. from pip._vendor.requests.structures import CaseInsensitiveDict
  33. from pip._vendor.requests.packages import urllib3
  34. from pip._vendor.cachecontrol import CacheControlAdapter
  35. from pip._vendor.cachecontrol.caches import FileCache
  36. from pip._vendor.lockfile import LockError
  37. from pip._vendor.six.moves import xmlrpc_client
# Public API of this module; everything else is an internal helper.
__all__ = ['get_file_content',
           'is_url', 'url_to_path', 'path_to_url',
           'is_archive_file', 'unpack_vcs_link',
           'unpack_file_url', 'is_vcs_url', 'is_file_url',
           'unpack_http_url', 'unpack_url']

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
def user_agent():
    """
    Return a string representing the user agent.

    The result has the form ``pip/<version> <json>``, where ``<json>`` is a
    compact, key-sorted JSON object describing the installer, the Python
    implementation and version, and — when detectable — the OS / distro /
    libc and CPU architecture.
    """
    data = {
        "installer": {"name": "pip", "version": pip.__version__},
        "python": platform.python_version(),
        "implementation": {
            "name": platform.python_implementation(),
        },
    }

    if data["implementation"]["name"] == 'CPython':
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == 'PyPy':
        # PyPy has its own version tuple distinct from the Python version
        # it implements; drop the release level for final releases.
        if sys.pypy_version_info.releaselevel == 'final':
            pypy_version_info = sys.pypy_version_info[:3]
        else:
            pypy_version_info = sys.pypy_version_info
        data["implementation"]["version"] = ".".join(
            [str(x) for x in pypy_version_info]
        )
    elif data["implementation"]["name"] == 'Jython':
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()
    elif data["implementation"]["name"] == 'IronPython':
        # Complete Guess
        data["implementation"]["version"] = platform.python_version()

    if sys.platform.startswith("linux"):
        # Keep only the non-empty fields reported by platform.
        distro = dict(filter(
            lambda x: x[1],
            zip(["name", "version", "id"], platform.linux_distribution()),
        ))
        libc = dict(filter(
            lambda x: x[1],
            zip(["lib", "version"], platform.libc_ver()),
        ))
        if libc:
            distro["libc"] = libc
        if distro:
            data["distro"] = distro

    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
        data["distro"] = {"name": "OS X", "version": platform.mac_ver()[0]}

    if platform.system():
        data.setdefault("system", {})["name"] = platform.system()
    if platform.release():
        data.setdefault("system", {})["release"] = platform.release()

    if platform.machine():
        data["cpu"] = platform.machine()

    return "{data[installer][name]}/{data[installer][version]} {json}".format(
        data=data,
        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
    )
class MultiDomainBasicAuth(AuthBase):
    """Requests auth handler that remembers basic-auth credentials per netloc.

    Credentials can come from the URL itself (``user:pass@host``) or from an
    interactive prompt on a 401 response; they are cached keyed by netloc so
    multiple index servers can be used within one session.
    """

    def __init__(self, prompting=True):
        # When False, never prompt interactively on a 401 response.
        self.prompting = prompting
        # Maps netloc -> (username, password).
        self.passwords = {}

    def __call__(self, req):
        parsed = urllib_parse.urlparse(req.url)

        # Get the netloc without any embedded credentials
        netloc = parsed.netloc.rsplit("@", 1)[-1]

        # Set the url of the request to the url without any credentials
        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        # Extract credentials embedded in the url if we have none stored
        if username is None:
            username, password = self.parse_credentials(parsed.netloc)

        if username or password:
            # Store the username and password
            self.passwords[netloc] = (username, password)

            # Send the basic auth with this request
            req = HTTPBasicAuth(username or "", password or "")(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    def handle_401(self, resp, **kwargs):
        # We only care about 401 responses, anything else we want to just
        # pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp

        parsed = urllib_parse.urlparse(resp.url)

        # Prompt the user for a new username and password
        username = six.moves.input("User for %s: " % parsed.netloc)
        password = getpass.getpass("Password: ")

        # Store the new username and password to use for future requests
        if username or password:
            self.passwords[parsed.netloc] = (username, password)

        # Consume content and release the original connection to allow our new
        # request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def parse_credentials(self, netloc):
        # Split "user:pass@host" -> (user, pass); returns (None, None) when
        # the netloc carries no embedded credentials.
        if "@" in netloc:
            userinfo = netloc.rsplit("@", 1)[0]
            if ":" in userinfo:
                return userinfo.split(":", 1)
            return userinfo, None
        return None, None
class LocalFSAdapter(BaseAdapter):
    """Transport adapter that serves ``file://`` URLs from the local
    filesystem.

    Builds a minimal :class:`Response`: 200 with Content-Type /
    Content-Length / Last-Modified headers and the open file as the raw
    body on success, 404 (with the OSError as the raw body) when the path
    cannot be stat'ed.
    """

    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
             proxies=None):
        pathname = url_to_path(request.url)

        resp = Response()
        resp.status_code = 200
        resp.url = request.url

        try:
            stats = os.stat(pathname)
        except OSError as exc:
            # Missing or unreadable file: report 404, stash the error.
            resp.status_code = 404
            resp.raw = exc
        else:
            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
            content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
            resp.headers = CaseInsensitiveDict({
                "Content-Type": content_type,
                "Content-Length": stats.st_size,
                "Last-Modified": modified,
            })

            resp.raw = open(pathname, "rb")
            # Closing the response must close the underlying file handle.
            resp.close = resp.raw.close

        return resp

    def close(self):
        # Nothing to clean up; files are closed via the response object.
        pass
  176. class SafeFileCache(FileCache):
  177. """
  178. A file based cache which is safe to use even when the target directory may
  179. not be accessible or writable.
  180. """
  181. def __init__(self, *args, **kwargs):
  182. super(SafeFileCache, self).__init__(*args, **kwargs)
  183. # Check to ensure that the directory containing our cache directory
  184. # is owned by the user current executing pip. If it does not exist
  185. # we will check the parent directory until we find one that does exist.
  186. # If it is not owned by the user executing pip then we will disable
  187. # the cache and log a warning.
  188. if not check_path_owner(self.directory):
  189. logger.warning(
  190. "The directory '%s' or its parent directory is not owned by "
  191. "the current user and the cache has been disabled. Please "
  192. "check the permissions and owner of that directory. If "
  193. "executing pip with sudo, you may want sudo's -H flag.",
  194. self.directory,
  195. )
  196. # Set our directory to None to disable the Cache
  197. self.directory = None
  198. def get(self, *args, **kwargs):
  199. # If we don't have a directory, then the cache should be a no-op.
  200. if self.directory is None:
  201. return
  202. try:
  203. return super(SafeFileCache, self).get(*args, **kwargs)
  204. except (LockError, OSError, IOError):
  205. # We intentionally silence this error, if we can't access the cache
  206. # then we can just skip caching and process the request as if
  207. # caching wasn't enabled.
  208. pass
  209. def set(self, *args, **kwargs):
  210. # If we don't have a directory, then the cache should be a no-op.
  211. if self.directory is None:
  212. return
  213. try:
  214. return super(SafeFileCache, self).set(*args, **kwargs)
  215. except (LockError, OSError, IOError):
  216. # We intentionally silence this error, if we can't access the cache
  217. # then we can just skip caching and process the request as if
  218. # caching wasn't enabled.
  219. pass
  220. def delete(self, *args, **kwargs):
  221. # If we don't have a directory, then the cache should be a no-op.
  222. if self.directory is None:
  223. return
  224. try:
  225. return super(SafeFileCache, self).delete(*args, **kwargs)
  226. except (LockError, OSError, IOError):
  227. # We intentionally silence this error, if we can't access the cache
  228. # then we can just skip caching and process the request as if
  229. # caching wasn't enabled.
  230. pass
class InsecureHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that disables TLS certificate verification entirely."""

    def cert_verify(self, conn, url, verify, cert):
        # Never require a peer certificate and use no CA bundle, regardless
        # of the verify/cert arguments.
        conn.cert_reqs = 'CERT_NONE'
        conn.ca_certs = None
class PipSession(requests.Session):
    """A :class:`requests.Session` preconfigured for pip.

    Adds pip's User-Agent header, multi-domain basic auth, retry handling,
    optional HTTP caching restricted to securely fetched origins, ``file://``
    URL support, and TLS-verification opt-out for explicitly listed hosts.
    """

    # Default timeout applied to every request (see request() below).
    timeout = None

    def __init__(self, *args, **kwargs):
        # Session-specific options are popped before delegating to
        # requests.Session, which does not know about them.
        retries = kwargs.pop("retries", 0)
        cache = kwargs.pop("cache", None)
        insecure_hosts = kwargs.pop("insecure_hosts", [])

        super(PipSession, self).__init__(*args, **kwargs)

        # Attach our User Agent to the request
        self.headers["User-Agent"] = user_agent()

        # Attach our Authentication handler to the session
        self.auth = MultiDomainBasicAuth()

        # Create our urllib3.Retry instance which will allow us to customize
        # how we handle retries.
        retries = urllib3.Retry(
            # Set the total number of retries that a particular request can
            # have.
            total=retries,

            # A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interupted in some way. A 503 error in general
            # is typically considered a transient error so we'll go ahead and
            # retry it.
            status_forcelist=[503],

            # Add a small amount of back off between failed requests in
            # order to prevent hammering the service.
            backoff_factor=0.25,
        )

        # We want to _only_ cache responses on securely fetched origins. We do
        # this because we can't validate the response of an insecurely fetched
        # origin, and we don't want someone to be able to poison the cache and
        # require manual evication from the cache to fix it.
        if cache:
            secure_adapter = CacheControlAdapter(
                cache=SafeFileCache(cache),
                max_retries=retries,
            )
        else:
            secure_adapter = HTTPAdapter(max_retries=retries)

        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
        # support caching (see above) so we'll use it for all http:// URLs as
        # well as any https:// host that we've marked as ignoring TLS errors
        # for.
        insecure_adapter = InsecureHTTPAdapter(max_retries=retries)

        self.mount("https://", secure_adapter)
        self.mount("http://", insecure_adapter)

        # Enable file:// urls
        self.mount("file://", LocalFSAdapter())

        # We want to use a non-validating adapter for any requests which are
        # deemed insecure.
        for host in insecure_hosts:
            self.mount("https://{0}/".format(host), insecure_adapter)

    def request(self, method, url, *args, **kwargs):
        # Allow setting a default timeout on a session
        kwargs.setdefault("timeout", self.timeout)

        # Dispatch the actual request
        return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
    """Gets the content of a file; it may be a filename, file: URL, or
    http: URL.  Returns (location, content).  Content is unicode.

    :param url: filename, ``file:`` URL, or ``http(s):`` URL to read
    :param comes_from: the URL/path of the requirements file that referenced
        ``url``; used to reject local files referenced from remote files
    :param session: a PipSession instance; required
    :raises InstallationError: when a remote requirements file references a
        local one, or when the local file cannot be opened
    """
    if session is None:
        raise TypeError(
            "get_file_content() missing 1 required keyword argument: 'session'"
        )

    match = _scheme_re.search(url)
    if match:
        scheme = match.group(1).lower()
        # A remote requirements file must not pull in local files.
        if (scheme == 'file' and comes_from and
                comes_from.startswith('http')):
            raise InstallationError(
                'Requirements file %s references URL %s, which is local'
                % (comes_from, url))
        if scheme == 'file':
            # Turn the file: URL into a plain filesystem path.
            path = url.split(':', 1)[1]
            path = path.replace('\\', '/')
            match = _url_slash_drive_re.match(path)
            if match:
                # Windows drive spelled as "/C|/..." -> "C:/..."
                path = match.group(1) + ':' + path.split('|', 1)[1]
            path = urllib_parse.unquote(path)
            if path.startswith('/'):
                # Collapse any run of leading slashes into a single one.
                path = '/' + path.lstrip('/')
            url = path
        else:
            # FIXME: catch some errors
            resp = session.get(url)
            resp.raise_for_status()

            if six.PY3:
                return resp.url, resp.text
            else:
                # NOTE(review): on Python 2 this returns bytes
                # (resp.content), despite the docstring's "unicode" claim.
                return resp.url, resp.content

    # Fall through: treat url as a local filename (possibly rewritten from
    # a file: URL above).
    try:
        with open(url) as f:
            content = f.read()
    except IOError as exc:
        raise InstallationError(
            'Could not open requirements file: %s' % str(exc)
        )
    return url, content
# Matches an http/https/file scheme prefix, case-insensitively.
_scheme_re = re.compile(r'^(http|https|file):', re.I)
# Matches a Windows drive letter written with a pipe, e.g. "/C|/path".
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
  333. def is_url(name):
  334. """Returns true if the name looks like a URL"""
  335. if ':' not in name:
  336. return False
  337. scheme = name.split(':', 1)[0].lower()
  338. return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
  339. def url_to_path(url):
  340. """
  341. Convert a file: URL to a path.
  342. """
  343. assert url.startswith('file:'), (
  344. "You can only turn file: urls into filenames (not %r)" % url)
  345. _, netloc, path, _, _ = urllib_parse.urlsplit(url)
  346. # if we have a UNC path, prepend UNC share notation
  347. if netloc:
  348. netloc = '\\\\' + netloc
  349. path = urllib_request.url2pathname(netloc + path)
  350. return path
  351. def path_to_url(path):
  352. """
  353. Convert a path to a file: URL. The path will be made absolute and have
  354. quoted path parts.
  355. """
  356. path = os.path.normpath(os.path.abspath(path))
  357. url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
  358. return url
  359. def is_archive_file(name):
  360. """Return True if `name` is a considered as an archive file."""
  361. archives = (
  362. '.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.whl'
  363. )
  364. ext = splitext(name)[1].lower()
  365. if ext in archives:
  366. return True
  367. return False
  368. def unpack_vcs_link(link, location, only_download=False):
  369. vcs_backend = _get_used_vcs_backend(link)
  370. if only_download:
  371. vcs_backend.export(location)
  372. else:
  373. vcs_backend.unpack(location)
  374. def _get_used_vcs_backend(link):
  375. for backend in vcs.backends:
  376. if link.scheme in backend.schemes:
  377. vcs_backend = backend(link.url)
  378. return vcs_backend
  379. def is_vcs_url(link):
  380. return bool(_get_used_vcs_backend(link))
  381. def is_file_url(link):
  382. return link.url.lower().startswith('file:')
  383. def _check_hash(download_hash, link):
  384. if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
  385. logger.critical(
  386. "Hash digest size of the package %d (%s) doesn't match the "
  387. "expected hash name %s!",
  388. download_hash.digest_size, link, link.hash_name,
  389. )
  390. raise HashMismatch('Hash name mismatch for package %s' % link)
  391. if download_hash.hexdigest() != link.hash:
  392. logger.critical(
  393. "Hash of the package %s (%s) doesn't match the expected hash %s!",
  394. link, download_hash.hexdigest(), link.hash,
  395. )
  396. raise HashMismatch(
  397. 'Bad %s hash for package %s' % (link.hash_name, link)
  398. )
  399. def _get_hash_from_file(target_file, link):
  400. try:
  401. download_hash = hashlib.new(link.hash_name)
  402. except (ValueError, TypeError):
  403. logger.warning(
  404. "Unsupported hash name %s for package %s", link.hash_name, link,
  405. )
  406. return None
  407. with open(target_file, 'rb') as fp:
  408. while True:
  409. chunk = fp.read(4096)
  410. if not chunk:
  411. break
  412. download_hash.update(chunk)
  413. return download_hash
def _progress_indicator(iterable, *args, **kwargs):
    # Default progress callback: a pass-through that displays nothing.
    # _download_url swaps in DownloadProgressBar(...).iter or
    # DownloadProgressSpinner().iter when a progress display is wanted.
    return iterable
def _download_url(resp, link, content_file):
    """Stream the body of ``resp`` into ``content_file``.

    Shows a progress bar/spinner when appropriate, feeds each chunk into a
    running hash when ``link`` carries one, and verifies that hash at the
    end (raising HashMismatch on failure).  Returns the hash object, or
    None when the link has no usable hash.
    """
    download_hash = None
    if link.hash and link.hash_name:
        try:
            download_hash = hashlib.new(link.hash_name)
        except ValueError:
            logger.warning(
                "Unsupported hash name %s for package %s",
                link.hash_name, link,
            )

    try:
        total_length = int(resp.headers['content-length'])
    except (ValueError, KeyError, TypeError):
        # Missing or malformed Content-Length: treat size as unknown.
        total_length = 0

    cached_resp = getattr(resp, "from_cache", False)

    # Decide whether to render a progress display: never at log levels
    # above INFO or for cached responses; otherwise only for large or
    # unknown-size downloads.
    if logger.getEffectiveLevel() > logging.INFO:
        show_progress = False
    elif cached_resp:
        show_progress = False
    elif total_length > (40 * 1000):
        show_progress = True
    elif not total_length:
        show_progress = True
    else:
        show_progress = False

    show_url = link.show_url

    def resp_read(chunk_size):
        # Yield raw body chunks, preferring urllib3's stream() API.
        try:
            # Special case for urllib3.
            for chunk in resp.raw.stream(
                    chunk_size,
                    # We use decode_content=False here because we do
                    # want urllib3 to mess with the raw bytes we get
                    # from the server. If we decompress inside of
                    # urllib3 then we cannot verify the checksum
                    # because the checksum will be of the compressed
                    # file. This breakage will only occur if the
                    # server adds a Content-Encoding header, which
                    # depends on how the server was configured:
                    # - Some servers will notice that the file isn't a
                    #   compressible file and will leave the file alone
                    #   and with an empty Content-Encoding
                    # - Some servers will notice that the file is
                    #   already compressed and will leave the file
                    #   alone and will add a Content-Encoding: gzip
                    #   header
                    # - Some servers won't notice anything at all and
                    #   will take a file that's already been compressed
                    #   and compress it again and set the
                    #   Content-Encoding: gzip header
                    #
                    # By setting this not to decode automatically we
                    # hope to eliminate problems with the second case.
                    decode_content=False):
                yield chunk
        except AttributeError:
            # Standard file-like object.
            while True:
                chunk = resp.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk

    progress_indicator = _progress_indicator

    # Hide the hash fragment for PyPI links; show the full URL otherwise.
    if link.netloc == PyPI.netloc:
        url = show_url
    else:
        url = link.url_without_fragment

    if show_progress:  # We don't show progress on cached responses
        if total_length:
            logger.info(
                "Downloading %s (%s)", url, format_size(total_length),
            )
            progress_indicator = DownloadProgressBar(
                max=total_length,
            ).iter
        else:
            logger.info("Downloading %s", url)
            progress_indicator = DownloadProgressSpinner().iter
    elif cached_resp:
        logger.info("Using cached %s", url)
    else:
        logger.info("Downloading %s", url)

    logger.debug('Downloading from URL %s', link)

    for chunk in progress_indicator(resp_read(4096), 4096):
        if download_hash is not None:
            download_hash.update(chunk)
        content_file.write(chunk)
    if link.hash and link.hash_name:
        _check_hash(download_hash, link)
    return download_hash
def _copy_file(filename, location, content_type, link):
    """Copy ``filename`` into ``location`` under the name ``link.filename``.

    When the destination already exists, interactively ask whether to
    (i)gnore, (w)ipe, or (b)ackup the existing file before copying.
    ``content_type`` is accepted for interface parity with callers but is
    not used here.
    """
    copy = True
    download_location = os.path.join(location, link.filename)
    if os.path.exists(download_location):
        response = ask_path_exists(
            'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
            display_path(download_location), ('i', 'w', 'b'))
        if response == 'i':
            copy = False
        elif response == 'w':
            logger.warning('Deleting %s', display_path(download_location))
            os.remove(download_location)
        elif response == 'b':
            dest_file = backup_dir(download_location)
            logger.warning(
                'Backing up %s to %s',
                display_path(download_location),
                display_path(dest_file),
            )
            shutil.move(download_location, dest_file)
    if copy:
        shutil.copy(filename, download_location)
        logger.info('Saved %s', display_path(download_location))
  529. def unpack_http_url(link, location, download_dir=None, session=None):
  530. if session is None:
  531. raise TypeError(
  532. "unpack_http_url() missing 1 required keyword argument: 'session'"
  533. )
  534. temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
  535. # If a download dir is specified, is the file already downloaded there?
  536. already_downloaded_path = None
  537. if download_dir:
  538. already_downloaded_path = _check_download_dir(link, download_dir)
  539. if already_downloaded_path:
  540. from_path = already_downloaded_path
  541. content_type = mimetypes.guess_type(from_path)[0]
  542. else:
  543. # let's download to a tmp dir
  544. from_path, content_type = _download_http_url(link, session, temp_dir)
  545. # unpack the archive to the build dir location. even when only downloading
  546. # archives, they have to be unpacked to parse dependencies
  547. unpack_file(from_path, location, content_type, link)
  548. # a download dir is specified; let's copy the archive there
  549. if download_dir and not already_downloaded_path:
  550. _copy_file(from_path, download_dir, content_type, link)
  551. if not already_downloaded_path:
  552. os.unlink(from_path)
  553. rmtree(temp_dir)
  554. def unpack_file_url(link, location, download_dir=None):
  555. """Unpack link into location.
  556. If download_dir is provided and link points to a file, make a copy
  557. of the link file inside download_dir."""
  558. link_path = url_to_path(link.url_without_fragment)
  559. # If it's a url to a local directory
  560. if os.path.isdir(link_path):
  561. if os.path.isdir(location):
  562. rmtree(location)
  563. shutil.copytree(link_path, location, symlinks=True)
  564. if download_dir:
  565. logger.info('Link is a directory, ignoring download_dir')
  566. return
  567. # if link has a hash, let's confirm it matches
  568. if link.hash:
  569. link_path_hash = _get_hash_from_file(link_path, link)
  570. _check_hash(link_path_hash, link)
  571. # If a download dir is specified, is the file already there and valid?
  572. already_downloaded_path = None
  573. if download_dir:
  574. already_downloaded_path = _check_download_dir(link, download_dir)
  575. if already_downloaded_path:
  576. from_path = already_downloaded_path
  577. else:
  578. from_path = link_path
  579. content_type = mimetypes.guess_type(from_path)[0]
  580. # unpack the archive to the build dir location. even when only downloading
  581. # archives, they have to be unpacked to parse dependencies
  582. unpack_file(from_path, location, content_type, link)
  583. # a download dir is specified and not already downloaded
  584. if download_dir and not already_downloaded_path:
  585. _copy_file(from_path, download_dir, content_type, link)
  586. def _copy_dist_from_dir(link_path, location):
  587. """Copy distribution files in `link_path` to `location`.
  588. Invoked when user requests to install a local directory. E.g.:
  589. pip install .
  590. pip install ~/dev/git-repos/python-prompt-toolkit
  591. """
  592. # Note: This is currently VERY SLOW if you have a lot of data in the
  593. # directory, because it copies everything with `shutil.copytree`.
  594. # What it should really do is build an sdist and install that.
  595. # See https://github.com/pypa/pip/issues/2195
  596. if os.path.isdir(location):
  597. rmtree(location)
  598. # build an sdist
  599. setup_py = 'setup.py'
  600. sdist_args = [sys.executable]
  601. sdist_args.append('-c')
  602. sdist_args.append(
  603. "import setuptools, tokenize;__file__=%r;"
  604. "exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
  605. ".replace('\\r\\n', '\\n'), __file__, 'exec'))" % setup_py)
  606. sdist_args.append('sdist')
  607. sdist_args += ['--dist-dir', location]
  608. logger.info('Running setup.py sdist for %s', link_path)
  609. with indent_log():
  610. call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
  611. # unpack sdist into `location`
  612. sdist = os.path.join(location, os.listdir(location)[0])
  613. logger.info('Unpacking sdist %s into %s', sdist, location)
  614. unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
    object.
    """

    def __init__(self, index_url, session, use_datetime=False):
        xmlrpc_client.Transport.__init__(self, use_datetime)
        index_parts = urllib_parse.urlparse(index_url)
        # Only the scheme of the index URL is kept; the host is supplied
        # per-call to request().
        self._scheme = index_parts.scheme
        self._session = session

    def request(self, host, handler, request_body, verbose=False):
        parts = (self._scheme, host, handler, None, None, None)
        url = urllib_parse.urlunparse(parts)
        try:
            headers = {'Content-Type': 'text/xml'}
            response = self._session.post(url, data=request_body,
                                          headers=headers, stream=True)
            response.raise_for_status()
            self.verbose = verbose
            # Feed the streamed raw body to xmlrpclib's response parser.
            return self.parse_response(response.raw)
        except requests.HTTPError as exc:
            logger.critical(
                "HTTP error %s while getting %s",
                exc.response.status_code, url,
            )
            raise
  640. def unpack_url(link, location, download_dir=None,
  641. only_download=False, session=None):
  642. """Unpack link.
  643. If link is a VCS link:
  644. if only_download, export into download_dir and ignore location
  645. else unpack into location
  646. for other types of link:
  647. - unpack into location
  648. - if download_dir, copy the file into download_dir
  649. - if only_download, mark location for deletion
  650. """
  651. # non-editable vcs urls
  652. if is_vcs_url(link):
  653. unpack_vcs_link(link, location, only_download)
  654. # file urls
  655. elif is_file_url(link):
  656. unpack_file_url(link, location, download_dir)
  657. if only_download:
  658. write_delete_marker_file(location)
  659. # http urls
  660. else:
  661. if session is None:
  662. session = PipSession()
  663. unpack_http_url(
  664. link,
  665. location,
  666. download_dir,
  667. session,
  668. )
  669. if only_download:
  670. write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir):
    """Download link url into temp_dir using provided session.

    Returns ``(file_path, content_type)`` for the downloaded file.  The
    filename is taken from the link, then refined from the
    Content-Disposition header, a Content-Type-derived extension, or the
    final (post-redirect) response URL's extension.
    """
    target_url = link.url.split('#', 1)[0]
    try:
        resp = session.get(
            target_url,
            # We use Accept-Encoding: identity here because requests
            # defaults to accepting compressed responses. This breaks in
            # a variety of ways depending on how the server is configured.
            # - Some servers will notice that the file isn't a compressible
            #   file and will leave the file alone and with an empty
            #   Content-Encoding
            # - Some servers will notice that the file is already
            #   compressed and will leave the file alone and will add a
            #   Content-Encoding: gzip header
            # - Some servers won't notice anything at all and will take
            #   a file that's already been compressed and compress it again
            #   and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding We're
            # hoping to eliminate the third case. Hopefully there does not
            # exist a server which when given a file will notice it is
            # already compressed and that you're not asking for a
            # compressed file and will then decompress it before sending
            # because if that's the case I don't think it'll ever be
            # possible to make this work.
            headers={"Accept-Encoding": "identity"},
            stream=True,
        )
        resp.raise_for_status()
    except requests.HTTPError as exc:
        logger.critical(
            "HTTP error %s while getting %s", exc.response.status_code, link,
        )
        raise

    content_type = resp.headers.get('content-type', '')
    filename = link.filename  # fallback
    # Have a look at the Content-Disposition header for a better guess
    content_disposition = resp.headers.get('content-disposition')
    if content_disposition:
        type, params = cgi.parse_header(content_disposition)
        # We use ``or`` here because we don't want to use an "empty" value
        # from the filename param.
        filename = params.get('filename') or filename
    ext = splitext(filename)[1]
    if not ext:
        # No extension yet: try deriving one from the Content-Type.
        ext = mimetypes.guess_extension(content_type)
        if ext:
            filename += ext
    if not ext and link.url != resp.url:
        # Still no extension and we were redirected: try the final URL.
        ext = os.path.splitext(resp.url)[1]
        if ext:
            filename += ext
    file_path = os.path.join(temp_dir, filename)
    with open(file_path, 'wb') as content_file:
        _download_url(resp, link, content_file)
    return file_path, content_type
  727. def _check_download_dir(link, download_dir):
  728. """ Check download_dir for previously downloaded file with correct hash
  729. If a correct file is found return its path else None
  730. """
  731. download_path = os.path.join(download_dir, link.filename)
  732. if os.path.exists(download_path):
  733. # If already downloaded, does its hash match?
  734. logger.info('File was already downloaded %s', download_path)
  735. if link.hash:
  736. download_hash = _get_hash_from_file(download_path, link)
  737. try:
  738. _check_hash(download_hash, link)
  739. except HashMismatch:
  740. logger.warning(
  741. 'Previously-downloaded file %s has bad hash, '
  742. 're-downloading.',
  743. download_path
  744. )
  745. os.unlink(download_path)
  746. return None
  747. return download_path
  748. return None