at 18.09-beta 1.6 kB view raw
{ stdenv, buildPythonPackage, fetchPypi, glibcLocales, mock, pytest, botocore,
  testfixtures, pillow, six, twisted, w3lib, lxml, queuelib, pyopenssl,
  service-identity, parsel, pydispatcher, cssselect, lib }:

buildPythonPackage rec {
  version = "1.5.1";
  pname = "Scrapy";

  checkInputs = [ glibcLocales mock pytest botocore testfixtures pillow ];
  propagatedBuildInputs = [
    six twisted w3lib lxml cssselect queuelib pyopenssl service-identity parsel pydispatcher
  ];

  # Scrapy is usually installed via pip where copying all
  # permissions makes sense. In Nix the files copied are owned by
  # root and readonly. As a consequence scrapy can't edit the
  # project templates.
  patches = [ ./permissions-fix.patch ];

  # The test suite needs a UTF-8 locale (glibcLocales provides it).
  LC_ALL = "en_US.UTF-8";

  checkPhase = ''
    py.test --ignore=tests/test_linkextractors_deprecated.py --ignore=tests/test_proxy_connect.py ${lib.optionalString stdenv.isDarwin "--ignore=tests/test_utils_iterators.py"}
    # The ignored tests require mitmproxy, which depends on protobuf, but it's disabled on Python3
    # Ignore iteration test, because lxml can't find encodings on darwin https://bugs.launchpad.net/lxml/+bug/707396
  '';

  src = fetchPypi {
    inherit pname version;
    sha256 = "5a398bf6818f87dcc817c919408a195f19ba46414ae12f259119336cfa862bb6";
  };

  meta = with lib; {
    description = "A fast high-level web crawling and web scraping framework, used to crawl websites and extract structured data from their pages";
    # NOTE: quoted string instead of the deprecated bare URL literal (Nix RFC 45);
    # the string value is identical.
    homepage = "https://scrapy.org/";
    license = licenses.bsd3;
    maintainers = with maintainers; [ drewkett ];
    platforms = platforms.unix;
  };
}