+# Config file for automatic testing at travis-ci.org
+
+language: python
+python: 2.7
+
+# command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
+install:
+  - pip install Scrapy docopt
+
+# command to run tests, e.g. python setup.py test
+script:
+  - nosetests tests
+
+notifications:
+  slack: descartes2:6sgCzx3PvrO9IIMwKxj12dDM
+1-3
FourmiCrawler/items.py
-# Define here the models for your scraped items
-#
-# See documentation in:
+# For more information on item definitions, see the Scrapy documentation in:
 # http://doc.scrapy.org/en/latest/topics/items.html

 from scrapy.item import Item, Field
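
Note: the Result item definition itself sits outside this hunk. Judging by the fields the sources later in this diff populate, it presumably looks roughly like the sketch below (inferred for context, not the actual file contents).

    from scrapy.item import Item, Field


    class Result(Item):
        # Sketch inferred from the fields the sources fill in; verify against items.py.
        attribute = Field()
        value = Field()
        source = Field()
        reliability = Field()
        conditions = Field()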
+13-12
FourmiCrawler/pipelines.py
-# Define your item pipelines here
-#
-# Don't forget to add your pipeline to the ITEM_PIPELINES setting
-# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
+# For more information on item pipelines, see the Scrapy documentation in:
+# http://doc.scrapy.org/en/latest/topics/item-pipeline.html
 import re
+
 from scrapy.exceptions import DropItem

-class RemoveNonePipeline(object):
+
+class RemoveNonePipeline(object):
     def __init__(self):
-        self.known_values = set()
+        pass

-    def process_item(self, item, spider):
+    @staticmethod
+    def process_item(item, spider):
         """
         Processing the items so None values are replaced by empty strings
         :param item: The incoming item
···
                 item[key] = ""
         return item

-class DuplicatePipeline(object):
+
+class DuplicatePipeline(object):
     def __init__(self):
         self.known_values = set()

···
         """
         value = (item['attribute'], item['value'], item['conditions'])
         if value in self.known_values:
-            raise DropItem("Duplicate item found: %s" % item)  # #[todo] append sources of first item.
+            raise DropItem("Duplicate item found: %s" % item)  # [todo] append sources of first item.
         else:
             self.known_values.add(value)
         return item

-class AttributeSelectionPipeline(object):
+
+class AttributeSelectionPipeline(object):
     def __init__(self):
-        pass;
+        pass

-    def process_item(self, item, spider):
+    @staticmethod
+    def process_item(item, spider):
         """
         The items are processed using the selected attribute list available in the spider;
         items that don't match the selected attributes are dropped.
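
Note: the removed header comment used to point at the ITEM_PIPELINES setting. For reference, a minimal sketch of how these three pipelines could be enabled in FourmiCrawler/settings.py; the priority numbers (and ordering) are assumptions, not taken from this diff, and older Scrapy versions used a plain list instead of a dict.

    # Hypothetical settings.py entry; priorities are assumptions.
    ITEM_PIPELINES = {
        'FourmiCrawler.pipelines.RemoveNonePipeline': 100,
        'FourmiCrawler.pipelines.AttributeSelectionPipeline': 200,
        'FourmiCrawler.pipelines.DuplicatePipeline': 300,
    }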
+1-1
FourmiCrawler/settings.py
···
 # For simplicity, this file contains only the most important settings by
 # default. All the other settings are documented here:
 #
-#     http://doc.scrapy.org/en/latest/topics/settings.html
+# http://doc.scrapy.org/en/latest/topics/settings.html
 #

 BOT_NAME = 'FourmiCrawler'
+6-5
FourmiCrawler/sources/ChemSpider.py
-from source import Source
+import re
+
 from scrapy import log
 from scrapy.http import Request
 from scrapy.selector import Selector
+
+from source import Source
 from FourmiCrawler.items import Result
-import re
+

 # [TODO] - Maybe clean up usage of '.extract()[0]', because of possible IndexError exception.

···
             prop_conditions = ''

             # Test for properties without values, with one hardcoded exception
-            if (not re.match(r'^\d', prop_value) or
-                    (prop_name == 'Polarizability' and
-                     prop_value == '10-24cm3')):
+            if not re.match(r'^\d', prop_value) or (prop_name == 'Polarizability' and prop_value == '10-24cm3'):
                 continue

             # Match for condition in parentheses
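
Note: the collapsed condition keeps the behaviour of the removed multi-line version: a property row is skipped when its value does not start with a digit, or when it is the hardcoded Polarizability special case. A tiny illustration (the example strings are made up):

    import re

    # Values that start with a digit pass the check and the row is kept.
    print bool(re.match(r'^\d', '16.04 g/mol'))     # True  -> row kept
    print bool(re.match(r'^\d', 'colourless gas'))  # False -> row skipped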
+276
FourmiCrawler/sources/NIST.py
+import re
+
+from scrapy import log
+from scrapy.http import Request
+from scrapy.selector import Selector
+
+from source import Source
+from FourmiCrawler.items import Result
+
+
+# [TODO]: values can be '128.', perhaps remove the dot in that case?
+# [TODO]: properties have references and comments which do not exist in the
+# Result item, but should be included eventually.
+
+class NIST(Source):
+    """NIST Scraper plugin
+
+    This plugin manages searching for a chemical on the NIST website
+    and parsing the resulting page if the chemical exists on NIST.
+    """
+    website = "http://webbook.nist.gov/*"
+
+    search = 'cgi/cbook.cgi?Name=%s&Units=SI&cTP=on'
+
+    ignore_list = set()
+
+    def __init__(self):
+        Source.__init__(self)
+
+    def parse(self, response):
+        sel = Selector(response)
+
+        title = sel.xpath('head/title/text()').extract()[0]
+        if title == 'Name Not Found':
+            log.msg('NIST: Chemical not found!', level=log.ERROR)
+            return
+        if title not in self.ignore_list:
+            self.ignore_list.update(title)
+            log.msg('NIST emit synonym: %s' % title, level=log.DEBUG)
+            self._spider.get_synonym_requests(title)
+
+        requests = []
+
+        requests.extend(self.parse_generic_info(sel))
+
+        symbol_table = {}
+        tds = sel.xpath('//table[@class="symbol_table"]/tr/td')
+        for (symbol_td, name_td) in zip(tds[::2], tds[1::2]):
+            symbol = ''.join(symbol_td.xpath('node()').extract())
+            name = name_td.xpath('text()').extract()[0]
+            symbol_table[symbol] = name
+            log.msg('NIST symbol: |%s|, name: |%s|' % (symbol, name),
+                    level=log.DEBUG)
+
+        for table in sel.xpath('//table[@class="data"]'):
+            summary = table.xpath('@summary').extract()[0]
+            if summary == 'One dimensional data':
+                log.msg('NIST table: Aggregate data', level=log.DEBUG)
+                requests.extend(
+                    self.parse_aggregate_data(table, symbol_table))
+            elif table.xpath('tr/th="Initial Phase"').extract()[0] == '1':
+                log.msg('NIST table: Enthalpy/entropy of phase transition',
+                        level=log.DEBUG)
+                requests.extend(self.parse_transition_data(table, summary))
+            elif table.xpath('tr[1]/td'):
+                log.msg('NIST table: Horizontal table', level=log.DEBUG)
+            elif summary == 'Antoine Equation Parameters':
+                log.msg('NIST table: Antoine Equation Parameters',
+                        level=log.DEBUG)
+                requests.extend(self.parse_antoine_data(table, summary))
+            elif len(table.xpath('tr[1]/th')) == 5:
+                log.msg('NIST table: generic 5 columns', level=log.DEBUG)
+                # Symbol (unit) Temperature (K) Method Reference Comment
+                requests.extend(self.parse_generic_data(table, summary))
+            elif len(table.xpath('tr[1]/th')) == 4:
+                log.msg('NIST table: generic 4 columns', level=log.DEBUG)
+                # Symbol (unit) Temperature (K) Reference Comment
+                requests.extend(self.parse_generic_data(table, summary))
+            else:
+                log.msg('NIST table: NOT SUPPORTED', level=log.WARNING)
+                continue  # Assume unsupported
+        return requests
+
+    def parse_generic_info(self, sel):
+        """Parses: synonyms, chemical formula, molecular weight, InChI,
+        InChIKey, CAS number
+        """
+        ul = sel.xpath('body/ul[li/strong="IUPAC Standard InChI:"]')
+        li = ul.xpath('li')
+
+        raw_synonyms = ul.xpath('li[strong="Other names:"]/text()').extract()
+        for synonym in raw_synonyms[0].strip().split(';\n'):
+            log.msg('NIST synonym: %s' % synonym, level=log.DEBUG)
+            self.ignore_list.update(synonym)
+            self._spider.get_synonym_requests(synonym)
+
+        data = {}
+
+        raw_formula = ul.xpath('li[strong/a="Formula"]//text()').extract()
+        data['Chemical formula'] = ''.join(raw_formula[2:]).strip()
+
+        raw_mol_weight = ul.xpath('li[strong/a="Molecular weight"]/text()')
+        data['Molecular weight'] = raw_mol_weight.extract()[0].strip()
+
+        raw_inchi = ul.xpath('li[strong="IUPAC Standard InChI:"]//tt/text()')
+        data['IUPAC Standard InChI'] = raw_inchi.extract()[0]
+
+        raw_inchikey = ul.xpath('li[strong="IUPAC Standard InChIKey:"]'
+                                '/tt/text()')
+        data['IUPAC Standard InChIKey'] = raw_inchikey.extract()[0]
+
+        raw_cas_number = ul.xpath('li[strong="CAS Registry Number:"]/text()')
+        data['CAS Registry Number'] = raw_cas_number.extract()[0].strip()
+
+        requests = []
+        for key, value in data.iteritems():
+            result = Result({
+                'attribute': key,
+                'value': value,
+                'source': 'NIST',
+                'reliability': 'Unknown',
+                'conditions': ''
+            })
+            requests.append(result)
+
+        return requests
+
+    def parse_aggregate_data(self, table, symbol_table):
+        """Parses the table(s) which contain possible links to individual
+        data points
+        """
+        results = []
+        for tr in table.xpath('tr[td]'):
+            extra_data_url = tr.xpath('td[last()][a="Individual data points"]'
+                                      '/a/@href').extract()
+            if extra_data_url:
+                request = Request(url=self.website[:-1] + extra_data_url[0],
+                                  callback=self.parse_individual_datapoints)
+                results.append(request)
+                continue
+            data = []
+            for td in tr.xpath('td'):
+                data.append(''.join(td.xpath('node()').extract()))
+
+            name = symbol_table[data[0]]
+            condition = ''
+
+            m = re.match(r'(.*) at (.*)', name)
+            if m:
+                name = m.group(1)
+                condition = m.group(2)
+
+            result = Result({
+                'attribute': name,
+                'value': data[1] + ' ' + data[2],
+                'source': 'NIST',
+                'reliability': 'Unknown',
+                'conditions': condition
+            })
+            log.msg('NIST: |%s|' % data, level=log.DEBUG)
+            results.append(result)
+        return results
+
+    @staticmethod
+    def parse_transition_data(table, summary):
+        """Parses the table containing properties regarding phase changes"""
+        results = []
+
+        tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
+        m = re.search(r'\((.*)\)', tr_unit)
+        unit = '!'
+        if m:
+            unit = m.group(1)
+
+        for tr in table.xpath('tr[td]'):
+            tds = tr.xpath('td/text()').extract()
+            result = Result({
+                'attribute': summary,
+                'value': tds[0] + ' ' + unit,
+                'source': 'NIST',
+                'reliability': 'Unknown',
+                'conditions': '%s K, (%s -> %s)' % (tds[1], tds[2], tds[3])
+            })
+            results.append(result)
+
+        return results
+
+    @staticmethod
+    def parse_generic_data(table, summary):
+        """Parses the common tables of 4 and 5 columns. Assumes they are of
+        the form:
+        Symbol (unit)|Temperature (K)|Method|Reference|Comment
+        Symbol (unit)|Temperature (K)|Reference|Comment
+        """
+        results = []
+
+        tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
+        m = re.search(r'\((.*)\)', tr_unit)
+        unit = '!'
+        if m:
+            unit = m.group(1)
+
+        for tr in table.xpath('tr[td]'):
+            tds = tr.xpath('td/text()').extract()
+            result = Result({
+                'attribute': summary,
+                'value': tds[0] + ' ' + unit,
+                'source': 'NIST',
+                'reliability': 'Unknown',
+                'conditions': '%s K' % tds[1]
+            })
+            results.append(result)
+        return results
+
+    @staticmethod
+    def parse_antoine_data(table, summary):
+        """Parses the table containing parameters for the Antoine equation"""
+        results = []
+
+        for tr in table.xpath('tr[td]'):
+            tds = tr.xpath('td/text()').extract()
+            result = Result({
+                'attribute': summary,
+                'value': 'A=%s, B=%s, C=%s' % (tds[1], tds[2], tds[3]),
+                'source': 'NIST',
+                'reliability': 'Unknown',
+                'conditions': '%s K' % tds[0]
+            })
+            results.append(result)
+
+        return results
+
+    @staticmethod
+    def parse_individual_datapoints(response):
+        """Parses the page linked from aggregate data"""
+        sel = Selector(response)
+        table = sel.xpath('//table[@class="data"]')[0]
+
+        results = []
+
+        name = table.xpath('@summary').extract()[0]
+        condition = ''
+        m = re.match(r'(.*) at (.*)', name)
+        if m:
+            name = m.group(1)
+            condition = m.group(2)
+
+        tr_unit = ''.join(table.xpath('tr[1]/th[1]/node()').extract())
+        m = re.search(r'\((.*)\)', tr_unit)
+        unit = '!'
+        if m:
+            unit = m.group(1)
+
+        for tr in table.xpath('tr[td]'):
+            tds = tr.xpath('td/text()').extract()
+            uncertainty = ''
+            m = re.search('Uncertainty assigned by TRC = (.*?) ', tds[-1])
+            if m:
+                uncertainty = '+- %s ' % m.group(1)
+            # [TODO]: get the plusminus sign working in here
+            result = Result({
+                'attribute': name,
+                'value': '%s %s%s' % (tds[0], uncertainty, unit),
+                'source': 'NIST',
+                'reliability': 'Unknown',
+                'conditions': condition
+            })
+            results.append(result)
+
+        return results
+
+    def new_compound_request(self, compound):
+        if compound not in self.ignore_list:
+            self.ignore_list.update(compound)
+            return Request(url=self.website[:-1] + self.search % compound,
+                           callback=self.parse)
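
Note on the [TODO] about the plus-minus sign in parse_individual_datapoints: one possible fix (not part of this diff) is a unicode escape. A minimal sketch, assuming Python 2 and that the rest of the pipeline and the feed exporters handle unicode values:

    # -*- coding: utf-8 -*-
    import re

    # Hypothetical helper illustrating the idea; u'\u00b1' is the plus-minus sign.
    def format_uncertainty(comment):
        m = re.search('Uncertainty assigned by TRC = (.*?) ', comment)
        if m:
            return u'\u00b1 %s ' % m.group(1)
        return ''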
+5-3
FourmiCrawler/sources/WikipediaParser.py
+import re
+
 from scrapy.http import Request
 from scrapy import log
-from source import Source
 from scrapy.selector import Selector
+
+from source import Source
 from FourmiCrawler.items import Result
-import re


 class WikipediaParser(Source):
···
         """ scrape data from infobox on wikipedia. """
         items = []

-        #be sure to get chembox (wikipedia template)
+        # be sure to get chembox (wikipedia template)
         tr_list = sel.xpath('.//table[@class="infobox bordered"]//td[not(@colspan)]'). \
             xpath('normalize-space(string())')
         prop_names = tr_list[::2]
+19-2
FourmiCrawler/sources/source.py
···
     _spider = None

     def __init__(self):
+        """
+        Initiation of a new Source
+        """
         pass

-    def parse(self, reponse):
-        log.msg("The parse function of the empty parser was used.", level=log.WARNING)
+    def parse(self, response):
+        """
+        This function should be able to parse all Scrapy Response objects with a URL matching the website Regex.
+        :param response: A Scrapy Response object
+        :return: A list of Result items and new Scrapy Requests
+        """
+        log.msg("The parse function of the empty source was used.", level=log.WARNING)
         pass

     def new_compound_request(self, compound):
+        """
+        This function should return a Scrapy Request for the given compound name.
+        :param compound: A compound name.
+        :return: A new Scrapy Request
+        """
         # return Request(url=self.website[:-1] + compound, callback=self.parse)
         pass

     def set_spider(self, spider):
+        """
+        A function to save the associated spider.
+        :param spider: A FourmiSpider object
+        """
         self._spider = spider
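
Note: to make the interface above concrete, a minimal sketch of a Source subclass; the class name, URL and attribute value are invented for illustration and are not part of this diff.

    from scrapy.http import Request
    from scrapy.selector import Selector

    from source import Source
    from FourmiCrawler.items import Result


    class ExampleSource(Source):  # hypothetical source plugin
        website = "http://example.com/*"

        def parse(self, response):
            sel = Selector(response)
            title = sel.xpath('//title/text()').extract()[0]
            # Return Result items (and/or new Requests) for the scraped page.
            return [Result({'attribute': 'page title', 'value': title, 'source': 'Example',
                            'reliability': 'Unknown', 'conditions': ''})]

        def new_compound_request(self, compound):
            # The trailing '*' of the website pattern is stripped to build the real URL.
            return Request(url=self.website[:-1] + compound, callback=self.parse)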
+54-20
FourmiCrawler/spider.py
+import re
+
 from scrapy.spider import Spider
 from scrapy import log
-import re


 class FourmiSpider(Spider):
+    """
+    A spider written for the Fourmi Project which calls upon all available sources to request and scrape data.
+    """
     name = "FourmiSpider"
-    __parsers = []
-    synonyms = []
+    _sources = []
+    synonyms = set()

     def __init__(self, compound=None, selected_attributes=[".*"], *args, **kwargs):
+        """
+        Initiation of the Spider
+        :param compound: The compound that will be searched.
+        :param selected_attributes: A list of regular expressions that the attributes should match.
+        """
         super(FourmiSpider, self).__init__(*args, **kwargs)
-        self.synonyms.append(compound)
-        self.selected_attributes = selected_attributes;
+        self.synonyms.add(compound)
+        self.selected_attributes = selected_attributes

-    def parse(self, reponse):
-        for parser in self.__parsers:
-            if re.match(parser.website, reponse.url):
-                log.msg("Url: " + reponse.url + " -> Source: " + parser.website, level=log.DEBUG)
-                return parser.parse(reponse)
+    def parse(self, response):
+        """
+        The function that is called when a response to a request is available. This function distributes this to a
+        source which should be able to handle parsing the data.
+        :param response: A Scrapy Response object that should be parsed
+        :return: A list of Result items and new Requests to be handled by the Scrapy core.
+        """
+        for source in self._sources:
+            if re.match(source.website, response.url):
+                log.msg("Url: " + response.url + " -> Source: " + source.website, level=log.DEBUG)
+                return source.parse(response)
         return None

     def get_synonym_requests(self, compound):
+        """
+        A function that generates new Scrapy Requests for each source given a new synonym of a compound.
+        :param compound: A compound name
+        :return: A list of Scrapy Request objects
+        """
         requests = []
-        for parser in self.__parsers:
-            parser_requests = parser.new_compound_request(compound)
-            if parser_requests is not None:
-                requests.append(parser_requests)
+        if compound not in self.synonyms:
+            self.synonyms.add(compound)
+            for parser in self._sources:
+                parser_requests = parser.new_compound_request(compound)
+                if parser_requests is not None:
+                    requests.append(parser_requests)
         return requests

     def start_requests(self):
+        """
+        The function called by Scrapy for its first Requests.
+        :return: A list of Scrapy Requests generated from the known synonyms using the available sources.
+        """
         requests = []
         for synonym in self.synonyms:
             requests.extend(self.get_synonym_requests(synonym))
         return requests

-    def add_parsers(self, parsers):
-        for parser in parsers:
-            self.add_parser(parser)
+    def add_sources(self, sources):
+        """
+        A function to add new Source objects to the list of available sources.
+        :param sources: A list of Source objects.
+        """
+        for parser in sources:
+            self.add_source(parser)

-    def add_parser(self, parser):
-        self.__parsers.append(parser)
-        parser.set_spider(self)
+    def add_source(self, source):
+        """
+        A function to add a new Source object to the list of available sources.
+        :param source: A Source object
+        """
+        self._sources.append(source)
+        source.set_spider(self)
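
Note: a short sketch of how the renamed API fits together; it mirrors what setup_crawler in fourmi.py does below. The compound and synonym names are arbitrary examples, and in practice the sources come from the SourceLoader rather than a direct import.

    from FourmiCrawler.spider import FourmiSpider
    from FourmiCrawler.sources.NIST import NIST

    spider = FourmiSpider(compound="methane", selected_attributes=[".*"])
    spider.add_source(NIST())  # registers the source and calls source.set_spider(spider)

    # A newly discovered synonym yields one Request per registered source:
    new_requests = spider.get_synonym_requests("CH4")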
+4
README.md
 # Fourmi

+**Master branch**: [](https://travis-ci.org/Recondor/Fourmi)
+
+**Developing branch**: [](https://travis-ci.org/Recondor/Fourmi)
+
 Fourmi is a web scraper for chemical substances. The program is designed to be
 used as a search engine to search multiple chemical databases for a specific
 substance. The program will produce all available attributes of the substance
+28-6
fourmi.py
-#!/usr/bin/env python
+# !/usr/bin/env python
 """
 Fourmi, a web scraper built to search specific information for a given compound (and its pseudonyms).

···
 from sourceloader import SourceLoader


-def setup_crawler(searchable, settings, source_loader, attributes):
-    spider = FourmiSpider(compound=searchable, selected_attributes=attributes)
-    spider.add_parsers(source_loader.sources)
+def setup_crawler(compound, settings, source_loader, attributes):
+    """
+    This function prepares and starts the crawler, which starts the actual search on the internet.
+    :param compound: The compound which should be searched
+    :param settings: A Scrapy settings object
+    :param source_loader: A fully functional SourceLoader object which contains only the sources that should be used.
+    :param attributes: A list of regular expressions which the attribute names should match.
+    """
+    spider = FourmiSpider(compound=compound, selected_attributes=attributes)
+    spider.add_sources(source_loader.sources)
     crawler = Crawler(settings)
     crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
     crawler.configure()
···


 def scrapy_settings_manipulation(docopt_arguments):
+    """
+    This function manipulates the Scrapy settings that normally would be set in the settings file. In the Fourmi
+    project these are command line arguments.
+    :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
+    """
     settings = get_project_settings()
-    # [todo] - add at least a warning for files that already exist
+
     if docopt_arguments["--output"] != 'result.*format*':
         settings.overrides["FEED_URI"] = docopt_arguments["--output"]
     elif docopt_arguments["--format"] == "jsonlines":
···


 def start_log(docopt_arguments):
+    """
+    This function starts the logging functionality of Scrapy using the settings given by the CLI.
+    :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
+    """
     if docopt_arguments["--log"] is not None:
         if docopt_arguments["--verbose"]:
             log.start(logfile=docopt_arguments["--log"], logstdout=False, loglevel=log.DEBUG)
···


 def search(docopt_arguments, source_loader):
+    """
+    The function that facilitates the search for a specific compound.
+    :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
+    :param source_loader: An initiated SourceLoader object pointed at the directory with the sources.
+    """
     start_log(docopt_arguments)
     settings = scrapy_settings_manipulation(docopt_arguments)
     setup_crawler(docopt_arguments["<compound>"], settings, source_loader, docopt_arguments["--attributes"].split(','))
     reactor.run()


+# The start of the Fourmi Command Line interface.
 if __name__ == '__main__':
-    arguments = docopt.docopt(__doc__, version='Fourmi - V0.3.1')
+    arguments = docopt.docopt(__doc__, version='Fourmi - V0.4.1')
     loader = SourceLoader()

     if arguments["--include"]:
+18
setup.py
+import sys
+from cx_Freeze import setup, Executable
+
+# After running the setup file (python setup.py build), the scrapy/VERSION file has to be put into library.zip
+# manually, and the FourmiCrawler folder has to be copied into both the library and the exe.win32-2.7 folder.
+# After the files have been added, the library has to be re-zipped to replace the old library.zip.
+# Dependencies are automatically detected, but it might need fine tuning.
+build_exe_options = {"packages": ["os", "scrapy", "lxml", "w3lib", "pkg_resources", "zope.interface",
+                                  "twisted.internet"],
+                     "excludes": []}
+
+# GUI applications require a different base on Windows (the default is for a
+# console application).
+base = None
+
+setup(name="Scrapy",
+      version="0.1",
+      description="My GUI application!",
+      options={"build_exe": build_exe_options},
+      executables=[Executable("fourmi.py", base=base)])
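
Note: the manual step described in the comment above (copying scrapy/VERSION into library.zip) could be scripted. A hypothetical post-build helper, not part of this diff, assuming the default cx_Freeze build folder for 32-bit Python 2.7; the paths may need adjusting per machine.

    import os
    import zipfile

    import scrapy

    # Appends scrapy/VERSION to the frozen library.zip after "python setup.py build".
    build_dir = os.path.join("build", "exe.win32-2.7")
    version_file = os.path.join(os.path.dirname(scrapy.__file__), "VERSION")

    with zipfile.ZipFile(os.path.join(build_dir, "library.zip"), "a") as library:
        library.write(version_file, "scrapy/VERSION")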
+23-4
sourceloader.py
 import inspect
+import sys
 import os
 import re
+
 from FourmiCrawler.sources.source import Source


···
     sources = []

     def __init__(self, rel_dir="FourmiCrawler/sources"):
-        path = os.path.dirname(os.path.abspath(__file__))
+
+        if hasattr(sys, 'frozen'):
+            path = os.path.dirname(sys.executable)
+        else:
+            path = os.path.dirname(os.path.abspath(__file__))
+
         path += "/" + rel_dir
         known_parser = set()

         for py in [f[:-3] for f in os.listdir(path) if f.endswith('.py') and f != '__init__.py']:
-            mod = __import__('.'.join([rel_dir.replace("/", "."), py]), fromlist=[py])
+            mod = __import__('.'.join([rel_dir.replace('/', "."), py]), fromlist=[py])
             classes = [getattr(mod, x) for x in dir(mod) if inspect.isclass(getattr(mod, x))]
             for cls in classes:
                 if issubclass(cls, Source) and cls not in known_parser:
-                    self.sources.append(cls())  # [review] - Would we ever need arguments for the parsers?
-                    known_parser.add(cls)
+                    self.sources.append(cls())  # [review] - Would we ever need arguments for the parsers?
+                    # known_parser.add(cls)

     def include(self, source_names):
+        """
+        This function excludes all sources whose class names don't match any of the given regular expressions.
+        :param source_names: A list of regular expressions (strings)
+        """
         new = set()
         for name in source_names:
             new.update([src for src in self.sources if re.match(name, src.__class__.__name__)])
         self.sources = list(new)

     def exclude(self, source_names):
+        """
+        This function excludes all sources whose class names match one of the given regular expressions.
+        :param source_names: A list of regular expressions (strings)
+        """
         exclude = []
         for name in source_names:
             exclude.extend([src for src in self.sources if re.match(name, src.__class__.__name__)])
         self.sources = [src for src in self.sources if src not in exclude]

     def __str__(self):
+        """
+        This function returns a string with all sources currently available in the SourceLoader.
+        :return: A string with all available sources.
+        """
         string = ""
         for src in self.sources:
             string += "Source: " + src.__class__.__name__