A web scraper built to search for specific information on a given compound (and its pseudonyms)

Compare changes


+347 -66
+15
.travis.yml
···
+ # Config file for automatic testing at travis-ci.org
+
+ language: python
+ python: 2.7
+
+ # command to install dependencies, e.g. pip install -r requirements.txt --use-mirrors
+ install:
+   - pip install Scrapy docopt
+
+ # command to run tests, e.g. python setup.py test
+ script:
+   - nosetests tests
+
+ notifications:
+   slack: descartes2:6sgCzx3PvrO9IIMwKxj12dDM
+1 -3
FourmiCrawler/items.py
···
- # Define here the models for your scraped items
- #
- # See documentation in:
+ # For more information on item definitions, see the Scrapy documentation in:
  # http://doc.scrapy.org/en/latest/topics/items.html

  from scrapy.item import Item, Field
+13 -12
FourmiCrawler/pipelines.py
···
- # Define your item pipelines here
- #
- # Don't forget to add your pipeline to the ITEM_PIPELINES setting
- # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
+ # For more information on item pipelines, see the Scrapy documentation in:
+ # http://doc.scrapy.org/en/latest/topics/item-pipeline.html
  import re
+
  from scrapy.exceptions import DropItem

- class RemoveNonePipeline(object):

+ class RemoveNonePipeline(object):
      def __init__(self):
-         self.known_values = set()
+         pass

-     def process_item(self, item, spider):
+     @staticmethod
+     def process_item(item, spider):
          """
          Processing the items so None values are replaced by empty strings
          :param item: The incoming item
···
                  item[key] = ""
          return item

- class DuplicatePipeline(object):

+ class DuplicatePipeline(object):
      def __init__(self):
          self.known_values = set()

···
          """
          value = (item['attribute'], item['value'], item['conditions'])
          if value in self.known_values:
-             raise DropItem("Duplicate item found: %s" % item)  # #[todo] append sources of first item.
+             raise DropItem("Duplicate item found: %s" % item)  # [todo] append sources of first item.
          else:
              self.known_values.add(value)
          return item

- class AttributeSelectionPipeline(object):

+ class AttributeSelectionPipeline(object):
      def __init__(self):
-         pass;
+         pass

-     def process_item(self, item, spider):
+     @staticmethod
+     def process_item(item, spider):
          """
          The items are processed using the selected attribute list available in the spider,
          items that don't match the selected items are dropped.
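For context, a minimal sketch (not part of the diff) of driving the reworked DuplicatePipeline by hand: the first item passes through, and an identical (attribute, value, conditions) tuple raises DropItem. The field values are made up for illustration.

```python
# Minimal sketch (not from the repository) exercising DuplicatePipeline directly.
from scrapy.exceptions import DropItem

from FourmiCrawler.items import Result
from FourmiCrawler.pipelines import DuplicatePipeline

pipe = DuplicatePipeline()
item = Result(attribute='Melting point', value='0 C', conditions='1 atm')  # illustrative values

pipe.process_item(item, spider=None)       # first occurrence passes through unchanged
try:
    pipe.process_item(item, spider=None)   # same (attribute, value, conditions) tuple
except DropItem as e:
    print(e)                               # "Duplicate item found: ..."
```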
+1 -1
FourmiCrawler/settings.py
···
  # For simplicity, this file contains only the most important settings by
  # default. All the other settings are documented here:
  #
- # http://doc.scrapy.org/en/latest/topics/settings.html
+ #     http://doc.scrapy.org/en/latest/topics/settings.html
  #

  BOT_NAME = 'FourmiCrawler'
+6 -5
FourmiCrawler/sources/ChemSpider.py
···
- from source import Source
+ import re
+
  from scrapy import log
  from scrapy.http import Request
  from scrapy.selector import Selector
+
+ from source import Source
  from FourmiCrawler.items import Result
- import re
+

  # [TODO] - Maybe clean up usage of '.extract()[0]', because of possible IndexError exception.

···
              prop_conditions = ''

              # Test for properties without values, with one hardcoded exception
-             if (not re.match(r'^\d', prop_value) or
-                     (prop_name == 'Polarizability' and
-                      prop_value == '10-24cm3')):
+             if not re.match(r'^\d', prop_value) or (prop_name == 'Polarizability' and prop_value == '10-24cm3'):
                  continue

              # Match for condition in parentheses
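The collapsed condition above keeps a ChemSpider property only if its value starts with a digit, with one hardcoded exception for the unit-only Polarizability string. A standalone illustration (the helper name and sample values are mine, not from the repository):

```python
# Illustration of the value filter used in the rewritten condition above.
import re


def keep_property(prop_name, prop_value):
    if not re.match(r'^\d', prop_value) or (prop_name == 'Polarizability' and prop_value == '10-24cm3'):
        return False  # the spider does `continue` here and drops the property
    return True

print(keep_property('Boiling Point', '100 C'))        # True
print(keep_property('Appearance', 'colourless gas'))  # False: no leading digit
print(keep_property('Polarizability', '10-24cm3'))    # False: hardcoded exception
```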
+13 -10
FourmiCrawler/sources/NIST.py
···
- from source import Source
+ import re
+
  from scrapy import log
  from scrapy.http import Request
  from scrapy.selector import Selector
+
+ from source import Source
  from FourmiCrawler.items import Result
- import re
+

  # [TODO]: values can be '128.', perhaps remove the dot in that case?
  # [TODO]: properties have references and comments which do not exist in the
- # Result item, but should be included eventually.
+ #         Result item, but should be included eventually.

  class NIST(Source):
      """NIST Scraper plugin
···
      This plugin manages searching for a chemical on the NIST website
      and parsing the resulting page if the chemical exists on NIST.
      """
-     website = "http://webbook.nist.gov/*"
+     website = "http://webbook.nist.gov/*"

      search = 'cgi/cbook.cgi?Name=%s&Units=SI&cTP=on'

···
                  requests.extend(self.parse_generic_data(table, summary))
              else:
                  log.msg('NIST table: NOT SUPPORTED', level=log.WARNING)
-                 continue #Assume unsupported
+                 continue  # Assume unsupported
          return requests

      def parse_generic_info(self, sel):
···
          data['IUPAC Standard InChI'] = raw_inchi.extract()[0]

          raw_inchikey = ul.xpath('li[strong="IUPAC Standard InChIKey:"]'
-                         '/tt/text()')
+                                 '/tt/text()')
          data['IUPAC Standard InChIKey'] = raw_inchikey.extract()[0]

          raw_cas_number = ul.xpath('li[strong="CAS Registry Number:"]/text()')
···
          results = []
          for tr in table.xpath('tr[td]'):
              extra_data_url = tr.xpath('td[last()][a="Individual data points"]'
-                               '/a/@href').extract()
+                                       '/a/@href').extract()
              if extra_data_url:
                  request = Request(url=self.website[:-1] + extra_data_url[0],
-                           callback=self.parse_individual_datapoints)
+                                   callback=self.parse_individual_datapoints)
                  results.append(request)
                  continue
              data = []
···
                    'conditions': '%s K, (%s -> %s)' % (tds[1], tds[2], tds[3])
                })
                results.append(result)
-

        return results

···

          return results

-     def parse_individual_datapoints(self, response):
+     @staticmethod
+     def parse_individual_datapoints(response):
          """Parses the page linked from aggregate data"""
          sel = Selector(response)
          table = sel.xpath('//table[@class="data"]')[0]
+5 -3
FourmiCrawler/sources/WikipediaParser.py
···
+ import re
+
  from scrapy.http import Request
  from scrapy import log
- from source import Source
  from scrapy.selector import Selector
+
+ from source import Source
  from FourmiCrawler.items import Result
- import re


  class WikipediaParser(Source):
···
          """ scrape data from infobox on wikipedia. """
          items = []

-         #be sure to get chembox (wikipedia template)
+         # be sure to get chembox (wikipedia template)
          tr_list = sel.xpath('.//table[@class="infobox bordered"]//td[not(@colspan)]'). \
              xpath('normalize-space(string())')
          prop_names = tr_list[::2]
+19 -2
FourmiCrawler/sources/source.py
···
      _spider = None

      def __init__(self):
+         """
+         Initiation of a new Source
+         """
          pass

-     def parse(self, reponse):
-         log.msg("The parse function of the empty parser was used.", level=log.WARNING)
+     def parse(self, response):
+         """
+         This function should be able to parse all Scrapy Response objects with a URL matching the website Regex.
+         :param response: A Scrapy Response object
+         :return: A list of Result items and new Scrapy Requests
+         """
+         log.msg("The parse function of the empty source was used.", level=log.WARNING)
          pass

      def new_compound_request(self, compound):
+         """
+         This function should return a Scrapy Request for the given compound request.
+         :param compound: A compound name.
+         :return: A new Scrapy Request
+         """
          # return Request(url=self.website[:-1] + compound, callback=self.parse)
          pass

      def set_spider(self, spider):
+         """
+         A Function to save the associated spider.
+         :param spider: A FourmiSpider object
+         """
          self._spider = spider
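The docstrings added above describe the contract a concrete source has to fulfil. A hypothetical subclass might look like the sketch below; `ExampleSource`, its URL and its `search` string are invented for illustration, but the `self.website[:-1] + ...` pattern is the same one used by the real sources and hinted at in the commented-out line.

```python
# Hypothetical Source subclass, for illustration only (not part of the diff).
from scrapy.http import Request

from FourmiCrawler.sources.source import Source


class ExampleSource(Source):
    website = "http://www.example.org/*"  # regex matched against response URLs by the spider
    search = "search?q=%s"                # assumed search endpoint of the example site

    def parse(self, response):
        return []  # would return Result items and/or follow-up Requests

    def new_compound_request(self, compound):
        # same pattern as the commented-out line in the base class
        return Request(url=self.website[:-1] + self.search % compound, callback=self.parse)
```

Because SourceLoader (further down in this diff) instantiates every Source subclass it finds in FourmiCrawler/sources, dropping a file like this into that directory is enough to make the new source available.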
+54 -20
FourmiCrawler/spider.py
···
+ import re
+
  from scrapy.spider import Spider
  from scrapy import log
- import re


  class FourmiSpider(Spider):
+     """
+     A spider writen for the Fourmi Project which calls upon all available sources to request and scrape data.
+     """
      name = "FourmiSpider"
-     __parsers = []
-     synonyms = []
+     _sources = []
+     synonyms = set()

      def __init__(self, compound=None, selected_attributes=[".*"], *args, **kwargs):
+         """
+         Initiation of the Spider
+         :param compound: compound that will be searched.
+         :param selected_attributes: A list of regular expressions that the attributes should match.
+         """
          super(FourmiSpider, self).__init__(*args, **kwargs)
-         self.synonyms.append(compound)
-         self.selected_attributes = selected_attributes;
+         self.synonyms.add(compound)
+         self.selected_attributes = selected_attributes

-     def parse(self, reponse):
-         for parser in self.__parsers:
-             if re.match(parser.website, reponse.url):
-                 log.msg("Url: " + reponse.url + " -> Source: " + parser.website, level=log.DEBUG)
-                 return parser.parse(reponse)
+     def parse(self, response):
+         """
+         The function that is called when a response to a request is available. This function distributes this to a
+         source which should be able to handle parsing the data.
+         :param response: A Scrapy Response object that should be parsed
+         :return: A list of Result items and new Request to be handled by the scrapy core.
+         """
+         for source in self._sources:
+             if re.match(source.website, response.url):
+                 log.msg("Url: " + response.url + " -> Source: " + source.website, level=log.DEBUG)
+                 return source.parse(response)
          return None

      def get_synonym_requests(self, compound):
+         """
+         A function that generates new Scrapy Request for each source given a new synonym of a compound.
+         :param compound: A compound name
+         :return: A list of Scrapy Request objects
+         """
          requests = []
-         for parser in self.__parsers:
-             parser_requests = parser.new_compound_request(compound)
-             if parser_requests is not None:
-                 requests.append(parser_requests)
+         if compound not in self.synonyms:
+             self.synonyms.add(compound)
+             for parser in self._sources:
+                 parser_requests = parser.new_compound_request(compound)
+                 if parser_requests is not None:
+                     requests.append(parser_requests)
          return requests

      def start_requests(self):
+         """
+         The function called by Scrapy for it's first Requests
+         :return: A list of Scrapy Request generated from the known synonyms using the available sources.
+         """
          requests = []
          for synonym in self.synonyms:
              requests.extend(self.get_synonym_requests(synonym))
          return requests

-     def add_parsers(self, parsers):
-         for parser in parsers:
-             self.add_parser(parser)
+     def add_sources(self, sources):
+         """
+         A function to add a new Parser objects to the list of available sources.
+         :param sources: A list of Source Objects.
+         """
+         for parser in sources:
+             self.add_source(parser)

-     def add_parser(self, parser):
-         self.__parsers.append(parser)
-         parser.set_spider(self)
+     def add_source(self, source):
+         """
+         A function add a new Parser object to the list of available parsers.
+         :param source: A Source Object
+         """
+         self._sources.append(source)
+         source.set_spider(self)
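A short usage sketch (not part of the diff) of the renamed spider API; the compound name is arbitrary. Sources are registered with add_sources(), start_requests() asks every source for a request per known synonym, and parse() later routes a response to the source whose `website` regex matches the response URL.

```python
# Illustrative wiring of the spider and one real source (compound name is made up).
from FourmiCrawler.spider import FourmiSpider
from FourmiCrawler.sources.ChemSpider import ChemSpider

spi = FourmiSpider(compound="caffeine", selected_attributes=[".*"])
spi.add_sources([ChemSpider()])

for request in spi.start_requests():
    print(request.url)
```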
+4
README.md
···
  # Fourmi

+ **Master branch**: [![Build Status](https://travis-ci.org/Recondor/Fourmi.svg?branch=master)](https://travis-ci.org/Recondor/Fourmi)
+
+ **Developing branch**: [![Build Status](https://travis-ci.org/Recondor/Fourmi.svg?branch=develop)](https://travis-ci.org/Recondor/Fourmi)
+
  Fourmi is an web scraper for chemical substances. The program is designed to be
  used as a search engine to search multiple chemical databases for a specific
  substance. The program will produce all available attributes of the substance
+28 -6
fourmi.py
···
- #!/usr/bin/env python
+ # !/usr/bin/env python
  """
  Fourmi, a web scraper build to search specific information for a given compound (and it's pseudonyms).

···
  from sourceloader import SourceLoader


- def setup_crawler(searchable, settings, source_loader, attributes):
-     spider = FourmiSpider(compound=searchable, selected_attributes=attributes)
-     spider.add_parsers(source_loader.sources)
+ def setup_crawler(compound, settings, source_loader, attributes):
+     """
+     This function prepares and start the crawler which starts the actual search on the internet
+     :param compound: The compound which should be searched
+     :param settings: A scrapy settings object
+     :param source_loader: A fully functional SourceLoader object which contains only the sources that should be used.
+     :param attributes: A list of regular expressions which the attribute names should match.
+     """
+     spider = FourmiSpider(compound=compound, selected_attributes=attributes)
+     spider.add_sources(source_loader.sources)
      crawler = Crawler(settings)
      crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
      crawler.configure()
···


  def scrapy_settings_manipulation(docopt_arguments):
+     """
+     This function manipulates the Scrapy settings that normally would be set in the settings file. In the Fourmi
+     project these are command line arguments.
+     :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
+     """
      settings = get_project_settings()
-     # [todo] - add at least a warning for files that already exist
+
      if docopt_arguments["--output"] != 'result.*format*':
          settings.overrides["FEED_URI"] = docopt_arguments["--output"]
      elif docopt_arguments["--format"] == "jsonlines":
···


  def start_log(docopt_arguments):
+     """
+     This function starts the logging functionality of Scrapy using the settings given by the CLI.
+     :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
+     """
      if docopt_arguments["--log"] is not None:
          if docopt_arguments["--verbose"]:
              log.start(logfile=docopt_arguments["--log"], logstdout=False, loglevel=log.DEBUG)
···


  def search(docopt_arguments, source_loader):
+     """
+     The function that facilitates the search for a specific compound.
+     :param docopt_arguments: A dictionary generated by docopt containing all CLI arguments.
+     :param source_loader: An initiated SourceLoader object pointed at the directory with the sources.
+     """
      start_log(docopt_arguments)
      settings = scrapy_settings_manipulation(docopt_arguments)
      setup_crawler(docopt_arguments["<compound>"], settings, source_loader, docopt_arguments["--attributes"].split(','))
      reactor.run()


+ # The start for the Fourmi Command Line interface.
  if __name__ == '__main__':
-     arguments = docopt.docopt(__doc__, version='Fourmi - V0.4.0')
+     arguments = docopt.docopt(__doc__, version='Fourmi - V0.4.1')
      loader = SourceLoader()

      if arguments["--include"]:
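For orientation, a rough sketch of the dictionary that docopt hands to search(). The authoritative flag set lives in the fourmi.py docstring, which this diff does not show, so treat the keys and values below as illustrative assumptions rather than the actual interface.

```python
# Rough sketch (not in the repository) of driving search() programmatically.
from fourmi import search
from sourceloader import SourceLoader

arguments = {
    "<compound>": "caffeine",        # compound to search for (arbitrary example)
    "--attributes": ".*",            # comma-separated regexes, split by search()
    "--output": "result.*format*",   # default sentinel checked in scrapy_settings_manipulation()
    "--format": "jsonlines",         # the one format branch visible in this diff
    "--log": None,                   # no log file -> start_log() takes its else branch
    "--verbose": False,
}

search(arguments, SourceLoader())    # sets up the crawler and blocks until the reactor stops
```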
+18
setup.py
···
+ import sys
+ from cx_Freeze import setup, Executable
+
+ # After running the setup file (python setup.py build) the scrapy/VERSION file has to be manually put into the
+ # library.zip, also the FourmiCrawler map has to be copied to both the library and the exe.win32-2.7 folder. after
+ # putting the files in the library the library has to be zipped and replace the old library.
+ # Dependencies are automatically detected, but it might need fine tuning.
+ build_exe_options = {"packages": ["os", "scrapy", "lxml", "w3lib", "pkg_resources", "zope.interface", "twisted.internet"], "excludes": []}
+
+ # GUI applications require a different base on Windows (the default is for a
+ # console application).
+ base = None
+
+ setup( name = "Scrapy",
+        version = "0.1",
+        description = "My GUI application!",
+        options = {"build_exe": build_exe_options},
+        executables = [Executable("fourmi.py", base=base)])
+23 -4
sourceloader.py
···
  import inspect
+ import sys
  import os
  import re
+
  from FourmiCrawler.sources.source import Source

···
      sources = []

      def __init__(self, rel_dir="FourmiCrawler/sources"):
-         path = os.path.dirname(os.path.abspath(__file__))
+
+         if hasattr(sys,'frozen'):
+             path = os.path.dirname(sys.executable)
+         else:
+             path = os.path.dirname(os.path.abspath(__file__))
+
          path += "/" + rel_dir
          known_parser = set()

          for py in [f[:-3] for f in os.listdir(path) if f.endswith('.py') and f != '__init__.py']:
-             mod = __import__('.'.join([rel_dir.replace("/", "."), py]), fromlist=[py])
+             mod = __import__('.'.join([rel_dir.replace('/', "."), py]), fromlist=[py])
              classes = [getattr(mod, x) for x in dir(mod) if inspect.isclass(getattr(mod, x))]
              for cls in classes:
                  if issubclass(cls, Source) and cls not in known_parser:
-                     self.sources.append(cls()) # [review] - Would we ever need arguments for the parsers?
-                     known_parser.add(cls)
+                     self.sources.append(cls())  # [review] - Would we ever need arguments for the parsers?
+                     # known_parser.add(cls)

      def include(self, source_names):
+         """
+         This function excludes all sources that don't match the given regular expressions.
+         :param source_names: A list of regular expression (strings)
+         """
          new = set()
          for name in source_names:
              new.update([src for src in self.sources if re.match(name, src.__class__.__name__)])
          self.sources = list(new)

      def exclude(self, source_names):
+         """
+         This function excludes all sources that match the given regular expressions.
+         :param source_names: A list of regular expression (strings)
+         """
          exclude = []
          for name in source_names:
              exclude.extend([src for src in self.sources if re.match(name, src.__class__.__name__)])
          self.sources = [src for src in self.sources if src not in exclude]

      def __str__(self):
+         """
+         This function returns a string with all sources currently available in the SourceLoader.
+         :return: a string with all available sources.
+         """
          string = ""
          for src in self.sources:
              string += "Source: " + src.__class__.__name__
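A small usage sketch mirroring what fourmi.py and the new tests do with the loader; the regular expressions are examples only.

```python
# Usage sketch for SourceLoader (not part of the diff).
from sourceloader import SourceLoader

loader = SourceLoader()       # instantiates every Source subclass found in FourmiCrawler/sources
print(loader)                 # "Source: NIST", "Source: ChemSpider", "Source: WikipediaParser", ...

loader.exclude(["Wiki.*"])    # drop sources whose class name matches any of the patterns
loader.include(["NIST"])      # or keep only the matching ones
```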
+1
tests/__init__.py
···
+
+52
tests/test_pipeline.py
···
+ import copy
+ import unittest
+
+ from scrapy.exceptions import DropItem
+
+ from FourmiCrawler import pipelines, spider, items
+
+
+ class TestPipelines(unittest.TestCase):
+     def setUp(self):
+         self.testItem = items.Result()
+
+     def test_none_pipeline(self):
+         # Testing the pipeline that replaces the None values in items.
+         self.testItem["value"] = "abc"
+         pipe = pipelines.RemoveNonePipeline()
+         processed = pipe.process_item(self.testItem, spider.FourmiSpider())
+
+         self.assertTrue(processed["value"] == "abc")
+
+         for key in self.testItem:
+             self.assertIsNotNone(processed[key])
+             if key is not "value":
+                 self.assertIs(processed[key], "")
+
+     def test_duplicate_pipeline(self):
+         # Testing the pipeline that removes duplicates.
+         self.testItem["attribute"] = "test"
+         self.testItem["value"] = "test"
+         self.testItem["conditions"] = "test"
+
+         pipe = pipelines.DuplicatePipeline()
+         self.assertEqual(pipe.process_item(self.testItem, spider.FourmiSpider()), self.testItem)
+         self.assertRaises(DropItem, pipe.process_item, self.testItem, spider.FourmiSpider())
+
+         other_item = copy.deepcopy(self.testItem)
+         other_item["value"] = "test1"
+         self.assertEqual(pipe.process_item(other_item, spider.FourmiSpider()), other_item)
+
+     def test_attribute_selection(self):
+         # Testing the pipeline that selects attributes.
+         item1 = copy.deepcopy(self.testItem)
+         item2 = copy.deepcopy(self.testItem)
+
+         item1["attribute"] = "abd"
+         item2["attribute"] = "abc"
+
+         s = spider.FourmiSpider(selected_attributes=["a.d"])
+         pipe = pipelines.AttributeSelectionPipeline()
+
+         self.assertEqual(pipe.process_item(item1, s), item1)
+         self.assertRaises(DropItem, pipe.process_item, item2, s)
+33
tests/test_sourceloader.py
···
+ import unittest
+
+ from sourceloader import SourceLoader
+
+
+ class TestSourceloader(unittest.TestCase):
+     def setUp(self):
+         self.loader = SourceLoader()
+
+     def test_init(self):
+         # Test if sourceloader points to the right directory, where the sources are present.
+         self.assertIn("Source: Source", str(self.loader))
+         self.assertIn("Source: NIST", str(self.loader))
+         self.assertIn("Source: ChemSpider", str(self.loader))
+         self.assertIn("Source: WikipediaParser", str(self.loader))
+
+     def test_include(self):
+         # Tests for the include functionality.
+         self.loader.include(["So.rc.*"])
+
+         self.assertIn("Source: Source", str(self.loader))
+         self.assertNotIn("Source: NIST", str(self.loader))
+         self.assertNotIn("Source: ChemSpider", str(self.loader))
+         self.assertNotIn("Source: WikipediaParser", str(self.loader))
+
+     def test_exclude(self):
+         # Tests for the exclude functionality.
+         self.loader.exclude(["So.rc.*"])
+
+         self.assertNotIn("Source: Source", str(self.loader))
+         self.assertIn("Source: NIST", str(self.loader))
+         self.assertIn("Source: ChemSpider", str(self.loader))
+         self.assertIn("Source: WikipediaParser", str(self.loader))
+61
tests/test_spider.py
···
+ import unittest
+
+ from scrapy.http import Request
+
+ from FourmiCrawler import spider
+ from FourmiCrawler.sources.ChemSpider import ChemSpider
+ from FourmiCrawler.sources.source import Source
+
+
+ class TestFoumiSpider(unittest.TestCase):
+     def setUp(self):
+         self.compound = "test_compound"
+         self.attributes = ["a.*", ".*a"]
+         self.spi = spider.FourmiSpider(self.compound, self.attributes)
+
+     def test_init(self):
+         # Test the initiation of the Fourmi spider
+         self.assertIn(self.compound, self.spi.synonyms)
+         for attr in self.attributes:
+             self.assertIn(attr, self.spi.selected_attributes)
+
+     def test_add_source(self):
+         # Testing the source adding function of the Fourmi spider
+         src = Source()
+         self.spi.add_source(src)
+         self.assertIn(src, self.spi._sources)
+
+     def test_add_sources(self):
+         # Testing the function that adds multiple sources
+         srcs = [Source(), Source(), Source()]
+         self.spi.add_sources(srcs)
+
+         for src in srcs:
+             self.assertIn(src, self.spi._sources)
+
+     def test_start_requests(self):
+         # A test for the function that generates the start requests
+         self.spi._sources = []
+
+         src = Source()
+         self.spi.add_source(src)
+         self.assertEqual(self.spi.start_requests(), [])
+
+         src2 = ChemSpider()
+         self.spi.add_source(src2)
+         self.assertIsNotNone(self.spi.start_requests())
+
+     def test_synonym_requests(self):
+         # A test for the synonym request function
+         self.spi._sources = []
+
+         src = Source()
+         self.spi.add_source(src)
+         self.assertEqual(self.spi.get_synonym_requests("new_compound"), [])
+         self.assertIn("new_compound", self.spi.synonyms)
+
+         src2 = ChemSpider()
+         self.spi.add_source(src2)
+         self.assertIsInstance(self.spi.get_synonym_requests("other_compound")[0], Request)
+         self.assertIn("other_compound", self.spi.synonyms)
+         self.assertEqual(self.spi.get_synonym_requests("other_compound"), [])