diff --git a/Lib/test/test_robotparser.py b/Lib/test/test_robotparser.py
--- a/Lib/test/test_robotparser.py
+++ b/Lib/test/test_robotparser.py
@@ -1,368 +1,323 @@
 import io
 import unittest
 import urllib.robotparser
 from collections import namedtuple
-from urllib.error import URLError, HTTPError
-from urllib.request import urlopen
 from test import support
 from http.server import BaseHTTPRequestHandler, HTTPServer
 try:
     import threading
 except ImportError:
     threading = None


-class RobotTestCase(unittest.TestCase):
-    def __init__(self, index=None, parser=None, url=None, good=None,
-                 agent=None, request_rate=None, crawl_delay=None):
-        # workaround to make unittest discovery work (see #17066)
-        if not isinstance(index, int):
-            return
-        unittest.TestCase.__init__(self)
-        if good:
-            self.str = "RobotTest(%d, good, %s)" % (index, url)
-        else:
-            self.str = "RobotTest(%d, bad, %s)" % (index, url)
-        self.parser = parser
-        self.url = url
-        self.good = good
-        self.agent = agent
-        self.request_rate = request_rate
-        self.crawl_delay = crawl_delay
+class BaseRobotTest:
+    robots_txt = ''
+    agent = 'test_robotparser'
+    good = []
+    bad = []

-    def runTest(self):
-        if isinstance(self.url, tuple):
-            agent, url = self.url
-        else:
-            url = self.url
-            agent = self.agent
-        if self.good:
-            self.assertTrue(self.parser.can_fetch(agent, url))
-            self.assertEqual(self.parser.crawl_delay(agent), self.crawl_delay)
-            # if we have actual values for request rate
-            if self.request_rate and self.parser.request_rate(agent):
-                self.assertEqual(
-                    self.parser.request_rate(agent).requests,
-                    self.request_rate.requests
-                )
-                self.assertEqual(
-                    self.parser.request_rate(agent).seconds,
-                    self.request_rate.seconds
-                )
-            self.assertEqual(self.parser.request_rate(agent), self.request_rate)
-        else:
-            self.assertFalse(self.parser.can_fetch(agent, url))
+    def setUp(self):
+        lines = io.StringIO(self.robots_txt).readlines()
+        self.parser = urllib.robotparser.RobotFileParser()
+        self.parser.parse(lines)

-    def __str__(self):
-        return self.str
+    def test_good(self):
+        for url in self.good:
+            with self.subTest(url=url):
+                if isinstance(url, tuple):
+                    agent, url = url
+                else:
+                    agent = self.agent
+                self.assertTrue(self.parser.can_fetch(agent, url))

-tests = unittest.TestSuite()
+    def test_bad(self):
+        for url in self.bad:
+            with self.subTest(url=url):
+                if isinstance(url, tuple):
+                    agent, url = url
+                else:
+                    agent = self.agent
+                self.assertFalse(self.parser.can_fetch(agent, url))

-def RobotTest(index, robots_txt, good_urls, bad_urls,
-              request_rate, crawl_delay, agent="test_robotparser"):
-    lines = io.StringIO(robots_txt).readlines()
-    parser = urllib.robotparser.RobotFileParser()
-    parser.parse(lines)
-    for url in good_urls:
-        tests.addTest(RobotTestCase(index, parser, url, 1, agent,
-                      request_rate, crawl_delay))
-    for url in bad_urls:
-        tests.addTest(RobotTestCase(index, parser, url, 0, agent,
-                      request_rate, crawl_delay))
-
-# Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002)
-
-# 1.
-doc = """
+class UserAgentWildcardTest(BaseRobotTest, unittest.TestCase):
+    robots_txt = """\
 User-agent: *
 Disallow: /cyberworld/map/ # This is an infinite virtual URL space
 Disallow: /tmp/ # these will soon disappear
 Disallow: /foo.html
-"""
+    """
+    good = ['/', '/test.html']
+    bad = ['/cyberworld/map/index.html', '/tmp/xxx', '/foo.html']

-good = ['/','/test.html']
-bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html']
-request_rate = None
-crawl_delay = None
-RobotTest(1, doc, good, bad, request_rate, crawl_delay)
-
-# 2.
-doc = """
+class CrawlDelayAndCustomAgentTest(BaseRobotTest, unittest.TestCase):
+    robots_txt = """\
 # robots.txt for http://www.example.com/

 User-agent: *
 Crawl-delay: 1
 Request-rate: 3/15
 Disallow: /cyberworld/map/ # This is an infinite virtual URL space

 # Cybermapper knows where to go.
 User-agent: cybermapper
 Disallow:
+    """
+    good = ['/', '/test.html', ('cybermapper', '/cyberworld/map/index.html')]
+    bad = ['/cyberworld/map/index.html']

-"""
-good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')]
-bad = ['/cyberworld/map/index.html']
-request_rate = None # The parameters should be equal to None since they
-crawl_delay = None # don't apply to the cybermapper user agent
-
-RobotTest(2, doc, good, bad, request_rate, crawl_delay)
-
-# 3.
-doc = """
+class RejectAllRobotsTest(BaseRobotTest, unittest.TestCase):
+    robots_txt = """\
 # go away
 User-agent: *
 Disallow: /
-"""
+    """
+    good = []
+    bad = ['/cyberworld/map/index.html', '/', '/tmp/']

-good = []
-bad = ['/cyberworld/map/index.html','/','/tmp/']
-request_rate = None
-crawl_delay = None
-RobotTest(3, doc, good, bad, request_rate, crawl_delay)
-
-# Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002)
-
-# 4.
-doc = """
+class CrawlDelayAndRequestRateTest(BaseRobotTest, unittest.TestCase):
+    robots_txt = """\
 User-agent: figtree
 Crawl-delay: 3
 Request-rate: 9/30
 Disallow: /tmp
 Disallow: /a%3cd.html
 Disallow: /a%2fb.html
 Disallow: /%7ejoe/index.html
-"""
+    """
+    agent = 'figtree'
+    request_rate = namedtuple('req_rate', 'requests seconds')(9, 30)
+    crawl_delay = 3
+    good = [('figtree', '/foo.html')]
+    bad = ['/tmp', '/tmp.html', '/tmp/a.html', '/a%3cd.html', '/a%3Cd.html',
+           '/a%2fb.html', '/~joe/index.html']

-good = [] # XFAIL '/a/b.html'
-bad = ['/tmp','/tmp.html','/tmp/a.html',
-       '/a%3cd.html','/a%3Cd.html','/a%2fb.html',
-       '/~joe/index.html'
-       ]
+    def test_request_rate(self):
+        for url in self.good:
+            with self.subTest(url=url):
+                if isinstance(url, tuple):
+                    agent, url = url
+                else:
+                    agent = self.agent
+                if self.crawl_delay:
+                    self.assertEqual(self.parser.crawl_delay(agent),
+                                     self.crawl_delay)
+                if self.request_rate and self.parser.request_rate(agent):
+                    self.assertEqual(
+                        self.parser.request_rate(agent).requests,
+                        self.request_rate.requests
+                    )
+                    self.assertEqual(
+                        self.parser.request_rate(agent).seconds,
+                        self.request_rate.seconds
+                    )

-request_rate = namedtuple('req_rate', 'requests seconds')
-request_rate.requests = 9
-request_rate.seconds = 30
-crawl_delay = 3
-request_rate_bad = None # not actually tested, but we still need to parse it
-crawl_delay_bad = None # in order to accommodate the input parameters
+class DifferentUserAgentTest(CrawlDelayAndRequestRateTest):
+    agent = 'FigTree Robot libwww-perl/5.04'
+    request_rate = None
+    crawl_delay = None

-RobotTest(4, doc, good, bad, request_rate, crawl_delay, 'figtree' )
-RobotTest(5, doc, good, bad, request_rate_bad, crawl_delay_bad,
-          'FigTree Robot libwww-perl/5.04')
-# 6.
-doc = """
+class RobotTest6(BaseRobotTest, unittest.TestCase):
+    robots_txt = """\
 User-agent: *
 Disallow: /tmp/
 Disallow: /a%3Cd.html
 Disallow: /a/b.html
 Disallow: /%7ejoe/index.html
 Crawl-delay: 3
 Request-rate: 9/banana
-"""
+    """
+    good = ['/tmp']
+    bad = ['/tmp/', '/tmp/a.html', '/a%3cd.html', '/a%3Cd.html', '/a/b.html',
+           '/%7Ejoe/index.html']
+    crawl_delay = 3

-good = ['/tmp',] # XFAIL: '/a%2fb.html'
-bad = ['/tmp/','/tmp/a.html',
-       '/a%3cd.html','/a%3Cd.html',"/a/b.html",
-       '/%7Ejoe/index.html']
-crawl_delay = 3
-request_rate = None # since request rate has invalid syntax, return None
-RobotTest(6, doc, good, bad, None, None)
+class RobotTest7(BaseRobotTest, unittest.TestCase):
+    """From bug report #523041"""

-# From bug report #523041
-
-# 7.
-doc = """
+    robots_txt = """\
 User-Agent: *
 Disallow: /.
 Crawl-delay: pears
-"""
+    """
+    good = ['/foo.html']
+    # bug report says "/" should be denied, but that is not in the RFC
+    bad = []

-good = ['/foo.html']
-bad = [] # bug report says "/" should be denied, but that is not in the RFC
-crawl_delay = None # since crawl delay has invalid syntax, return None
-request_rate = None
+class RobotTest8(BaseRobotTest, unittest.TestCase):
+    """From Google:
+    http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364"""

-RobotTest(7, doc, good, bad, crawl_delay, request_rate)
-
-# From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364
-
-# 8.
-doc = """
+    robots_txt = """\
 User-agent: Googlebot
 Allow: /folder1/myfile.html
 Disallow: /folder1/
 Request-rate: whale/banana
-"""
+    """
+    agent = 'Googlebot'
+    good = ['/folder1/myfile.html']
+    bad = ['/folder1/anotherfile.html']

-good = ['/folder1/myfile.html']
-bad = ['/folder1/anotherfile.html']
-crawl_delay = None
-request_rate = None # invalid syntax, return none
-RobotTest(8, doc, good, bad, crawl_delay, request_rate, agent="Googlebot")
+class RobotTest9(BaseRobotTest, unittest.TestCase):
+    """This file is incorrect because "Googlebot" is a substring of
+    "Googlebot-Mobile", so test 10 works just like test 9."""

-# 9. This file is incorrect because "Googlebot" is a substring of
-# "Googlebot-Mobile", so test 10 works just like test 9.
-doc = """
+    robots_txt = """\
 User-agent: Googlebot
 Disallow: /

 User-agent: Googlebot-Mobile
 Allow: /
-"""
+    """
+    agent = 'Googlebot'
+    bad = ['/something.jpg']

-good = []
-bad = ['/something.jpg']
-RobotTest(9, doc, good, bad, None, None, agent="Googlebot")
+class RobotTest10(RobotTest9):
+    """This file is incorrect because "Googlebot" is a substring of
+    "Googlebot-Mobile", so test 10 works just like test 9."""

-good = []
-bad = ['/something.jpg']
+    agent = 'Googlebot-Mobile'

-RobotTest(10, doc, good, bad, None, None, agent="Googlebot-Mobile")
-# 11. Get the order correct.
-doc = """
+class RobotTest11(BaseRobotTest, unittest.TestCase):
+    """Get the order correct."""
+
+    robots_txt = """\
 User-agent: Googlebot-Mobile
 Allow: /

 User-agent: Googlebot
 Disallow: /
-"""
+    """
+    agent = 'Googlebot'
+    bad = ['/something.jpg']

-good = []
-bad = ['/something.jpg']
-RobotTest(11, doc, good, bad, None, None, agent="Googlebot")
+class RobotTest12(BaseRobotTest, unittest.TestCase):

-good = ['/something.jpg']
-bad = []
+    robots_txt = """\
+User-agent: Googlebot-Mobile
+Allow: /

-RobotTest(12, doc, good, bad, None, None, agent="Googlebot-Mobile")
+User-agent: Googlebot
+Disallow: /
+    """
+    agent = 'Googlebot-Mobile'
+    good = ['/something.jpg']

-# 13. Google also got the order wrong in #8.  You need to specify the
-# URLs from more specific to more general.
-doc = """
+class RobotTest13(BaseRobotTest, unittest.TestCase):
+    """Google also got the order wrong in #8.
+
+    You need to specify the URLs from more specific to more general."""
+
+    robots_txt = """\
 User-agent: Googlebot
 Allow: /folder1/myfile.html
 Disallow: /folder1/
-"""
+    """
+    agent = 'googlebot'
+    good = ['/folder1/myfile.html']
+    bad = ['/folder1/anotherfile.html']

-good = ['/folder1/myfile.html']
-bad = ['/folder1/anotherfile.html']
-RobotTest(13, doc, good, bad, None, None, agent="googlebot")
+class RobotTest14(BaseRobotTest, unittest.TestCase):
+    """"For issue #6325 (query string support)"""

-
-# 14. For issue #6325 (query string support)
-doc = """
+    robots_txt = """\
 User-agent: *
 Disallow: /some/path?name=value
-"""
+    """
+    good = ['/some/path']
+    bad = ['/some/path?name=value']

-good = ['/some/path']
-bad = ['/some/path?name=value']
-RobotTest(14, doc, good, bad, None, None)
+class RobotTest15(BaseRobotTest, unittest.TestCase):
+    """For issue #4108 (obey first * entry)"""

-# 15. For issue #4108 (obey first * entry)
-doc = """
+    robots_txt = """\
 User-agent: *
 Disallow: /some/path

 User-agent: *
 Disallow: /another/path
-"""
+    """
+    good = ['/another/path']
+    bad = ['/some/path']

-good = ['/another/path']
-bad = ['/some/path']
-RobotTest(15, doc, good, bad, None, None)
+class RobotTest16(BaseRobotTest, unittest.TestCase):
+    """Empty query (issue #17403). Normalizing the url first."""

-# 16. Empty query (issue #17403). Normalizing the url first.
-doc = """
+    robots_txt = """\
 User-agent: *
 Allow: /some/path?
 Disallow: /another/path?
-"""
-
-good = ['/some/path?']
-bad = ['/another/path?']
-
-RobotTest(16, doc, good, bad, None, None)
+    """
+    good = ['/some/path?']
+    bad = ['/another/path?']


 class RobotHandler(BaseHTTPRequestHandler):

     def do_GET(self):
         self.send_error(403, "Forbidden access")

     def log_message(self, format, *args):
         pass


 @unittest.skipUnless(threading, 'threading required for this test')
+@support.reap_threads
 class PasswordProtectedSiteTestCase(unittest.TestCase):

     def setUp(self):
         self.server = HTTPServer((support.HOST, 0), RobotHandler)

         self.t = threading.Thread(
             name='HTTPServer serving',
             target=self.server.serve_forever,
             # Short poll interval to make the test finish quickly.
             # Time between requests is short enough that we won't wake
             # up spuriously too many times.
-            kwargs={'poll_interval':0.01})
+            kwargs={'poll_interval': 0.01})
         self.t.daemon = True  # In case this function raises.
         self.t.start()

     def tearDown(self):
         self.server.shutdown()
         self.t.join()
         self.server.server_close()

-    def runTest(self):
-        self.testPasswordProtectedSite()
-
     def testPasswordProtectedSite(self):
         addr = self.server.server_address
         url = 'http://' + support.HOST + ':' + str(addr[1])
         robots_url = url + "/robots.txt"
         parser = urllib.robotparser.RobotFileParser()
         parser.set_url(url)
         parser.read()
         self.assertFalse(parser.can_fetch("*", robots_url))

-    def __str__(self):
-        return '%s' % self.__class__.__name__


 class NetworkTestCase(unittest.TestCase):

-    @unittest.skip('does not handle the gzip encoding delivered by pydotorg')
     def testPythonOrg(self):
         support.requires('network')
         with support.transient_internet('www.python.org'):
             parser = urllib.robotparser.RobotFileParser(
                 "http://www.python.org/robots.txt")
             parser.read()
             self.assertTrue(
                 parser.can_fetch("*", "http://www.python.org/robots.txt"))

-def load_tests(loader, suite, pattern):
-    suite = unittest.makeSuite(NetworkTestCase)
-    suite.addTest(tests)
-    suite.addTest(PasswordProtectedSiteTestCase())
-    return suite
-
-if __name__=='__main__':
+if __name__ == '__main__':
     unittest.main()
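
Note on the refactored layout (editorial addition, not part of the patch): after this change a robots.txt scenario is expressed as a small class that mixes `BaseRobotTest` into `unittest.TestCase` and fills in the `robots_txt`, `agent`, `good`, and `bad` class attributes; `setUp()` parses the rules and `test_good()`/`test_bad()` check every URL inside a `subTest()`. A minimal sketch of adding one more case under that assumption follows; the class name and robots.txt body below are hypothetical.

```python
import unittest
import urllib.robotparser

# Assumes the refactored test module introduced by this patch is importable.
from test.test_robotparser import BaseRobotTest


class DisallowPrivateDirTest(BaseRobotTest, unittest.TestCase):
    # Hypothetical scenario: /private/ is off limits for every agent,
    # everything else stays fetchable. BaseRobotTest.setUp() parses this
    # string, and the inherited test_good()/test_bad() exercise the URLs.
    robots_txt = """\
User-agent: *
Disallow: /private/
    """
    good = ['/', '/docs/index.html']
    bad = ['/private/secret.html']


if __name__ == '__main__':
    unittest.main()
```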