diff -r c7840999b65d Doc/library/urllib.robotparser.rst
--- a/Doc/library/urllib.robotparser.rst	Sat Oct 06 18:33:08 2012 -0400
+++ b/Doc/library/urllib.robotparser.rst	Sun Oct 07 20:50:08 2012 +0100
@@ -53,6 +53,18 @@
 
       Sets the time the ``robots.txt`` file was last fetched to the current
      time.
 
+   .. method:: crawl_delay(useragent)
+
+      Returns the value of the ``Crawl-delay`` parameter from ``robots.txt``
+      for the *useragent* in question.  If there is no ``Crawl-delay``
+      parameter or it does not apply to this user agent, return ``-1``.
+
+   .. method:: request_rate(useragent)
+
+      Returns the contents of the ``Request-rate`` parameter from
+      ``robots.txt`` as a list ``[requests, seconds]``.  If there is no
+      such parameter or it does not apply to the *useragent*, return ``-1``.
+
 
 The following example demonstrates basic use of the RobotFileParser class.
@@ -60,6 +72,10 @@
    >>> rp = urllib.robotparser.RobotFileParser()
    >>> rp.set_url("http://www.musi-cal.com/robots.txt")
    >>> rp.read()
+   >>> rp.request_rate("*")
+   [3, 20]
+   >>> rp.crawl_delay("*")
+   6
    >>> rp.can_fetch("*", "http://www.musi-cal.com/cgi-bin/search?city=San+Francisco")
    False
    >>> rp.can_fetch("*", "http://www.musi-cal.com/")
diff -r c7840999b65d Lib/test/test_robotparser.py
--- a/Lib/test/test_robotparser.py	Sat Oct 06 18:33:08 2012 -0400
+++ b/Lib/test/test_robotparser.py	Sun Oct 07 20:50:08 2012 +0100
@@ -6,7 +6,8 @@
 from test import support
 
 class RobotTestCase(unittest.TestCase):
-    def __init__(self, index, parser, url, good, agent):
+    def __init__(self, index, parser, url, good, agent,
+                 request_rate, crawl_delay):
         unittest.TestCase.__init__(self)
         if good:
             self.str = "RobotTest(%d, good, %s)" % (index, url)
@@ -16,6 +17,8 @@
         self.url = url
         self.good = good
         self.agent = agent
+        self.request_rate = request_rate
+        self.crawl_delay = crawl_delay
 
     def runTest(self):
         if isinstance(self.url, tuple):
@@ -25,6 +28,12 @@
             agent = self.agent
         if self.good:
             self.assertTrue(self.parser.can_fetch(agent, url))
+            self.assertEqual(self.parser.crawl_delay(agent), self.crawl_delay)
+            self.assertEqual(self.parser.request_rate(agent), self.request_rate)
+            # The good and the bad URLs sometimes use different user agent
+            # strings, so crawl-delay and request-rate are only checked
+            # here, for the "good" agent, because their results depend on
+            # the user agent string.
         else:
             self.assertFalse(self.parser.can_fetch(agent, url))
 
@@ -34,15 +43,17 @@
 tests = unittest.TestSuite()
 
 def RobotTest(index, robots_txt, good_urls, bad_urls,
-              agent="test_robotparser"):
+              request_rate, crawl_delay, agent="test_robotparser"):
 
     lines = io.StringIO(robots_txt).readlines()
     parser = urllib.robotparser.RobotFileParser()
     parser.parse(lines)
     for url in good_urls:
-        tests.addTest(RobotTestCase(index, parser, url, 1, agent))
+        tests.addTest(RobotTestCase(index, parser, url, 1, agent,
+                                    request_rate, crawl_delay))
     for url in bad_urls:
-        tests.addTest(RobotTestCase(index, parser, url, 0, agent))
+        tests.addTest(RobotTestCase(index, parser, url, 0, agent,
+                                    request_rate, crawl_delay))
 
 
 # Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002)
@@ -56,14 +67,18 @@
 
 good = ['/','/test.html']
 bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html']
+request_rate = -1
+crawl_delay = -1
 
-RobotTest(1, doc, good, bad)
+RobotTest(1, doc, good, bad, request_rate, crawl_delay)
 
 # 2.
doc = """ # robots.txt for http://www.example.com/ User-agent: * +Crawl-delay: 1 +Request-rate: 3/15 Disallow: /cyberworld/map/ # This is an infinite virtual URL space # Cybermapper knows where to go. @@ -74,8 +89,10 @@ good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')] bad = ['/cyberworld/map/index.html'] +request_rate = -1 # The parameters should be equal to -1 since they +crawl_delay = -1 # don't apply to the cybermapper user agent -RobotTest(2, doc, good, bad) +RobotTest(2, doc, good, bad, request_rate, crawl_delay) # 3. doc = """ @@ -86,14 +103,18 @@ good = [] bad = ['/cyberworld/map/index.html','/','/tmp/'] +request_rate = -1 +crawl_delay = -1 -RobotTest(3, doc, good, bad) +RobotTest(3, doc, good, bad, request_rate, crawl_delay) # Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002) # 4. doc = """ User-agent: figtree +Crawl-delay: 3 +Request-rate: 9/30 Disallow: /tmp Disallow: /a%3cd.html Disallow: /a%2fb.html @@ -105,9 +126,15 @@ '/a%3cd.html','/a%3Cd.html','/a%2fb.html', '/~joe/index.html' ] +request_rate= [9,30] +crawl_delay = 3 +request_rate_bad = -1 # Not actually tested, but we still need to parse it +crawl_delay_bad = -1 # In order to accomodate the input parameters -RobotTest(4, doc, good, bad, 'figtree') -RobotTest(5, doc, good, bad, 'FigTree Robot libwww-perl/5.04') + +RobotTest(4, doc, good, bad, request_rate, crawl_delay, 'figtree' ) +RobotTest(5, doc, good, bad, request_rate_bad, crawl_delay_bad, + 'FigTree Robot libwww-perl/5.04') # 6. doc = """ @@ -123,7 +150,7 @@ '/a%3cd.html','/a%3Cd.html',"/a/b.html", '/%7Ejoe/index.html'] -RobotTest(6, doc, good, bad) +RobotTest(6, doc, good, bad, -1, -1) # From bug report #523041 @@ -136,7 +163,7 @@ good = ['/foo.html'] bad = [] # Bug report says "/" should be denied, but that is not in the RFC -RobotTest(7, doc, good, bad) +RobotTest(7, doc, good, bad, -1, -1) # From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364 @@ -150,7 +177,7 @@ good = ['/folder1/myfile.html'] bad = ['/folder1/anotherfile.html'] -RobotTest(8, doc, good, bad, agent="Googlebot") +RobotTest(8, doc, good, bad, -1, -1, agent="Googlebot") # 9. This file is incorrect because "Googlebot" is a substring of # "Googlebot-Mobile", so test 10 works just like test 9. @@ -165,12 +192,12 @@ good = [] bad = ['/something.jpg'] -RobotTest(9, doc, good, bad, agent="Googlebot") +RobotTest(9, doc, good, bad, -1, -1, agent="Googlebot") good = [] bad = ['/something.jpg'] -RobotTest(10, doc, good, bad, agent="Googlebot-Mobile") +RobotTest(10, doc, good, bad, -1, -1, agent="Googlebot-Mobile") # 11. Get the order correct. doc = """ @@ -184,12 +211,12 @@ good = [] bad = ['/something.jpg'] -RobotTest(11, doc, good, bad, agent="Googlebot") +RobotTest(11, doc, good, bad, -1, -1, agent="Googlebot") good = ['/something.jpg'] bad = [] -RobotTest(12, doc, good, bad, agent="Googlebot-Mobile") +RobotTest(12, doc, good, bad, -1, -1, agent="Googlebot-Mobile") # 13. Google also got the order wrong in #8. You need to specify the @@ -203,7 +230,7 @@ good = ['/folder1/myfile.html'] bad = ['/folder1/anotherfile.html'] -RobotTest(13, doc, good, bad, agent="googlebot") +RobotTest(13, doc, good, bad, -1, -1, agent="googlebot") # 14. For issue #6325 (query string support) @@ -215,7 +242,7 @@ good = ['/some/path'] bad = ['/some/path?name=value'] -RobotTest(14, doc, good, bad) +RobotTest(14, doc, good, bad, -1, -1) # 15. 
 doc = """
@@ -229,7 +256,7 @@
 good = ['/another/path']
 bad = ['/some/path']
 
-RobotTest(15, doc, good, bad)
+RobotTest(15, doc, good, bad, -1, -1)
 
 
 class NetworkTestCase(unittest.TestCase):
diff -r c7840999b65d Lib/urllib/robotparser.py
--- a/Lib/urllib/robotparser.py	Sat Oct 06 18:33:08 2012 -0400
+++ b/Lib/urllib/robotparser.py	Sun Oct 07 20:50:08 2012 +0100
@@ -119,6 +119,15 @@
                 if state != 0:
                     entry.rulelines.append(RuleLine(line[1], True))
                     state = 2
+            elif line[0] == "crawl-delay":
+                if state != 0:
+                    entry.delay = int(line[1])
+                    state = 2
+            elif line[0] == "request-rate":
+                if state != 0:
+                    numbers = line[1].split('/')
+                    entry.req_rate = [int(numbers[0]), int(numbers[1])]
+                    state = 2
         if state == 2:
             self._add_entry(entry)
 
@@ -146,6 +155,24 @@
         # agent not found ==> access granted
         return True
 
+    def crawl_delay(self, useragent):
+        for entry in self.entries:
+            if entry.applies_to(useragent):
+                if entry.delay is None:
+                    return -1           # No Crawl-delay was given for
+                else:                   # this user agent.
+                    return entry.delay
+        return -1                       # No entry applies to this user agent.
+
+    def request_rate(self, useragent):
+        for entry in self.entries:
+            if entry.applies_to(useragent):
+                if not entry.req_rate:
+                    return -1              # No Request-rate was given for
+                else:                      # this user agent.
+                    return entry.req_rate
+        return -1                          # No entry applies to this user agent.
+
     def __str__(self):
         return ''.join([str(entry) + "\n" for entry in self.entries])
 
@@ -172,6 +199,8 @@
     def __init__(self):
         self.useragents = []
         self.rulelines = []
+        self.delay = None   # Crawl-delay value; None if not set
+        self.req_rate = []  # Request-rate as [requests, seconds]; [] if not set
 
     def __str__(self):
         ret = []
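
A minimal, self-contained sketch of how the two new methods behave once this patch is applied. It feeds an inline robots.txt through RobotFileParser.parse() so no network access is needed; the robots.txt rules and agent names below are invented purely for illustration.

import urllib.robotparser

# Hypothetical robots.txt content, invented for this example.
robots_txt = """\
User-agent: figtree
Crawl-delay: 3
Request-rate: 9/30
Disallow: /tmp

User-agent: *
Disallow: /cyberworld/map/
"""

rp = urllib.robotparser.RobotFileParser()
rp.parse(robots_txt.splitlines())

# Parameters defined for a matching named user agent are returned directly.
print(rp.crawl_delay("figtree"))     # 3
print(rp.request_rate("figtree"))    # [9, 30]

# Agents that only match the default "*" entry get -1, because crawl_delay()
# and request_rate() only scan the named entries, while _add_entry() stores
# the "*" entry separately as self.default_entry.
print(rp.crawl_delay("somebot"))     # -1
print(rp.request_rate("somebot"))    # -1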