diff --git a/Lib/urllib/robotparser.py b/Lib/urllib/robotparser.py
--- a/Lib/urllib/robotparser.py
+++ b/Lib/urllib/robotparser.py
@@ -7,7 +7,7 @@
     2) PSF license for Python 2.2
 
     The robots.txt Exclusion Protocol is implemented as specified in
-    http://info.webcrawler.com/mak/projects/robots/norobots-rfc.html
+    http://www.robotstxt.org/norobots-rfc.txt
 """
 
 import urllib.parse, urllib.request
@@ -57,11 +57,12 @@
         except urllib.error.HTTPError as err:
             if err.code in (401, 403):
                 self.disallow_all = True
-            elif err.code >= 400:
+            elif err.code >= 400 and err.code < 500:
                 self.allow_all = True
         else:
             raw = f.read()
             self.parse(raw.decode("utf-8").splitlines())
+        self.modified()
 
     def _add_entry(self, entry):
         if "*" in entry.useragents:
@@ -129,6 +130,12 @@
             return False
         if self.allow_all:
             return True
+        # Until the robots.txt file has been read or found not
+        # to exist, we must assume that no url is allowable.
+        # This prevents false positives when a user erroneously
+        # calls can_fetch() before calling read().
+        if not self.last_checked:
+            return False
         # search for given user agent matches
         # the first match counts
         parsed_url = urllib.parse.urlparse(urllib.parse.unquote(url))
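
For reference, a minimal sketch of the behavior after this patch (not part of the patch itself; the host name and user-agent string below are placeholders):

# Sketch of the post-patch behavior; example.com and "MyCrawler"
# are illustrative placeholders, not values from the patch.
import urllib.robotparser

rp = urllib.robotparser.RobotFileParser("https://example.com/robots.txt")

# Before the patch, can_fetch() could return True even though
# robots.txt had never been fetched.  With the new guard,
# last_checked is still unset here, so this now returns False.
print(rp.can_fetch("MyCrawler", "https://example.com/page.html"))

# read() now ends with self.modified(), which stamps last_checked,
# so later can_fetch() calls fall through to the normal user-agent
# matching (or to the disallow_all/allow_all shortcuts).
rp.read()
print(rp.can_fetch("MyCrawler", "https://example.com/page.html"))

Note also that the narrowed error check in read() means a server error (5xx) no longer sets allow_all; only client errors in the 400-499 range (other than 401 and 403) are treated as "no robots.txt present".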