diff --git a/Doc/includes/email-alternative-new-api.py b/Doc/includes/email-alternative-new-api.py --- a/Doc/includes/email-alternative-new-api.py +++ b/Doc/includes/email-alternative-new-api.py @@ -30,13 +30,13 @@ msg.add_alternative("""\ -

    <p>Salut!<\p>
+    <p>Salut!<\\p>
Cela ressemble Γ  un excellent + """.format(asparagus_cid=asparagus_cid[1:-1]), subtype='html') diff --git a/Lib/_osx_support.py b/Lib/_osx_support.py --- a/Lib/_osx_support.py +++ b/Lib/_osx_support.py @@ -210,7 +210,7 @@ def _remove_universal_flags(_config_vars): # Do not alter a config var explicitly overridden by env var if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] - flags = re.sub('-arch\s+\w+\s', ' ', flags, re.ASCII) + flags = re.sub(r'-arch\s+\w+\s', ' ', flags, re.ASCII) flags = re.sub('-isysroot [^ \t]*', ' ', flags) _save_modified_value(_config_vars, cv, flags) @@ -232,7 +232,7 @@ def _remove_unsupported_archs(_config_vars): if 'CC' in os.environ: return _config_vars - if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None: + if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None: # NOTE: Cannot use subprocess here because of bootstrap # issues when building Python itself status = os.system( @@ -251,7 +251,7 @@ def _remove_unsupported_archs(_config_vars): for cv in _UNIVERSAL_CONFIG_VARS: if cv in _config_vars and cv not in os.environ: flags = _config_vars[cv] - flags = re.sub('-arch\s+ppc\w*\s', ' ', flags) + flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags) _save_modified_value(_config_vars, cv, flags) return _config_vars @@ -267,7 +267,7 @@ def _override_all_archs(_config_vars): for cv in _UNIVERSAL_CONFIG_VARS: if cv in _config_vars and '-arch' in _config_vars[cv]: flags = _config_vars[cv] - flags = re.sub('-arch\s+\w+\s', ' ', flags) + flags = re.sub(r'-arch\s+\w+\s', ' ', flags) flags = flags + ' ' + arch _save_modified_value(_config_vars, cv, flags) @@ -465,7 +465,7 @@ def get_platform_osx(_config_vars, osname, release, machine): machine = 'fat' - archs = re.findall('-arch\s+(\S+)', cflags) + archs = re.findall(r'-arch\s+(\S+)', cflags) archs = tuple(sorted(set(archs))) if len(archs) == 1: diff --git a/Lib/csv.py b/Lib/csv.py --- a/Lib/csv.py +++ b/Lib/csv.py @@ -215,10 +215,10 @@ class Sniffer: """ matches = [] - for restr in ('(?P[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?P=delim)', # ,".*?", - '(?:^|\n)(?P["\']).*?(?P=quote)(?P[^\w\n"\'])(?P ?)', # ".*?", - '(?P>[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?:$|\n)', # ,".*?" - '(?:^|\n)(?P["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) + for restr in (r'(?P[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?P=delim)', # ,".*?", + r'(?:^|\n)(?P["\']).*?(?P=quote)(?P[^\w\n"\'])(?P ?)', # ".*?", + r'(?P>[^\w\n"\'])(?P ?)(?P["\']).*?(?P=quote)(?:$|\n)', # ,".*?" + r'(?:^|\n)(?P["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space) regexp = re.compile(restr, re.DOTALL | re.MULTILINE) matches = regexp.findall(data) if matches: diff --git a/Lib/difflib.py b/Lib/difflib.py --- a/Lib/difflib.py +++ b/Lib/difflib.py @@ -1415,7 +1415,7 @@ def _mdiff(fromlines, tolines, context=None, linejunk=None, import re # regular expression for finding intraline change indices - change_re = re.compile('(\++|\-+|\^+)') + change_re = re.compile(r'(\++|\-+|\^+)') # create the difference iterator to generate the differences diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk) diff --git a/Lib/distutils/cmd.py b/Lib/distutils/cmd.py --- a/Lib/distutils/cmd.py +++ b/Lib/distutils/cmd.py @@ -221,7 +221,7 @@ class Command: self._ensure_stringlike(option, "string", default) def ensure_string_list(self, option): - """Ensure that 'option' is a list of strings. If 'option' is + r"""Ensure that 'option' is a list of strings. 
If 'option' is currently a string, we split it either on /,\s*/ or /\s+/, so "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become ["foo", "bar", "baz"]. diff --git a/Lib/distutils/command/bdist_msi.py b/Lib/distutils/command/bdist_msi.py --- a/Lib/distutils/command/bdist_msi.py +++ b/Lib/distutils/command/bdist_msi.py @@ -623,7 +623,7 @@ class bdist_msi(Command): cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title, "OK", "OK", "OK", bitmap=False) cost.text("Title", 15, 6, 200, 15, 0x30003, - "{\DlgFontBold8}Disk Space Requirements") + r"{\DlgFontBold8}Disk Space Requirements") cost.text("Description", 20, 20, 280, 20, 0x30003, "The disk space required for the installation of the selected features.") cost.text("Text", 20, 53, 330, 60, 3, @@ -670,7 +670,7 @@ class bdist_msi(Command): progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel", bitmap=False) progress.text("Title", 20, 15, 200, 15, 0x30003, - "{\DlgFontBold8}[Progress1] [ProductName]") + r"{\DlgFontBold8}[Progress1] [ProductName]") progress.text("Text", 35, 65, 300, 30, 3, "Please wait while the Installer [Progress2] [ProductName]. " "This may take several minutes.") diff --git a/Lib/distutils/command/build_scripts.py b/Lib/distutils/command/build_scripts.py --- a/Lib/distutils/command/build_scripts.py +++ b/Lib/distutils/command/build_scripts.py @@ -51,7 +51,7 @@ class build_scripts(Command): def copy_scripts(self): - """Copy each script listed in 'self.scripts'; if it's marked as a + r"""Copy each script listed in 'self.scripts'; if it's marked as a Python script in the Unix way (first line matches 'first_line_re', ie. starts with "\#!" and contains "python"), then adjust the first line to refer to the current Python interpreter as we copy. diff --git a/Lib/distutils/cygwinccompiler.py b/Lib/distutils/cygwinccompiler.py --- a/Lib/distutils/cygwinccompiler.py +++ b/Lib/distutils/cygwinccompiler.py @@ -368,7 +368,7 @@ def check_config_h(): return (CONFIG_H_UNCERTAIN, "couldn't read '%s': %s" % (fn, exc.strerror)) -RE_VERSION = re.compile(b'(\d+\.\d+(\.\d+)*)') +RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)') def _find_exe_version(cmd): """Find the version of an executable by running `cmd` in the shell. diff --git a/Lib/distutils/msvc9compiler.py b/Lib/distutils/msvc9compiler.py --- a/Lib/distutils/msvc9compiler.py +++ b/Lib/distutils/msvc9compiler.py @@ -716,7 +716,7 @@ class MSVCCompiler(CCompiler) : r"""VC\d{2}\.CRT("|').*?(/>|)""", re.DOTALL) manifest_buf = re.sub(pattern, "", manifest_buf) - pattern = "\s*" + pattern = r"\s*" manifest_buf = re.sub(pattern, "", manifest_buf) # Now see if any other assemblies are referenced - if not, we # don't want a manifest embedded. diff --git a/Lib/distutils/sysconfig.py b/Lib/distutils/sysconfig.py --- a/Lib/distutils/sysconfig.py +++ b/Lib/distutils/sysconfig.py @@ -278,7 +278,7 @@ def parse_config_h(fp, g=None): # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). 
-_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") +_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") diff --git a/Lib/distutils/versionpredicate.py b/Lib/distutils/versionpredicate.py --- a/Lib/distutils/versionpredicate.py +++ b/Lib/distutils/versionpredicate.py @@ -154,7 +154,7 @@ def split_provision(value): global _provision_rx if _provision_rx is None: _provision_rx = re.compile( - "([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$", + r"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$", re.ASCII) value = value.strip() m = _provision_rx.match(value) diff --git a/Lib/doctest.py b/Lib/doctest.py --- a/Lib/doctest.py +++ b/Lib/doctest.py @@ -765,7 +765,7 @@ class DocTestParser: # This regular expression finds the indentation of every non-blank # line in a string. - _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE) + _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE) def _min_indent(self, s): "Return the minimum indentation of any non-blank line in `s`" @@ -1106,7 +1106,7 @@ class DocTestFinder: if lineno is not None: if source_lines is None: return lineno+1 - pat = re.compile('(^|.*:)\s*\w*("|\')') + pat = re.compile(r'(^|.*:)\s*\w*("|\')') for lineno in range(lineno, len(source_lines)): if pat.match(source_lines[lineno]): return lineno @@ -1608,11 +1608,11 @@ class OutputChecker: # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used. if not (optionflags & DONT_ACCEPT_BLANKLINE): # Replace in want with a blank line. - want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), + want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER), '', want) # If a line in got contains only spaces, then remove the # spaces. 
- got = re.sub('(?m)^\s*?$', '', got) + got = re.sub(r'(?m)^\s*?$', '', got) if got == want: return True diff --git a/Lib/email/_header_value_parser.py b/Lib/email/_header_value_parser.py --- a/Lib/email/_header_value_parser.py +++ b/Lib/email/_header_value_parser.py @@ -652,8 +652,8 @@ class Comment(WhiteSpaceTokenList): if value.token_type == 'comment': return str(value) return str(value).replace('\\', '\\\\').replace( - '(', '\(').replace( - ')', '\)') + '(', r'\(').replace( + ')', r'\)') @property def content(self): @@ -1356,15 +1356,15 @@ RouteComponentMarker = ValueTerminal('@', 'route-component-marker') _wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split _non_atom_end_matcher = re.compile(r"[^{}]+".format( - ''.join(ATOM_ENDS).replace('\\','\\\\').replace(']','\]'))).match + ''.join(ATOM_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match _non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall _non_token_end_matcher = re.compile(r"[^{}]+".format( - ''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']','\]'))).match + ''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match _non_attribute_end_matcher = re.compile(r"[^{}]+".format( - ''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']','\]'))).match + ''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match _non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format( ''.join(EXTENDED_ATTRIBUTE_ENDS).replace( - '\\','\\\\').replace(']','\]'))).match + '\\','\\\\').replace(']',r'\]'))).match def _validate_xtext(xtext): """If input token contains ASCII non-printables, register a defect.""" @@ -1517,7 +1517,7 @@ def get_unstructured(value): return unstructured def get_qp_ctext(value): - """ctext = + r"""ctext = This is not the RFC ctext, since we are handling nested comments in comment and unquoting quoted-pairs here. We allow anything except the '()' @@ -1878,7 +1878,7 @@ def get_obs_local_part(value): return obs_local_part, value def get_dtext(value): - """ dtext = / obs-dtext + r""" dtext = / obs-dtext obs-dtext = obs-NO-WS-CTL / quoted-pair We allow anything except the excluded characters, but if we find any diff --git a/Lib/email/feedparser.py b/Lib/email/feedparser.py --- a/Lib/email/feedparser.py +++ b/Lib/email/feedparser.py @@ -28,10 +28,10 @@ from email import message from email._policybase import compat32 from collections import deque -NLCRE = re.compile('\r\n|\r|\n') -NLCRE_bol = re.compile('(\r\n|\r|\n)') -NLCRE_eol = re.compile('(\r\n|\r|\n)\Z') -NLCRE_crack = re.compile('(\r\n|\r|\n)') +NLCRE = re.compile(r'\r\n|\r|\n') +NLCRE_bol = re.compile(r'(\r\n|\r|\n)') +NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z') +NLCRE_crack = re.compile(r'(\r\n|\r|\n)') # RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character # except controls, SP, and ":". 
headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])') diff --git a/Lib/fnmatch.py b/Lib/fnmatch.py --- a/Lib/fnmatch.py +++ b/Lib/fnmatch.py @@ -106,4 +106,4 @@ def translate(pat): res = '%s[%s]' % (res, stuff) else: res = res + re.escape(c) - return res + '\Z(?ms)' + return res + r'\Z(?ms)' diff --git a/Lib/ftplib.py b/Lib/ftplib.py --- a/Lib/ftplib.py +++ b/Lib/ftplib.py @@ -821,7 +821,7 @@ def parse150(resp): if _150_re is None: import re _150_re = re.compile( - "150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII) + r"150 .* \((\d+) bytes\)", re.IGNORECASE | re.ASCII) m = _150_re.match(resp) if not m: return None diff --git a/Lib/html/parser.py b/Lib/html/parser.py --- a/Lib/html/parser.py +++ b/Lib/html/parser.py @@ -34,7 +34,7 @@ commentclose = re.compile(r'--\s*>') # explode, so don't do it. # see http://www.w3.org/TR/html5/tokenization.html#tag-open-state # and http://www.w3.org/TR/html5/tokenization.html#tag-name-state -tagfind_tolerant = re.compile('([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') attrfind_tolerant = re.compile( r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') @@ -56,7 +56,7 @@ locatestarttagend_tolerant = re.compile(r""" endendtag = re.compile('>') # the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between # ') +endtagfind = re.compile(r'') diff --git a/Lib/http/client.py b/Lib/http/client.py --- a/Lib/http/client.py +++ b/Lib/http/client.py @@ -1,4 +1,4 @@ -"""HTTP/1.1 client library +r"""HTTP/1.1 client library diff --git a/Lib/http/cookiejar.py b/Lib/http/cookiejar.py --- a/Lib/http/cookiejar.py +++ b/Lib/http/cookiejar.py @@ -200,7 +200,7 @@ def _str2time(day, mon, yr, hr, min, sec, tz): STRICT_DATE_RE = re.compile( r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) " - "(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII) + r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII) WEEKDAY_RE = re.compile( r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII) LOOSE_HTTP_DATE_RE = re.compile( @@ -277,7 +277,7 @@ def http2time(text): return _str2time(day, mon, yr, hr, min, sec, tz) ISO_DATE_RE = re.compile( - """^ + r"""^ (\d{4}) # year [-\/]? (\d\d?) 
# numerical month @@ -411,7 +411,7 @@ def split_header_words(header_values): pairs = [] else: # skip junk - non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text) + non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text) assert nr_junk_chars > 0, ( "split_header_words bug: '%s', '%s', %s" % (orig_text, text, pairs)) diff --git a/Lib/http/cookies.py b/Lib/http/cookies.py --- a/Lib/http/cookies.py +++ b/Lib/http/cookies.py @@ -456,7 +456,7 @@ class Morsel(dict): # _LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=" -_LegalValueChars = _LegalKeyChars + '\[\]' +_LegalValueChars = _LegalKeyChars + r'\[\]' _CookiePattern = re.compile(r""" (?x) # This is a verbose pattern \s* # Optional whitespace at start of cookie diff --git a/Lib/idlelib/calltips.py b/Lib/idlelib/calltips.py --- a/Lib/idlelib/calltips.py +++ b/Lib/idlelib/calltips.py @@ -120,7 +120,7 @@ def get_entity(expression): _MAX_COLS = 85 _MAX_LINES = 5 # enough for bytes _INDENT = ' '*4 # for wrapped signatures -_first_param = re.compile('(?<=\()\w*\,?\s*') +_first_param = re.compile(r'(?<=\()\w*\,?\s*') _default_callable_argspec = "See source or doc" diff --git a/Lib/idlelib/idle_test/test_replace.py b/Lib/idlelib/idle_test/test_replace.py --- a/Lib/idlelib/idle_test/test_replace.py +++ b/Lib/idlelib/idle_test/test_replace.py @@ -91,7 +91,7 @@ class ReplaceDialogTest(unittest.TestCase): text.mark_set('insert', 'end') text.insert('insert', '\nline42:') before_text = text.get('1.0', 'end') - pv.set('[a-z][\d]+') + pv.set(r'[a-z][\d]+') replace() after_text = text.get('1.0', 'end') equal(before_text, after_text) @@ -192,7 +192,7 @@ class ReplaceDialogTest(unittest.TestCase): self.engine.revar.set(True) before_text = text.get('1.0', 'end') - pv.set('[a-z][\d]+') + pv.set(r'[a-z][\d]+') rv.set('hello') replace() after_text = text.get('1.0', 'end') @@ -207,7 +207,7 @@ class ReplaceDialogTest(unittest.TestCase): self.assertIn('error', showerror.title) self.assertIn('Empty', showerror.message) - pv.set('[\d') + pv.set(r'[\d') replace() self.assertIn('error', showerror.title) self.assertIn('Pattern', showerror.message) diff --git a/Lib/idlelib/idle_test/test_searchengine.py b/Lib/idlelib/idle_test/test_searchengine.py --- a/Lib/idlelib/idle_test/test_searchengine.py +++ b/Lib/idlelib/idle_test/test_searchengine.py @@ -139,10 +139,10 @@ class SearchEngineTest(unittest.TestCase): def test_setcookedpat(self): engine = self.engine - engine.setcookedpat('\s') - self.assertEqual(engine.getpat(), '\s') + engine.setcookedpat(r'\s') + self.assertEqual(engine.getpat(), r'\s') engine.revar.set(1) - engine.setcookedpat('\s') + engine.setcookedpat(r'\s') self.assertEqual(engine.getpat(), r'\\s') def test_getcookedpat(self): @@ -156,10 +156,10 @@ class SearchEngineTest(unittest.TestCase): Equal(engine.getcookedpat(), r'\bhello\b') engine.wordvar.set(False) - engine.setpat('\s') + engine.setpat(r'\s') Equal(engine.getcookedpat(), r'\\s') engine.revar.set(True) - Equal(engine.getcookedpat(), '\s') + Equal(engine.getcookedpat(), r'\s') def test_getprog(self): engine = self.engine @@ -282,7 +282,7 @@ class ForwardBackwardTest(unittest.TestCase): cls.pat = re.compile('target') cls.res = (2, (10, 16)) # line, slice indexes of 'target' cls.failpat = re.compile('xyz') # not in text - cls.emptypat = re.compile('\w*') # empty match possible + cls.emptypat = re.compile(r'\w*') # empty match possible def make_search(self, func): def search(pat, line, col, wrap, ok=0): diff --git a/Lib/idlelib/paragraph.py b/Lib/idlelib/paragraph.py --- 
a/Lib/idlelib/paragraph.py +++ b/Lib/idlelib/paragraph.py @@ -130,7 +130,7 @@ def reformat_paragraph(data, limit): partial = indent1 while i < n and not is_all_white(lines[i]): # XXX Should take double space after period (etc.) into account - words = re.split("(\s+)", lines[i]) + words = re.split(r"(\s+)", lines[i]) for j in range(0, len(words), 2): word = words[j] if not word: diff --git a/Lib/imaplib.py b/Lib/imaplib.py --- a/Lib/imaplib.py +++ b/Lib/imaplib.py @@ -132,7 +132,7 @@ _Untagged_status = br'\* (?P\d+) (?P[A-Z-]+)( (?P.*))?' class IMAP4: - """IMAP4 client class. + r"""IMAP4 client class. Instantiate with: IMAP4([host[, port]]) @@ -1535,7 +1535,7 @@ if __name__ == '__main__': ('select', ('/tmp/yyz 2',)), ('search', (None, 'SUBJECT', 'test')), ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), - ('store', ('1', 'FLAGS', '(\Deleted)')), + ('store', ('1', 'FLAGS', r'(\Deleted)')), ('namespace', ()), ('expunge', ()), ('recent', ()), diff --git a/Lib/msilib/__init__.py b/Lib/msilib/__init__.py --- a/Lib/msilib/__init__.py +++ b/Lib/msilib/__init__.py @@ -289,7 +289,7 @@ class Directory: def make_short(self, file): oldfile = file file = file.replace('+', '_') - file = ''.join(c for c in file if not c in ' "/\[]:;=,') + file = ''.join(c for c in file if not c in r' "/\[]:;=,') parts = file.split(".") if len(parts) > 1: prefix = "".join(parts[:-1]).upper() diff --git a/Lib/platform.py b/Lib/platform.py --- a/Lib/platform.py +++ b/Lib/platform.py @@ -251,13 +251,13 @@ def _dist_try_harder(distname, version, id): _release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII) _lsb_release_version = re.compile(r'(.+)' - ' release ' - '([\d.]+)' - '[^(]*(?:\((.+)\))?', re.ASCII) + r' release ' + r'([\d.]+)' + r'[^(]*(?:\((.+)\))?', re.ASCII) _release_version = re.compile(r'([^0-9]+)' - '(?: release )?' - '([\d.]+)' - '[^(]*(?:\((.+)\))?', re.ASCII) + r'(?: release )?' + r'([\d.]+)' + r'[^(]*(?:\((.+)\))?', re.ASCII) # See also http://www.novell.com/coolsolutions/feature/11251.html # and http://linuxmafia.com/faq/Admin/release-files.html @@ -407,8 +407,8 @@ def _norm_version(version, build=''): return version _ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) ' - '.*' - '\[.* ([\d.]+)\])') + r'.*' + r'\[.* ([\d.]+)\])') # Examples of VER command output: # @@ -1153,22 +1153,22 @@ _sys_version_parser = re.compile( _ironpython_sys_version_parser = re.compile( r'IronPython\s*' - '([\d\.]+)' - '(?: \(([\d\.]+)\))?' - ' on (.NET [\d\.]+)', re.ASCII) + r'([\d\.]+)' + r'(?: \(([\d\.]+)\))?' 
+ r' on (.NET [\d\.]+)', re.ASCII) # IronPython covering 2.6 and 2.7 _ironpython26_sys_version_parser = re.compile( r'([\d.]+)\s*' - '\(IronPython\s*' - '[\d.]+\s*' - '\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)' + r'\(IronPython\s*' + r'[\d.]+\s*' + r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)' ) _pypy_sys_version_parser = re.compile( r'([\w.+]+)\s*' - '\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*' - '\[PyPy [^\]]+\]?') + r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*' + r'\[PyPy [^\]]+\]?') _sys_version_cache = {} @@ -1403,7 +1403,7 @@ def platform(aliased=0, terse=0): # see issue #1322 for more information warnings.filterwarnings( 'ignore', - 'dist\(\) and linux_distribution\(\) ' + r'dist\(\) and linux_distribution\(\) ' 'functions are deprecated .*', PendingDeprecationWarning, ) diff --git a/Lib/string.py b/Lib/string.py --- a/Lib/string.py +++ b/Lib/string.py @@ -28,7 +28,7 @@ ascii_letters = ascii_lowercase + ascii_uppercase digits = '0123456789' hexdigits = digits + 'abcdef' + 'ABCDEF' octdigits = '01234567' -punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" +punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~""" printable = digits + ascii_letters + punctuation + whitespace # Functions which aren't available as string methods. diff --git a/Lib/sysconfig.py b/Lib/sysconfig.py --- a/Lib/sysconfig.py +++ b/Lib/sysconfig.py @@ -215,7 +215,7 @@ def _parse_makefile(filename, vars=None): # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). import re - _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") + _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py --- a/Lib/test/_test_multiprocessing.py +++ b/Lib/test/_test_multiprocessing.py @@ -3791,7 +3791,7 @@ class TestSemaphoreTracker(unittest.TestCase): p.stderr.close() expected = 'semaphore_tracker: There appear to be 2 leaked semaphores' self.assertRegex(err, expected) - self.assertRegex(err, 'semaphore_tracker: %r: \[Errno' % name1) + self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1) # # Mixins diff --git a/Lib/test/datetimetester.py b/Lib/test/datetimetester.py --- a/Lib/test/datetimetester.py +++ b/Lib/test/datetimetester.py @@ -4001,12 +4001,12 @@ class Oddballs(unittest.TestCase): datetime(xx, xx, xx, xx, xx, xx, xx)) with self.assertRaisesRegex(TypeError, '^an integer is required ' - '\(got type str\)$'): + r'\(got type str\)$'): datetime(10, 10, '10') f10 = Number(10.9) with self.assertRaisesRegex(TypeError, '^__int__ returned non-int ' - '\(type float\)$'): + r'\(type float\)$'): datetime(10, 10, f10) class Float(float): diff --git a/Lib/test/re_tests.py b/Lib/test/re_tests.py --- a/Lib/test/re_tests.py +++ b/Lib/test/re_tests.py @@ -158,7 +158,7 @@ tests = [ ('(abc', '-', SYNTAX_ERROR), ('a]', 'a]', SUCCEED, 'found', 'a]'), ('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'), - ('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'), + ('a[\\]]b', 'a]b', SUCCEED, 'found', 'a]b'), ('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'), ('a[^bc]d', 'abd', FAIL), ('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'), @@ -551,7 +551,7 @@ tests = [ # lookbehind: split by : but not if it is escaped by -. 
('(?= 39 except OSError: can = False diff --git a/Lib/test/test_asyncio/test_streams.py b/Lib/test/test_asyncio/test_streams.py --- a/Lib/test/test_asyncio/test_streams.py +++ b/Lib/test/test_asyncio/test_streams.py @@ -831,7 +831,7 @@ os.close(fd) stream._waiter = asyncio.Future(loop=self.loop) self.assertRegex( repr(stream), - ">") + r">") stream._waiter.set_result(None) self.loop.run_until_complete(stream._waiter) stream._waiter = None diff --git a/Lib/test/test_builtin.py b/Lib/test/test_builtin.py --- a/Lib/test/test_builtin.py +++ b/Lib/test/test_builtin.py @@ -83,7 +83,7 @@ test_conv_no_sign = [ ('', ValueError), (' ', ValueError), (' \t\t ', ValueError), - (str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314), + (str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314), (chr(0x200), ValueError), ] @@ -105,7 +105,7 @@ test_conv_sign = [ ('', ValueError), (' ', ValueError), (' \t\t ', ValueError), - (str(b'\u0663\u0661\u0664 ','raw-unicode-escape'), 314), + (str(br'\u0663\u0661\u0664 ','raw-unicode-escape'), 314), (chr(0x200), ValueError), ] diff --git a/Lib/test/test_bytes.py b/Lib/test/test_bytes.py --- a/Lib/test/test_bytes.py +++ b/Lib/test/test_bytes.py @@ -216,7 +216,7 @@ class BaseBytesTest: self.assertEqual(b, self.type2test(sample[:-3], "utf-8")) def test_decode(self): - sample = "Hello world\n\u1234\u5678\u9abc\def0\def0" + sample = "Hello world\n\u1234\u5678\u9abc\\def0\\def0" for enc in ("utf-8", "utf-16"): b = self.type2test(sample, enc) self.assertEqual(b.decode(enc), sample) diff --git a/Lib/test/test_capi.py b/Lib/test/test_capi.py --- a/Lib/test/test_capi.py +++ b/Lib/test/test_capi.py @@ -533,21 +533,21 @@ class SkipitemTest(unittest.TestCase): parse((1, 2, 3), {}, b'OOO', ['', '', 'a']) parse((1, 2), {'a': 3}, b'OOO', ['', '', 'a']) with self.assertRaisesRegex(TypeError, - 'Function takes at least 2 positional arguments \(1 given\)'): + r'Function takes at least 2 positional arguments \(1 given\)'): parse((1,), {'a': 3}, b'OOO', ['', '', 'a']) parse((1,), {}, b'O|OO', ['', '', 'a']) with self.assertRaisesRegex(TypeError, - 'Function takes at least 1 positional arguments \(0 given\)'): + r'Function takes at least 1 positional arguments \(0 given\)'): parse((), {}, b'O|OO', ['', '', 'a']) parse((1, 2), {'a': 3}, b'OO$O', ['', '', 'a']) with self.assertRaisesRegex(TypeError, - 'Function takes exactly 2 positional arguments \(1 given\)'): + r'Function takes exactly 2 positional arguments \(1 given\)'): parse((1,), {'a': 3}, b'OO$O', ['', '', 'a']) parse((1,), {}, b'O|O$O', ['', '', 'a']) with self.assertRaisesRegex(TypeError, - 'Function takes at least 1 positional arguments \(0 given\)'): + r'Function takes at least 1 positional arguments \(0 given\)'): parse((), {}, b'O|O$O', ['', '', 'a']) - with self.assertRaisesRegex(SystemError, 'Empty parameter name after \$'): + with self.assertRaisesRegex(SystemError, r'Empty parameter name after \$'): parse((1,), {}, b'O|$OO', ['', '', 'a']) with self.assertRaisesRegex(SystemError, 'Empty keyword'): parse((1,), {}, b'O|OO', ['', 'a', '']) diff --git a/Lib/test/test_cgi.py b/Lib/test/test_cgi.py --- a/Lib/test/test_cgi.py +++ b/Lib/test/test_cgi.py @@ -148,7 +148,7 @@ class CgiTests(unittest.TestCase): def test_escape(self): # cgi.escape() is deprecated. 
with warnings.catch_warnings(): - warnings.filterwarnings('ignore', 'cgi\.escape', + warnings.filterwarnings('ignore', r'cgi\.escape', DeprecationWarning) self.assertEqual("test & string", cgi.escape("test & string")) self.assertEqual("<test string>", cgi.escape("")) diff --git a/Lib/test/test_codeccallbacks.py b/Lib/test/test_codeccallbacks.py --- a/Lib/test/test_codeccallbacks.py +++ b/Lib/test/test_codeccallbacks.py @@ -280,12 +280,12 @@ class CodecCallbackTest(unittest.TestCase): ) self.assertEqual( - b"\\u3042\u3xxx".decode("unicode-escape", "test.handler1"), + b"\\u3042\\u3xxx".decode("unicode-escape", "test.handler1"), "\u3042[<92><117><51>]xxx" ) self.assertEqual( - b"\\u3042\u3xx".decode("unicode-escape", "test.handler1"), + b"\\u3042\\u3xx".decode("unicode-escape", "test.handler1"), "\u3042[<92><117><51>]xx" ) diff --git a/Lib/test/test_codecs.py b/Lib/test/test_codecs.py --- a/Lib/test/test_codecs.py +++ b/Lib/test/test_codecs.py @@ -2703,8 +2703,8 @@ class TransformCodecTest(unittest.TestCase): bad_input = "bad input type" for encoding in bytes_transform_encodings: with self.subTest(encoding=encoding): - fmt = ( "{!r} is not a text encoding; " - "use codecs.encode\(\) to handle arbitrary codecs") + fmt = (r"{!r} is not a text encoding; " + r"use codecs.encode\(\) to handle arbitrary codecs") msg = fmt.format(encoding) with self.assertRaisesRegex(LookupError, msg) as failure: bad_input.encode(encoding) @@ -2713,7 +2713,7 @@ class TransformCodecTest(unittest.TestCase): def test_text_to_binary_blacklists_text_transforms(self): # Check str.encode gives a good error message for str -> str codecs msg = (r"^'rot_13' is not a text encoding; " - "use codecs.encode\(\) to handle arbitrary codecs") + r"use codecs.encode\(\) to handle arbitrary codecs") with self.assertRaisesRegex(LookupError, msg): "just an example message".encode("rot_13") @@ -2725,7 +2725,7 @@ class TransformCodecTest(unittest.TestCase): with self.subTest(encoding=encoding): encoded_data = codecs.encode(data, encoding) fmt = (r"{!r} is not a text encoding; " - "use codecs.decode\(\) to handle arbitrary codecs") + r"use codecs.decode\(\) to handle arbitrary codecs") msg = fmt.format(encoding) with self.assertRaisesRegex(LookupError, msg): encoded_data.decode(encoding) @@ -2737,7 +2737,7 @@ class TransformCodecTest(unittest.TestCase): for bad_input in (b"immutable", bytearray(b"mutable")): with self.subTest(bad_input=bad_input): msg = (r"^'rot_13' is not a text encoding; " - "use codecs.decode\(\) to handle arbitrary codecs") + r"use codecs.decode\(\) to handle arbitrary codecs") with self.assertRaisesRegex(LookupError, msg) as failure: bad_input.decode("rot_13") self.assertIsNone(failure.exception.__cause__) @@ -2956,12 +2956,12 @@ class ExceptionChainingTest(unittest.TestCase): self.assertEqual(decoded, b"not str!") # Text model methods should complain fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; " - "use codecs.encode\(\) to encode to arbitrary types$") + r"use codecs.encode\(\) to encode to arbitrary types$") msg = fmt.format(self.codec_name) with self.assertRaisesRegex(TypeError, msg): "str_input".encode(self.codec_name) fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; " - "use codecs.decode\(\) to decode to arbitrary types$") + r"use codecs.decode\(\) to decode to arbitrary types$") msg = fmt.format(self.codec_name) with self.assertRaisesRegex(TypeError, msg): b"bytes input".decode(self.codec_name) diff --git a/Lib/test/test_coroutines.py b/Lib/test/test_coroutines.py --- 
a/Lib/test/test_coroutines.py +++ b/Lib/test/test_coroutines.py @@ -891,7 +891,7 @@ class CoroutineTest(unittest.TestCase): return await Awaitable() with self.assertRaisesRegex( - TypeError, "__await__\(\) returned a coroutine"): + TypeError, r"__await__\(\) returned a coroutine"): run_async(foo()) @@ -1333,7 +1333,7 @@ class CoroutineTest(unittest.TestCase): with self.assertRaisesRegex( TypeError, - "async for' received an invalid object.*__aiter.*\: I"): + r"async for' received an invalid object.*__aiter.*\: I"): run_async(foo()) @@ -1666,8 +1666,8 @@ class SysSetCoroWrapperTest(unittest.TestCase): try: with silence_coro_gc(), self.assertRaisesRegex( RuntimeError, - "coroutine wrapper.*\.wrapper at 0x.*attempted to " - "recursively wrap .* wrap .*"): + r"coroutine wrapper.*\.wrapper at 0x.*attempted to " + r"recursively wrap .* wrap .*"): foo() finally: diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py --- a/Lib/test/test_doctest.py +++ b/Lib/test/test_doctest.py @@ -291,7 +291,7 @@ constructor: ... ... Non-example text. ... - ... >>> print('another\example') + ... >>> print('another\\example') ... another ... example ... ''' diff --git a/Lib/test/test_email/test__header_value_parser.py b/Lib/test/test_email/test__header_value_parser.py --- a/Lib/test/test_email/test__header_value_parser.py +++ b/Lib/test/test_email/test__header_value_parser.py @@ -581,7 +581,7 @@ class TestParser(TestParserMixin, TestEmailBase): def test_get_comment_quoted_parens(self): self._test_get_x(parser.get_comment, - '(foo\) \(\)bar)', '(foo\) \(\)bar)', ' ', [], '', ['foo) ()bar']) + r'(foo\) \(\)bar)', r'(foo\) \(\)bar)', ' ', [], '', ['foo) ()bar']) def test_get_comment_non_printable(self): self._test_get_x(parser.get_comment, @@ -625,7 +625,7 @@ class TestParser(TestParserMixin, TestEmailBase): def test_get_comment_qs_in_nested_comment(self): comment = self._test_get_x(parser.get_comment, - '(foo (b\)))', '(foo (b\)))', ' ', [], '', ['foo (b\))']) + r'(foo (b\)))', r'(foo (b\)))', ' ', [], '', [r'foo (b\))']) self.assertEqual(comment[2].content, 'b)') # get_cfws diff --git a/Lib/test/test_email/test_email.py b/Lib/test/test_email/test_email.py --- a/Lib/test/test_email/test_email.py +++ b/Lib/test/test_email/test_email.py @@ -3017,7 +3017,7 @@ class TestMiscellaneous(TestEmailBase): def test_escape_backslashes(self): self.assertEqual( - utils.formataddr(('Arthur \Backslash\ Foobar', 'person@dom.ain')), + utils.formataddr((r'Arthur \Backslash\ Foobar', 'person@dom.ain')), r'"Arthur \\Backslash\\ Foobar" ') a = r'Arthur \Backslash\ Foobar' b = 'person@dom.ain' diff --git a/Lib/test/test_faulthandler.py b/Lib/test/test_faulthandler.py --- a/Lib/test/test_faulthandler.py +++ b/Lib/test/test_faulthandler.py @@ -93,7 +93,7 @@ class FaultHandlerTests(unittest.TestCase): header = 'Thread 0x[0-9a-f]+' else: header = 'Stack' - regex = """ + regex = r""" ^{fatal_error} {header} \(most recent call first\): @@ -490,7 +490,7 @@ class FaultHandlerTests(unittest.TestCase): lineno = 8 else: lineno = 10 - regex = """ + regex = r""" ^Thread 0x[0-9a-f]+ \(most recent call first\): (?: File ".*threading.py", line [0-9]+ in [_a-z]+ ){{1,3}} File "", line 23 in run @@ -669,9 +669,9 @@ class FaultHandlerTests(unittest.TestCase): trace = '\n'.join(trace) if not unregister: if all_threads: - regex = 'Current thread 0x[0-9a-f]+ \(most recent call first\):\n' + regex = r'Current thread 0x[0-9a-f]+ \(most recent call first\):\n' else: - regex = 'Stack \(most recent call first\):\n' + regex = r'Stack \(most recent call 
first\):\n' regex = expected_traceback(14, 32, regex) self.assertRegex(trace, regex) else: diff --git a/Lib/test/test_fnmatch.py b/Lib/test/test_fnmatch.py --- a/Lib/test/test_fnmatch.py +++ b/Lib/test/test_fnmatch.py @@ -62,14 +62,14 @@ class FnmatchTestCase(unittest.TestCase): class TranslateTestCase(unittest.TestCase): def test_translate(self): - self.assertEqual(translate('*'), '.*\Z(?ms)') - self.assertEqual(translate('?'), '.\Z(?ms)') - self.assertEqual(translate('a?b*'), 'a.b.*\Z(?ms)') - self.assertEqual(translate('[abc]'), '[abc]\Z(?ms)') - self.assertEqual(translate('[]]'), '[]]\Z(?ms)') - self.assertEqual(translate('[!x]'), '[^x]\Z(?ms)') - self.assertEqual(translate('[^x]'), '[\\^x]\Z(?ms)') - self.assertEqual(translate('[x'), '\\[x\Z(?ms)') + self.assertEqual(translate('*'), r'.*\Z(?ms)') + self.assertEqual(translate('?'), r'.\Z(?ms)') + self.assertEqual(translate('a?b*'), r'a.b.*\Z(?ms)') + self.assertEqual(translate('[abc]'), r'[abc]\Z(?ms)') + self.assertEqual(translate('[]]'), r'[]]\Z(?ms)') + self.assertEqual(translate('[!x]'), r'[^x]\Z(?ms)') + self.assertEqual(translate('[^x]'), r'[\^x]\Z(?ms)') + self.assertEqual(translate('[x'), r'\[x\Z(?ms)') class FilterTestCase(unittest.TestCase): diff --git a/Lib/test/test_future.py b/Lib/test/test_future.py --- a/Lib/test/test_future.py +++ b/Lib/test/test_future.py @@ -4,7 +4,7 @@ import unittest from test import support import re -rx = re.compile('\((\S+).py, line (\d+)') +rx = re.compile(r'\((\S+).py, line (\d+)') def get_error_location(msg): mo = rx.search(str(msg)) diff --git a/Lib/test/test_gdb.py b/Lib/test/test_gdb.py --- a/Lib/test/test_gdb.py +++ b/Lib/test/test_gdb.py @@ -241,7 +241,7 @@ class DebuggerTests(unittest.TestCase): # gdb can insert additional '\n' and space characters in various places # in its output, depending on the width of the terminal it's connected # to (using its "wrap_here" function) - m = re.match('.*#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)\)\s+at\s+\S*Python/bltinmodule.c.*', + m = re.match(r'.*#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)\)\s+at\s+\S*Python/bltinmodule.c.*', gdb_output, re.DOTALL) if not m: self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output)) @@ -549,7 +549,7 @@ class Foo: foo = Foo() foo.an_attr = foo id(foo)''') - self.assertTrue(re.match('\) at remote 0x-?[0-9a-f]+>', + self.assertTrue(re.match(r'\) at remote 0x-?[0-9a-f]+>', gdb_repr), 'Unexpected gdb representation: %r\n%s' % \ (gdb_repr, gdb_output)) @@ -562,7 +562,7 @@ class Foo(object): foo = Foo() foo.an_attr = foo id(foo)''') - self.assertTrue(re.match('\) at remote 0x-?[0-9a-f]+>', + self.assertTrue(re.match(r'\) at remote 0x-?[0-9a-f]+>', gdb_repr), 'Unexpected gdb representation: %r\n%s' % \ (gdb_repr, gdb_output)) @@ -576,7 +576,7 @@ b = Foo() a.an_attr = b b.an_attr = a id(a)''') - self.assertTrue(re.match('\) at remote 0x-?[0-9a-f]+>\) at remote 0x-?[0-9a-f]+>', + self.assertTrue(re.match(r'\) at remote 0x-?[0-9a-f]+>\) at remote 0x-?[0-9a-f]+>', gdb_repr), 'Unexpected gdb representation: %r\n%s' % \ (gdb_repr, gdb_output)) @@ -611,7 +611,7 @@ id(a)''') def test_builtin_method(self): gdb_repr, gdb_output = self.get_gdb_repr('import sys; id(sys.stdout.readlines)') - self.assertTrue(re.match('', + self.assertTrue(re.match(r'', gdb_repr), 'Unexpected gdb representation: %r\n%s' % \ (gdb_repr, gdb_output)) @@ -626,7 +626,7 @@ id(foo.__code__)''', breakpoint='builtin_id', cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)v)->co_zombieframe)'] ) - self.assertTrue(re.match('.*\s+\$1 
=\s+Frame 0x-?[0-9a-f]+, for file , line 3, in foo \(\)\s+.*', + self.assertTrue(re.match(r'.*\s+\$1 =\s+Frame 0x-?[0-9a-f]+, for file , line 3, in foo \(\)\s+.*', gdb_output, re.DOTALL), 'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output)) diff --git a/Lib/test/test_getargs2.py b/Lib/test/test_getargs2.py --- a/Lib/test/test_getargs2.py +++ b/Lib/test/test_getargs2.py @@ -630,20 +630,20 @@ class KeywordOnly_TestCase(unittest.TestCase): ) # required arg missing with self.assertRaisesRegex(TypeError, - "Required argument 'required' \(pos 1\) not found"): + r"Required argument 'required' \(pos 1\) not found"): getargs_keyword_only(optional=2) with self.assertRaisesRegex(TypeError, - "Required argument 'required' \(pos 1\) not found"): + r"Required argument 'required' \(pos 1\) not found"): getargs_keyword_only(keyword_only=3) def test_too_many_args(self): with self.assertRaisesRegex(TypeError, - "Function takes at most 2 positional arguments \(3 given\)"): + r"Function takes at most 2 positional arguments \(3 given\)"): getargs_keyword_only(1, 2, 3) with self.assertRaisesRegex(TypeError, - "function takes at most 3 arguments \(4 given\)"): + r"function takes at most 3 arguments \(4 given\)"): getargs_keyword_only(1, 2, 3, keyword_only=5) def test_invalid_keyword(self): @@ -678,11 +678,11 @@ class PositionalOnlyAndKeywords_TestCase(unittest.TestCase): self.assertEqual(self.getargs(1), (1, -1, -1)) # required positional arg missing with self.assertRaisesRegex(TypeError, - "Function takes at least 1 positional arguments \(0 given\)"): + r"Function takes at least 1 positional arguments \(0 given\)"): self.getargs() with self.assertRaisesRegex(TypeError, - "Function takes at least 1 positional arguments \(0 given\)"): + r"Function takes at least 1 positional arguments \(0 given\)"): self.getargs(keyword=3) def test_empty_keyword(self): diff --git a/Lib/test/test_htmlparser.py b/Lib/test/test_htmlparser.py --- a/Lib/test/test_htmlparser.py +++ b/Lib/test/test_htmlparser.py @@ -701,7 +701,7 @@ class AttributesTestCase(TestCaseBase): def test_attr_funky_names2(self): self._run_check( - "", + r"", [("starttag", "a", [("$", None)]), ("starttag", "b", [("$", "%")]), ("starttag", "c", [("\\", "/")])]) diff --git a/Lib/test/test_http_cookiejar.py b/Lib/test/test_http_cookiejar.py --- a/Lib/test/test_http_cookiejar.py +++ b/Lib/test/test_http_cookiejar.py @@ -1051,7 +1051,7 @@ class CookieTests(unittest.TestCase): url = "http://foo.bar.com/" interact_2965(c, url, "spam=eggs; Version=1; Port") h = interact_2965(c, url) - self.assertRegex(h, "\$Port([^=]|$)", + self.assertRegex(h, r"\$Port([^=]|$)", "port with no value not returned with no value") c = CookieJar(pol) @@ -1396,9 +1396,9 @@ class LWPCookieTests(unittest.TestCase): self.assertRegex(cookie, r'^\$Version="?1"?;') self.assertRegex(cookie, r'Part_Number="?Rocket_Launcher_0001"?;' - '\s*\$Path="\/acme"') + r'\s*\$Path="\/acme"') self.assertRegex(cookie, r'Customer="?WILE_E_COYOTE"?;' - '\s*\$Path="\/acme"') + r'\s*\$Path="\/acme"') # # 7. 
User Agent -> Server diff --git a/Lib/test/test_mailcap.py b/Lib/test/test_mailcap.py --- a/Lib/test/test_mailcap.py +++ b/Lib/test/test_mailcap.py @@ -101,7 +101,7 @@ class HelperFunctionTest(unittest.TestCase): (["echo foo", "audio/*", "foo.txt"], "echo foo"), (["echo %s", "audio/*", "foo.txt"], "echo foo.txt"), (["echo %t", "audio/*", "foo.txt"], "echo audio/*"), - (["echo \%t", "audio/*", "foo.txt"], "echo %t"), + (["echo \\%t", "audio/*", "foo.txt"], "echo %t"), (["echo foo", "audio/*", "foo.txt", plist], "echo foo"), (["echo %{total}", "audio/*", "foo.txt", plist], "echo 3") ] diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py --- a/Lib/test/test_os.py +++ b/Lib/test/test_os.py @@ -2104,7 +2104,7 @@ class Win32JunctionTests(unittest.TestCase): class NonLocalSymlinkTests(unittest.TestCase): def setUp(self): - """ + r""" Create this structure: base diff --git a/Lib/test/test_platform.py b/Lib/test/test_platform.py --- a/Lib/test/test_platform.py +++ b/Lib/test/test_platform.py @@ -255,7 +255,7 @@ class PlatformTest(unittest.TestCase): with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', - 'dist\(\) and linux_distribution\(\) ' + r'dist\(\) and linux_distribution\(\) ' 'functions are deprecated .*', PendingDeprecationWarning, ) @@ -331,7 +331,7 @@ class PlatformTest(unittest.TestCase): with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', - 'dist\(\) and linux_distribution\(\) ' + r'dist\(\) and linux_distribution\(\) ' 'functions are deprecated .*', PendingDeprecationWarning, ) diff --git a/Lib/test/test_re.py b/Lib/test/test_re.py --- a/Lib/test/test_re.py +++ b/Lib/test/test_re.py @@ -113,10 +113,10 @@ class ReTests(unittest.TestCase): self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s) self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s) - self.assertEqual(re.sub('(?Px)', '\g\g', 'xx'), 'xxxx') - self.assertEqual(re.sub('(?Px)', '\g\g<1>', 'xx'), 'xxxx') - self.assertEqual(re.sub('(?Px)', '\g\g', 'xx'), 'xxxx') - self.assertEqual(re.sub('(?Px)', '\g<1>\g<1>', 'xx'), 'xxxx') + self.assertEqual(re.sub('(?Px)', r'\g\g', 'xx'), 'xxxx') + self.assertEqual(re.sub('(?Px)', r'\g\g<1>', 'xx'), 'xxxx') + self.assertEqual(re.sub('(?Px)', r'\g\g', 'xx'), 'xxxx') + self.assertEqual(re.sub('(?Px)', r'\g<1>\g<1>', 'xx'), 'xxxx') self.assertEqual(re.sub('a', r'\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b') self.assertEqual(re.sub('a', '\t\n\v\r\f\a\b', 'a'), '\t\n\v\r\f\a\b') @@ -127,11 +127,11 @@ class ReTests(unittest.TestCase): with self.assertRaises(re.error): self.assertEqual(re.sub('a', '\\' + c, 'a'), '\\' + c) - self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest') + self.assertEqual(re.sub(r'^\s*', 'X', 'test'), 'Xtest') def test_bug_449964(self): # fails for group followed by other escape - self.assertEqual(re.sub(r'(?Px)', '\g<1>\g<1>\\b', 'xx'), + self.assertEqual(re.sub(r'(?Px)', r'\g<1>\g<1>\b', 'xx'), 'xx\bxx\b') def test_bug_449000(self): @@ -218,26 +218,26 @@ class ReTests(unittest.TestCase): self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d') def test_symbolic_groups(self): - re.compile('(?Px)(?P=a)(?(a)y)') - re.compile('(?Px)(?P=a1)(?(a1)y)') - re.compile('(?Px)\1(?(1)y)') - self.checkPatternError('(?P)(?P)', + re.compile(r'(?Px)(?P=a)(?(a)y)') + re.compile(r'(?Px)(?P=a1)(?(a1)y)') + re.compile(r'(?Px)\1(?(1)y)') + self.checkPatternError(r'(?P)(?P)', "redefinition of group name 'a' as group 2; " "was group 1") - self.checkPatternError('(?P(?P=a))', + self.checkPatternError(r'(?P(?P=a))', "cannot refer to an open group", 10) - 
self.checkPatternError('(?Pxy)', 'unknown extension ?Px') - self.checkPatternError('(?P)(?P=a', 'missing ), unterminated name', 11) - self.checkPatternError('(?P=', 'missing group name', 4) - self.checkPatternError('(?P=)', 'missing group name', 4) - self.checkPatternError('(?P=1)', "bad character in group name '1'", 4) - self.checkPatternError('(?P=a)', "unknown group name 'a'") - self.checkPatternError('(?P=a1)', "unknown group name 'a1'") - self.checkPatternError('(?P=a.)', "bad character in group name 'a.'", 4) - self.checkPatternError('(?P<)', 'missing >, unterminated name', 4) - self.checkPatternError('(?P, unterminated name', 4) - self.checkPatternError('(?P<', 'missing group name', 4) - self.checkPatternError('(?P<>)', 'missing group name', 4) + self.checkPatternError(r'(?Pxy)', 'unknown extension ?Px') + self.checkPatternError(r'(?P)(?P=a', 'missing ), unterminated name', 11) + self.checkPatternError(r'(?P=', 'missing group name', 4) + self.checkPatternError(r'(?P=)', 'missing group name', 4) + self.checkPatternError(r'(?P=1)', "bad character in group name '1'", 4) + self.checkPatternError(r'(?P=a)', "unknown group name 'a'") + self.checkPatternError(r'(?P=a1)', "unknown group name 'a1'") + self.checkPatternError(r'(?P=a.)', "bad character in group name 'a.'", 4) + self.checkPatternError(r'(?P<)', 'missing >, unterminated name', 4) + self.checkPatternError(r'(?P, unterminated name', 4) + self.checkPatternError(r'(?P<', 'missing group name', 4) + self.checkPatternError(r'(?P<>)', 'missing group name', 4) self.checkPatternError(r'(?P<1>)', "bad character in group name '1'", 4) self.checkPatternError(r'(?P)', "bad character in group name 'a.'", 4) self.checkPatternError(r'(?(', 'missing group name', 3) @@ -256,35 +256,35 @@ class ReTests(unittest.TestCase): self.assertEqual(re.match(pat, 'xc8yz').span(), (0, 5)) def test_symbolic_refs(self): - self.checkTemplateError('(?Px)', '\gx)', r'\g, unterminated name', 3) - self.checkTemplateError('(?Px)', '\g<', 'xx', + self.checkTemplateError('(?Px)', r'\g<', 'xx', 'missing group name', 3) - self.checkTemplateError('(?Px)', '\g', 'xx', 'missing <', 2) - self.checkTemplateError('(?Px)', '\g', 'xx', + self.checkTemplateError('(?Px)', r'\g', 'xx', 'missing <', 2) + self.checkTemplateError('(?Px)', r'\g', 'xx', "bad character in group name 'a a'", 3) - self.checkTemplateError('(?Px)', '\g<>', 'xx', + self.checkTemplateError('(?Px)', r'\g<>', 'xx', 'missing group name', 3) - self.checkTemplateError('(?Px)', '\g<1a1>', 'xx', + self.checkTemplateError('(?Px)', r'\g<1a1>', 'xx', "bad character in group name '1a1'", 3) self.checkTemplateError('(?Px)', r'\g<2>', 'xx', 'invalid group reference') self.checkTemplateError('(?Px)', r'\2', 'xx', 'invalid group reference') with self.assertRaisesRegex(IndexError, "unknown group name 'ab'"): - re.sub('(?Px)', '\g', 'xx') + re.sub('(?Px)', r'\g', 'xx') self.assertEqual(re.sub('(?Px)|(?Py)', r'\g', 'xx'), '') self.assertEqual(re.sub('(?Px)|(?Py)', r'\2', 'xx'), '') - self.checkTemplateError('(?Px)', '\g<-1>', 'xx', + self.checkTemplateError('(?Px)', r'\g<-1>', 'xx', "bad character in group name '-1'", 3) # New valid/invalid identifiers in Python 3 self.assertEqual(re.sub('(?P<Β΅>x)', r'\g<Β΅>', 'xx'), 'xx') self.assertEqual(re.sub('(?P<π”˜π”«π”¦π” π”¬π”‘π”’>x)', r'\g<π”˜π”«π”¦π” π”¬π”‘π”’>', 'xx'), 'xx') - self.checkTemplateError('(?Px)', '\g<Β©>', 'xx', + self.checkTemplateError('(?Px)', r'\g<Β©>', 'xx', "bad character in group name 'Β©'", 3) # Support > 100 groups. 
pat = '|'.join('x(?P%x)y' % (i, i) for i in range(1, 200 + 1)) - self.assertEqual(re.sub(pat, '\g<200>', 'xc8yzxc8y'), 'c8zc8') + self.assertEqual(re.sub(pat, r'\g<200>', 'xc8yzxc8y'), 'c8zc8') def test_re_subn(self): self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2)) @@ -472,19 +472,19 @@ class ReTests(unittest.TestCase): re.compile(r".*?").fullmatch("abcd", pos=1, endpos=3).span(), (1, 3)) def test_re_groupref_exists(self): - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(), + self.assertEqual(re.match(r'^(\()?([^()]+)(?(1)\))$', '(a)').groups(), ('(', 'a')) - self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(), + self.assertEqual(re.match(r'^(\()?([^()]+)(?(1)\))$', 'a').groups(), (None, 'a')) - self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', 'a)')) - self.assertIsNone(re.match('^(\()?([^()]+)(?(1)\))$', '(a')) + self.assertIsNone(re.match(r'^(\()?([^()]+)(?(1)\))$', 'a)')) + self.assertIsNone(re.match(r'^(\()?([^()]+)(?(1)\))$', '(a')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(), ('a', 'b')) - self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), + self.assertEqual(re.match(r'^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), (None, 'd')) - self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(), + self.assertEqual(re.match(r'^(?:(a)|c)((?(1)|d))$', 'cd').groups(), (None, 'd')) - self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(), + self.assertEqual(re.match(r'^(?:(a)|c)((?(1)|d))$', 'a').groups(), ('a', '')) # Tests for bug #1177831: exercise groups other than the first group @@ -509,7 +509,7 @@ class ReTests(unittest.TestCase): 'two branches', 10) def test_re_groupref_overflow(self): - self.checkTemplateError('()', '\g<%s>' % sre_constants.MAXGROUPS, 'xx', + self.checkTemplateError('()', r'\g<%s>' % sre_constants.MAXGROUPS, 'xx', 'invalid group reference', 3) self.checkPatternError(r'(?P)(?(%d))' % sre_constants.MAXGROUPS, 'invalid group reference', 10) @@ -544,37 +544,37 @@ class ReTests(unittest.TestCase): " ") def test_repeat_minmax(self): - self.assertIsNone(re.match("^(\w){1}$", "abc")) - self.assertIsNone(re.match("^(\w){1}?$", "abc")) - self.assertIsNone(re.match("^(\w){1,2}$", "abc")) - self.assertIsNone(re.match("^(\w){1,2}?$", "abc")) - - self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c") - self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c") - self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c") - self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") - self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c") - self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c") - self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c") - self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") - - self.assertIsNone(re.match("^x{1}$", "xxx")) - self.assertIsNone(re.match("^x{1}?$", "xxx")) - self.assertIsNone(re.match("^x{1,2}$", "xxx")) - self.assertIsNone(re.match("^x{1,2}?$", "xxx")) - - self.assertTrue(re.match("^x{3}$", "xxx")) - self.assertTrue(re.match("^x{1,3}$", "xxx")) - self.assertTrue(re.match("^x{3,3}$", "xxx")) - self.assertTrue(re.match("^x{1,4}$", "xxx")) - self.assertTrue(re.match("^x{3,4}?$", "xxx")) - self.assertTrue(re.match("^x{3}?$", "xxx")) - self.assertTrue(re.match("^x{1,3}?$", "xxx")) - self.assertTrue(re.match("^x{1,4}?$", "xxx")) - self.assertTrue(re.match("^x{3,4}?$", "xxx")) - - self.assertIsNone(re.match("^x{}$", "xxx")) - self.assertTrue(re.match("^x{}$", "x{}")) + 
self.assertIsNone(re.match(r"^(\w){1}$", "abc")) + self.assertIsNone(re.match(r"^(\w){1}?$", "abc")) + self.assertIsNone(re.match(r"^(\w){1,2}$", "abc")) + self.assertIsNone(re.match(r"^(\w){1,2}?$", "abc")) + + self.assertEqual(re.match(r"^(\w){3}$", "abc").group(1), "c") + self.assertEqual(re.match(r"^(\w){1,3}$", "abc").group(1), "c") + self.assertEqual(re.match(r"^(\w){1,4}$", "abc").group(1), "c") + self.assertEqual(re.match(r"^(\w){3,4}?$", "abc").group(1), "c") + self.assertEqual(re.match(r"^(\w){3}?$", "abc").group(1), "c") + self.assertEqual(re.match(r"^(\w){1,3}?$", "abc").group(1), "c") + self.assertEqual(re.match(r"^(\w){1,4}?$", "abc").group(1), "c") + self.assertEqual(re.match(r"^(\w){3,4}?$", "abc").group(1), "c") + + self.assertIsNone(re.match(r"^x{1}$", "xxx")) + self.assertIsNone(re.match(r"^x{1}?$", "xxx")) + self.assertIsNone(re.match(r"^x{1,2}$", "xxx")) + self.assertIsNone(re.match(r"^x{1,2}?$", "xxx")) + + self.assertTrue(re.match(r"^x{3}$", "xxx")) + self.assertTrue(re.match(r"^x{1,3}$", "xxx")) + self.assertTrue(re.match(r"^x{3,3}$", "xxx")) + self.assertTrue(re.match(r"^x{1,4}$", "xxx")) + self.assertTrue(re.match(r"^x{3,4}?$", "xxx")) + self.assertTrue(re.match(r"^x{3}?$", "xxx")) + self.assertTrue(re.match(r"^x{1,3}?$", "xxx")) + self.assertTrue(re.match(r"^x{1,4}?$", "xxx")) + self.assertTrue(re.match(r"^x{3,4}?$", "xxx")) + + self.assertIsNone(re.match(r"^x{}$", "xxx")) + self.assertTrue(re.match(r"^x{}$", "x{}")) self.checkPatternError(r'x{2,1}', 'min repeat greater than max repeat', 2) @@ -697,10 +697,10 @@ class ReTests(unittest.TestCase): "a\n\nb") def test_lookahead(self): - self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a") - self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a") - self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a") - self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a") + self.assertEqual(re.match(r"(a(?=\s[^a]))", "a b").group(1), "a") + self.assertEqual(re.match(r"(a(?=\s[^a]*))", "a b").group(1), "a") + self.assertEqual(re.match(r"(a(?=\s[abc]))", "a b").group(1), "a") + self.assertEqual(re.match(r"(a(?=\s[abc]*))", "a bc").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a") @@ -848,12 +848,12 @@ class ReTests(unittest.TestCase): self.assertEqual(re.match(b"abc", b"ABC", re.I|re.L).group(0), b"ABC") def test_not_literal(self): - self.assertEqual(re.search("\s([^a])", " b").group(1), "b") - self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb") + self.assertEqual(re.search(r"\s([^a])", " b").group(1), "b") + self.assertEqual(re.search(r"\s([^a]*)", " bb").group(1), "bb") def test_search_coverage(self): - self.assertEqual(re.search("\s(b)", " b").group(1), "b") - self.assertEqual(re.search("a\s", "a ").group(0), "a ") + self.assertEqual(re.search(r"\s(b)", " b").group(1), "b") + self.assertEqual(re.search(r"a\s", "a ").group(0), "a ") def assertMatch(self, pattern, text, match=None, span=None, matcher=re.match): @@ -1055,8 +1055,8 @@ class ReTests(unittest.TestCase): self.assertIsNone(re.match(r'(a)?a','a').lastindex) self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1) self.assertEqual(re.match(r'(?Pa)(?Pb)?b','ab').lastgroup, 'a') - self.assertEqual(re.match("(?Pa(b))", "ab").lastgroup, 'a') - self.assertEqual(re.match("((a))", "a").lastindex, 1) + self.assertEqual(re.match(r"(?Pa(b))", "ab").lastgroup, 'a') + 
self.assertEqual(re.match(r"((a))", "a").lastindex, 1) def test_bug_418626(self): # bugs 418626 at al. -- Testing Greg Chapman's addition of op code @@ -1228,7 +1228,7 @@ class ReTests(unittest.TestCase): '\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd' ] for x in decimal_digits: - self.assertEqual(re.match('^\d$', x).group(0), x) + self.assertEqual(re.match(r'^\d$', x).group(0), x) not_decimal_digits = [ '\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl' @@ -1237,7 +1237,7 @@ class ReTests(unittest.TestCase): '\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No' ] for x in not_decimal_digits: - self.assertIsNone(re.match('^\d$', x)) + self.assertIsNone(re.match(r'^\d$', x)) def test_empty_array(self): # SF buf 1647541 @@ -1306,29 +1306,29 @@ class ReTests(unittest.TestCase): for flags in (0, re.UNICODE): pat = re.compile('\xc0', flags | re.IGNORECASE) self.assertTrue(pat.match('\xe0')) - pat = re.compile('\w', flags) + pat = re.compile(r'\w', flags) self.assertTrue(pat.match('\xe0')) pat = re.compile('\xc0', re.ASCII | re.IGNORECASE) self.assertIsNone(pat.match('\xe0')) pat = re.compile('(?a)\xc0', re.IGNORECASE) self.assertIsNone(pat.match('\xe0')) - pat = re.compile('\w', re.ASCII) + pat = re.compile(r'\w', re.ASCII) self.assertIsNone(pat.match('\xe0')) - pat = re.compile('(?a)\w') + pat = re.compile(r'(?a)\w') self.assertIsNone(pat.match('\xe0')) # Bytes patterns for flags in (0, re.ASCII): pat = re.compile(b'\xc0', flags | re.IGNORECASE) self.assertIsNone(pat.match(b'\xe0')) - pat = re.compile(b'\w', flags) + pat = re.compile(br'\w', flags) self.assertIsNone(pat.match(b'\xe0')) # Incompatibilities - self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) - self.assertRaises(ValueError, re.compile, b'(?u)\w') - self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII) - self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII) - self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE) - self.assertRaises(ValueError, re.compile, '(?au)\w') + self.assertRaises(ValueError, re.compile, br'\w', re.UNICODE) + self.assertRaises(ValueError, re.compile, br'(?u)\w') + self.assertRaises(ValueError, re.compile, r'\w', re.UNICODE | re.ASCII) + self.assertRaises(ValueError, re.compile, r'(?u)\w', re.ASCII) + self.assertRaises(ValueError, re.compile, r'(?a)\w', re.UNICODE) + self.assertRaises(ValueError, re.compile, r'(?au)\w') def test_locale_flag(self): import locale @@ -1359,13 +1359,13 @@ class ReTests(unittest.TestCase): pat = re.compile(bpat, re.IGNORECASE) if bletter: self.assertIsNone(pat.match(bletter)) - pat = re.compile(b'\w', re.LOCALE) + pat = re.compile(br'\w', re.LOCALE) if bletter: self.assertTrue(pat.match(bletter)) - pat = re.compile(b'(?L)\w') + pat = re.compile(br'(?L)\w') if bletter: self.assertTrue(pat.match(bletter)) - pat = re.compile(b'\w') + pat = re.compile(br'\w') if bletter: self.assertIsNone(pat.match(bletter)) # Incompatibilities @@ -1379,7 +1379,7 @@ class ReTests(unittest.TestCase): def test_bug_6509(self): # Replacement strings of both types must parse properly. 
# all strings - pat = re.compile('a(\w)') + pat = re.compile(r'a(\w)') self.assertEqual(pat.sub('b\\1', 'ac'), 'bc') pat = re.compile('a(.)') self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234') @@ -1387,7 +1387,7 @@ class ReTests(unittest.TestCase): self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str') # all bytes - pat = re.compile(b'a(\w)') + pat = re.compile(br'a(\w)') self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc') pat = re.compile(b'a(.)') self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD') @@ -1509,7 +1509,7 @@ class ReTests(unittest.TestCase): for string in (b'[abracadabra]', B(b'[abracadabra]'), bytearray(b'[abracadabra]'), memoryview(b'[abracadabra]')): - m = re.search(rb'(.+)(.*?)\1', string) + m = re.search(br'(.+)(.*?)\1', string) self.assertEqual(repr(m), "<%s.%s object; " "span=(1, 12), match=b'abracadabra'>" % (type(m).__module__, type(m).__qualname__)) diff --git a/Lib/test/test_regrtest.py b/Lib/test/test_regrtest.py --- a/Lib/test/test_regrtest.py +++ b/Lib/test/test_regrtest.py @@ -704,8 +704,8 @@ class ArgsTestCase(BaseTestCase): test = self.create_test('coverage') output = self.run_tests("--coverage", test) self.check_executed_tests(output, [test]) - regex = ('lines +cov% +module +\(path\)\n' - '(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+') + regex = (r'lines +cov% +module +\(path\)\n' + r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+') self.check_line(output, regex) def test_wait(self): diff --git a/Lib/test/test_ssl.py b/Lib/test/test_ssl.py --- a/Lib/test/test_ssl.py +++ b/Lib/test/test_ssl.py @@ -3405,7 +3405,7 @@ def test_main(verbose=False): with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', - 'dist\(\) and linux_distribution\(\) ' + r'dist\(\) and linux_distribution\(\) ' 'functions are deprecated .*', PendingDeprecationWarning, ) diff --git a/Lib/test/test_strftime.py b/Lib/test/test_strftime.py --- a/Lib/test/test_strftime.py +++ b/Lib/test/test_strftime.py @@ -23,9 +23,9 @@ def escapestr(text, ampm): """ new_text = re.escape(text) new_text = new_text.replace(re.escape(ampm), ampm) - new_text = new_text.replace('\%', '%') - new_text = new_text.replace('\:', ':') - new_text = new_text.replace('\?', '?') + new_text = new_text.replace(r'\%', '%') + new_text = new_text.replace(r'\:', ':') + new_text = new_text.replace(r'\?', '?') return new_text diff --git a/Lib/test/test_strlit.py b/Lib/test/test_strlit.py --- a/Lib/test/test_strlit.py +++ b/Lib/test/test_strlit.py @@ -121,9 +121,9 @@ class TestLiterals(unittest.TestCase): self.assertEqual(eval(""" b'\x01' """), byte(1)) self.assertEqual(eval(r""" b'\x81' """), byte(0x81)) self.assertRaises(SyntaxError, eval, """ b'\x81' """) - self.assertEqual(eval(r""" b'\u1881' """), b'\\' + b'u1881') + self.assertEqual(eval(r""" br'\u1881' """), b'\\' + b'u1881') self.assertRaises(SyntaxError, eval, """ b'\u1881' """) - self.assertEqual(eval(r""" b'\U0001d120' """), b'\\' + b'U0001d120') + self.assertEqual(eval(r""" br'\U0001d120' """), b'\\' + b'U0001d120') self.assertRaises(SyntaxError, eval, """ b'\U0001d120' """) def test_eval_bytes_incomplete(self): diff --git a/Lib/test/test_strptime.py b/Lib/test/test_strptime.py --- a/Lib/test/test_strptime.py +++ b/Lib/test/test_strptime.py @@ -129,7 +129,7 @@ class TimeRETests(unittest.TestCase): def test_pattern_escaping(self): # Make sure any characters in the format string that might be taken as # regex syntax is escaped. 
- pattern_string = self.time_re.pattern("\d+") + pattern_string = self.time_re.pattern(r"\d+") self.assertIn(r"\\d\+", pattern_string, "%s does not have re characters escaped properly" % pattern_string) @@ -170,9 +170,9 @@ class TimeRETests(unittest.TestCase): def test_matching_with_escapes(self): # Make sure a format that requires escaping of characters works - compiled_re = self.time_re.compile("\w+ %m") - found = compiled_re.match("\w+ 10") - self.assertTrue(found, "Escaping failed of format '\w+ 10'") + compiled_re = self.time_re.compile(r"\w+ %m") + found = compiled_re.match(r"\w+ 10") + self.assertTrue(found, r"Escaping failed of format '\w+ 10'") def test_locale_data_w_regex_metacharacters(self): # Check that if locale data contains regex metacharacters they are @@ -403,7 +403,7 @@ class StrptimeTests(unittest.TestCase): # unbalanced parentheses when the regex is compiled if they are not # escaped. # Test instigated by bug #796149 . - need_escaping = ".^$*+?{}\[]|)(" + need_escaping = r".^$*+?{}\[]|)(" self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping)) def test_feb29_on_leap_year_without_year(self): diff --git a/Lib/test/test_unicode.py b/Lib/test/test_unicode.py --- a/Lib/test/test_unicode.py +++ b/Lib/test/test_unicode.py @@ -1564,7 +1564,7 @@ class UnicodeTest(string_tests.CommonTest, ('+', b'+-'), ('+-', b'+--'), ('+?', b'+-?'), - ('\?', b'+AFw?'), + (r'\?', b'+AFw?'), ('+?', b'+-?'), (r'\\?', b'+AFwAXA?'), (r'\\\?', b'+AFwAXABc?'), @@ -2326,7 +2326,7 @@ class UnicodeTest(string_tests.CommonTest, # non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV() # raises an error self.assertRaisesRegex(ValueError, - '^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format ' + r'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format ' 'string, got a non-ASCII byte: 0xe9$', PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii') diff --git a/Lib/test/test_urllib.py b/Lib/test/test_urllib.py --- a/Lib/test/test_urllib.py +++ b/Lib/test/test_urllib.py @@ -729,7 +729,7 @@ FF class QuotingTests(unittest.TestCase): - """Tests for urllib.quote() and urllib.quote_plus() + r"""Tests for urllib.quote() and urllib.quote_plus() According to RFC 2396 (Uniform Resource Identifiers), to escape a character you write it as '%' + <2 character US-ASCII hex value>. @@ -804,7 +804,7 @@ class QuotingTests(unittest.TestCase): # Make sure all characters that should be quoted are by default sans # space (separate test for that). 
         should_quote = [chr(num) for num in range(32)]  # For 0x00 - 0x1F
-        should_quote.append('<>#%"{}|\^[]`')
+        should_quote.append(r'<>#%"{}|\^[]`')
         should_quote.append(chr(127)) # For 0x7F
         should_quote = ''.join(should_quote)
 
         for char in should_quote:
diff --git a/Lib/test/test_xmlrpc.py b/Lib/test/test_xmlrpc.py
--- a/Lib/test/test_xmlrpc.py
+++ b/Lib/test/test_xmlrpc.py
@@ -1218,7 +1218,7 @@ class CGIHandlerTestCase(unittest.TestCase):
         content = handle[handle.find("<?xml"):]
diff --git a/Tools/pynche/ColorDB.py b/Tools/pynche/ColorDB.py
--- a/Tools/pynche/ColorDB.py
+++ b/Tools/pynche/ColorDB.py
-        '\s*(?P<red>\d+)\s+(?P<green>\d+)\s+(?P<blue>\d+)\s+(?P<name>.*)')
+        r'\s*(?P<red>\d+)\s+(?P<green>\d+)\s+(?P<blue>\d+)\s+(?P<name>.*)')
 
 class HTML40DB(ColorDB):
-    _re = re.compile('(?P<name>\S+)\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
+    _re = re.compile(r'(?P<name>\S+)\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
 
     def _extractrgb(self, mo):
         return rrggbb_to_triplet(mo.group('hexrgb'))
 
 class LightlinkDB(HTML40DB):
-    _re = re.compile('(?P<name>(.+))\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
+    _re = re.compile(r'(?P<name>(.+))\s+(?P<hexrgb>#[0-9a-fA-F]{6})')
 
     def _extractname(self, mo):
         return mo.group('name').strip()
diff --git a/Tools/scripts/fixcid.py b/Tools/scripts/fixcid.py
--- a/Tools/scripts/fixcid.py
+++ b/Tools/scripts/fixcid.py
@@ -88,7 +88,7 @@ def main():
     sys.exit(bad)
 
 # Change this regular expression to select a different set of files
-Wanted = '^[a-zA-Z0-9_]+\.[ch]$'
+Wanted = r'^[a-zA-Z0-9_]+\.[ch]$'
 def wanted(name):
     return re.match(Wanted, name) >= 0
 
@@ -193,21 +193,21 @@ def fix(filename):
 
 # Tokenizing ANSI C (partly)
 
-Identifier = '\(struct \)?[a-zA-Z_][a-zA-Z0-9_]+'
-String = '"\([^\n\\"]\|\\\\.\)*"'
-Char = '\'\([^\n\\\']\|\\\\.\)*\''
-CommentStart = '/\*'
-CommentEnd = '\*/'
+Identifier = r'\(struct \)?[a-zA-Z_][a-zA-Z0-9_]+'
+String = r'"\([^\n\\"]\|\\.\)*"'
+Char = r'\'\([^\n\\\']\|\\.\)*\''
+CommentStart = r'/\*'
+CommentEnd = r'\*/'
 Hexnumber = '0[xX][0-9a-fA-F]*[uUlL]*'
 Octnumber = '0[0-7]*[uUlL]*'
 Decnumber = '[1-9][0-9]*[uUlL]*'
-Intnumber = Hexnumber + '\|' + Octnumber + '\|' + Decnumber
+Intnumber = Hexnumber + r'\|' + Octnumber + r'\|' + Decnumber
 Exponent = '[eE][-+]?[0-9]+'
-Pointfloat = '\([0-9]+\.[0-9]*\|\.[0-9]+\)\(' + Exponent + '\)?'
+Pointfloat = r'\([0-9]+\.[0-9]*\|\.[0-9]+\)\(' + Exponent + r'\)?'
 Expfloat = '[0-9]+' + Exponent
-Floatnumber = Pointfloat + '\|' + Expfloat
-Number = Floatnumber + '\|' + Intnumber
+Floatnumber = Pointfloat + r'\|' + Expfloat
+Number = Floatnumber + r'\|' + Intnumber
 
 # Anything else is an operator -- don't list this explicitly because of '/*'
diff --git a/Tools/scripts/fixdiv.py b/Tools/scripts/fixdiv.py
--- a/Tools/scripts/fixdiv.py
+++ b/Tools/scripts/fixdiv.py
@@ -174,8 +174,8 @@ def usage(msg):
     sys.stderr.write("Usage: %s [-m] warnings\n" % sys.argv[0])
     sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
 
-PATTERN = ("^(.+?):(\d+): DeprecationWarning: "
-           "classic (int|long|float|complex) division$")
+PATTERN = (r"^(.+?):(\d+): DeprecationWarning: "
+           "classic (int|long|float|complex) division$")
 
 def readwarnings(warningsfile):
     prog = re.compile(PATTERN)
diff --git a/Tools/scripts/h2py.py b/Tools/scripts/h2py.py
--- a/Tools/scripts/h2py.py
+++ b/Tools/scripts/h2py.py
@@ -23,13 +23,13 @@
 import sys, re, getopt, os
 
-p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
+p_define = re.compile(r'^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
 
 p_macro = re.compile(
-    '^[\t ]*#[\t ]*define[\t ]+'
-    '([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
+    r'^[\t ]*#[\t ]*define[\t ]+'
+    r'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
 
-p_include = re.compile('^[\t ]*#[\t ]*include[\t ]+<([^>\n]+)>')
+p_include = re.compile(r'^[\t ]*#[\t ]*include[\t ]+<([^>\n]+)>')
 
 p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
 p_cpp_comment = re.compile('//.*')
diff --git a/Tools/scripts/highlight.py b/Tools/scripts/highlight.py
--- a/Tools/scripts/highlight.py
+++ b/Tools/scripts/highlight.py
@@ -147,14 +147,14 @@ def build_html_page(classified_text, title='python',
 #### LaTeX Output ##########################################
 
 default_latex_commands = {
-    'comment': '{\color{red}#1}',
-    'string': '{\color{ForestGreen}#1}',
-    'docstring': '{\emph{\color{ForestGreen}#1}}',
-    'keyword': '{\color{orange}#1}',
-    'builtin': '{\color{purple}#1}',
-    'definition': '{\color{orange}#1}',
-    'defname': '{\color{blue}#1}',
-    'operator': '{\color{brown}#1}',
+    'comment': r'{\color{red}#1}',
+    'string': r'{\color{ForestGreen}#1}',
+    'docstring': r'{\emph{\color{ForestGreen}#1}}',
+    'keyword': r'{\color{orange}#1}',
+    'builtin': r'{\color{purple}#1}',
+    'definition': r'{\color{orange}#1}',
+    'defname': r'{\color{blue}#1}',
+    'operator': r'{\color{brown}#1}',
 }
 
 default_latex_document = r'''
diff --git a/Tools/scripts/mailerdaemon.py b/Tools/scripts/mailerdaemon.py
--- a/Tools/scripts/mailerdaemon.py
+++ b/Tools/scripts/mailerdaemon.py
@@ -88,7 +88,7 @@ del i
 # no more expressions are searched for. So, order is important.
 emparse_list_reason = [
     r'^5\d{2} <>\.\.\. (?P<reason>.*)',
-    '<>\.\.\. (?P<reason>.*)',
+    r'<>\.\.\. (?P<reason>.*)',
     re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
     re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
     re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
diff --git a/Tools/scripts/parseentities.py b/Tools/scripts/parseentities.py
--- a/Tools/scripts/parseentities.py
+++ b/Tools/scripts/parseentities.py
@@ -14,7 +14,7 @@
 """
 import re,sys
 
-entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
+entityRE = re.compile(r'<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
 
 def parse(text,pos=0,endpos=None):
@@ -39,7 +39,7 @@ def writefile(f,defs):
         if charcode[:2] == '&#':
             code = int(charcode[2:-1])
             if code < 256:
-                charcode = "'\%o'" % code
+                charcode = r"'\%o'" % code
             else:
                 charcode = repr(charcode)
         else:
diff --git a/Tools/scripts/pathfix.py b/Tools/scripts/pathfix.py
--- a/Tools/scripts/pathfix.py
+++ b/Tools/scripts/pathfix.py
@@ -64,7 +64,7 @@ def main():
         if fix(arg): bad = 1
     sys.exit(bad)
 
-ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
+ispythonprog = re.compile(r'^[a-zA-Z0-9_]+\.py$')
 def ispython(name):
     return bool(ispythonprog.match(name))
diff --git a/Tools/scripts/ptags.py b/Tools/scripts/ptags.py
--- a/Tools/scripts/ptags.py
+++ b/Tools/scripts/ptags.py
@@ -24,7 +24,7 @@ def main():
     for s in tags: fp.write(s)
 
-expr = '^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
+expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
 matcher = re.compile(expr)
 
 def treat_file(filename):
diff --git a/Tools/scripts/svneol.py b/Tools/scripts/svneol.py
--- a/Tools/scripts/svneol.py
+++ b/Tools/scripts/svneol.py
@@ -1,6 +1,6 @@
 #! /usr/bin/env python3
 
-"""
+r"""
 SVN helper script.
 
 Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
diff --git a/Tools/scripts/texi2html.py b/Tools/scripts/texi2html.py
--- a/Tools/scripts/texi2html.py
+++ b/Tools/scripts/texi2html.py
@@ -78,11 +78,11 @@
 spprog = re.compile('[\n@{}&<>]')  # Special characters in
                                    # running text
                                    #
                                    # menu item (Yuck!)
-miprog = re.compile('^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
-# 0 1 1 2 3 34 42 0
-# ----- ---------- ---------
-# -|-----------------------------
-# -----------------------------------------------------
+miprog = re.compile(r'^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
+# 0 1 1 2 3 34 42 0
+# ----- ---------- ---------
+# -|-----------------------------
+# -----------------------------------------------------
diff --git a/Tools/unicode/gencodec.py b/Tools/unicode/gencodec.py
--- a/Tools/unicode/gencodec.py
+++ b/Tools/unicode/gencodec.py
@@ -37,11 +37,11 @@
 UNI_UNDEFINED = chr(0xFFFE)
 
 # Placeholder for a missing code point
 MISSING_CODE = -1
 
-mapRE = re.compile('((?:0x[0-9a-fA-F]+\+?)+)'
-                   '\s+'
-                   '((?:(?:0x[0-9a-fA-Z]+|<[A-Za-z]+>)\+?)*)'
-                   '\s*'
-                   '(#.+)?')
+mapRE = re.compile(r'((?:0x[0-9a-fA-F]+\+?)+)'
+                   r'\s+'
+                   r'((?:(?:0x[0-9a-fA-Z]+|<[A-Za-z]+>)\+?)*)'
+                   r'\s*'
+                   r'(#.+)?')
 
 def parsecodes(codes, len=len, range=range):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -838,7 +838,7 @@ class PyBuildExt(build_ext):
         # find out which version of OpenSSL we have
         openssl_ver = 0
         openssl_ver_re = re.compile(
-            '^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
+            r'^\s*#\s*define\s+OPENSSL_VERSION_NUMBER\s+(0x[0-9a-fA-F]+)' )
 
         # look for the openssl version header on the compiler search path.
         opensslv_h = find_file('openssl/opensslv.h', [],
@@ -1724,7 +1724,7 @@ class PyBuildExt(build_ext):
         # All existing framework builds of Tcl/Tk don't support 64-bit
         # architectures.
         cflags = sysconfig.get_config_vars('CFLAGS')[0]
-        archs = re.findall('-arch\s+(\w+)', cflags)
+        archs = re.findall(r'-arch\s+(\w+)', cflags)
 
         tmpfile = os.path.join(self.build_temp, 'tk.arch')
         if not os.path.exists(self.build_temp):