From e1e3e54bdbc8fa2ca223f8bb5a799ddd527676e3 Mon Sep 17 00:00:00 2001 From: Max Bachmann Date: Tue, 9 Jan 2024 00:27:33 +0100 Subject: [PATCH] apply consistent formatting --- .gitattributes | 2 +- .gitignore | 2 +- CMakeLists.txt | 12 +- README.md | 4 +- bench/CyDifflib.svg | 1722 ++++++------ bench/benchmark.py | 58 +- pyproject.toml | 94 +- setup.py | 64 +- src/cydifflib/__init__.py | 3 +- src/cython/CMakeLists.txt | 18 +- src/cython/_initialize.pyx | 4188 +++++++++++++++--------------- tests/test_cydifflib.py | 452 ++-- tests/test_cydifflib_expect.html | 32 +- 13 files changed, 3392 insertions(+), 3259 deletions(-) diff --git a/.gitattributes b/.gitattributes index 650c02b..f914df4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1 @@ -tests/test_cydifflib_expect.html linguist-vendored \ No newline at end of file +tests/test_cydifflib_expect.html linguist-vendored diff --git a/.gitignore b/.gitignore index a068773..fa1a732 100644 --- a/.gitignore +++ b/.gitignore @@ -151,4 +151,4 @@ cython_debug/ # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
#.idea/ -*.cxx \ No newline at end of file +*.cxx diff --git a/CMakeLists.txt b/CMakeLists.txt index 34451e3..7bdd399 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,16 +4,18 @@ cmake_policy(SET CMP0054 NEW) set(SKBUILD_LINK_LIBRARIES_KEYWORD PRIVATE) set(THREADS_PREFER_PTHREAD_FLAG ON) -if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.9" CACHE STRING "Minimum OS X deployment version") +if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + set(CMAKE_OSX_DEPLOYMENT_TARGET + "10.9" + CACHE STRING "Minimum OS X deployment version") endif() project(cydifflib LANGUAGES C CXX) -if (MSVC) - add_compile_options(/W4) +if(MSVC) + add_compile_options(/W4) else() - add_compile_options(-Wall -Wextra -pedantic) + add_compile_options(-Wall -Wextra -pedantic) endif() find_package(PythonExtensions REQUIRED) diff --git a/README.md b/README.md index 4ff3ba7..ca2a1fc 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@

Continous Integration + alt="Continuous Integration"> - - - - @@ -58,25 +58,25 @@ L 0 3.5 - @@ -86,8 +86,8 @@ z - @@ -99,53 +99,53 @@ L 229.060465 41.472 - - @@ -156,8 +156,8 @@ z - @@ -169,81 +169,81 @@ L 400.52093 41.472 - - - @@ -257,267 +257,267 @@ z - - - - - - - - - - - - - @@ -556,14 +556,14 @@ z - - @@ -579,8 +579,8 @@ L -3.5 0 - @@ -592,29 +592,29 @@ L 414.72 254.026355 - @@ -625,8 +625,8 @@ z - @@ -645,8 +645,8 @@ L 414.72 200.468709 - @@ -665,8 +665,8 @@ L 414.72 146.911064 - @@ -687,84 +687,84 @@ L 414.72 93.353419 - - - @@ -784,317 +784,317 @@ z - - - - - - - - + - - - - @@ -1130,77 +1130,77 @@ z - - - - @@ -1255,22 +1255,22 @@ z - - @@ -1286,9 +1286,9 @@ L 86.6 54.570438 - @@ -1305,30 +1305,30 @@ L 86.6 69.248563 - - diff --git a/bench/benchmark.py b/bench/benchmark.py index c77de73..b460d5e 100644 --- a/bench/benchmark.py +++ b/bench/benchmark.py @@ -1,7 +1,10 @@ # todo combine benchmarks of scorers into common code base +from __future__ import annotations + import timeit -import pandas -import numpy as np + +import pandas as pd + def benchmark(name, func, setup, lengths, count): print(f"starting {name}") @@ -14,7 +17,8 @@ def benchmark(name, func, setup, lengths, count): print(f"finished {name}, Runtime: ", stop - start) return results -setup =""" + +setup = """ from difflib import SequenceMatcher from cydifflib import SequenceMatcher as CySequenceMatcher from cdifflib import CSequenceMatcher @@ -26,27 +30,41 @@ def benchmark(name, func, setup, lengths, count): b_list = [''.join(random.choice(characters) for _ in range({0})) for _ in range({1})] """ -lengths = list(range(1,128,2)) +lengths = list(range(1, 128, 2)) count = 2000 -time_difflib = benchmark("difflib", - '[SequenceMatcher(None, a, b).get_matching_blocks() for b in b_list]', - setup, lengths, count) +time_difflib = benchmark( + "difflib", + "[SequenceMatcher(None, a, b).get_matching_blocks() for b in b_list]", + setup, + lengths, + count, +) -time_cdifflib = benchmark("cdifflib", - 
'[CSequenceMatcher(None, a, b).get_matching_blocks() for b in b_list]', - setup, lengths, count) +time_cdifflib = benchmark( + "cdifflib", + "[CSequenceMatcher(None, a, b).get_matching_blocks() for b in b_list]", + setup, + lengths, + count, +) -time_cydifflib = benchmark("cydifflib", - '[CySequenceMatcher(None, a, b).get_matching_blocks() for b in b_list]', - setup, lengths, count) +time_cydifflib = benchmark( + "cydifflib", + "[CySequenceMatcher(None, a, b).get_matching_blocks() for b in b_list]", + setup, + lengths, + count, +) -df = pandas.DataFrame(data={ - "length": lengths, - "difflib": time_difflib, - "cdifflib": time_cdifflib, - "cydifflib": time_cydifflib -}) +results = pd.DataFrame( + data={ + "length": lengths, + "difflib": time_difflib, + "cdifflib": time_cdifflib, + "cydifflib": time_cydifflib, + } +) -df.to_csv("benchmark_results.csv", sep=',',index=False) +results.to_csv("benchmark_results.csv", sep=",", index=False) diff --git a/pyproject.toml b/pyproject.toml index 5bf4512..2023780 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,96 @@ [build-system] requires = [ - "setuptools", - "wheel", - "scikit-build>=0.13.0", + "setuptools>=42", + "scikit-build~=0.17.0", "cmake", "ninja; platform_system!='Windows'", "Cython >=3.0.7, <3.1.0" ] -build-backend = "setuptools.build_meta" \ No newline at end of file +build-backend = "setuptools.build_meta" + +[tool.black] +line-length = 120 + +[tool.mypy] +files = ["src"] +python_version = "3.8" +warn_unused_configs = true +show_error_codes = true +enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] +strict = true +disallow_untyped_defs = false + +[tool.pytest.ini_options] +minversion = "6.0" +testpaths = ["tests"] +addopts = ["-ra", "--showlocals", "--strict-markers", "--strict-config"] +norecursedirs = ["_skbuild"] +xfail_strict = true +log_cli_level = "info" + +[tool.pylint] +py-version = "3.8" + +[tool.pylint.reports] +output-format = "colorized" + 
+[tool.pylint.messages_control] +disable = [ + "design", + "fixme", + "imports", + "line-too-long", + "imports", + "invalid-name", + "protected-access", + "missing-module-docstring", +] + +[tool.ruff] +select = [ + "E", "F", "W", # flake8 + "B", # flake8-bugbear + "I", # isort + "ARG", # flake8-unused-arguments + "C4", # flake8-comprehensions + "EM", # flake8-errmsg + "ICN", # flake8-import-conventions + "ISC", # flake8-implicit-str-concat + "G", # flake8-logging-format + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # pylint + "PT", # flake8-pytest-style + "PTH", # flake8-use-pathlib + "RET", # flake8-return + "RUF", # Ruff-specific + "SIM", # flake8-simplify + "T20", # flake8-print + "UP", # pyupgrade + "YTT", # flake8-2020 + "EXE", # flake8-executable + "NPY", # NumPy specific rules + "PD", # pandas-vet +] +extend-ignore = [ + "PLR", # Design related pylint codes + "E501", # Line too long + "PT004", # Use underscore for non-returning fixture (use usefixture instead) + "PTH123", # use pathlib instead of builtin open + + "F403" # todo we should do better in the future +] +target-version = "py37" +src = ["src"] +unfixable = [ + "T20", # Removes print statements + "F841", # Removes unused variables +] +exclude = [] +flake8-unused-arguments.ignore-variadic-names = true +isort.required-imports = ["from __future__ import annotations"] + +[tool.ruff.per-file-ignores] +"tests/**" = ["T20", "PT009", "ARG001", "PTH118", "PTH120"] +"bench/**" = ["T20"] +"setup.py" = ["T20"] diff --git a/setup.py b/setup.py index d490f9b..a8dcc5b 100644 --- a/setup.py +++ b/setup.py @@ -1,32 +1,32 @@ -from skbuild import setup - -with open('README.md', 'rt', encoding="utf8") as f: - readme = f.read() - -setup( - name="cydifflib", - version="1.0.1", - url="https://github.com/rapidfuzz/cydifflib", - author="Max Bachmann", - author_email="pypi@maxbachmann.de", - description="Fast implementation of difflib's algorithms", - long_description=readme, - 
long_description_content_type="text/markdown", - - license="MIT", - classifiers=[ - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "License :: OSI Approved :: MIT License" - ], - - packages=["cydifflib"], - package_dir={'':'src'}, - zip_safe=True, - include_package_data=True, - python_requires=">=3.6", -) +from __future__ import annotations + +from skbuild import setup + +with open("README.md", encoding="utf8") as f: + readme = f.read() + +setup( + name="cydifflib", + version="1.0.1", + url="https://github.com/rapidfuzz/cydifflib", + author="Max Bachmann", + author_email="pypi@maxbachmann.de", + description="Fast implementation of difflib's algorithms", + long_description=readme, + long_description_content_type="text/markdown", + license="MIT", + classifiers=[ + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "License :: OSI Approved :: MIT License", + ], + packages=["cydifflib"], + package_dir={"": "src"}, + zip_safe=True, + include_package_data=True, + python_requires=">=3.6", +) diff --git a/src/cydifflib/__init__.py b/src/cydifflib/__init__.py index 10ab2b5..6e2d9cd 100644 --- a/src/cydifflib/__init__.py +++ b/src/cydifflib/__init__.py @@ -25,9 +25,10 @@ Class HtmlDiff: For producing HTML side by side comparison with change highlights. 
""" +from __future__ import annotations __author__ = "Max Bachmann" __license__ = "MIT" __version__ = "1.0.1" -from ._initialize import * \ No newline at end of file +from ._initialize import * diff --git a/src/cython/CMakeLists.txt b/src/cython/CMakeLists.txt index b0ff8d9..971616a 100644 --- a/src/cython/CMakeLists.txt +++ b/src/cython/CMakeLists.txt @@ -1,11 +1,15 @@ function(create_cython_target _name) - if(EXISTS ${CMAKE_CURRENT_LIST_DIR}/${_name}.cxx) - set(${_name} ${CMAKE_CURRENT_LIST_DIR}/${_name}.cxx PARENT_SCOPE) - else() - find_package(Cython REQUIRED) - add_cython_target(${_name} CXX) - set(${_name} ${_name} PARENT_SCOPE) - endif() + if(EXISTS ${CMAKE_CURRENT_LIST_DIR}/${_name}.cxx) + set(${_name} + ${CMAKE_CURRENT_LIST_DIR}/${_name}.cxx + PARENT_SCOPE) + else() + find_package(Cython REQUIRED) + add_cython_target(${_name} CXX) + set(${_name} + ${_name} + PARENT_SCOPE) + endif() endfunction(create_cython_target) create_cython_target(_initialize) diff --git a/src/cython/_initialize.pyx b/src/cython/_initialize.pyx index 846d998..7a808b3 100644 --- a/src/cython/_initialize.pyx +++ b/src/cython/_initialize.pyx @@ -1,2094 +1,2094 @@ -__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher', - 'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff', - 'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match'] - -from heapq import nlargest as _nlargest -from collections import namedtuple as _namedtuple -# todo add this once it is supported in all Python versions -#from types import GenericAlias - -cimport cython -from libcpp.vector cimport vector -from libcpp.algorithm cimport fill, sort as cpp_sort -from libc.stdlib cimport malloc, free -from libcpp.unordered_map cimport unordered_map - -Match = _namedtuple('Match', 'a b size') - -@cython.cdivision(True) -cdef double _calculate_ratio(Py_ssize_t matches, Py_ssize_t length) except -1.0: - if length: - return 2.0 * matches / length - return 1.0 - -ctypedef struct MatchingBlockQueueElem: - Py_ssize_t 
alo - Py_ssize_t ahi - Py_ssize_t blo - Py_ssize_t bhi - -ctypedef struct CMatch: - Py_ssize_t a - Py_ssize_t b - Py_ssize_t size - -cdef int CMatch_sorter(const CMatch& lhs, const CMatch& rhs): - if lhs.a != rhs.a: - return lhs.a < rhs.a - if lhs.b != rhs.b: - return lhs.b < rhs.b - return lhs.size < rhs.size - -cdef class SequenceMatcher: - - """ - SequenceMatcher is a flexible class for comparing pairs of sequences of - any type, so long as the sequence elements are hashable. The basic - algorithm predates, and is a little fancier than, an algorithm - published in the late 1980's by Ratcliff and Obershelp under the - hyperbolic name "gestalt pattern matching". The basic idea is to find - the longest contiguous matching subsequence that contains no "junk" - elements (R-O doesn't address junk). The same idea is then applied - recursively to the pieces of the sequences to the left and to the right - of the matching subsequence. This does not yield minimal edit - sequences, but does tend to yield matches that "look right" to people. - - SequenceMatcher tries to compute a "human-friendly diff" between two - sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the - longest *contiguous* & junk-free matching subsequence. That's what - catches peoples' eyes. The Windows(tm) windiff has another interesting - notion, pairing up elements that appear uniquely in each sequence. - That, and the method here, appear to yield more intuitive difference - reports than does diff. This method appears to be the least vulnerable - to syncing up on blocks of "junk lines", though (like blank lines in - ordinary text files, or maybe "

" lines in HTML files). That may be - because this is the only method of the 3 that has a *concept* of - "junk" . - - Example, comparing two strings, and considering blanks to be "junk": - - >>> s = SequenceMatcher(lambda x: x == " ", - ... "private Thread currentThread;", - ... "private volatile Thread currentThread;") - >>> - - .ratio() returns a float in [0, 1], measuring the "similarity" of the - sequences. As a rule of thumb, a .ratio() value over 0.6 means the - sequences are close matches: - - >>> print(round(s.ratio(), 3)) - 0.866 - >>> - - If you're only interested in where the sequences match, - .get_matching_blocks() is handy: - - >>> for block in s.get_matching_blocks(): - ... print("a[%d] and b[%d] match for %d elements" % block) - a[0] and b[0] match for 8 elements - a[8] and b[17] match for 21 elements - a[29] and b[38] match for 0 elements - - Note that the last tuple returned by .get_matching_blocks() is always a - dummy, (len(a), len(b), 0), and this is the only case in which the last - tuple element (number of elements matched) is 0. - - If you want to know how to change the first sequence into the second, - use .get_opcodes(): - - >>> for opcode in s.get_opcodes(): - ... print("%6s a[%d:%d] b[%d:%d]" % opcode) - equal a[0:8] b[0:8] - insert a[8:8] b[8:17] - equal a[8:29] b[17:38] - - See the Differ class for a fancy human-friendly file differencer, which - uses SequenceMatcher both to compare sequences of lines, and to compare - sequences of characters within similar (near-matching) lines. - - See also function get_close_matches() in this module, which shows how - simple code building on SequenceMatcher can be used to do useful work. - - Timing: Basic R-O is cubic time worst case and quadratic time expected - case. SequenceMatcher is quadratic time for the worst case and has - expected-case behavior dependent in a complicated way on how many - elements the sequences have in common; best case time is linear. 
- """ - - cdef public object a - cdef public object b - cdef public dict b2j - cdef public dict fullbcount - cdef public list matching_blocks - cdef public list opcodes - cdef public object isjunk - cdef public set bjunk - cdef public set bpopular - cdef public object autojunk - - # todo this is not threadsafe, which could be an problem in the long run - cdef vector[Py_ssize_t] j2len_ - cdef vector[Py_ssize_t] newj2len_ - cdef Py_hash_t* a_ - cdef Py_ssize_t la - cdef Py_hash_t* b_ - cdef Py_ssize_t lb - - def __init__(self, isjunk=None, a='', b='', autojunk=True): - """Construct a SequenceMatcher. - - Optional arg isjunk is None (the default), or a one-argument - function that takes a sequence element and returns true iff the - element is junk. None is equivalent to passing "lambda x: 0", i.e. - no elements are considered to be junk. For example, pass - lambda x: x in " \\t" - if you're comparing lines as sequences of characters, and don't - want to synch up on blanks or hard tabs. - - Optional arg a is the first of two sequences to be compared. By - default, an empty string. The elements of a must be hashable. See - also .set_seqs() and .set_seq1(). - - Optional arg b is the second of two sequences to be compared. By - default, an empty string. The elements of b must be hashable. See - also .set_seqs() and .set_seq2(). - - Optional arg autojunk should be set to False to disable the - "automatic junk heuristic" that treats popular elements as junk - (see module documentation for more information). - """ - - # Members: - # a - # first sequence - # b - # second sequence; differences are computed as "what do - # we need to do to 'a' to change it into 'b'?" 
- # b2j - # for x in b, b2j[x] is a list of the indices (into b) - # at which x appears; junk and popular elements do not appear - # fullbcount - # for x in b, fullbcount[x] == the number of times x - # appears in b; only materialized if really needed (used - # only for computing quick_ratio()) - # matching_blocks - # a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k]; - # ascending & non-overlapping in i and in j; terminated by - # a dummy (len(a), len(b), 0) sentinel - # opcodes - # a list of (tag, i1, i2, j1, j2) tuples, where tag is - # one of - # 'replace' a[i1:i2] should be replaced by b[j1:j2] - # 'delete' a[i1:i2] should be deleted - # 'insert' b[j1:j2] should be inserted - # 'equal' a[i1:i2] == b[j1:j2] - # isjunk - # a user-supplied function taking a sequence element and - # returning true iff the element is "junk" -- this has - # subtle but helpful effects on the algorithm, which I'll - # get around to writing up someday <0.9 wink>. - # DON'T USE! Only __chain_b uses this. Use "in self.bjunk". - # bjunk - # the items in b for which isjunk is True. - # bpopular - # nonjunk items in b treated as junk by the heuristic (if used). - - self.isjunk = isjunk - self.a = self.b = None - self.autojunk = autojunk - self.set_seqs(a, b) - - cpdef set_seqs(self, a, b): - """Set the two sequences to be compared. - - >>> s = SequenceMatcher() - >>> s.set_seqs("abcd", "bcde") - >>> s.ratio() - 0.75 - """ - - self.set_seq1(a) - self.set_seq2(b) - - cpdef set_seq1(self, a): - """Set the first sequence to be compared. - - The second sequence to be compared is not changed. - - >>> s = SequenceMatcher(None, "abcd", "bcde") - >>> s.ratio() - 0.75 - >>> s.set_seq1("bcde") - >>> s.ratio() - 1.0 - >>> - - SequenceMatcher computes and caches detailed information about the - second sequence, so if you want to compare one sequence S against - many sequences, use .set_seq2(S) once and call .set_seq1(x) - repeatedly for each of the other sequences. 
- - See also set_seqs() and set_seq2(). - """ - - if a is self.a: - return - self.a = a - self.matching_blocks = self.opcodes = None - self.la = len(a) - - cpdef set_seq2(self, b): - """Set the second sequence to be compared. - - The first sequence to be compared is not changed. - - >>> s = SequenceMatcher(None, "abcd", "bcde") - >>> s.ratio() - 0.75 - >>> s.set_seq2("abcd") - >>> s.ratio() - 1.0 - >>> - - SequenceMatcher computes and caches detailed information about the - second sequence, so if you want to compare one sequence S against - many sequences, use .set_seq2(S) once and call .set_seq1(x) - repeatedly for each of the other sequences. - - See also set_seqs() and set_seq1(). - """ - - if b is self.b: - return - self.b = b - self.j2len_.resize(len(b) + 1) - self.newj2len_.resize(len(b) + 1) - self.matching_blocks = self.opcodes = None - self.fullbcount = None - self.lb = len(b) - self.__chain_b() - - # For each element x in b, set b2j[x] to a list of the indices in - # b where x appears; the indices are in increasing order; note that - # the number of times x appears in b is len(b2j[x]) ... - # when self.isjunk is defined, junk elements don't show up in this - # map at all, which stops the central find_longest_match method - # from starting any matching block at a junk element ... - # b2j also does not contain entries for "popular" elements, meaning - # elements that account for more than 1 + 1% of the total elements, and - # when the sequence is reasonably large (>= 200 elements); this can - # be viewed as an adaptive notion of semi-junk, and yields an enormous - # speedup when, e.g., comparing program files with hundreds of - # instances of "return NULL;" ... 
- # note that this is only called when b changes; so for cross-product - # kinds of matches, it's best to call set_seq2 once, then set_seq1 - # repeatedly - - cdef __chain_b(self): - # Because isjunk is a user-defined (not C) function, and we test - # for junk a LOT, it's important to minimize the number of calls. - # Before the tricks described here, __chain_b was by far the most - # time-consuming routine in the whole module! If anyone sees - # Jim Roskind, thank him again for profile.py -- I never would - # have guessed that. - # The first trick is to build b2j ignoring the possibility - # of junk. I.e., we don't call isjunk at all yet. Throwing - # out the junk later is much cheaper than building b2j "right" - # from the start. - b = self.b - self.b2j = b2j = {} - - for i, elt in enumerate(b): - indices = b2j.setdefault(elt, []) - indices.append(i) - - # Purge junk elements - self.bjunk = junk = set() - isjunk = self.isjunk - if isjunk: - for elt in b2j.keys(): - if isjunk(elt): - junk.add(elt) - for elt in junk: # separate loop avoids separate list of keys - del b2j[elt] - - # Purge popular elements that are not junk - self.bpopular = popular = set() - n = len(b) - if self.autojunk and n >= 200: - ntest = n // 100 + 1 - for elt, idxs in b2j.items(): - if len(idxs) > ntest: - popular.add(elt) - for elt in popular: # ditto; as fast for 1% deletion - del b2j[elt] - - cdef CMatch __find_longest_match(self, Py_ssize_t alo, Py_ssize_t ahi, Py_ssize_t blo, Py_ssize_t bhi) except *: - cdef list indexes - cdef Py_ssize_t besti, bestj, bestsize - cdef Py_ssize_t i, j, k - cdef Py_ssize_t index_len, pos, next_val - cdef int found - - # CAUTION: stripping common prefix or suffix would be incorrect. - # E.g., - # ab - # acab - # Longest matching block is "ab", but if common prefix is - # stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - # strip, so ends up claiming that ab is changed to acab by - # inserting "ca" in the middle. 
That's minimal but unintuitive: - # "it's obvious" that someone inserted "ac" at the front. - # Windiff ends up at the same place as diff, but by pairing up - # the unique 'b's and then matching the first two 'a's. - - a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__ - isjunk = self.isjunk - besti, bestj, bestsize = alo, blo, 0 - # find longest junk-free match - # during an iteration of the loop, j2len[j] = length of longest - # junk-free match ending with a[i-1] and b[j] - nothing = [] - for i in range(alo, ahi): - # look at all instances of a[i] in b; note that because - # b2j has no junk keys, the loop is skipped if a[i] is junk - for j in b2j.get(a[i], nothing): - # a[i] matches b[j] - if j < blo: - continue - if j >= bhi: - break - k = self.j2len_[j] + 1 - self.newj2len_[j + 1] = k - if k > bestsize: - besti = i-k+1 - bestj = j-k+1 - bestsize = k - - self.j2len_.swap(self.newj2len_) - fill(self.newj2len_.begin() + blo, self.newj2len_.begin() + bhi + 1, 0) - - fill(self.j2len_.begin() + blo, self.j2len_.begin() + bhi + 1, 0) - - # Extend the best by non-junk elements on each end. In particular, - # "popular" non-junk elements aren't in b2j, which greatly speeds - # the inner loop above, but also means "the best" match so far - # doesn't contain any junk *or* popular non-junk elements. - while besti > alo and bestj > blo and \ - not isbjunk(b[bestj-1]) and \ - a[besti-1] == b[bestj-1]: - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - while besti+bestsize < ahi and bestj+bestsize < bhi and \ - not isbjunk(b[bestj+bestsize]) and \ - a[besti+bestsize] == b[bestj+bestsize]: - bestsize += 1 - - # Now that we have a wholly interesting match (albeit possibly - # empty!), we may as well suck up the matching junk on each - # side of it too. Can't think of a good reason not to, and it - # saves post-processing the (possibly considerable) expense of - # figuring out what to do with it. 
In the case of an empty - # interesting match, this is clearly the right thing to do, - # because no other kind of match is possible in the regions. - while besti > alo and bestj > blo and \ - isbjunk(b[bestj-1]) and \ - a[besti-1] == b[bestj-1]: - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - while besti+bestsize < ahi and bestj+bestsize < bhi and \ - isbjunk(b[bestj+bestsize]) and \ - a[besti+bestsize] == b[bestj+bestsize]: - bestsize = bestsize + 1 - - return CMatch(besti, bestj, bestsize) - - def find_longest_match(self, alo=0, ahi=None, blo=0, bhi=None): - """Find longest matching block in a[alo:ahi] and b[blo:bhi]. - - By default it will find the longest match in the entirety of a and b. - - If isjunk is not defined: - - Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where - alo <= i <= i+k <= ahi - blo <= j <= j+k <= bhi - and for all (i',j',k') meeting those conditions, - k >= k' - i <= i' - and if i == i', j <= j' - - In other words, of all maximal matching blocks, return one that - starts earliest in a, and of all those maximal matching blocks that - start earliest in a, return the one that starts earliest in b. - - >>> s = SequenceMatcher(None, " abcd", "abcd abcd") - >>> s.find_longest_match(0, 5, 0, 9) - Match(a=0, b=4, size=5) - - If isjunk is defined, first the longest matching block is - determined as above, but with the additional restriction that no - junk element appears in the block. Then that block is extended as - far as possible by matching (only) junk elements on both sides. So - the resulting block never matches on junk except as identical junk - happens to be adjacent to an "interesting" match. - - Here's the same example as before, but considering blanks to be - junk. That prevents " abcd" from matching the " abcd" at the tail - end of the second sequence directly. 
Instead only the "abcd" can - match, and matches the leftmost "abcd" in the second sequence: - - >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd") - >>> s.find_longest_match(0, 5, 0, 9) - Match(a=1, b=0, size=4) - - If no blocks match, return (alo, blo, 0). - - >>> s = SequenceMatcher(None, "ab", "c") - >>> s.find_longest_match(0, 2, 0, 1) - Match(a=0, b=0, size=0) - """ - cdef Py_ssize_t ahi_ = ahi if ahi is not None else len(self.a) - cdef Py_ssize_t bhi_ = bhi if bhi is not None else len(self.b) - match = self.__find_longest_match(alo, ahi_, blo, bhi_) - - return Match(match.a, match.b, match.size) - - cpdef get_matching_blocks(self): - """Return list of triples describing matching subsequences. - - Each triple is of the form (i, j, n), and means that - a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in - i and in j. New in Python 2.5, it's also guaranteed that if - (i, j, n) and (i', j', n') are adjacent triples in the list, and - the second is not the last triple in the list, then i+n != i' or - j+n != j'. IOW, adjacent triples never describe adjacent equal - blocks. - - The last triple is a dummy, (len(a), len(b), 0), and is the only - triple with n==0. - - >>> s = SequenceMatcher(None, "abxcd", "abcd") - >>> list(s.get_matching_blocks()) - [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)] - """ - cdef Py_ssize_t i, j, k, i1, j1, k1, i2, j2, k2 - cdef Py_ssize_t alo, ahi, blo, bhi - cdef size_t queue_head - cdef vector[MatchingBlockQueueElem] queue - cdef vector[CMatch] matching_blocks_ - - if self.matching_blocks is not None: - return self.matching_blocks - - # This is most naturally expressed as a recursive algorithm, but - # at least one user bumped into extreme use cases that exceeded - # the recursion limit on their box. So, now we maintain a list - # ('queue`) of blocks we still need to look at, and append partial - # results to `matching_blocks` in a loop; the matches are sorted - # at the end. 
- queue.push_back(MatchingBlockQueueElem(0, self.la, 0, self.lb)) - while not queue.empty(): - elem = queue.back() - alo, ahi, blo, bhi = elem.alo, elem.ahi, elem.blo, elem.bhi - queue.pop_back() - x = self.__find_longest_match(alo, ahi, blo, bhi) - i, j, k = x.a, x.b, x.size - # a[alo:i] vs b[blo:j] unknown - # a[i:i+k] same as b[j:j+k] - # a[i+k:ahi] vs b[j+k:bhi] unknown - if k: # if k is 0, there was no matching block - matching_blocks_.push_back(x) - if alo < i and blo < j: - queue.push_back(MatchingBlockQueueElem(alo, i, blo, j)) - if i+k < ahi and j+k < bhi: - queue.push_back(MatchingBlockQueueElem(i+k, ahi, j+k, bhi)) - cpp_sort(matching_blocks_.begin(), matching_blocks_.end(), &CMatch_sorter) - - # It's possible that we have adjacent equal blocks in the - # matching_blocks list now. Starting with 2.5, this code was added - # to collapse them. - i1 = j1 = k1 = 0 - non_adjacent = [] - for match in matching_blocks_: - i2, j2, k2 = match.a, match.b, match.size - # Is this block adjacent to i1, j1, k1? - if i1 + k1 == i2 and j1 + k1 == j2: - # Yes, so collapse them -- this just increases the length of - # the first block by the length of the second, and the first - # block so lengthened remains the block to compare against. - k1 += k2 - else: - # Not adjacent. Remember the first block (k1==0 means it's - # the dummy we started with), and make the second block the - # new block to compare against. - if k1: - non_adjacent.append(Match(i1, j1, k1)) - i1, j1, k1 = i2, j2, k2 - if k1: - non_adjacent.append(Match(i1, j1, k1)) - - non_adjacent.append(Match(self.la, self.lb, 0)) - self.matching_blocks = non_adjacent - return self.matching_blocks - - def get_opcodes(self): - """Return list of 5-tuples describing how to turn a into b. - - Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple - has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the - tuple preceding it, and likewise for j1 == the previous j2. 
- - The tags are strings, with these meanings: - - 'replace': a[i1:i2] should be replaced by b[j1:j2] - 'delete': a[i1:i2] should be deleted. - Note that j1==j2 in this case. - 'insert': b[j1:j2] should be inserted at a[i1:i1]. - Note that i1==i2 in this case. - 'equal': a[i1:i2] == b[j1:j2] - - >>> a = "qabxcd" - >>> b = "abycdf" - >>> s = SequenceMatcher(None, a, b) - >>> for tag, i1, i2, j1, j2 in s.get_opcodes(): - ... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" % - ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))) - delete a[0:1] (q) b[0:0] () - equal a[1:3] (ab) b[0:2] (ab) - replace a[3:4] (x) b[2:3] (y) - equal a[4:6] (cd) b[3:5] (cd) - insert a[6:6] () b[5:6] (f) - """ - - if self.opcodes is not None: - return self.opcodes - i = j = 0 - self.opcodes = answer = [] - for ai, bj, size in self.get_matching_blocks(): - # invariant: we've pumped out correct diffs to change - # a[:i] into b[:j], and the next matching block is - # a[ai:ai+size] == b[bj:bj+size]. So we need to pump - # out a diff to change a[i:ai] into b[j:bj], pump out - # the matching block, and move (i,j) beyond the match - tag = '' - if i < ai and j < bj: - tag = 'replace' - elif i < ai: - tag = 'delete' - elif j < bj: - tag = 'insert' - if tag: - answer.append( (tag, i, ai, j, bj) ) - i, j = ai+size, bj+size - # the list of matching blocks is terminated by a - # sentinel with size 0 - if size: - answer.append( ('equal', ai, i, bj, j) ) - return answer - - def get_grouped_opcodes(self, n=3): - """ Isolate change clusters by eliminating ranges with no changes. - - Return a generator of groups with up to n lines of context. - Each group is in the same format as returned by get_opcodes(). 
- - >>> from pprint import pprint - >>> a = list(map(str, range(1,40))) - >>> b = a[:] - >>> b[8:8] = ['i'] # Make an insertion - >>> b[20] += 'x' # Make a replacement - >>> b[23:28] = [] # Make a deletion - >>> b[30] += 'y' # Make another replacement - >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes())) - [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)], - [('equal', 16, 19, 17, 20), - ('replace', 19, 20, 20, 21), - ('equal', 20, 22, 21, 23), - ('delete', 22, 27, 23, 23), - ('equal', 27, 30, 23, 26)], - [('equal', 31, 34, 27, 30), - ('replace', 34, 35, 30, 31), - ('equal', 35, 38, 31, 34)]] - """ - - codes = self.get_opcodes() - if not codes: - codes = [("equal", 0, 1, 0, 1)] - # Fixup leading and trailing groups if they show no changes. - if codes[0][0] == 'equal': - tag, i1, i2, j1, j2 = codes[0] - codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2 - if codes[-1][0] == 'equal': - tag, i1, i2, j1, j2 = codes[-1] - codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n) - - nn = n + n - group = [] - for tag, i1, i2, j1, j2 in codes: - # End the current group and start a new one whenever - # there is a large range with no changes. - if tag == 'equal' and i2-i1 > nn: - group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n))) - yield group - group = [] - i1, j1 = max(i1, i2-n), max(j1, j2-n) - group.append((tag, i1, i2, j1 ,j2)) - if group and not (len(group)==1 and group[0][0] == 'equal'): - yield group - - def ratio(self): - """Return a measure of the sequences' similarity (float in [0,1]). - - Where T is the total number of elements in both sequences, and - M is the number of matches, this is 2.0*M / T. - Note that this is 1 if the sequences are identical, and 0 if - they have nothing in common. - - .ratio() is expensive to compute if you haven't already computed - .get_matching_blocks() or .get_opcodes(), in which case you may - want to try .quick_ratio() or .real_quick_ratio() first to get an - upper bound. 
- - >>> s = SequenceMatcher(None, "abcd", "bcde") - >>> s.ratio() - 0.75 - >>> s.quick_ratio() - 0.75 - >>> s.real_quick_ratio() - 1.0 - """ - - matches = sum(triple[-1] for triple in self.get_matching_blocks()) - return _calculate_ratio(matches, len(self.a) + len(self.b)) - - def quick_ratio(self): - """Return an upper bound on ratio() relatively quickly. - - This isn't defined beyond that it is an upper bound on .ratio(), and - is faster to compute. - """ - - # viewing a and b as multisets, set matches to the cardinality - # of their intersection; this counts the number of matches - # without regard to order, so is clearly an upper bound - if self.fullbcount is None: - self.fullbcount = fullbcount = {} - for elt in self.b: - fullbcount[elt] = fullbcount.get(elt, 0) + 1 - fullbcount = self.fullbcount - # avail[x] is the number of times x appears in 'b' less the - # number of times we've seen it in 'a' so far ... kinda - avail = {} - availhas, matches = avail.__contains__, 0 - for elt in self.a: - if availhas(elt): - numb = avail[elt] - else: - numb = fullbcount.get(elt, 0) - avail[elt] = numb - 1 - if numb > 0: - matches = matches + 1 - return _calculate_ratio(matches, len(self.a) + len(self.b)) - - def real_quick_ratio(self): - """Return an upper bound on ratio() very quickly. - - This isn't defined beyond that it is an upper bound on .ratio(), and - is faster to compute than either .ratio() or .quick_ratio(). - """ - - la, lb = len(self.a), len(self.b) - # can't have more matches than the number of elements in the - # shorter sequence - return _calculate_ratio(min(la, lb), la + lb) - - # todo add this once it is supported in all Python versions - #__class_getitem__ = classmethod(GenericAlias) - - -def get_close_matches(word, possibilities, n=3, cutoff=0.6): - """Use SequenceMatcher to return list of the best "good enough" matches. - - word is a sequence for which close matches are desired (typically a - string). 
- - possibilities is a list of sequences against which to match word - (typically a list of strings). - - Optional arg n (default 3) is the maximum number of close matches to - return. n must be > 0. - - Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities - that don't score at least that similar to word are ignored. - - The best (no more than n) matches among the possibilities are returned - in a list, sorted by similarity score, most similar first. - - >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"]) - ['apple', 'ape'] - >>> import keyword as _keyword - >>> get_close_matches("wheel", _keyword.kwlist) - ['while'] - >>> get_close_matches("Apple", _keyword.kwlist) - [] - >>> get_close_matches("accept", _keyword.kwlist) - ['except'] - """ - - if not n > 0: - raise ValueError("n must be > 0: %r" % (n,)) - if not 0.0 <= cutoff <= 1.0: - raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,)) - result = [] - s = SequenceMatcher() - s.set_seq2(word) - for x in possibilities: - s.set_seq1(x) - if s.real_quick_ratio() >= cutoff and \ - s.quick_ratio() >= cutoff and \ - s.ratio() >= cutoff: - result.append((s.ratio(), x)) - - # Move the best scorers to head of list - result = _nlargest(n, result) - # Strip scores for the best n matches - return [x for score, x in result] - - -def _keep_original_ws(s, tag_s): - """Replace whitespace with the original whitespace characters in `s`""" - return ''.join( - c if tag_c == " " and c.isspace() else tag_c - for c, tag_c in zip(s, tag_s) - ) - - - -class Differ: - r""" - Differ is a class for comparing sequences of lines of text, and - producing human-readable differences or deltas. Differ uses - SequenceMatcher both to compare sequences of lines, and to compare - sequences of characters within similar (near-matching) lines. 
- - Each line of a Differ delta begins with a two-letter code: - - '- ' line unique to sequence 1 - '+ ' line unique to sequence 2 - ' ' line common to both sequences - '? ' line not present in either input sequence - - Lines beginning with '? ' attempt to guide the eye to intraline - differences, and were not present in either input sequence. These lines - can be confusing if the sequences contain tab characters. - - Note that Differ makes no claim to produce a *minimal* diff. To the - contrary, minimal diffs are often counter-intuitive, because they synch - up anywhere possible, sometimes accidental matches 100 pages apart. - Restricting synch points to contiguous matches preserves some notion of - locality, at the occasional cost of producing a longer diff. - - Example: Comparing two texts. - - First we set up the texts, sequences of individual single-line strings - ending with newlines (such sequences can also be obtained from the - `readlines()` method of file-like objects): - - >>> text1 = ''' 1. Beautiful is better than ugly. - ... 2. Explicit is better than implicit. - ... 3. Simple is better than complex. - ... 4. Complex is better than complicated. - ... '''.splitlines(keepends=True) - >>> len(text1) - 4 - >>> text1[0][-1] - '\n' - >>> text2 = ''' 1. Beautiful is better than ugly. - ... 3. Simple is better than complex. - ... 4. Complicated is better than complex. - ... 5. Flat is better than nested. - ... '''.splitlines(keepends=True) - - Next we instantiate a Differ object: - - >>> d = Differ() - - Note that when instantiating a Differ object we may pass functions to - filter out line and character 'junk'. See Differ.__init__ for details. - - Finally, we compare the two: - - >>> result = list(d.compare(text1, text2)) - - 'result' is a list of strings, so let's pretty-print it: - - >>> from pprint import pprint as _pprint - >>> _pprint(result) - [' 1. Beautiful is better than ugly.\n', - '- 2. Explicit is better than implicit.\n', - '- 3. 
Simple is better than complex.\n', - '+ 3. Simple is better than complex.\n', - '? ++\n', - '- 4. Complex is better than complicated.\n', - '? ^ ---- ^\n', - '+ 4. Complicated is better than complex.\n', - '? ++++ ^ ^\n', - '+ 5. Flat is better than nested.\n'] - - As a single multi-line string it looks like this: - - >>> print(''.join(result), end="") - 1. Beautiful is better than ugly. - - 2. Explicit is better than implicit. - - 3. Simple is better than complex. - + 3. Simple is better than complex. - ? ++ - - 4. Complex is better than complicated. - ? ^ ---- ^ - + 4. Complicated is better than complex. - ? ++++ ^ ^ - + 5. Flat is better than nested. - """ - - def __init__(self, linejunk=None, charjunk=None): - """ - Construct a text differencer, with optional filters. - - The two optional keyword parameters are for filter functions: - - - `linejunk`: A function that should accept a single string argument, - and return true iff the string is junk. The module-level function - `IS_LINE_JUNK` may be used to filter out lines without visible - characters, except for at most one splat ('#'). It is recommended - to leave linejunk None; the underlying SequenceMatcher class has - an adaptive notion of "noise" lines that's better than any static - definition the author has ever been able to craft. - - - `charjunk`: A function that should accept a string of length 1. The - module-level function `IS_CHARACTER_JUNK` may be used to filter out - whitespace characters (a blank or tab; **note**: bad idea to include - newline in this!). Use of IS_CHARACTER_JUNK is recommended. - """ - - self.linejunk = linejunk - self.charjunk = charjunk - - def compare(self, a, b): - r""" - Compare two sequences of lines; generate the resulting delta. - - Each sequence must contain individual single-line strings ending with - newlines. Such sequences can be obtained from the `readlines()` method - of file-like objects. 
The delta generated also consists of newline- - terminated strings, ready to be printed as-is via the writeline() - method of a file-like object. - - Example: - - >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True), - ... 'ore\ntree\nemu\n'.splitlines(True))), - ... end="") - - one - ? ^ - + ore - ? ^ - - two - - three - ? - - + tree - + emu - """ - - cruncher = SequenceMatcher(self.linejunk, a, b) - for tag, alo, ahi, blo, bhi in cruncher.get_opcodes(): - if tag == 'replace': - g = self._fancy_replace(a, alo, ahi, b, blo, bhi) - elif tag == 'delete': - g = self._dump('-', a, alo, ahi) - elif tag == 'insert': - g = self._dump('+', b, blo, bhi) - elif tag == 'equal': - g = self._dump(' ', a, alo, ahi) - else: - raise ValueError('unknown tag %r' % (tag,)) - - yield from g - - def _dump(self, tag, x, lo, hi): - """Generate comparison results for a same-tagged range.""" - for i in range(lo, hi): - yield '%s %s' % (tag, x[i]) - - def _plain_replace(self, a, alo, ahi, b, blo, bhi): - assert alo < ahi and blo < bhi - # dump the shorter block first -- reduces the burden on short-term - # memory if the blocks are of very different sizes - if bhi - blo < ahi - alo: - first = self._dump('+', b, blo, bhi) - second = self._dump('-', a, alo, ahi) - else: - first = self._dump('-', a, alo, ahi) - second = self._dump('+', b, blo, bhi) - - for g in first, second: - yield from g - - def _fancy_replace(self, a, alo, ahi, b, blo, bhi): - r""" - When replacing one block of lines with another, search the blocks - for *similar* lines; the best-matching pair (if any) is used as a - synch point, and intraline difference marking is done on the - similar pair. Lots of work, but often worth it. - - Example: - - >>> d = Differ() - >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1, - ... ['abcdefGhijkl\n'], 0, 1) - >>> print(''.join(results), end="") - - abcDefghiJkl - ? ^ ^ ^ - + abcdefGhijkl - ? 
^ ^ ^ - """ - - # don't synch up unless the lines have a similarity score of at - # least cutoff; best_ratio tracks the best score seen so far - best_ratio, cutoff = 0.74, 0.75 - cruncher = SequenceMatcher(self.charjunk) - eqi, eqj = None, None # 1st indices of equal lines (if any) - - # search for the pair that matches best without being identical - # (identical lines must be junk lines, & we don't want to synch up - # on junk -- unless we have to) - for j in range(blo, bhi): - bj = b[j] - cruncher.set_seq2(bj) - for i in range(alo, ahi): - ai = a[i] - if ai == bj: - if eqi is None: - eqi, eqj = i, j - continue - cruncher.set_seq1(ai) - # computing similarity is expensive, so use the quick - # upper bounds first -- have seen this speed up messy - # compares by a factor of 3. - # note that ratio() is only expensive to compute the first - # time it's called on a sequence pair; the expensive part - # of the computation is cached by cruncher - if cruncher.real_quick_ratio() > best_ratio and \ - cruncher.quick_ratio() > best_ratio and \ - cruncher.ratio() > best_ratio: - best_ratio, best_i, best_j = cruncher.ratio(), i, j - if best_ratio < cutoff: - # no non-identical "pretty close" pair - if eqi is None: - # no identical pair either -- treat it as a straight replace - yield from self._plain_replace(a, alo, ahi, b, blo, bhi) - return - # no close pair, but an identical pair -- synch up on that - best_i, best_j, best_ratio = eqi, eqj, 1.0 - else: - # there's a close pair, so forget the identical pair (if any) - eqi = None - - # a[best_i] very similar to b[best_j]; eqi is None iff they're not - # identical - - # pump out diffs from before the synch point - yield from self._fancy_helper(a, alo, best_i, b, blo, best_j) - - # do intraline marking on the synch pair - aelt, belt = a[best_i], b[best_j] - if eqi is None: - # pump out a '-', '?', '+', '?' 
quad for the synched lines - atags = btags = "" - cruncher.set_seqs(aelt, belt) - for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes(): - la, lb = ai2 - ai1, bj2 - bj1 - if tag == 'replace': - atags += '^' * la - btags += '^' * lb - elif tag == 'delete': - atags += '-' * la - elif tag == 'insert': - btags += '+' * lb - elif tag == 'equal': - atags += ' ' * la - btags += ' ' * lb - else: - raise ValueError('unknown tag %r' % (tag,)) - yield from self._qformat(aelt, belt, atags, btags) - else: - # the synch pair is identical - yield ' ' + aelt - - # pump out diffs from after the synch point - yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi) - - def _fancy_helper(self, a, alo, ahi, b, blo, bhi): - g = [] - if alo < ahi: - if blo < bhi: - g = self._fancy_replace(a, alo, ahi, b, blo, bhi) - else: - g = self._dump('-', a, alo, ahi) - elif blo < bhi: - g = self._dump('+', b, blo, bhi) - - yield from g - - def _qformat(self, aline, bline, atags, btags): - r""" - Format "?" output and deal with tabs. - - Example: - - >>> d = Differ() - >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n', - ... ' ^ ^ ^ ', ' ^ ^ ^ ') - >>> for line in results: print(repr(line)) - ... - '- \tabcDefghiJkl\n' - '? \t ^ ^ ^\n' - '+ \tabcdefGhijkl\n' - '? \t ^ ^ ^\n' - """ - atags = _keep_original_ws(aline, atags).rstrip() - btags = _keep_original_ws(bline, btags).rstrip() - - yield "- " + aline - if atags: - yield f"? {atags}\n" - - yield "+ " + bline - if btags: - yield f"? {btags}\n" - -# With respect to junk, an earlier version of ndiff simply refused to -# *start* a match with a junk element. The result was cases like this: -# before: private Thread currentThread; -# after: private volatile Thread currentThread; -# If you consider whitespace to be junk, the longest contiguous match -# not starting with junk is "e Thread currentThread". So ndiff reported -# that "e volatil" was inserted between the 't' and the 'e' in "private". 
-# While an accurate view, to people that's absurd. The current version -# looks for matching blocks that are entirely junk-free, then extends the -# longest one of those as far as possible but only with matching junk. -# So now "currentThread" is matched, then extended to suck up the -# preceding blank; then "private" is matched, and extended to suck up the -# following blank; then "Thread" is matched; and finally ndiff reports -# that "volatile " was inserted before "Thread". The only quibble -# remaining is that perhaps it was really the case that " volatile" -# was inserted after "private". I can live with that . - -import re - -def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match): - r""" - Return True for ignorable line: iff `line` is blank or contains a single '#'. - - Examples: - - >>> IS_LINE_JUNK('\n') - True - >>> IS_LINE_JUNK(' # \n') - True - >>> IS_LINE_JUNK('hello\n') - False - """ - - return pat(line) is not None - -def IS_CHARACTER_JUNK(ch, ws=" \t"): - r""" - Return True for ignorable character: iff `ch` is a space or tab. 
- - Examples: - - >>> IS_CHARACTER_JUNK(' ') - True - >>> IS_CHARACTER_JUNK('\t') - True - >>> IS_CHARACTER_JUNK('\n') - False - >>> IS_CHARACTER_JUNK('x') - False - """ - - return ch in ws - - -######################################################################## -### Unified Diff -######################################################################## - -def _format_range_unified(start, stop): - 'Convert range to the "ed" format' - # Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning = start + 1 # lines start numbering with one - length = stop - start - if length == 1: - return '{}'.format(beginning) - if not length: - beginning -= 1 # empty ranges begin at line just before the range - return '{},{}'.format(beginning, length) - -def unified_diff(a, b, fromfile='', tofile='', fromfiledate='', - tofiledate='', n=3, lineterm='\n'): - r""" - Compare two sequences of lines; generate the delta as a unified diff. - - Unified diffs are a compact way of showing line changes and a few - lines of context. The number of context lines is set by 'n' which - defaults to three. - - By default, the diff control lines (those with ---, +++, or @@) are - created with a trailing newline. This is helpful so that inputs - created from file.readlines() result in diffs that are suitable for - file.writelines() since both the inputs and outputs have trailing - newlines. - - For inputs that do not have trailing newlines, set the lineterm - argument to "" so that the output will be uniformly newline free. - - The unidiff format normally has a header for filenames and modification - times. Any or all of these may be specified using strings for - 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. - The modification times are normally expressed in the ISO 8601 format. - - Example: - - >>> for line in unified_diff('one two three four'.split(), - ... 'zero one tree four'.split(), 'Original', 'Current', - ... 
'2005-01-26 23:30:50', '2010-04-02 10:20:52', - ... lineterm=''): - ... print(line) # doctest: +NORMALIZE_WHITESPACE - --- Original 2005-01-26 23:30:50 - +++ Current 2010-04-02 10:20:52 - @@ -1,4 +1,4 @@ - +zero - one - -two - -three - +tree - four - """ - - _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm) - started = False - for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): - if not started: - started = True - fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' - todate = '\t{}'.format(tofiledate) if tofiledate else '' - yield '--- {}{}{}'.format(fromfile, fromdate, lineterm) - yield '+++ {}{}{}'.format(tofile, todate, lineterm) - - first, last = group[0], group[-1] - file1_range = _format_range_unified(first[1], last[2]) - file2_range = _format_range_unified(first[3], last[4]) - yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm) - - for tag, i1, i2, j1, j2 in group: - if tag == 'equal': - for line in a[i1:i2]: - yield ' ' + line - continue - if tag in {'replace', 'delete'}: - for line in a[i1:i2]: - yield '-' + line - if tag in {'replace', 'insert'}: - for line in b[j1:j2]: - yield '+' + line - - -######################################################################## -### Context Diff -######################################################################## - -def _format_range_context(start, stop): - 'Convert range to the "ed" format' - # Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning = start + 1 # lines start numbering with one - length = stop - start - if not length: - beginning -= 1 # empty ranges begin at line just before the range - if length <= 1: - return '{}'.format(beginning) - return '{},{}'.format(beginning, beginning + length - 1) - -# See http://www.unix.org/single_unix_specification/ -def context_diff(a, b, fromfile='', tofile='', - fromfiledate='', tofiledate='', n=3, lineterm='\n'): - r""" - Compare two sequences of lines; generate the delta as a 
context diff. - - Context diffs are a compact way of showing line changes and a few - lines of context. The number of context lines is set by 'n' which - defaults to three. - - By default, the diff control lines (those with *** or ---) are - created with a trailing newline. This is helpful so that inputs - created from file.readlines() result in diffs that are suitable for - file.writelines() since both the inputs and outputs have trailing - newlines. - - For inputs that do not have trailing newlines, set the lineterm - argument to "" so that the output will be uniformly newline free. - - The context diff format normally has a header for filenames and - modification times. Any or all of these may be specified using - strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. - The modification times are normally expressed in the ISO 8601 format. - If not specified, the strings default to blanks. - - Example: - - >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True), - ... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')), - ... end="") - *** Original - --- Current - *************** - *** 1,4 **** - one - ! two - ! three - four - --- 1,4 ---- - + zero - one - ! tree - four - """ - - _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm) - prefix = dict(insert='+ ', delete='- ', replace='! 
', equal=' ') - started = False - for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): - if not started: - started = True - fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' - todate = '\t{}'.format(tofiledate) if tofiledate else '' - yield '*** {}{}{}'.format(fromfile, fromdate, lineterm) - yield '--- {}{}{}'.format(tofile, todate, lineterm) - - first, last = group[0], group[-1] - yield '***************' + lineterm - - file1_range = _format_range_context(first[1], last[2]) - yield '*** {} ****{}'.format(file1_range, lineterm) - - if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group): - for tag, i1, i2, _, _ in group: - if tag != 'insert': - for line in a[i1:i2]: - yield prefix[tag] + line - - file2_range = _format_range_context(first[3], last[4]) - yield '--- {} ----{}'.format(file2_range, lineterm) - - if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group): - for tag, _, _, j1, j2 in group: - if tag != 'delete': - for line in b[j1:j2]: - yield prefix[tag] + line - -def _check_types(a, b, *args): - # Checking types is weird, but the alternative is garbled output when - # someone passes mixed bytes and str to {unified,context}_diff(). E.g. - # without this check, passing filenames as bytes results in output like - # --- b'oldfile.txt' - # +++ b'newfile.txt' - # because of how str.format() incorporates bytes objects. - if a and not isinstance(a[0], str): - raise TypeError('lines to compare must be str, not %s (%r)' % - (type(a[0]).__name__, a[0])) - if b and not isinstance(b[0], str): - raise TypeError('lines to compare must be str, not %s (%r)' % - (type(b[0]).__name__, b[0])) - for arg in args: - if not isinstance(arg, str): - raise TypeError('all arguments must be str, not: %r' % (arg,)) - -def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'', - fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'): - r""" - Compare `a` and `b`, two sequences of lines represented as bytes rather - than str. 
This is a wrapper for `dfunc`, which is typically either - unified_diff() or context_diff(). Inputs are losslessly converted to - strings so that `dfunc` only has to worry about strings, and encoded - back to bytes on return. This is necessary to compare files with - unknown or inconsistent encoding. All other inputs (except `n`) must be - bytes rather than str. - """ - def decode(s): - try: - return s.decode('ascii', 'surrogateescape') - except AttributeError as err: - msg = ('all arguments must be bytes, not %s (%r)' % - (type(s).__name__, s)) - raise TypeError(msg) from err - a = list(map(decode, a)) - b = list(map(decode, b)) - fromfile = decode(fromfile) - tofile = decode(tofile) - fromfiledate = decode(fromfiledate) - tofiledate = decode(tofiledate) - lineterm = decode(lineterm) - - lines = dfunc(a, b, fromfile, tofile, fromfiledate, tofiledate, n, lineterm) - for line in lines: - yield line.encode('ascii', 'surrogateescape') - -def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK): - r""" - Compare `a` and `b` (lists of strings); return a `Differ`-style delta. - - Optional keyword parameters `linejunk` and `charjunk` are for filter - functions, or can be None: - - - linejunk: A function that should accept a single string argument and - return true iff the string is junk. The default is None, and is - recommended; the underlying SequenceMatcher class has an adaptive - notion of "noise" lines. - - - charjunk: A function that accepts a character (string of length - 1), and returns true iff the character is junk. The default is - the module-level function IS_CHARACTER_JUNK, which filters out - whitespace characters (a blank or tab; note: it's a bad idea to - include newline in this!). - - Tools/scripts/ndiff.py is a command-line front-end to this function. - - Example: - - >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), - ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) - >>> print(''.join(diff), end="") - - one - ? ^ - + ore - ? 
^ - - two - - three - ? - - + tree - + emu - """ - return Differ(linejunk, charjunk).compare(a, b) - -def _mdiff(fromlines, tolines, context=None, linejunk=None, - charjunk=IS_CHARACTER_JUNK): - r"""Returns generator yielding marked up from/to side by side differences. - - Arguments: - fromlines -- list of text lines to compared to tolines - tolines -- list of text lines to be compared to fromlines - context -- number of context lines to display on each side of difference, - if None, all from/to text lines will be generated. - linejunk -- passed on to ndiff (see ndiff documentation) - charjunk -- passed on to ndiff (see ndiff documentation) - - This function returns an iterator which returns a tuple: - (from line tuple, to line tuple, boolean flag) - - from/to line tuple -- (line num, line text) - line num -- integer or None (to indicate a context separation) - line text -- original line text with following markers inserted: - '\0+' -- marks start of added text - '\0-' -- marks start of deleted text - '\0^' -- marks start of changed text - '\1' -- marks end of added/deleted/changed text - - boolean flag -- None indicates context separation, True indicates - either "from" or "to" line contains a change, otherwise False. - - This function/iterator was originally developed to generate side by side - file difference for making HTML pages (see HtmlDiff class for example - usage). - - Note, this function utilizes the ndiff function to generate the side by - side difference markup. Optional ndiff arguments may be passed to this - function and they in turn will be passed to ndiff. - """ - import re - - # regular expression for finding intraline change indices - change_re = re.compile(r'(\++|\-+|\^+)') - - # create the difference iterator to generate the differences - diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk) - - def _make_line(lines, format_key, side, num_lines=[0,0]): - """Returns line of text with user's change markup and line formatting. 
- - lines -- list of lines from the ndiff generator to produce a line of - text from. When producing the line of text to return, the - lines used are removed from this list. - format_key -- '+' return first line in list with "add" markup around - the entire line. - '-' return first line in list with "delete" markup around - the entire line. - '?' return first line in list with add/delete/change - intraline markup (indices obtained from second line) - None return first line in list with no markup - side -- indice into the num_lines list (0=from,1=to) - num_lines -- from/to current line number. This is NOT intended to be a - passed parameter. It is present as a keyword argument to - maintain memory of the current line numbers between calls - of this function. - - Note, this function is purposefully not defined at the module scope so - that data it needs from its parent function (within whose context it - is defined) does not need to be of module scope. - """ - num_lines[side] += 1 - # Handle case where no user markup is to be added, just return line of - # text with user's line format to allow for usage of the line number. - if format_key is None: - return (num_lines[side],lines.pop(0)[2:]) - # Handle case of intraline changes - if format_key == '?': - text, markers = lines.pop(0), lines.pop(0) - # find intraline changes (store change type and indices in tuples) - sub_info = [] - def record_sub_info(match_object,sub_info=sub_info): - sub_info.append([match_object.group(1)[0],match_object.span()]) - return match_object.group(1) - change_re.sub(record_sub_info,markers) - # process each tuple inserting our special marks that won't be - # noticed by an xml/html escaper. 
- for key,(begin,end) in reversed(sub_info): - text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:] - text = text[2:] - # Handle case of add/delete entire line - else: - text = lines.pop(0)[2:] - # if line of text is just a newline, insert a space so there is - # something for the user to highlight and see. - if not text: - text = ' ' - # insert marks that won't be noticed by an xml/html escaper. - text = '\0' + format_key + text + '\1' - # Return line of text, first allow user's line formatter to do its - # thing (such as adding the line number) then replace the special - # marks with what the user's change markup. - return (num_lines[side],text) - - def _line_iterator(): - """Yields from/to lines of text with a change indication. - - This function is an iterator. It itself pulls lines from a - differencing iterator, processes them and yields them. When it can - it yields both a "from" and a "to" line, otherwise it will yield one - or the other. In addition to yielding the lines of from/to text, a - boolean flag is yielded to indicate if the text line(s) have - differences in them. - - Note, this function is purposefully not defined at the module scope so - that data it needs from its parent function (within whose context it - is defined) does not need to be of module scope. - """ - lines = [] - num_blanks_pending, num_blanks_to_yield = 0, 0 - while True: - # Load up next 4 lines so we can look ahead, create strings which - # are a concatenation of the first character of each of the 4 lines - # so we can do some very readable comparisons. - while len(lines) < 4: - lines.append(next(diff_lines_iterator, 'X')) - s = ''.join([line[0] for line in lines]) - if s.startswith('X'): - # When no more lines, pump out any remaining blank lines so the - # corresponding add/delete lines get a matching blank line so - # all line pairs get yielded at the next level. 
- num_blanks_to_yield = num_blanks_pending - elif s.startswith('-?+?'): - # simple intraline change - yield _make_line(lines,'?',0), _make_line(lines,'?',1), True - continue - elif s.startswith('--++'): - # in delete block, add block coming: we do NOT want to get - # caught up on blank lines yet, just process the delete line - num_blanks_pending -= 1 - yield _make_line(lines,'-',0), None, True - continue - elif s.startswith(('--?+', '--+', '- ')): - # in delete block and see an intraline change or unchanged line - # coming: yield the delete line and then blanks - from_line,to_line = _make_line(lines,'-',0), None - num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0 - elif s.startswith('-+?'): - # intraline change - yield _make_line(lines,None,0), _make_line(lines,'?',1), True - continue - elif s.startswith('-?+'): - # intraline change - yield _make_line(lines,'?',0), _make_line(lines,None,1), True - continue - elif s.startswith('-'): - # delete FROM line - num_blanks_pending -= 1 - yield _make_line(lines,'-',0), None, True - continue - elif s.startswith('+--'): - # in add block, delete block coming: we do NOT want to get - # caught up on blank lines yet, just process the add line - num_blanks_pending += 1 - yield None, _make_line(lines,'+',1), True - continue - elif s.startswith(('+ ', '+-')): - # will be leaving an add block: yield blanks then add line - from_line, to_line = None, _make_line(lines,'+',1) - num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0 - elif s.startswith('+'): - # inside an add block, yield the add line - num_blanks_pending += 1 - yield None, _make_line(lines,'+',1), True - continue - elif s.startswith(' '): - # unchanged text, yield it to both sides - yield _make_line(lines[:],None,0),_make_line(lines,None,1),False - continue - # Catch up on the blank lines so when we yield the next from/to - # pair, they are lined up. 
- while(num_blanks_to_yield < 0): - num_blanks_to_yield += 1 - yield None,('','\n'),True - while(num_blanks_to_yield > 0): - num_blanks_to_yield -= 1 - yield ('','\n'),None,True - if s.startswith('X'): - return - else: - yield from_line,to_line,True - - def _line_pair_iterator(): - """Yields from/to lines of text with a change indication. - - This function is an iterator. It itself pulls lines from the line - iterator. Its difference from that iterator is that this function - always yields a pair of from/to text lines (with the change - indication). If necessary it will collect single from/to lines - until it has a matching pair from/to pair to yield. - - Note, this function is purposefully not defined at the module scope so - that data it needs from its parent function (within whose context it - is defined) does not need to be of module scope. - """ - line_iterator = _line_iterator() - fromlines,tolines=[],[] - while True: - # Collecting lines of text until we have a from/to pair - while (len(fromlines)==0 or len(tolines)==0): - try: - from_line, to_line, found_diff = next(line_iterator) - except StopIteration: - return - if from_line is not None: - fromlines.append((from_line,found_diff)) - if to_line is not None: - tolines.append((to_line,found_diff)) - # Once we have a pair, remove them from the collection and yield it - from_line, fromDiff = fromlines.pop(0) - to_line, to_diff = tolines.pop(0) - yield (from_line,to_line,fromDiff or to_diff) - - # Handle case where user does not want context differencing, just yield - # them up without doing anything else with them. - line_pair_iterator = _line_pair_iterator() - if context is None: - yield from line_pair_iterator - # Handle case where user wants context differencing. We must do some - # storage of lines until we know for sure that they are to be yielded. 
- else: - context += 1 - lines_to_write = 0 - while True: - # Store lines up until we find a difference, note use of a - # circular queue because we only need to keep around what - # we need for context. - index, contextLines = 0, [None]*(context) - found_diff = False - while(found_diff is False): - try: - from_line, to_line, found_diff = next(line_pair_iterator) - except StopIteration: - return - i = index % context - contextLines[i] = (from_line, to_line, found_diff) - index += 1 - # Yield lines that we have collected so far, but first yield - # the user's separator. - if index > context: - yield None, None, None - lines_to_write = context - else: - lines_to_write = index - index = 0 - while(lines_to_write): - i = index % context - index += 1 - yield contextLines[i] - lines_to_write -= 1 - # Now yield the context lines after the change - lines_to_write = context-1 - try: - while(lines_to_write): - from_line, to_line, found_diff = next(line_pair_iterator) - # If another change within the context, extend the context - if found_diff: - lines_to_write = context-1 - else: - lines_to_write -= 1 - yield from_line, to_line, found_diff - except StopIteration: - # Catch exception from next() and return normally - return - - -_file_template = """ - - - - - - - - - - - - %(table)s%(legend)s - - -""" - -_styles = """ - table.diff {font-family:Courier; border:medium;} - .diff_header {background-color:#e0e0e0} - td.diff_header {text-align:right} - .diff_next {background-color:#c0c0c0} - .diff_add {background-color:#aaffaa} - .diff_chg {background-color:#ffff77} - .diff_sub {background-color:#ffaaaa}""" - -_table_template = """ - - - - %(header_row)s - -%(data_rows)s -
""" - -_legend = """ - - - - -
Legends
- - - - -
Colors
 Added 
Changed
Deleted
- - - - -
Links
(f)irst change
(n)ext change
(t)op
""" - -class HtmlDiff(object): - """For producing HTML side by side comparison with change highlights. - - This class can be used to create an HTML table (or a complete HTML file - containing the table) showing a side by side, line by line comparison - of text with inter-line and intra-line change highlights. The table can - be generated in either full or contextual difference mode. - - The following methods are provided for HTML generation: - - make_table -- generates HTML for a single side by side table - make_file -- generates complete HTML file with a single side by side table - - See tools/scripts/diff.py for an example usage of this class. - """ - - _file_template = _file_template - _styles = _styles - _table_template = _table_template - _legend = _legend - _default_prefix = 0 - - def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None, - charjunk=IS_CHARACTER_JUNK): - """HtmlDiff instance initializer - - Arguments: - tabsize -- tab stop spacing, defaults to 8. - wrapcolumn -- column number where lines are broken and wrapped, - defaults to None where lines are not wrapped. - linejunk,charjunk -- keyword arguments passed into ndiff() (used by - HtmlDiff() to generate the side by side HTML differences). See - ndiff() documentation for argument default values and descriptions. - """ - self._tabsize = tabsize - self._wrapcolumn = wrapcolumn - self._linejunk = linejunk - self._charjunk = charjunk - - def make_file(self, fromlines, tolines, fromdesc='', todesc='', - context=False, numlines=5, *, charset='utf-8'): - """Returns HTML file of side by side comparison with change highlights - - Arguments: - fromlines -- list of "from" lines - tolines -- list of "to" lines - fromdesc -- "from" file column header string - todesc -- "to" file column header string - context -- set to True for contextual differences (defaults to False - which shows full differences). - numlines -- number of context lines. 
When context is set True, - controls number of lines displayed before and after the change. - When context is False, controls the number of lines to place - the "next" link anchors before the next change (so click of - "next" link jumps to just before the change). - charset -- charset of the HTML document - """ - - return (self._file_template % dict( - styles=self._styles, - legend=self._legend, - table=self.make_table(fromlines, tolines, fromdesc, todesc, - context=context, numlines=numlines), - charset=charset - )).encode(charset, 'xmlcharrefreplace').decode(charset) - - def _tab_newline_replace(self,fromlines,tolines): - """Returns from/to line lists with tabs expanded and newlines removed. - - Instead of tab characters being replaced by the number of spaces - needed to fill in to the next tab stop, this function will fill - the space with tab characters. This is done so that the difference - algorithms can identify changes in a file when tabs are replaced by - spaces and vice versa. At the end of the HTML generation, the tab - characters will be replaced with a nonbreakable space. - """ - def expand_tabs(line): - # hide real spaces - line = line.replace(' ','\0') - # expand tabs into spaces - line = line.expandtabs(self._tabsize) - # replace spaces from expanded tabs back into tab characters - # (we'll replace them with markup after we do differencing) - line = line.replace(' ','\t') - return line.replace('\0',' ').rstrip('\n') - fromlines = [expand_tabs(line) for line in fromlines] - tolines = [expand_tabs(line) for line in tolines] - return fromlines,tolines - - def _split_line(self,data_list,line_num,text): - """Builds list of text lines by splitting text lines at wrap point - - This function will determine if the input text line needs to be - wrapped (split) into separate lines. If so, the first wrap point - will be determined and the first line appended to the output - text line list. 
This function is used recursively to handle - the second part of the split line to further split it. - """ - # if blank line or context separator, just add it to the output list - if not line_num: - data_list.append((line_num,text)) - return - - # if line text doesn't need wrapping, just add it to the output list - size = len(text) - max = self._wrapcolumn - if (size <= max) or ((size -(text.count('\0')*3)) <= max): - data_list.append((line_num,text)) - return - - # scan text looking for the wrap point, keeping track if the wrap - # point is inside markers - i = 0 - n = 0 - mark = '' - while n < max and i < size: - if text[i] == '\0': - i += 1 - mark = text[i] - i += 1 - elif text[i] == '\1': - i += 1 - mark = '' - else: - i += 1 - n += 1 - - # wrap point is inside text, break it up into separate lines - line1 = text[:i] - line2 = text[i:] - - # if wrap point is inside markers, place end marker at end of first - # line and start marker at beginning of second line because each - # line will have its own table tag markup around it. - if mark: - line1 = line1 + '\1' - line2 = '\0' + mark + line2 - - # tack on first line onto the output list - data_list.append((line_num,line1)) - - # use this routine again to wrap the remaining text - self._split_line(data_list,'>',line2) - - def _line_wrapper(self,diffs): - """Returns iterator that splits (wraps) mdiff text lines""" - - # pull from/to data and flags from mdiff iterator - for fromdata,todata,flag in diffs: - # check for context separators and pass them through - if flag is None: - yield fromdata,todata,flag - continue - (fromline,fromtext),(toline,totext) = fromdata,todata - # for each from/to line split it at the wrap column to form - # list of text lines. 
- fromlist,tolist = [],[] - self._split_line(fromlist,fromline,fromtext) - self._split_line(tolist,toline,totext) - # yield from/to line in pairs inserting blank lines as - # necessary when one side has more wrapped lines - while fromlist or tolist: - if fromlist: - fromdata = fromlist.pop(0) - else: - fromdata = ('',' ') - if tolist: - todata = tolist.pop(0) - else: - todata = ('',' ') - yield fromdata,todata,flag - - def _collect_lines(self,diffs): - """Collects mdiff output into separate lists - - Before storing the mdiff from/to data into a list, it is converted - into a single line of text with HTML markup. - """ - - fromlist,tolist,flaglist = [],[],[] - # pull from/to data and flags from mdiff style iterator - for fromdata,todata,flag in diffs: - try: - # store HTML markup of the lines into the lists - fromlist.append(self._format_line(0,flag,*fromdata)) - tolist.append(self._format_line(1,flag,*todata)) - except TypeError: - # exceptions occur for lines where context separators go - fromlist.append(None) - tolist.append(None) - flaglist.append(flag) - return fromlist,tolist,flaglist - - def _format_line(self,side,flag,linenum,text): - """Returns HTML markup of "from" / "to" text lines - - side -- 0 or 1 indicating "from" or "to" text - flag -- indicates if difference on line - linenum -- line number (used for line number column) - text -- line text to be marked up - """ - try: - linenum = '%d' % linenum - id = ' id="%s%s"' % (self._prefix[side],linenum) - except TypeError: - # handle blank lines where linenum is '>' or '' - id = '' - # replace those things that would get confused with HTML symbols - text=text.replace("&","&").replace(">",">").replace("<","<") - - # make space non-breakable so they don't get compressed or line wrapped - text = text.replace(' ',' ').rstrip() - - return '%s%s' \ - % (id,linenum,text) - - def _make_prefix(self): - """Create unique anchor prefixes""" - - # Generate a unique anchor prefix so multiple tables - # can exist on the 
same HTML page without conflicts. - fromprefix = "from%d_" % HtmlDiff._default_prefix - toprefix = "to%d_" % HtmlDiff._default_prefix - HtmlDiff._default_prefix += 1 - # store prefixes so line format method has access - self._prefix = [fromprefix,toprefix] - - def _convert_flags(self,fromlist,tolist,flaglist,context,numlines): - """Makes list of "next" links""" - - # all anchor names will be generated using the unique "to" prefix - toprefix = self._prefix[1] - - # process change flags, generating middle column of next anchors/links - next_id = ['']*len(flaglist) - next_href = ['']*len(flaglist) - num_chg, in_change = 0, False - last = 0 - for i,flag in enumerate(flaglist): - if flag: - if not in_change: - in_change = True - last = i - # at the beginning of a change, drop an anchor a few lines - # (the context lines) before the change for the previous - # link - i = max([0,i-numlines]) - next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg) - # at the beginning of a change, drop a link to the next - # change - num_chg += 1 - next_href[last] = '
n' % ( - toprefix,num_chg) - else: - in_change = False - # check for cases where there is no content to avoid exceptions - if not flaglist: - flaglist = [False] - next_id = [''] - next_href = [''] - last = 0 - if context: - fromlist = [' No Differences Found '] - tolist = fromlist - else: - fromlist = tolist = [' Empty File '] - # if not a change on first line, drop a link - if not flaglist[0]: - next_href[0] = 'f' % toprefix - # redo the last link to link to the top - next_href[last] = 't' % (toprefix) - - return fromlist,tolist,flaglist,next_href,next_id - - def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False, - numlines=5): - """Returns HTML table of side by side comparison with change highlights - - Arguments: - fromlines -- list of "from" lines - tolines -- list of "to" lines - fromdesc -- "from" file column header string - todesc -- "to" file column header string - context -- set to True for contextual differences (defaults to False - which shows full differences). - numlines -- number of context lines. When context is set True, - controls number of lines displayed before and after the change. - When context is False, controls the number of lines to place - the "next" link anchors before the next change (so click of - "next" link jumps to just before the change). - """ - - # make unique anchor prefixes so that multiple tables may exist - # on the same page without conflict. 
- self._make_prefix() - - # change tabs to spaces before it gets more difficult after we insert - # markup - fromlines,tolines = self._tab_newline_replace(fromlines,tolines) - - # create diffs iterator which generates side by side from/to data - if context: - context_lines = numlines - else: - context_lines = None - diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk, - charjunk=self._charjunk) - - # set up iterator to wrap lines that exceed desired width - if self._wrapcolumn: - diffs = self._line_wrapper(diffs) - - # collect up from/to lines and flags into lists (also format the lines) - fromlist,tolist,flaglist = self._collect_lines(diffs) - - # process change flags, generating middle column of next anchors/links - fromlist,tolist,flaglist,next_href,next_id = self._convert_flags( - fromlist,tolist,flaglist,context,numlines) - - s = [] - fmt = ' %s%s' + \ - '%s%s\n' - for i in range(len(flaglist)): - if flaglist[i] is None: - # mdiff yields None on separator lines skip the bogus ones - # generated for the first line - if i > 0: - s.append(' \n \n') - else: - s.append( fmt % (next_id[i],next_href[i],fromlist[i], - next_href[i],tolist[i])) - if fromdesc or todesc: - header_row = '%s%s%s%s' % ( - '
', - '%s' % fromdesc, - '
', - '%s' % todesc) - else: - header_row = '' - - table = self._table_template % dict( - data_rows=''.join(s), - header_row=header_row, - prefix=self._prefix[1]) - - return table.replace('\0+',''). \ - replace('\0-',''). \ - replace('\0^',''). \ - replace('\1',''). \ - replace('\t',' ') - -del re - -def restore(delta, which): - r""" - Generate one of the two sequences that generated a delta. - - Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract - lines originating from file 1 or 2 (parameter `which`), stripping off line - prefixes. - - Examples: - - >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), - ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) - >>> diff = list(diff) - >>> print(''.join(restore(diff, 1)), end="") - one - two - three - >>> print(''.join(restore(diff, 2)), end="") - ore - tree - emu - """ - cdef Py_ssize_t _which = int(which) - if _which == 1: - tag = "- " - elif _which == 2: - tag = "+ " - else: - raise ValueError('unknown delta choice (must be 1 or 2): %r' - % which) from None - prefixes = (" ", tag) - for line in delta: - if line[:2] in prefixes: - yield line[2:] +__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher', + 'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff', + 'unified_diff', 'diff_bytes', 'HtmlDiff', 'Match'] + +from heapq import nlargest as _nlargest +from collections import namedtuple as _namedtuple +# todo add this once it is supported in all Python versions +#from types import GenericAlias + +cimport cython +from libcpp.vector cimport vector +from libcpp.algorithm cimport fill, sort as cpp_sort +from libc.stdlib cimport malloc, free +from libcpp.unordered_map cimport unordered_map + +Match = _namedtuple('Match', 'a b size') + +@cython.cdivision(True) +cdef double _calculate_ratio(Py_ssize_t matches, Py_ssize_t length) except -1.0: + if length: + return 2.0 * matches / length + return 1.0 + +ctypedef struct MatchingBlockQueueElem: + Py_ssize_t alo + Py_ssize_t ahi + 
Py_ssize_t blo + Py_ssize_t bhi + +ctypedef struct CMatch: + Py_ssize_t a + Py_ssize_t b + Py_ssize_t size + +cdef int CMatch_sorter(const CMatch& lhs, const CMatch& rhs): + if lhs.a != rhs.a: + return lhs.a < rhs.a + if lhs.b != rhs.b: + return lhs.b < rhs.b + return lhs.size < rhs.size + +cdef class SequenceMatcher: + + """ + SequenceMatcher is a flexible class for comparing pairs of sequences of + any type, so long as the sequence elements are hashable. The basic + algorithm predates, and is a little fancier than, an algorithm + published in the late 1980's by Ratcliff and Obershelp under the + hyperbolic name "gestalt pattern matching". The basic idea is to find + the longest contiguous matching subsequence that contains no "junk" + elements (R-O doesn't address junk). The same idea is then applied + recursively to the pieces of the sequences to the left and to the right + of the matching subsequence. This does not yield minimal edit + sequences, but does tend to yield matches that "look right" to people. + + SequenceMatcher tries to compute a "human-friendly diff" between two + sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the + longest *contiguous* & junk-free matching subsequence. That's what + catches peoples' eyes. The Windows(tm) windiff has another interesting + notion, pairing up elements that appear uniquely in each sequence. + That, and the method here, appear to yield more intuitive difference + reports than does diff. This method appears to be the least vulnerable + to syncing up on blocks of "junk lines", though (like blank lines in + ordinary text files, or maybe "

" lines in HTML files). That may be + because this is the only method of the 3 that has a *concept* of + "junk" . + + Example, comparing two strings, and considering blanks to be "junk": + + >>> s = SequenceMatcher(lambda x: x == " ", + ... "private Thread currentThread;", + ... "private volatile Thread currentThread;") + >>> + + .ratio() returns a float in [0, 1], measuring the "similarity" of the + sequences. As a rule of thumb, a .ratio() value over 0.6 means the + sequences are close matches: + + >>> print(round(s.ratio(), 3)) + 0.866 + >>> + + If you're only interested in where the sequences match, + .get_matching_blocks() is handy: + + >>> for block in s.get_matching_blocks(): + ... print("a[%d] and b[%d] match for %d elements" % block) + a[0] and b[0] match for 8 elements + a[8] and b[17] match for 21 elements + a[29] and b[38] match for 0 elements + + Note that the last tuple returned by .get_matching_blocks() is always a + dummy, (len(a), len(b), 0), and this is the only case in which the last + tuple element (number of elements matched) is 0. + + If you want to know how to change the first sequence into the second, + use .get_opcodes(): + + >>> for opcode in s.get_opcodes(): + ... print("%6s a[%d:%d] b[%d:%d]" % opcode) + equal a[0:8] b[0:8] + insert a[8:8] b[8:17] + equal a[8:29] b[17:38] + + See the Differ class for a fancy human-friendly file differencer, which + uses SequenceMatcher both to compare sequences of lines, and to compare + sequences of characters within similar (near-matching) lines. + + See also function get_close_matches() in this module, which shows how + simple code building on SequenceMatcher can be used to do useful work. + + Timing: Basic R-O is cubic time worst case and quadratic time expected + case. SequenceMatcher is quadratic time for the worst case and has + expected-case behavior dependent in a complicated way on how many + elements the sequences have in common; best case time is linear. 
+ """ + + cdef public object a + cdef public object b + cdef public dict b2j + cdef public dict fullbcount + cdef public list matching_blocks + cdef public list opcodes + cdef public object isjunk + cdef public set bjunk + cdef public set bpopular + cdef public object autojunk + + # todo this is not threadsafe, which could be an problem in the long run + cdef vector[Py_ssize_t] j2len_ + cdef vector[Py_ssize_t] newj2len_ + cdef Py_hash_t* a_ + cdef Py_ssize_t la + cdef Py_hash_t* b_ + cdef Py_ssize_t lb + + def __init__(self, isjunk=None, a='', b='', autojunk=True): + """Construct a SequenceMatcher. + + Optional arg isjunk is None (the default), or a one-argument + function that takes a sequence element and returns true iff the + element is junk. None is equivalent to passing "lambda x: 0", i.e. + no elements are considered to be junk. For example, pass + lambda x: x in " \\t" + if you're comparing lines as sequences of characters, and don't + want to synch up on blanks or hard tabs. + + Optional arg a is the first of two sequences to be compared. By + default, an empty string. The elements of a must be hashable. See + also .set_seqs() and .set_seq1(). + + Optional arg b is the second of two sequences to be compared. By + default, an empty string. The elements of b must be hashable. See + also .set_seqs() and .set_seq2(). + + Optional arg autojunk should be set to False to disable the + "automatic junk heuristic" that treats popular elements as junk + (see module documentation for more information). + """ + + # Members: + # a + # first sequence + # b + # second sequence; differences are computed as "what do + # we need to do to 'a' to change it into 'b'?" 
+ # b2j + # for x in b, b2j[x] is a list of the indices (into b) + # at which x appears; junk and popular elements do not appear + # fullbcount + # for x in b, fullbcount[x] == the number of times x + # appears in b; only materialized if really needed (used + # only for computing quick_ratio()) + # matching_blocks + # a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k]; + # ascending & non-overlapping in i and in j; terminated by + # a dummy (len(a), len(b), 0) sentinel + # opcodes + # a list of (tag, i1, i2, j1, j2) tuples, where tag is + # one of + # 'replace' a[i1:i2] should be replaced by b[j1:j2] + # 'delete' a[i1:i2] should be deleted + # 'insert' b[j1:j2] should be inserted + # 'equal' a[i1:i2] == b[j1:j2] + # isjunk + # a user-supplied function taking a sequence element and + # returning true iff the element is "junk" -- this has + # subtle but helpful effects on the algorithm, which I'll + # get around to writing up someday <0.9 wink>. + # DON'T USE! Only __chain_b uses this. Use "in self.bjunk". + # bjunk + # the items in b for which isjunk is True. + # bpopular + # nonjunk items in b treated as junk by the heuristic (if used). + + self.isjunk = isjunk + self.a = self.b = None + self.autojunk = autojunk + self.set_seqs(a, b) + + cpdef set_seqs(self, a, b): + """Set the two sequences to be compared. + + >>> s = SequenceMatcher() + >>> s.set_seqs("abcd", "bcde") + >>> s.ratio() + 0.75 + """ + + self.set_seq1(a) + self.set_seq2(b) + + cpdef set_seq1(self, a): + """Set the first sequence to be compared. + + The second sequence to be compared is not changed. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.set_seq1("bcde") + >>> s.ratio() + 1.0 + >>> + + SequenceMatcher computes and caches detailed information about the + second sequence, so if you want to compare one sequence S against + many sequences, use .set_seq2(S) once and call .set_seq1(x) + repeatedly for each of the other sequences. 
+ + See also set_seqs() and set_seq2(). + """ + + if a is self.a: + return + self.a = a + self.matching_blocks = self.opcodes = None + self.la = len(a) + + cpdef set_seq2(self, b): + """Set the second sequence to be compared. + + The first sequence to be compared is not changed. + + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.set_seq2("abcd") + >>> s.ratio() + 1.0 + >>> + + SequenceMatcher computes and caches detailed information about the + second sequence, so if you want to compare one sequence S against + many sequences, use .set_seq2(S) once and call .set_seq1(x) + repeatedly for each of the other sequences. + + See also set_seqs() and set_seq1(). + """ + + if b is self.b: + return + self.b = b + self.j2len_.resize(len(b) + 1) + self.newj2len_.resize(len(b) + 1) + self.matching_blocks = self.opcodes = None + self.fullbcount = None + self.lb = len(b) + self.__chain_b() + + # For each element x in b, set b2j[x] to a list of the indices in + # b where x appears; the indices are in increasing order; note that + # the number of times x appears in b is len(b2j[x]) ... + # when self.isjunk is defined, junk elements don't show up in this + # map at all, which stops the central find_longest_match method + # from starting any matching block at a junk element ... + # b2j also does not contain entries for "popular" elements, meaning + # elements that account for more than 1 + 1% of the total elements, and + # when the sequence is reasonably large (>= 200 elements); this can + # be viewed as an adaptive notion of semi-junk, and yields an enormous + # speedup when, e.g., comparing program files with hundreds of + # instances of "return NULL;" ... 
+ # note that this is only called when b changes; so for cross-product + # kinds of matches, it's best to call set_seq2 once, then set_seq1 + # repeatedly + + cdef __chain_b(self): + # Because isjunk is a user-defined (not C) function, and we test + # for junk a LOT, it's important to minimize the number of calls. + # Before the tricks described here, __chain_b was by far the most + # time-consuming routine in the whole module! If anyone sees + # Jim Roskind, thank him again for profile.py -- I never would + # have guessed that. + # The first trick is to build b2j ignoring the possibility + # of junk. I.e., we don't call isjunk at all yet. Throwing + # out the junk later is much cheaper than building b2j "right" + # from the start. + b = self.b + self.b2j = b2j = {} + + for i, elt in enumerate(b): + indices = b2j.setdefault(elt, []) + indices.append(i) + + # Purge junk elements + self.bjunk = junk = set() + isjunk = self.isjunk + if isjunk: + for elt in b2j.keys(): + if isjunk(elt): + junk.add(elt) + for elt in junk: # separate loop avoids separate list of keys + del b2j[elt] + + # Purge popular elements that are not junk + self.bpopular = popular = set() + n = len(b) + if self.autojunk and n >= 200: + ntest = n // 100 + 1 + for elt, idxs in b2j.items(): + if len(idxs) > ntest: + popular.add(elt) + for elt in popular: # ditto; as fast for 1% deletion + del b2j[elt] + + cdef CMatch __find_longest_match(self, Py_ssize_t alo, Py_ssize_t ahi, Py_ssize_t blo, Py_ssize_t bhi) except *: + cdef list indexes + cdef Py_ssize_t besti, bestj, bestsize + cdef Py_ssize_t i, j, k + cdef Py_ssize_t index_len, pos, next_val + cdef int found + + # CAUTION: stripping common prefix or suffix would be incorrect. + # E.g., + # ab + # acab + # Longest matching block is "ab", but if common prefix is + # stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + # strip, so ends up claiming that ab is changed to acab by + # inserting "ca" in the middle. 
That's minimal but unintuitive: + # "it's obvious" that someone inserted "ac" at the front. + # Windiff ends up at the same place as diff, but by pairing up + # the unique 'b's and then matching the first two 'a's. + + a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.bjunk.__contains__ + isjunk = self.isjunk + besti, bestj, bestsize = alo, blo, 0 + # find longest junk-free match + # during an iteration of the loop, j2len[j] = length of longest + # junk-free match ending with a[i-1] and b[j] + nothing = [] + for i in range(alo, ahi): + # look at all instances of a[i] in b; note that because + # b2j has no junk keys, the loop is skipped if a[i] is junk + for j in b2j.get(a[i], nothing): + # a[i] matches b[j] + if j < blo: + continue + if j >= bhi: + break + k = self.j2len_[j] + 1 + self.newj2len_[j + 1] = k + if k > bestsize: + besti = i-k+1 + bestj = j-k+1 + bestsize = k + + self.j2len_.swap(self.newj2len_) + fill(self.newj2len_.begin() + blo, self.newj2len_.begin() + bhi + 1, 0) + + fill(self.j2len_.begin() + blo, self.j2len_.begin() + bhi + 1, 0) + + # Extend the best by non-junk elements on each end. In particular, + # "popular" non-junk elements aren't in b2j, which greatly speeds + # the inner loop above, but also means "the best" match so far + # doesn't contain any junk *or* popular non-junk elements. + while besti > alo and bestj > blo and \ + not isbjunk(b[bestj-1]) and \ + a[besti-1] == b[bestj-1]: + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + while besti+bestsize < ahi and bestj+bestsize < bhi and \ + not isbjunk(b[bestj+bestsize]) and \ + a[besti+bestsize] == b[bestj+bestsize]: + bestsize += 1 + + # Now that we have a wholly interesting match (albeit possibly + # empty!), we may as well suck up the matching junk on each + # side of it too. Can't think of a good reason not to, and it + # saves post-processing the (possibly considerable) expense of + # figuring out what to do with it. 
In the case of an empty + # interesting match, this is clearly the right thing to do, + # because no other kind of match is possible in the regions. + while besti > alo and bestj > blo and \ + isbjunk(b[bestj-1]) and \ + a[besti-1] == b[bestj-1]: + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + while besti+bestsize < ahi and bestj+bestsize < bhi and \ + isbjunk(b[bestj+bestsize]) and \ + a[besti+bestsize] == b[bestj+bestsize]: + bestsize = bestsize + 1 + + return CMatch(besti, bestj, bestsize) + + def find_longest_match(self, alo=0, ahi=None, blo=0, bhi=None): + """Find longest matching block in a[alo:ahi] and b[blo:bhi]. + + By default it will find the longest match in the entirety of a and b. + + If isjunk is not defined: + + Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where + alo <= i <= i+k <= ahi + blo <= j <= j+k <= bhi + and for all (i',j',k') meeting those conditions, + k >= k' + i <= i' + and if i == i', j <= j' + + In other words, of all maximal matching blocks, return one that + starts earliest in a, and of all those maximal matching blocks that + start earliest in a, return the one that starts earliest in b. + + >>> s = SequenceMatcher(None, " abcd", "abcd abcd") + >>> s.find_longest_match(0, 5, 0, 9) + Match(a=0, b=4, size=5) + + If isjunk is defined, first the longest matching block is + determined as above, but with the additional restriction that no + junk element appears in the block. Then that block is extended as + far as possible by matching (only) junk elements on both sides. So + the resulting block never matches on junk except as identical junk + happens to be adjacent to an "interesting" match. + + Here's the same example as before, but considering blanks to be + junk. That prevents " abcd" from matching the " abcd" at the tail + end of the second sequence directly. 
Instead only the "abcd" can + match, and matches the leftmost "abcd" in the second sequence: + + >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd") + >>> s.find_longest_match(0, 5, 0, 9) + Match(a=1, b=0, size=4) + + If no blocks match, return (alo, blo, 0). + + >>> s = SequenceMatcher(None, "ab", "c") + >>> s.find_longest_match(0, 2, 0, 1) + Match(a=0, b=0, size=0) + """ + cdef Py_ssize_t ahi_ = ahi if ahi is not None else len(self.a) + cdef Py_ssize_t bhi_ = bhi if bhi is not None else len(self.b) + match = self.__find_longest_match(alo, ahi_, blo, bhi_) + + return Match(match.a, match.b, match.size) + + cpdef get_matching_blocks(self): + """Return list of triples describing matching subsequences. + + Each triple is of the form (i, j, n), and means that + a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in + i and in j. New in Python 2.5, it's also guaranteed that if + (i, j, n) and (i', j', n') are adjacent triples in the list, and + the second is not the last triple in the list, then i+n != i' or + j+n != j'. IOW, adjacent triples never describe adjacent equal + blocks. + + The last triple is a dummy, (len(a), len(b), 0), and is the only + triple with n==0. + + >>> s = SequenceMatcher(None, "abxcd", "abcd") + >>> list(s.get_matching_blocks()) + [Match(a=0, b=0, size=2), Match(a=3, b=2, size=2), Match(a=5, b=4, size=0)] + """ + cdef Py_ssize_t i, j, k, i1, j1, k1, i2, j2, k2 + cdef Py_ssize_t alo, ahi, blo, bhi + cdef size_t queue_head + cdef vector[MatchingBlockQueueElem] queue + cdef vector[CMatch] matching_blocks_ + + if self.matching_blocks is not None: + return self.matching_blocks + + # This is most naturally expressed as a recursive algorithm, but + # at least one user bumped into extreme use cases that exceeded + # the recursion limit on their box. So, now we maintain a list + # ('queue`) of blocks we still need to look at, and append partial + # results to `matching_blocks` in a loop; the matches are sorted + # at the end. 
+ queue.push_back(MatchingBlockQueueElem(0, self.la, 0, self.lb)) + while not queue.empty(): + elem = queue.back() + alo, ahi, blo, bhi = elem.alo, elem.ahi, elem.blo, elem.bhi + queue.pop_back() + x = self.__find_longest_match(alo, ahi, blo, bhi) + i, j, k = x.a, x.b, x.size + # a[alo:i] vs b[blo:j] unknown + # a[i:i+k] same as b[j:j+k] + # a[i+k:ahi] vs b[j+k:bhi] unknown + if k: # if k is 0, there was no matching block + matching_blocks_.push_back(x) + if alo < i and blo < j: + queue.push_back(MatchingBlockQueueElem(alo, i, blo, j)) + if i+k < ahi and j+k < bhi: + queue.push_back(MatchingBlockQueueElem(i+k, ahi, j+k, bhi)) + cpp_sort(matching_blocks_.begin(), matching_blocks_.end(), &CMatch_sorter) + + # It's possible that we have adjacent equal blocks in the + # matching_blocks list now. Starting with 2.5, this code was added + # to collapse them. + i1 = j1 = k1 = 0 + non_adjacent = [] + for match in matching_blocks_: + i2, j2, k2 = match.a, match.b, match.size + # Is this block adjacent to i1, j1, k1? + if i1 + k1 == i2 and j1 + k1 == j2: + # Yes, so collapse them -- this just increases the length of + # the first block by the length of the second, and the first + # block so lengthened remains the block to compare against. + k1 += k2 + else: + # Not adjacent. Remember the first block (k1==0 means it's + # the dummy we started with), and make the second block the + # new block to compare against. + if k1: + non_adjacent.append(Match(i1, j1, k1)) + i1, j1, k1 = i2, j2, k2 + if k1: + non_adjacent.append(Match(i1, j1, k1)) + + non_adjacent.append(Match(self.la, self.lb, 0)) + self.matching_blocks = non_adjacent + return self.matching_blocks + + def get_opcodes(self): + """Return list of 5-tuples describing how to turn a into b. + + Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple + has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the + tuple preceding it, and likewise for j1 == the previous j2. 
+ + The tags are strings, with these meanings: + + 'replace': a[i1:i2] should be replaced by b[j1:j2] + 'delete': a[i1:i2] should be deleted. + Note that j1==j2 in this case. + 'insert': b[j1:j2] should be inserted at a[i1:i1]. + Note that i1==i2 in this case. + 'equal': a[i1:i2] == b[j1:j2] + + >>> a = "qabxcd" + >>> b = "abycdf" + >>> s = SequenceMatcher(None, a, b) + >>> for tag, i1, i2, j1, j2 in s.get_opcodes(): + ... print(("%7s a[%d:%d] (%s) b[%d:%d] (%s)" % + ... (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))) + delete a[0:1] (q) b[0:0] () + equal a[1:3] (ab) b[0:2] (ab) + replace a[3:4] (x) b[2:3] (y) + equal a[4:6] (cd) b[3:5] (cd) + insert a[6:6] () b[5:6] (f) + """ + + if self.opcodes is not None: + return self.opcodes + i = j = 0 + self.opcodes = answer = [] + for ai, bj, size in self.get_matching_blocks(): + # invariant: we've pumped out correct diffs to change + # a[:i] into b[:j], and the next matching block is + # a[ai:ai+size] == b[bj:bj+size]. So we need to pump + # out a diff to change a[i:ai] into b[j:bj], pump out + # the matching block, and move (i,j) beyond the match + tag = '' + if i < ai and j < bj: + tag = 'replace' + elif i < ai: + tag = 'delete' + elif j < bj: + tag = 'insert' + if tag: + answer.append( (tag, i, ai, j, bj) ) + i, j = ai+size, bj+size + # the list of matching blocks is terminated by a + # sentinel with size 0 + if size: + answer.append( ('equal', ai, i, bj, j) ) + return answer + + def get_grouped_opcodes(self, n=3): + """ Isolate change clusters by eliminating ranges with no changes. + + Return a generator of groups with up to n lines of context. + Each group is in the same format as returned by get_opcodes(). 
+ + >>> from pprint import pprint + >>> a = list(map(str, range(1,40))) + >>> b = a[:] + >>> b[8:8] = ['i'] # Make an insertion + >>> b[20] += 'x' # Make a replacement + >>> b[23:28] = [] # Make a deletion + >>> b[30] += 'y' # Make another replacement + >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes())) + [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)], + [('equal', 16, 19, 17, 20), + ('replace', 19, 20, 20, 21), + ('equal', 20, 22, 21, 23), + ('delete', 22, 27, 23, 23), + ('equal', 27, 30, 23, 26)], + [('equal', 31, 34, 27, 30), + ('replace', 34, 35, 30, 31), + ('equal', 35, 38, 31, 34)]] + """ + + codes = self.get_opcodes() + if not codes: + codes = [("equal", 0, 1, 0, 1)] + # Fixup leading and trailing groups if they show no changes. + if codes[0][0] == 'equal': + tag, i1, i2, j1, j2 = codes[0] + codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2 + if codes[-1][0] == 'equal': + tag, i1, i2, j1, j2 = codes[-1] + codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n) + + nn = n + n + group = [] + for tag, i1, i2, j1, j2 in codes: + # End the current group and start a new one whenever + # there is a large range with no changes. + if tag == 'equal' and i2-i1 > nn: + group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n))) + yield group + group = [] + i1, j1 = max(i1, i2-n), max(j1, j2-n) + group.append((tag, i1, i2, j1 ,j2)) + if group and not (len(group)==1 and group[0][0] == 'equal'): + yield group + + def ratio(self): + """Return a measure of the sequences' similarity (float in [0,1]). + + Where T is the total number of elements in both sequences, and + M is the number of matches, this is 2.0*M / T. + Note that this is 1 if the sequences are identical, and 0 if + they have nothing in common. + + .ratio() is expensive to compute if you haven't already computed + .get_matching_blocks() or .get_opcodes(), in which case you may + want to try .quick_ratio() or .real_quick_ratio() first to get an + upper bound. 
+ + >>> s = SequenceMatcher(None, "abcd", "bcde") + >>> s.ratio() + 0.75 + >>> s.quick_ratio() + 0.75 + >>> s.real_quick_ratio() + 1.0 + """ + + matches = sum(triple[-1] for triple in self.get_matching_blocks()) + return _calculate_ratio(matches, len(self.a) + len(self.b)) + + def quick_ratio(self): + """Return an upper bound on ratio() relatively quickly. + + This isn't defined beyond that it is an upper bound on .ratio(), and + is faster to compute. + """ + + # viewing a and b as multisets, set matches to the cardinality + # of their intersection; this counts the number of matches + # without regard to order, so is clearly an upper bound + if self.fullbcount is None: + self.fullbcount = fullbcount = {} + for elt in self.b: + fullbcount[elt] = fullbcount.get(elt, 0) + 1 + fullbcount = self.fullbcount + # avail[x] is the number of times x appears in 'b' less the + # number of times we've seen it in 'a' so far ... kinda + avail = {} + availhas, matches = avail.__contains__, 0 + for elt in self.a: + if availhas(elt): + numb = avail[elt] + else: + numb = fullbcount.get(elt, 0) + avail[elt] = numb - 1 + if numb > 0: + matches = matches + 1 + return _calculate_ratio(matches, len(self.a) + len(self.b)) + + def real_quick_ratio(self): + """Return an upper bound on ratio() very quickly. + + This isn't defined beyond that it is an upper bound on .ratio(), and + is faster to compute than either .ratio() or .quick_ratio(). + """ + + la, lb = len(self.a), len(self.b) + # can't have more matches than the number of elements in the + # shorter sequence + return _calculate_ratio(min(la, lb), la + lb) + + # todo add this once it is supported in all Python versions + #__class_getitem__ = classmethod(GenericAlias) + + +def get_close_matches(word, possibilities, n=3, cutoff=0.6): + """Use SequenceMatcher to return list of the best "good enough" matches. + + word is a sequence for which close matches are desired (typically a + string). 
+ + possibilities is a list of sequences against which to match word + (typically a list of strings). + + Optional arg n (default 3) is the maximum number of close matches to + return. n must be > 0. + + Optional arg cutoff (default 0.6) is a float in [0, 1]. Possibilities + that don't score at least that similar to word are ignored. + + The best (no more than n) matches among the possibilities are returned + in a list, sorted by similarity score, most similar first. + + >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"]) + ['apple', 'ape'] + >>> import keyword as _keyword + >>> get_close_matches("wheel", _keyword.kwlist) + ['while'] + >>> get_close_matches("Apple", _keyword.kwlist) + [] + >>> get_close_matches("accept", _keyword.kwlist) + ['except'] + """ + + if not n > 0: + raise ValueError("n must be > 0: %r" % (n,)) + if not 0.0 <= cutoff <= 1.0: + raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,)) + result = [] + s = SequenceMatcher() + s.set_seq2(word) + for x in possibilities: + s.set_seq1(x) + if s.real_quick_ratio() >= cutoff and \ + s.quick_ratio() >= cutoff and \ + s.ratio() >= cutoff: + result.append((s.ratio(), x)) + + # Move the best scorers to head of list + result = _nlargest(n, result) + # Strip scores for the best n matches + return [x for score, x in result] + + +def _keep_original_ws(s, tag_s): + """Replace whitespace with the original whitespace characters in `s`""" + return ''.join( + c if tag_c == " " and c.isspace() else tag_c + for c, tag_c in zip(s, tag_s) + ) + + + +class Differ: + r""" + Differ is a class for comparing sequences of lines of text, and + producing human-readable differences or deltas. Differ uses + SequenceMatcher both to compare sequences of lines, and to compare + sequences of characters within similar (near-matching) lines. 
+ + Each line of a Differ delta begins with a two-letter code: + + '- ' line unique to sequence 1 + '+ ' line unique to sequence 2 + ' ' line common to both sequences + '? ' line not present in either input sequence + + Lines beginning with '? ' attempt to guide the eye to intraline + differences, and were not present in either input sequence. These lines + can be confusing if the sequences contain tab characters. + + Note that Differ makes no claim to produce a *minimal* diff. To the + contrary, minimal diffs are often counter-intuitive, because they synch + up anywhere possible, sometimes accidental matches 100 pages apart. + Restricting synch points to contiguous matches preserves some notion of + locality, at the occasional cost of producing a longer diff. + + Example: Comparing two texts. + + First we set up the texts, sequences of individual single-line strings + ending with newlines (such sequences can also be obtained from the + `readlines()` method of file-like objects): + + >>> text1 = ''' 1. Beautiful is better than ugly. + ... 2. Explicit is better than implicit. + ... 3. Simple is better than complex. + ... 4. Complex is better than complicated. + ... '''.splitlines(keepends=True) + >>> len(text1) + 4 + >>> text1[0][-1] + '\n' + >>> text2 = ''' 1. Beautiful is better than ugly. + ... 3. Simple is better than complex. + ... 4. Complicated is better than complex. + ... 5. Flat is better than nested. + ... '''.splitlines(keepends=True) + + Next we instantiate a Differ object: + + >>> d = Differ() + + Note that when instantiating a Differ object we may pass functions to + filter out line and character 'junk'. See Differ.__init__ for details. + + Finally, we compare the two: + + >>> result = list(d.compare(text1, text2)) + + 'result' is a list of strings, so let's pretty-print it: + + >>> from pprint import pprint as _pprint + >>> _pprint(result) + [' 1. Beautiful is better than ugly.\n', + '- 2. Explicit is better than implicit.\n', + '- 3. 
Simple is better than complex.\n', + '+ 3. Simple is better than complex.\n', + '? ++\n', + '- 4. Complex is better than complicated.\n', + '? ^ ---- ^\n', + '+ 4. Complicated is better than complex.\n', + '? ++++ ^ ^\n', + '+ 5. Flat is better than nested.\n'] + + As a single multi-line string it looks like this: + + >>> print(''.join(result), end="") + 1. Beautiful is better than ugly. + - 2. Explicit is better than implicit. + - 3. Simple is better than complex. + + 3. Simple is better than complex. + ? ++ + - 4. Complex is better than complicated. + ? ^ ---- ^ + + 4. Complicated is better than complex. + ? ++++ ^ ^ + + 5. Flat is better than nested. + """ + + def __init__(self, linejunk=None, charjunk=None): + """ + Construct a text differencer, with optional filters. + + The two optional keyword parameters are for filter functions: + + - `linejunk`: A function that should accept a single string argument, + and return true iff the string is junk. The module-level function + `IS_LINE_JUNK` may be used to filter out lines without visible + characters, except for at most one splat ('#'). It is recommended + to leave linejunk None; the underlying SequenceMatcher class has + an adaptive notion of "noise" lines that's better than any static + definition the author has ever been able to craft. + + - `charjunk`: A function that should accept a string of length 1. The + module-level function `IS_CHARACTER_JUNK` may be used to filter out + whitespace characters (a blank or tab; **note**: bad idea to include + newline in this!). Use of IS_CHARACTER_JUNK is recommended. + """ + + self.linejunk = linejunk + self.charjunk = charjunk + + def compare(self, a, b): + r""" + Compare two sequences of lines; generate the resulting delta. + + Each sequence must contain individual single-line strings ending with + newlines. Such sequences can be obtained from the `readlines()` method + of file-like objects. 
The delta generated also consists of newline- + terminated strings, ready to be printed as-is via the writeline() + method of a file-like object. + + Example: + + >>> print(''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(True), + ... 'ore\ntree\nemu\n'.splitlines(True))), + ... end="") + - one + ? ^ + + ore + ? ^ + - two + - three + ? - + + tree + + emu + """ + + cruncher = SequenceMatcher(self.linejunk, a, b) + for tag, alo, ahi, blo, bhi in cruncher.get_opcodes(): + if tag == 'replace': + g = self._fancy_replace(a, alo, ahi, b, blo, bhi) + elif tag == 'delete': + g = self._dump('-', a, alo, ahi) + elif tag == 'insert': + g = self._dump('+', b, blo, bhi) + elif tag == 'equal': + g = self._dump(' ', a, alo, ahi) + else: + raise ValueError('unknown tag %r' % (tag,)) + + yield from g + + def _dump(self, tag, x, lo, hi): + """Generate comparison results for a same-tagged range.""" + for i in range(lo, hi): + yield '%s %s' % (tag, x[i]) + + def _plain_replace(self, a, alo, ahi, b, blo, bhi): + assert alo < ahi and blo < bhi + # dump the shorter block first -- reduces the burden on short-term + # memory if the blocks are of very different sizes + if bhi - blo < ahi - alo: + first = self._dump('+', b, blo, bhi) + second = self._dump('-', a, alo, ahi) + else: + first = self._dump('-', a, alo, ahi) + second = self._dump('+', b, blo, bhi) + + for g in first, second: + yield from g + + def _fancy_replace(self, a, alo, ahi, b, blo, bhi): + r""" + When replacing one block of lines with another, search the blocks + for *similar* lines; the best-matching pair (if any) is used as a + synch point, and intraline difference marking is done on the + similar pair. Lots of work, but often worth it. + + Example: + + >>> d = Differ() + >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1, + ... ['abcdefGhijkl\n'], 0, 1) + >>> print(''.join(results), end="") + - abcDefghiJkl + ? ^ ^ ^ + + abcdefGhijkl + ? 
    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
        r"""
        When replacing one block of lines with another, search the blocks
        for *similar* lines; the best-matching pair (if any) is used as a
        synch point, and intraline difference marking is done on the
        similar pair. Lots of work, but often worth it.

        Example:

        >>> d = Differ()
        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
        ...                            ['abcdefGhijkl\n'], 0, 1)
        >>> print(''.join(results), end="")
        - abcDefghiJkl
        ?    ^  ^  ^
        + abcdefGhijkl
        ?    ^  ^  ^
        """

        # don't synch up unless the lines have a similarity score of at
        # least cutoff; best_ratio tracks the best score seen so far
        # NOTE: best_ratio starts just below cutoff so any qualifying pair
        # must strictly beat 0.74.
        best_ratio, cutoff = 0.74, 0.75
        cruncher = SequenceMatcher(self.charjunk)
        eqi, eqj = None, None # 1st indices of equal lines (if any)

        # search for the pair that matches best without being identical
        # (identical lines must be junk lines, & we don't want to synch up
        # on junk -- unless we have to)
        for j in range(blo, bhi):
            bj = b[j]
            cruncher.set_seq2(bj)
            for i in range(alo, ahi):
                ai = a[i]
                if ai == bj:
                    if eqi is None:
                        eqi, eqj = i, j
                    continue
                cruncher.set_seq1(ai)
                # computing similarity is expensive, so use the quick
                # upper bounds first -- have seen this speed up messy
                # compares by a factor of 3.
                # note that ratio() is only expensive to compute the first
                # time it's called on a sequence pair; the expensive part
                # of the computation is cached by cruncher
                if cruncher.real_quick_ratio() > best_ratio and \
                      cruncher.quick_ratio() > best_ratio and \
                      cruncher.ratio() > best_ratio:
                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
        if best_ratio < cutoff:
            # no non-identical "pretty close" pair
            if eqi is None:
                # no identical pair either -- treat it as a straight replace
                yield from self._plain_replace(a, alo, ahi, b, blo, bhi)
                return
            # no close pair, but an identical pair -- synch up on that
            best_i, best_j, best_ratio = eqi, eqj, 1.0
        else:
            # there's a close pair, so forget the identical pair (if any)
            eqi = None

        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
        # identical

        # pump out diffs from before the synch point
        yield from self._fancy_helper(a, alo, best_i, b, blo, best_j)

        # do intraline marking on the synch pair
        aelt, belt = a[best_i], b[best_j]
        if eqi is None:
            # pump out a '-', '?', '+', '?' quad for the synched lines
            atags = btags = ""
            cruncher.set_seqs(aelt, belt)
            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
                # Build per-character marker strings: '^' change, '-'
                # deletion, '+' insertion, ' ' unchanged.
                la, lb = ai2 - ai1, bj2 - bj1
                if tag == 'replace':
                    atags += '^' * la
                    btags += '^' * lb
                elif tag == 'delete':
                    atags += '-' * la
                elif tag == 'insert':
                    btags += '+' * lb
                elif tag == 'equal':
                    atags += ' ' * la
                    btags += ' ' * lb
                else:
                    raise ValueError('unknown tag %r' % (tag,))
            yield from self._qformat(aelt, belt, atags, btags)
        else:
            # the synch pair is identical
            yield '  ' + aelt

        # pump out diffs from after the synch point
        yield from self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi)

    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
        # Dispatch a leftover one-sided or two-sided region to the right
        # generator; an empty region on both sides yields nothing.
        g = []
        if alo < ahi:
            if blo < bhi:
                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
            else:
                g = self._dump('-', a, alo, ahi)
        elif blo < bhi:
            g = self._dump('+', b, blo, bhi)

        yield from g

    def _qformat(self, aline, bline, atags, btags):
        r"""
        Format "?" output and deal with tabs.

        Example:

        >>> d = Differ()
        >>> results = d._qformat('\tabcDefghiJkl\n', '\tabcdefGhijkl\n',
        ...                      '  ^ ^  ^      ', '  ^ ^  ^      ')
        >>> for line in results: print(repr(line))
        ...
        '- \tabcDefghiJkl\n'
        '? \t ^ ^  ^\n'
        '+ \tabcdefGhijkl\n'
        '? \t ^ ^  ^\n'
        """
        # Re-insert the original whitespace (tabs) into the marker lines
        # so the '?' guides line up under tab-indented text.
        atags = _keep_original_ws(aline, atags).rstrip()
        btags = _keep_original_ws(bline, btags).rstrip()

        yield "- " + aline
        if atags:
            yield f"? {atags}\n"

        yield "+ " + bline
        if btags:
            yield f"? {btags}\n"
+# While an accurate view, to people that's absurd. The current version +# looks for matching blocks that are entirely junk-free, then extends the +# longest one of those as far as possible but only with matching junk. +# So now "currentThread" is matched, then extended to suck up the +# preceding blank; then "private" is matched, and extended to suck up the +# following blank; then "Thread" is matched; and finally ndiff reports +# that "volatile " was inserted before "Thread". The only quibble +# remaining is that perhaps it was really the case that " volatile" +# was inserted after "private". I can live with that . + +import re + +def IS_LINE_JUNK(line, pat=re.compile(r"\s*(?:#\s*)?$").match): + r""" + Return True for ignorable line: iff `line` is blank or contains a single '#'. + + Examples: + + >>> IS_LINE_JUNK('\n') + True + >>> IS_LINE_JUNK(' # \n') + True + >>> IS_LINE_JUNK('hello\n') + False + """ + + return pat(line) is not None + +def IS_CHARACTER_JUNK(ch, ws=" \t"): + r""" + Return True for ignorable character: iff `ch` is a space or tab. 
+ + Examples: + + >>> IS_CHARACTER_JUNK(' ') + True + >>> IS_CHARACTER_JUNK('\t') + True + >>> IS_CHARACTER_JUNK('\n') + False + >>> IS_CHARACTER_JUNK('x') + False + """ + + return ch in ws + + +######################################################################## +### Unified Diff +######################################################################## + +def _format_range_unified(start, stop): + 'Convert range to the "ed" format' + # Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning = start + 1 # lines start numbering with one + length = stop - start + if length == 1: + return '{}'.format(beginning) + if not length: + beginning -= 1 # empty ranges begin at line just before the range + return '{},{}'.format(beginning, length) + +def unified_diff(a, b, fromfile='', tofile='', fromfiledate='', + tofiledate='', n=3, lineterm='\n'): + r""" + Compare two sequences of lines; generate the delta as a unified diff. + + Unified diffs are a compact way of showing line changes and a few + lines of context. The number of context lines is set by 'n' which + defaults to three. + + By default, the diff control lines (those with ---, +++, or @@) are + created with a trailing newline. This is helpful so that inputs + created from file.readlines() result in diffs that are suitable for + file.writelines() since both the inputs and outputs have trailing + newlines. + + For inputs that do not have trailing newlines, set the lineterm + argument to "" so that the output will be uniformly newline free. + + The unidiff format normally has a header for filenames and modification + times. Any or all of these may be specified using strings for + 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. + The modification times are normally expressed in the ISO 8601 format. + + Example: + + >>> for line in unified_diff('one two three four'.split(), + ... 'zero one tree four'.split(), 'Original', 'Current', + ... 
'2005-01-26 23:30:50', '2010-04-02 10:20:52', + ... lineterm=''): + ... print(line) # doctest: +NORMALIZE_WHITESPACE + --- Original 2005-01-26 23:30:50 + +++ Current 2010-04-02 10:20:52 + @@ -1,4 +1,4 @@ + +zero + one + -two + -three + +tree + four + """ + + _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm) + started = False + for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): + if not started: + started = True + fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' + todate = '\t{}'.format(tofiledate) if tofiledate else '' + yield '--- {}{}{}'.format(fromfile, fromdate, lineterm) + yield '+++ {}{}{}'.format(tofile, todate, lineterm) + + first, last = group[0], group[-1] + file1_range = _format_range_unified(first[1], last[2]) + file2_range = _format_range_unified(first[3], last[4]) + yield '@@ -{} +{} @@{}'.format(file1_range, file2_range, lineterm) + + for tag, i1, i2, j1, j2 in group: + if tag == 'equal': + for line in a[i1:i2]: + yield ' ' + line + continue + if tag in {'replace', 'delete'}: + for line in a[i1:i2]: + yield '-' + line + if tag in {'replace', 'insert'}: + for line in b[j1:j2]: + yield '+' + line + + +######################################################################## +### Context Diff +######################################################################## + +def _format_range_context(start, stop): + 'Convert range to the "ed" format' + # Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning = start + 1 # lines start numbering with one + length = stop - start + if not length: + beginning -= 1 # empty ranges begin at line just before the range + if length <= 1: + return '{}'.format(beginning) + return '{},{}'.format(beginning, beginning + length - 1) + +# See http://www.unix.org/single_unix_specification/ +def context_diff(a, b, fromfile='', tofile='', + fromfiledate='', tofiledate='', n=3, lineterm='\n'): + r""" + Compare two sequences of lines; generate the delta as a 
context diff. + + Context diffs are a compact way of showing line changes and a few + lines of context. The number of context lines is set by 'n' which + defaults to three. + + By default, the diff control lines (those with *** or ---) are + created with a trailing newline. This is helpful so that inputs + created from file.readlines() result in diffs that are suitable for + file.writelines() since both the inputs and outputs have trailing + newlines. + + For inputs that do not have trailing newlines, set the lineterm + argument to "" so that the output will be uniformly newline free. + + The context diff format normally has a header for filenames and + modification times. Any or all of these may be specified using + strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. + The modification times are normally expressed in the ISO 8601 format. + If not specified, the strings default to blanks. + + Example: + + >>> print(''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(True), + ... 'zero\none\ntree\nfour\n'.splitlines(True), 'Original', 'Current')), + ... end="") + *** Original + --- Current + *************** + *** 1,4 **** + one + ! two + ! three + four + --- 1,4 ---- + + zero + one + ! tree + four + """ + + _check_types(a, b, fromfile, tofile, fromfiledate, tofiledate, lineterm) + prefix = dict(insert='+ ', delete='- ', replace='! 
', equal=' ') + started = False + for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n): + if not started: + started = True + fromdate = '\t{}'.format(fromfiledate) if fromfiledate else '' + todate = '\t{}'.format(tofiledate) if tofiledate else '' + yield '*** {}{}{}'.format(fromfile, fromdate, lineterm) + yield '--- {}{}{}'.format(tofile, todate, lineterm) + + first, last = group[0], group[-1] + yield '***************' + lineterm + + file1_range = _format_range_context(first[1], last[2]) + yield '*** {} ****{}'.format(file1_range, lineterm) + + if any(tag in {'replace', 'delete'} for tag, _, _, _, _ in group): + for tag, i1, i2, _, _ in group: + if tag != 'insert': + for line in a[i1:i2]: + yield prefix[tag] + line + + file2_range = _format_range_context(first[3], last[4]) + yield '--- {} ----{}'.format(file2_range, lineterm) + + if any(tag in {'replace', 'insert'} for tag, _, _, _, _ in group): + for tag, _, _, j1, j2 in group: + if tag != 'delete': + for line in b[j1:j2]: + yield prefix[tag] + line + +def _check_types(a, b, *args): + # Checking types is weird, but the alternative is garbled output when + # someone passes mixed bytes and str to {unified,context}_diff(). E.g. + # without this check, passing filenames as bytes results in output like + # --- b'oldfile.txt' + # +++ b'newfile.txt' + # because of how str.format() incorporates bytes objects. + if a and not isinstance(a[0], str): + raise TypeError('lines to compare must be str, not %s (%r)' % + (type(a[0]).__name__, a[0])) + if b and not isinstance(b[0], str): + raise TypeError('lines to compare must be str, not %s (%r)' % + (type(b[0]).__name__, b[0])) + for arg in args: + if not isinstance(arg, str): + raise TypeError('all arguments must be str, not: %r' % (arg,)) + +def diff_bytes(dfunc, a, b, fromfile=b'', tofile=b'', + fromfiledate=b'', tofiledate=b'', n=3, lineterm=b'\n'): + r""" + Compare `a` and `b`, two sequences of lines represented as bytes rather + than str. 
This is a wrapper for `dfunc`, which is typically either + unified_diff() or context_diff(). Inputs are losslessly converted to + strings so that `dfunc` only has to worry about strings, and encoded + back to bytes on return. This is necessary to compare files with + unknown or inconsistent encoding. All other inputs (except `n`) must be + bytes rather than str. + """ + def decode(s): + try: + return s.decode('ascii', 'surrogateescape') + except AttributeError as err: + msg = ('all arguments must be bytes, not %s (%r)' % + (type(s).__name__, s)) + raise TypeError(msg) from err + a = list(map(decode, a)) + b = list(map(decode, b)) + fromfile = decode(fromfile) + tofile = decode(tofile) + fromfiledate = decode(fromfiledate) + tofiledate = decode(tofiledate) + lineterm = decode(lineterm) + + lines = dfunc(a, b, fromfile, tofile, fromfiledate, tofiledate, n, lineterm) + for line in lines: + yield line.encode('ascii', 'surrogateescape') + +def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK): + r""" + Compare `a` and `b` (lists of strings); return a `Differ`-style delta. + + Optional keyword parameters `linejunk` and `charjunk` are for filter + functions, or can be None: + + - linejunk: A function that should accept a single string argument and + return true iff the string is junk. The default is None, and is + recommended; the underlying SequenceMatcher class has an adaptive + notion of "noise" lines. + + - charjunk: A function that accepts a character (string of length + 1), and returns true iff the character is junk. The default is + the module-level function IS_CHARACTER_JUNK, which filters out + whitespace characters (a blank or tab; note: it's a bad idea to + include newline in this!). + + Tools/scripts/ndiff.py is a command-line front-end to this function. + + Example: + + >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), + ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) + >>> print(''.join(diff), end="") + - one + ? ^ + + ore + ? 
^ + - two + - three + ? - + + tree + + emu + """ + return Differ(linejunk, charjunk).compare(a, b) + +def _mdiff(fromlines, tolines, context=None, linejunk=None, + charjunk=IS_CHARACTER_JUNK): + r"""Returns generator yielding marked up from/to side by side differences. + + Arguments: + fromlines -- list of text lines to compared to tolines + tolines -- list of text lines to be compared to fromlines + context -- number of context lines to display on each side of difference, + if None, all from/to text lines will be generated. + linejunk -- passed on to ndiff (see ndiff documentation) + charjunk -- passed on to ndiff (see ndiff documentation) + + This function returns an iterator which returns a tuple: + (from line tuple, to line tuple, boolean flag) + + from/to line tuple -- (line num, line text) + line num -- integer or None (to indicate a context separation) + line text -- original line text with following markers inserted: + '\0+' -- marks start of added text + '\0-' -- marks start of deleted text + '\0^' -- marks start of changed text + '\1' -- marks end of added/deleted/changed text + + boolean flag -- None indicates context separation, True indicates + either "from" or "to" line contains a change, otherwise False. + + This function/iterator was originally developed to generate side by side + file difference for making HTML pages (see HtmlDiff class for example + usage). + + Note, this function utilizes the ndiff function to generate the side by + side difference markup. Optional ndiff arguments may be passed to this + function and they in turn will be passed to ndiff. + """ + import re + + # regular expression for finding intraline change indices + change_re = re.compile(r'(\++|\-+|\^+)') + + # create the difference iterator to generate the differences + diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk) + + def _make_line(lines, format_key, side, num_lines=[0,0]): + """Returns line of text with user's change markup and line formatting. 
    def _make_line(lines, format_key, side, num_lines=[0,0]):
        """Returns line of text with user's change markup and line formatting.

        lines -- list of lines from the ndiff generator to produce a line of
                 text from.  When producing the line of text to return, the
                 lines used are removed from this list.
        format_key -- '+' return first line in list with "add" markup around
                          the entire line.
                      '-' return first line in list with "delete" markup around
                          the entire line.
                      '?' return first line in list with add/delete/change
                          intraline markup (indices obtained from second line)
                      None return first line in list with no markup
        side -- indice into the num_lines list (0=from,1=to)
        num_lines -- from/to current line number.  This is NOT intended to be a
                     passed parameter.  It is present as a keyword argument to
                     maintain memory of the current line numbers between calls
                     of this function.
                     NOTE: the mutable default is deliberate here -- it acts as
                     per-closure static state, not a bug.

        Note, this function is purposefully not defined at the module scope so
        that data it needs from its parent function (within whose context it
        is defined) does not need to be of module scope.
        """
        num_lines[side] += 1
        # Handle case where no user markup is to be added, just return line of
        # text with user's line format to allow for usage of the line number.
        if format_key is None:
            return (num_lines[side],lines.pop(0)[2:])
        # Handle case of intraline changes
        if format_key == '?':
            # First popped line is the text, second is the '?' marker line.
            text, markers = lines.pop(0), lines.pop(0)
            # find intraline changes (store change type and indices in tuples)
            sub_info = []
            def record_sub_info(match_object,sub_info=sub_info):
                sub_info.append([match_object.group(1)[0],match_object.span()])
                return match_object.group(1)
            # change_re comes from the enclosing _mdiff scope and matches
            # runs of '+', '-' or '^' markers.
            change_re.sub(record_sub_info,markers)
            # process each tuple inserting our special marks that won't be
            # noticed by an xml/html escaper.
            # Walk matches right-to-left so earlier spans stay valid while
            # the text grows.
            for key,(begin,end) in reversed(sub_info):
                text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
            # Strip the two-character ndiff prefix ('- ', '+ ', '  ').
            text = text[2:]
        # Handle case of add/delete entire line
        else:
            text = lines.pop(0)[2:]
            # if line of text is just a newline, insert a space so there is
            # something for the user to highlight and see.
            if not text:
                text = ' '
            # insert marks that won't be noticed by an xml/html escaper.
            text = '\0' + format_key + text + '\1'
        # Return line of text, first allow user's line formatter to do its
        # thing (such as adding the line number) then replace the special
        # marks with what the user's change markup.
        return (num_lines[side],text)
+ num_blanks_to_yield = num_blanks_pending + elif s.startswith('-?+?'): + # simple intraline change + yield _make_line(lines,'?',0), _make_line(lines,'?',1), True + continue + elif s.startswith('--++'): + # in delete block, add block coming: we do NOT want to get + # caught up on blank lines yet, just process the delete line + num_blanks_pending -= 1 + yield _make_line(lines,'-',0), None, True + continue + elif s.startswith(('--?+', '--+', '- ')): + # in delete block and see an intraline change or unchanged line + # coming: yield the delete line and then blanks + from_line,to_line = _make_line(lines,'-',0), None + num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0 + elif s.startswith('-+?'): + # intraline change + yield _make_line(lines,None,0), _make_line(lines,'?',1), True + continue + elif s.startswith('-?+'): + # intraline change + yield _make_line(lines,'?',0), _make_line(lines,None,1), True + continue + elif s.startswith('-'): + # delete FROM line + num_blanks_pending -= 1 + yield _make_line(lines,'-',0), None, True + continue + elif s.startswith('+--'): + # in add block, delete block coming: we do NOT want to get + # caught up on blank lines yet, just process the add line + num_blanks_pending += 1 + yield None, _make_line(lines,'+',1), True + continue + elif s.startswith(('+ ', '+-')): + # will be leaving an add block: yield blanks then add line + from_line, to_line = None, _make_line(lines,'+',1) + num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0 + elif s.startswith('+'): + # inside an add block, yield the add line + num_blanks_pending += 1 + yield None, _make_line(lines,'+',1), True + continue + elif s.startswith(' '): + # unchanged text, yield it to both sides + yield _make_line(lines[:],None,0),_make_line(lines,None,1),False + continue + # Catch up on the blank lines so when we yield the next from/to + # pair, they are lined up. 
+ while(num_blanks_to_yield < 0): + num_blanks_to_yield += 1 + yield None,('','\n'),True + while(num_blanks_to_yield > 0): + num_blanks_to_yield -= 1 + yield ('','\n'),None,True + if s.startswith('X'): + return + else: + yield from_line,to_line,True + + def _line_pair_iterator(): + """Yields from/to lines of text with a change indication. + + This function is an iterator. It itself pulls lines from the line + iterator. Its difference from that iterator is that this function + always yields a pair of from/to text lines (with the change + indication). If necessary it will collect single from/to lines + until it has a matching pair from/to pair to yield. + + Note, this function is purposefully not defined at the module scope so + that data it needs from its parent function (within whose context it + is defined) does not need to be of module scope. + """ + line_iterator = _line_iterator() + fromlines,tolines=[],[] + while True: + # Collecting lines of text until we have a from/to pair + while (len(fromlines)==0 or len(tolines)==0): + try: + from_line, to_line, found_diff = next(line_iterator) + except StopIteration: + return + if from_line is not None: + fromlines.append((from_line,found_diff)) + if to_line is not None: + tolines.append((to_line,found_diff)) + # Once we have a pair, remove them from the collection and yield it + from_line, fromDiff = fromlines.pop(0) + to_line, to_diff = tolines.pop(0) + yield (from_line,to_line,fromDiff or to_diff) + + # Handle case where user does not want context differencing, just yield + # them up without doing anything else with them. + line_pair_iterator = _line_pair_iterator() + if context is None: + yield from line_pair_iterator + # Handle case where user wants context differencing. We must do some + # storage of lines until we know for sure that they are to be yielded. 
+ else: + context += 1 + lines_to_write = 0 + while True: + # Store lines up until we find a difference, note use of a + # circular queue because we only need to keep around what + # we need for context. + index, contextLines = 0, [None]*(context) + found_diff = False + while(found_diff is False): + try: + from_line, to_line, found_diff = next(line_pair_iterator) + except StopIteration: + return + i = index % context + contextLines[i] = (from_line, to_line, found_diff) + index += 1 + # Yield lines that we have collected so far, but first yield + # the user's separator. + if index > context: + yield None, None, None + lines_to_write = context + else: + lines_to_write = index + index = 0 + while(lines_to_write): + i = index % context + index += 1 + yield contextLines[i] + lines_to_write -= 1 + # Now yield the context lines after the change + lines_to_write = context-1 + try: + while(lines_to_write): + from_line, to_line, found_diff = next(line_pair_iterator) + # If another change within the context, extend the context + if found_diff: + lines_to_write = context-1 + else: + lines_to_write -= 1 + yield from_line, to_line, found_diff + except StopIteration: + # Catch exception from next() and return normally + return + + +_file_template = """ + + + + + + + + + + + + %(table)s%(legend)s + + +""" + +_styles = """ + table.diff {font-family:Courier; border:medium;} + .diff_header {background-color:#e0e0e0} + td.diff_header {text-align:right} + .diff_next {background-color:#c0c0c0} + .diff_add {background-color:#aaffaa} + .diff_chg {background-color:#ffff77} + .diff_sub {background-color:#ffaaaa}""" + +_table_template = """ + + + + %(header_row)s + +%(data_rows)s +
""" + +_legend = """ + + + + +
Legends
+ + + + +
Colors
 Added 
Changed
Deleted
+ + + + +
Links
(f)irst change
(n)ext change
(t)op
""" + +class HtmlDiff(object): + """For producing HTML side by side comparison with change highlights. + + This class can be used to create an HTML table (or a complete HTML file + containing the table) showing a side by side, line by line comparison + of text with inter-line and intra-line change highlights. The table can + be generated in either full or contextual difference mode. + + The following methods are provided for HTML generation: + + make_table -- generates HTML for a single side by side table + make_file -- generates complete HTML file with a single side by side table + + See tools/scripts/diff.py for an example usage of this class. + """ + + _file_template = _file_template + _styles = _styles + _table_template = _table_template + _legend = _legend + _default_prefix = 0 + + def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None, + charjunk=IS_CHARACTER_JUNK): + """HtmlDiff instance initializer + + Arguments: + tabsize -- tab stop spacing, defaults to 8. + wrapcolumn -- column number where lines are broken and wrapped, + defaults to None where lines are not wrapped. + linejunk,charjunk -- keyword arguments passed into ndiff() (used by + HtmlDiff() to generate the side by side HTML differences). See + ndiff() documentation for argument default values and descriptions. + """ + self._tabsize = tabsize + self._wrapcolumn = wrapcolumn + self._linejunk = linejunk + self._charjunk = charjunk + + def make_file(self, fromlines, tolines, fromdesc='', todesc='', + context=False, numlines=5, *, charset='utf-8'): + """Returns HTML file of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. 
When context is set True, + controls number of lines displayed before and after the change. + When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + charset -- charset of the HTML document + """ + + return (self._file_template % dict( + styles=self._styles, + legend=self._legend, + table=self.make_table(fromlines, tolines, fromdesc, todesc, + context=context, numlines=numlines), + charset=charset + )).encode(charset, 'xmlcharrefreplace').decode(charset) + + def _tab_newline_replace(self,fromlines,tolines): + """Returns from/to line lists with tabs expanded and newlines removed. + + Instead of tab characters being replaced by the number of spaces + needed to fill in to the next tab stop, this function will fill + the space with tab characters. This is done so that the difference + algorithms can identify changes in a file when tabs are replaced by + spaces and vice versa. At the end of the HTML generation, the tab + characters will be replaced with a nonbreakable space. + """ + def expand_tabs(line): + # hide real spaces + line = line.replace(' ','\0') + # expand tabs into spaces + line = line.expandtabs(self._tabsize) + # replace spaces from expanded tabs back into tab characters + # (we'll replace them with markup after we do differencing) + line = line.replace(' ','\t') + return line.replace('\0',' ').rstrip('\n') + fromlines = [expand_tabs(line) for line in fromlines] + tolines = [expand_tabs(line) for line in tolines] + return fromlines,tolines + + def _split_line(self,data_list,line_num,text): + """Builds list of text lines by splitting text lines at wrap point + + This function will determine if the input text line needs to be + wrapped (split) into separate lines. If so, the first wrap point + will be determined and the first line appended to the output + text line list. 
This function is used recursively to handle + the second part of the split line to further split it. + """ + # if blank line or context separator, just add it to the output list + if not line_num: + data_list.append((line_num,text)) + return + + # if line text doesn't need wrapping, just add it to the output list + size = len(text) + max = self._wrapcolumn + if (size <= max) or ((size -(text.count('\0')*3)) <= max): + data_list.append((line_num,text)) + return + + # scan text looking for the wrap point, keeping track if the wrap + # point is inside markers + i = 0 + n = 0 + mark = '' + while n < max and i < size: + if text[i] == '\0': + i += 1 + mark = text[i] + i += 1 + elif text[i] == '\1': + i += 1 + mark = '' + else: + i += 1 + n += 1 + + # wrap point is inside text, break it up into separate lines + line1 = text[:i] + line2 = text[i:] + + # if wrap point is inside markers, place end marker at end of first + # line and start marker at beginning of second line because each + # line will have its own table tag markup around it. + if mark: + line1 = line1 + '\1' + line2 = '\0' + mark + line2 + + # tack on first line onto the output list + data_list.append((line_num,line1)) + + # use this routine again to wrap the remaining text + self._split_line(data_list,'>',line2) + + def _line_wrapper(self,diffs): + """Returns iterator that splits (wraps) mdiff text lines""" + + # pull from/to data and flags from mdiff iterator + for fromdata,todata,flag in diffs: + # check for context separators and pass them through + if flag is None: + yield fromdata,todata,flag + continue + (fromline,fromtext),(toline,totext) = fromdata,todata + # for each from/to line split it at the wrap column to form + # list of text lines. 
+ fromlist,tolist = [],[] + self._split_line(fromlist,fromline,fromtext) + self._split_line(tolist,toline,totext) + # yield from/to line in pairs inserting blank lines as + # necessary when one side has more wrapped lines + while fromlist or tolist: + if fromlist: + fromdata = fromlist.pop(0) + else: + fromdata = ('',' ') + if tolist: + todata = tolist.pop(0) + else: + todata = ('',' ') + yield fromdata,todata,flag + + def _collect_lines(self,diffs): + """Collects mdiff output into separate lists + + Before storing the mdiff from/to data into a list, it is converted + into a single line of text with HTML markup. + """ + + fromlist,tolist,flaglist = [],[],[] + # pull from/to data and flags from mdiff style iterator + for fromdata,todata,flag in diffs: + try: + # store HTML markup of the lines into the lists + fromlist.append(self._format_line(0,flag,*fromdata)) + tolist.append(self._format_line(1,flag,*todata)) + except TypeError: + # exceptions occur for lines where context separators go + fromlist.append(None) + tolist.append(None) + flaglist.append(flag) + return fromlist,tolist,flaglist + + def _format_line(self,side,flag,linenum,text): + """Returns HTML markup of "from" / "to" text lines + + side -- 0 or 1 indicating "from" or "to" text + flag -- indicates if difference on line + linenum -- line number (used for line number column) + text -- line text to be marked up + """ + try: + linenum = '%d' % linenum + id = ' id="%s%s"' % (self._prefix[side],linenum) + except TypeError: + # handle blank lines where linenum is '>' or '' + id = '' + # replace those things that would get confused with HTML symbols + text=text.replace("&","&").replace(">",">").replace("<","<") + + # make space non-breakable so they don't get compressed or line wrapped + text = text.replace(' ',' ').rstrip() + + return '%s%s' \ + % (id,linenum,text) + + def _make_prefix(self): + """Create unique anchor prefixes""" + + # Generate a unique anchor prefix so multiple tables + # can exist on the 
same HTML page without conflicts. + fromprefix = "from%d_" % HtmlDiff._default_prefix + toprefix = "to%d_" % HtmlDiff._default_prefix + HtmlDiff._default_prefix += 1 + # store prefixes so line format method has access + self._prefix = [fromprefix,toprefix] + + def _convert_flags(self,fromlist,tolist,flaglist,context,numlines): + """Makes list of "next" links""" + + # all anchor names will be generated using the unique "to" prefix + toprefix = self._prefix[1] + + # process change flags, generating middle column of next anchors/links + next_id = ['']*len(flaglist) + next_href = ['']*len(flaglist) + num_chg, in_change = 0, False + last = 0 + for i,flag in enumerate(flaglist): + if flag: + if not in_change: + in_change = True + last = i + # at the beginning of a change, drop an anchor a few lines + # (the context lines) before the change for the previous + # link + i = max([0,i-numlines]) + next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg) + # at the beginning of a change, drop a link to the next + # change + num_chg += 1 + next_href[last] = 'n' % ( + toprefix,num_chg) + else: + in_change = False + # check for cases where there is no content to avoid exceptions + if not flaglist: + flaglist = [False] + next_id = [''] + next_href = [''] + last = 0 + if context: + fromlist = [' No Differences Found '] + tolist = fromlist + else: + fromlist = tolist = [' Empty File '] + # if not a change on first line, drop a link + if not flaglist[0]: + next_href[0] = 'f' % toprefix + # redo the last link to link to the top + next_href[last] = 't' % (toprefix) + + return fromlist,tolist,flaglist,next_href,next_id + + def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False, + numlines=5): + """Returns HTML table of side by side comparison with change highlights + + Arguments: + fromlines -- list of "from" lines + tolines -- list of "to" lines + fromdesc -- "from" file column header string + todesc -- "to" file column header string + context -- set to True for 
contextual differences (defaults to False + which shows full differences). + numlines -- number of context lines. When context is set True, + controls number of lines displayed before and after the change. + When context is False, controls the number of lines to place + the "next" link anchors before the next change (so click of + "next" link jumps to just before the change). + """ + + # make unique anchor prefixes so that multiple tables may exist + # on the same page without conflict. + self._make_prefix() + + # change tabs to spaces before it gets more difficult after we insert + # markup + fromlines,tolines = self._tab_newline_replace(fromlines,tolines) + + # create diffs iterator which generates side by side from/to data + if context: + context_lines = numlines + else: + context_lines = None + diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk, + charjunk=self._charjunk) + + # set up iterator to wrap lines that exceed desired width + if self._wrapcolumn: + diffs = self._line_wrapper(diffs) + + # collect up from/to lines and flags into lists (also format the lines) + fromlist,tolist,flaglist = self._collect_lines(diffs) + + # process change flags, generating middle column of next anchors/links + fromlist,tolist,flaglist,next_href,next_id = self._convert_flags( + fromlist,tolist,flaglist,context,numlines) + + s = [] + fmt = ' %s%s' + \ + '%s%s\n' + for i in range(len(flaglist)): + if flaglist[i] is None: + # mdiff yields None on separator lines skip the bogus ones + # generated for the first line + if i > 0: + s.append(' \n \n') + else: + s.append( fmt % (next_id[i],next_href[i],fromlist[i], + next_href[i],tolist[i])) + if fromdesc or todesc: + header_row = '%s%s%s%s' % ( + '
', + '%s' % fromdesc, + '
', + '%s' % todesc) + else: + header_row = '' + + table = self._table_template % dict( + data_rows=''.join(s), + header_row=header_row, + prefix=self._prefix[1]) + + return table.replace('\0+',''). \ + replace('\0-',''). \ + replace('\0^',''). \ + replace('\1',''). \ + replace('\t',' ') + +del re + +def restore(delta, which): + r""" + Generate one of the two sequences that generated a delta. + + Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract + lines originating from file 1 or 2 (parameter `which`), stripping off line + prefixes. + + Examples: + + >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True), + ... 'ore\ntree\nemu\n'.splitlines(keepends=True)) + >>> diff = list(diff) + >>> print(''.join(restore(diff, 1)), end="") + one + two + three + >>> print(''.join(restore(diff, 2)), end="") + ore + tree + emu + """ + cdef Py_ssize_t _which = int(which) + if _which == 1: + tag = "- " + elif _which == 2: + tag = "+ " + else: + raise ValueError('unknown delta choice (must be 1 or 2): %r' + % which) from None + prefixes = (" ", tag) + for line in delta: + if line[:2] in prefixes: + yield line[2:] diff --git a/tests/test_cydifflib.py b/tests/test_cydifflib.py index 613fbd5..1d2eae2 100644 --- a/tests/test_cydifflib.py +++ b/tests/test_cydifflib.py @@ -1,73 +1,82 @@ -import cydifflib -import os -import unittest +from __future__ import annotations + import doctest +import os import sys +import unittest + +import cydifflib class TestWithAscii(unittest.TestCase): def test_one_insert(self): - sm = cydifflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100) + sm = cydifflib.SequenceMatcher(None, "b" * 100, "a" + "b" * 100) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) - self.assertEqual(list(sm.get_opcodes()), - [ ('insert', 0, 0, 0, 1), - ('equal', 0, 100, 1, 101)]) - self.assertEqual(sm.bpopular, set()) - sm = cydifflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50) + assert list(sm.get_opcodes()) == [("insert", 0, 0, 0, 1), 
("equal", 0, 100, 1, 101)] + assert sm.bpopular == set() + sm = cydifflib.SequenceMatcher(None, "b" * 100, "b" * 50 + "a" + "b" * 50) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) - self.assertEqual(list(sm.get_opcodes()), - [ ('equal', 0, 50, 0, 50), - ('insert', 50, 50, 50, 51), - ('equal', 50, 100, 51, 101)]) - self.assertEqual(sm.bpopular, set()) + assert list(sm.get_opcodes()) == [ + ("equal", 0, 50, 0, 50), + ("insert", 50, 50, 50, 51), + ("equal", 50, 100, 51, 101), + ] + assert sm.bpopular == set() def test_one_delete(self): - sm = cydifflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40) + sm = cydifflib.SequenceMatcher(None, "a" * 40 + "c" + "b" * 40, "a" * 40 + "b" * 40) self.assertAlmostEqual(sm.ratio(), 0.994, places=3) - self.assertEqual(list(sm.get_opcodes()), - [ ('equal', 0, 40, 0, 40), - ('delete', 40, 41, 40, 40), - ('equal', 41, 81, 40, 80)]) + assert list(sm.get_opcodes()) == [ + ("equal", 0, 40, 0, 40), + ("delete", 40, 41, 40, 40), + ("equal", 41, 81, 40, 80), + ] def test_bjunk(self): - sm = cydifflib.SequenceMatcher(isjunk=lambda x: x == ' ', - a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40) - self.assertEqual(sm.bjunk, set()) + sm = cydifflib.SequenceMatcher(isjunk=lambda x: x == " ", a="a" * 40 + "b" * 40, b="a" * 44 + "b" * 40) + assert sm.bjunk == set() - sm = cydifflib.SequenceMatcher(isjunk=lambda x: x == ' ', - a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40 + ' ' * 20) - self.assertEqual(sm.bjunk, {' '}) + sm = cydifflib.SequenceMatcher( + isjunk=lambda x: x == " ", + a="a" * 40 + "b" * 40, + b="a" * 44 + "b" * 40 + " " * 20, + ) + assert sm.bjunk == {" "} - sm = cydifflib.SequenceMatcher(isjunk=lambda x: x in [' ', 'b'], - a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40 + ' ' * 20) - self.assertEqual(sm.bjunk, {' ', 'b'}) + sm = cydifflib.SequenceMatcher( + isjunk=lambda x: x in [" ", "b"], + a="a" * 40 + "b" * 40, + b="a" * 44 + "b" * 40 + " " * 20, + ) + assert sm.bjunk == {" ", "b"} class 
TestAutojunk(unittest.TestCase): """Tests for the autojunk parameter added in 2.7""" + def test_one_insert_homogenous_sequence(self): # By default autojunk=True and the heuristic kicks in for a sequence # of length 200+ - seq1 = 'b' * 200 - seq2 = 'a' + 'b' * 200 + seq1 = "b" * 200 + seq2 = "a" + "b" * 200 sm = cydifflib.SequenceMatcher(None, seq1, seq2) self.assertAlmostEqual(sm.ratio(), 0, places=3) - self.assertEqual(sm.bpopular, {'b'}) + assert sm.bpopular == {"b"} # Now turn the heuristic off sm = cydifflib.SequenceMatcher(None, seq1, seq2, autojunk=False) self.assertAlmostEqual(sm.ratio(), 0.9975, places=3) - self.assertEqual(sm.bpopular, set()) + assert sm.bpopular == set() class TestSFbugs(unittest.TestCase): def test_ratio_for_null_seqn(self): # Check clearing of SF bug 763023 s = cydifflib.SequenceMatcher(None, [], []) - self.assertEqual(s.ratio(), 1) - self.assertEqual(s.quick_ratio(), 1) - self.assertEqual(s.real_quick_ratio(), 1) + assert s.ratio() == 1 + assert s.quick_ratio() == 1 + assert s.real_quick_ratio() == 1 def test_comparing_empty_lists(self): # Check fix for bug #979794 @@ -79,27 +88,27 @@ def test_comparing_empty_lists(self): def test_matching_blocks_cache(self): # Issue #21635 s = cydifflib.SequenceMatcher(None, "abxcd", "abcd") - first = s.get_matching_blocks() + s.get_matching_blocks() second = s.get_matching_blocks() - self.assertEqual(second[0].size, 2) - self.assertEqual(second[1].size, 2) - self.assertEqual(second[2].size, 0) + assert second[0].size == 2 + assert second[1].size == 2 + assert second[2].size == 0 def test_added_tab_hint(self): # Check fix for bug #1488943 - diff = list(cydifflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"])) - self.assertEqual("- \tI am a buggy", diff[0]) - self.assertEqual("? \t --\n", diff[1]) - self.assertEqual("+ \t\tI am a bug", diff[2]) - self.assertEqual("? 
+\n", diff[3]) + diff = list(cydifflib.Differ().compare(["\tI am a buggy"], ["\t\tI am a bug"])) + assert diff[0] == "- \tI am a buggy" + assert diff[1] == "? \t --\n" + assert diff[2] == "+ \t\tI am a bug" + assert diff[3] == "? +\n" def test_hint_indented_properly_with_tabs(self): diff = list(cydifflib.Differ().compare(["\t \t \t^"], ["\t \t \t^\n"])) - self.assertEqual("- \t \t \t^", diff[0]) - self.assertEqual("+ \t \t \t^\n", diff[1]) - self.assertEqual("? \t \t \t +\n", diff[2]) + assert diff[0] == "- \t \t \t^" + assert diff[1] == "+ \t \t \t^\n" + assert diff[2] == "? \t \t \t +\n" - #def test_mdiff_catch_stop_iteration(self): + # def test_mdiff_catch_stop_iteration(self): # # Issue #33224 # self.assertEqual( # list(cydifflib._mdiff(["2"], ["3"], 1)), @@ -107,9 +116,10 @@ def test_hint_indented_properly_with_tabs(self): # ) def test_issue3(self): - a = '计算:[小题]根号81+-273+-3分之22;[小题]-273+根号9-4分之1×根号0.16.' - b = '已知3x+1的算术平方根是4,x+2y的立方根是-1,(1)求x、y的值;(2)求2x-5y的平方根.' - self.assertEqual(cydifflib.SequenceMatcher(None, a, b).ratio(), 0.12) + a = "计算:[小题]根号81+-273+-3分之22;[小题]-273+根号9-4分之1x根号0.16." + b = "已知3x+1的算术平方根是4,x+2y的立方根是-1,(1)求x、y的值;(2)求2x-5y的平方根." + assert cydifflib.SequenceMatcher(None, a, b).ratio() == 0.12 + patch914575_from1 = """ 1. Beautiful is beTTer than ugly. @@ -127,14 +137,14 @@ def test_issue3(self): patch914575_nonascii_from1 = """ 1. Beautiful is beTTer than ugly. - 2. Explicit is better than ımplıcıt. + 2. Explicit is better than implicit. 3. Simple is better than complex. 4. Complex is better than complicated. """ patch914575_nonascii_to1 = """ 1. Beautiful is better than ügly. - 3. Sımple is better than complex. + 3. Simple is better than complex. 4. Complicated is better than cömplex. 5. Flat is better than nested. """ @@ -189,14 +199,14 @@ def test_issue3(self): just fits in two lineS yup!! 
the end""" -class TestSFpatches(unittest.TestCase): +class TestSFpatches(unittest.TestCase): def test_html_diff(self): # Check SF patch 914575 for generating HTML differences - f1a = ((patch914575_from1 + '123\n'*10)*3) - t1a = (patch914575_to1 + '123\n'*10)*3 - f1b = '456\n'*10 + f1a - t1b = '456\n'*10 + t1a + f1a = (patch914575_from1 + "123\n" * 10) * 3 + t1a = (patch914575_to1 + "123\n" * 10) * 3 + f1b = "456\n" * 10 + f1a + t1b = "456\n" * 10 + t1a f1a = f1a.splitlines() t1a = t1a.splitlines() f1b = f1b.splitlines() @@ -209,98 +219,104 @@ def test_html_diff(self): j = cydifflib.HtmlDiff(tabsize=2) k = cydifflib.HtmlDiff(wrapcolumn=14) - full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5) - tables = '\n'.join( + full = i.make_file(f1a, t1a, "from", "to", context=False, numlines=5) + tables = "\n".join( [ - '

Context (first diff within numlines=5(default))

', - i.make_table(f1a,t1a,'from','to',context=True), - '

Context (first diff after numlines=5(default))

', - i.make_table(f1b,t1b,'from','to',context=True), - '

Context (numlines=6)

', - i.make_table(f1a,t1a,'from','to',context=True,numlines=6), - '

Context (numlines=0)

', - i.make_table(f1a,t1a,'from','to',context=True,numlines=0), - '

Same Context

', - i.make_table(f1a,f1a,'from','to',context=True), - '

Same Full

', - i.make_table(f1a,f1a,'from','to',context=False), - '

Empty Context

', - i.make_table([],[],'from','to',context=True), - '

Empty Full

', - i.make_table([],[],'from','to',context=False), - '

tabsize=2

', - j.make_table(f2,t2), - '

tabsize=default

', - i.make_table(f2,t2), - '

Context (wrapcolumn=14,numlines=0)

', - k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0), - '

wrapcolumn=14,splitlines()

', - k.make_table(f3.splitlines(),t3.splitlines()), - '

wrapcolumn=14,splitlines(True)

', - k.make_table(f3.splitlines(True),t3.splitlines(True)), - ]) - actual = full.replace('','\n%s\n' % tables) + "

Context (first diff within numlines=5(default))

", + i.make_table(f1a, t1a, "from", "to", context=True), + "

Context (first diff after numlines=5(default))

", + i.make_table(f1b, t1b, "from", "to", context=True), + "

Context (numlines=6)

", + i.make_table(f1a, t1a, "from", "to", context=True, numlines=6), + "

Context (numlines=0)

", + i.make_table(f1a, t1a, "from", "to", context=True, numlines=0), + "

Same Context

", + i.make_table(f1a, f1a, "from", "to", context=True), + "

Same Full

", + i.make_table(f1a, f1a, "from", "to", context=False), + "

Empty Context

", + i.make_table([], [], "from", "to", context=True), + "

Empty Full

", + i.make_table([], [], "from", "to", context=False), + "

tabsize=2

", + j.make_table(f2, t2), + "

tabsize=default

", + i.make_table(f2, t2), + "

Context (wrapcolumn=14,numlines=0)

", + k.make_table(f3.splitlines(), t3.splitlines(), context=True, numlines=0), + "

wrapcolumn=14,splitlines()

", + k.make_table(f3.splitlines(), t3.splitlines()), + "

wrapcolumn=14,splitlines(True)

", + k.make_table(f3.splitlines(True), t3.splitlines(True)), + ] + ) + actual = full.replace("", "\n%s\n" % tables) dir_path = os.path.dirname(os.path.realpath(__file__)) - file_path = os.path.join(dir_path, 'test_cydifflib_expect.html') + file_path = os.path.join(dir_path, "test_cydifflib_expect.html") # temporarily uncomment next two lines to baseline this test - #with open(file_path,'w') as fp: + # with open(file_path,'w') as fp: # fp.write(actual) with open(file_path, encoding="utf-8") as fp: - self.assertEqual(actual, fp.read()) + assert actual == fp.read() def test_recursion_limit(self): # Check if the problem described in patch #1413711 exists. limit = sys.getrecursionlimit() - old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)] - new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)] + old = [(i % 2 and "K:%d" or "V:A:%d") % i for i in range(limit * 2)] + new = [(i % 2 and "K:%d" or "V:B:%d") % i for i in range(limit * 2)] cydifflib.SequenceMatcher(None, old, new).get_opcodes() def test_make_file_default_charset(self): html_diff = cydifflib.HtmlDiff() - output = html_diff.make_file(patch914575_from1.splitlines(), - patch914575_to1.splitlines()) - self.assertIn('content="text/html; charset=utf-8"', output) + output = html_diff.make_file(patch914575_from1.splitlines(), patch914575_to1.splitlines()) + assert 'content="text/html; charset=utf-8"' in output def test_make_file_iso88591_charset(self): html_diff = cydifflib.HtmlDiff() - output = html_diff.make_file(patch914575_from1.splitlines(), - patch914575_to1.splitlines(), - charset='iso-8859-1') - self.assertIn('content="text/html; charset=iso-8859-1"', output) + output = html_diff.make_file( + patch914575_from1.splitlines(), + patch914575_to1.splitlines(), + charset="iso-8859-1", + ) + assert 'content="text/html; charset=iso-8859-1"' in output def test_make_file_usascii_charset_with_nonascii_input(self): html_diff = cydifflib.HtmlDiff() - output = 
html_diff.make_file(patch914575_nonascii_from1.splitlines(), - patch914575_nonascii_to1.splitlines(), - charset='us-ascii') - self.assertIn('content="text/html; charset=us-ascii"', output) - self.assertIn('ımplıcıt', output) + output = html_diff.make_file( + patch914575_nonascii_from1.splitlines(), + patch914575_nonascii_to1.splitlines(), + charset="us-ascii", + ) + assert 'content="text/html; charset=us-ascii"' in output + assert "ımplıcıt" in output class TestOutputFormat(unittest.TestCase): def test_tab_delimiter(self): - args = ['one', 'two', 'Original', 'Current', - '2005-01-26 23:30:50', '2010-04-02 10:20:52'] - ud = cydifflib.unified_diff(*args, lineterm='') - self.assertEqual(list(ud)[0:2], [ - "--- Original\t2005-01-26 23:30:50", - "+++ Current\t2010-04-02 10:20:52"]) - cd = cydifflib.context_diff(*args, lineterm='') - self.assertEqual(list(cd)[0:2], [ - "*** Original\t2005-01-26 23:30:50", - "--- Current\t2010-04-02 10:20:52"]) + args = [ + "one", + "two", + "Original", + "Current", + "2005-01-26 23:30:50", + "2010-04-02 10:20:52", + ] + ud = cydifflib.unified_diff(*args, lineterm="") + assert list(ud)[0:2] == ["--- Original\t2005-01-26 23:30:50", "+++ Current\t2010-04-02 10:20:52"] + cd = cydifflib.context_diff(*args, lineterm="") + assert list(cd)[0:2] == ["*** Original\t2005-01-26 23:30:50", "--- Current\t2010-04-02 10:20:52"] def test_no_trailing_tab_on_empty_filedate(self): - args = ['one', 'two', 'Original', 'Current'] - ud = cydifflib.unified_diff(*args, lineterm='') - self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"]) + args = ["one", "two", "Original", "Current"] + ud = cydifflib.unified_diff(*args, lineterm="") + assert list(ud)[0:2] == ["--- Original", "+++ Current"] - cd = cydifflib.context_diff(*args, lineterm='') - self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"]) + cd = cydifflib.context_diff(*args, lineterm="") + assert list(cd)[0:2] == ["*** Original", "--- Current"] - #def test_range_format_unified(self): + # 
def test_range_format_unified(self): # # Per the diff spec at http://www.unix.org/single_unix_specification/ # spec = '''\ # Each field shall be of the form: @@ -317,7 +333,7 @@ def test_no_trailing_tab_on_empty_filedate(self): # self.assertEqual(fmt(3,6), '4,3') # self.assertEqual(fmt(0,0), '0,0') - #def test_range_format_context(self): + # def test_range_format_context(self): # # Per the diff spec at http://www.unix.org/single_unix_specification/ # spec = '''\ # The range of lines in file1 shall be written in the following format @@ -346,16 +362,14 @@ class TestBytes(unittest.TestCase): # don't really care about the content of the output, just the fact # that it's bytes and we don't crash def check(self, diff): - diff = list(diff) # trigger exceptions first + diff = list(diff) # trigger exceptions first for line in diff: - self.assertIsInstance( - line, bytes, - "all lines of diff should be bytes, but got: %r" % line) + assert isinstance(line, bytes), "all lines of diff should be bytes, but got: %r" % line def test_byte_content(self): # if we receive byte strings, we return byte strings - a = [b'hello', b'andr\xe9'] # iso-8859-1 bytes - b = [b'hello', b'andr\xc3\xa9'] # utf-8 bytes + a = [b"hello", b"andr\xe9"] # iso-8859-1 bytes + b = [b"hello", b"andr\xc3\xa9"] # utf-8 bytes unified = cydifflib.unified_diff context = cydifflib.context_diff @@ -365,29 +379,29 @@ def test_byte_content(self): check(cydifflib.diff_bytes(unified, a, b)) # now with filenames (content and filenames are all bytes!) 
- check(cydifflib.diff_bytes(unified, a, a, b'a', b'a')) - check(cydifflib.diff_bytes(unified, a, b, b'a', b'b')) + check(cydifflib.diff_bytes(unified, a, a, b"a", b"a")) + check(cydifflib.diff_bytes(unified, a, b, b"a", b"b")) # and with filenames and dates - check(cydifflib.diff_bytes(unified, a, a, b'a', b'a', b'2005', b'2013')) - check(cydifflib.diff_bytes(unified, a, b, b'a', b'b', b'2005', b'2013')) + check(cydifflib.diff_bytes(unified, a, a, b"a", b"a", b"2005", b"2013")) + check(cydifflib.diff_bytes(unified, a, b, b"a", b"b", b"2005", b"2013")) # same all over again, with context diff check(cydifflib.diff_bytes(context, a, a)) check(cydifflib.diff_bytes(context, a, b)) - check(cydifflib.diff_bytes(context, a, a, b'a', b'a')) - check(cydifflib.diff_bytes(context, a, b, b'a', b'b')) - check(cydifflib.diff_bytes(context, a, a, b'a', b'a', b'2005', b'2013')) - check(cydifflib.diff_bytes(context, a, b, b'a', b'b', b'2005', b'2013')) + check(cydifflib.diff_bytes(context, a, a, b"a", b"a")) + check(cydifflib.diff_bytes(context, a, b, b"a", b"b")) + check(cydifflib.diff_bytes(context, a, a, b"a", b"a", b"2005", b"2013")) + check(cydifflib.diff_bytes(context, a, b, b"a", b"b", b"2005", b"2013")) def test_byte_filenames(self): # somebody renamed a file from ISO-8859-2 to UTF-8 - fna = b'\xb3odz.txt' # "łodz.txt" - fnb = b'\xc5\x82odz.txt' + fna = b"\xb3odz.txt" # "łodz.txt" + fnb = b"\xc5\x82odz.txt" # they transcoded the content at the same time - a = [b'\xa3odz is a city in Poland.'] - b = [b'\xc5\x81odz is a city in Poland.'] + a = [b"\xa3odz is a city in Poland."] + b = [b"\xc5\x81odz is a city in Poland."] check = self.check unified = cydifflib.unified_diff @@ -399,23 +413,23 @@ def assertDiff(expect, actual): # do not compare expect and equal as lists, because unittest # uses cydifflib to report difference between lists actual = list(actual) - self.assertEqual(len(expect), len(actual)) + assert len(expect) == len(actual) for e, a in zip(expect, actual): - 
self.assertEqual(e, a) + assert e == a expect = [ - b'--- \xb3odz.txt', - b'+++ \xc5\x82odz.txt', - b'@@ -1 +1 @@', - b'-\xa3odz is a city in Poland.', - b'+\xc5\x81odz is a city in Poland.', + b"--- \xb3odz.txt", + b"+++ \xc5\x82odz.txt", + b"@@ -1 +1 @@", + b"-\xa3odz is a city in Poland.", + b"+\xc5\x81odz is a city in Poland.", ] - actual = cydifflib.diff_bytes(unified, a, b, fna, fnb, lineterm=b'') + actual = cydifflib.diff_bytes(unified, a, b, fna, fnb, lineterm=b"") assertDiff(expect, actual) # with dates (plain ASCII) - datea = b'2005-03-18' - dateb = b'2005-03-19' + datea = b"2005-03-18" + dateb = b"2005-03-19" check(cydifflib.diff_bytes(unified, a, b, fna, fnb, datea, dateb)) check(cydifflib.diff_bytes(context, a, b, fna, fnb, datea, dateb)) @@ -423,20 +437,19 @@ def assertDiff(expect, actual): # note the mixed encodings here: this is deeply wrong by every # tenet of Unicode, but it doesn't crash, it's parseable by # patch, and it's how UNIX(tm) diff behaves - b'--- \xb3odz.txt\t2005-03-18', - b'+++ \xc5\x82odz.txt\t2005-03-19', - b'@@ -1 +1 @@', - b'-\xa3odz is a city in Poland.', - b'+\xc5\x81odz is a city in Poland.', + b"--- \xb3odz.txt\t2005-03-18", + b"+++ \xc5\x82odz.txt\t2005-03-19", + b"@@ -1 +1 @@", + b"-\xa3odz is a city in Poland.", + b"+\xc5\x81odz is a city in Poland.", ] - actual = cydifflib.diff_bytes(unified, a, b, fna, fnb, datea, dateb, - lineterm=b'') + actual = cydifflib.diff_bytes(unified, a, b, fna, fnb, datea, dateb, lineterm=b"") assertDiff(expect, actual) def test_mixed_types_content(self): # type of input content must be consistent: all str or all bytes - a = [b'hello'] - b = ['hello'] + a = [b"hello"] + b = ["hello"] unified = cydifflib.unified_diff context = cydifflib.context_diff @@ -457,100 +470,109 @@ def test_mixed_types_filenames(self): # cannot pass filenames as bytes if content is str (this may not be # the right behaviour, but at least the test demonstrates how # things work) - a = ['hello\n'] - b = ['ohell\n'] - fna = 
b'ol\xe9.txt' # filename transcoded from ISO-8859-1 - fnb = b'ol\xc3a9.txt' # to UTF-8 + a = ["hello\n"] + b = ["ohell\n"] + fna = b"ol\xe9.txt" # filename transcoded from ISO-8859-1 + fnb = b"ol\xc3a9.txt" # to UTF-8 self._assert_type_error( "all arguments must be str, not: b'ol\\xe9.txt'", - cydifflib.unified_diff, a, b, fna, fnb) + cydifflib.unified_diff, + a, + b, + fna, + fnb, + ) def test_mixed_types_dates(self): # type of dates must be consistent with type of contents - a = [b'foo\n'] - b = [b'bar\n'] - datea = '1 fév' - dateb = '3 fév' + a = [b"foo\n"] + b = [b"bar\n"] + datea = "1 fév" + dateb = "3 fév" self._assert_type_error( "all arguments must be bytes, not str ('1 fév')", - cydifflib.diff_bytes, cydifflib.unified_diff, - a, b, b'a', b'b', datea, dateb) + cydifflib.diff_bytes, + cydifflib.unified_diff, + a, + b, + b"a", + b"b", + datea, + dateb, + ) # if input is str, non-ASCII dates are fine - a = ['foo\n'] - b = ['bar\n'] - list(cydifflib.unified_diff(a, b, 'a', 'b', datea, dateb)) + a = ["foo\n"] + b = ["bar\n"] + list(cydifflib.unified_diff(a, b, "a", "b", datea, dateb)) def _assert_type_error(self, msg, generator, *args): with self.assertRaises(TypeError) as ctx: list(generator(*args)) - self.assertEqual(msg, str(ctx.exception)) + assert msg == str(ctx.exception) + class TestJunkAPIs(unittest.TestCase): def test_is_line_junk_true(self): - for line in ['#', ' ', ' #', '# ', ' # ', '']: - self.assertTrue(cydifflib.IS_LINE_JUNK(line), repr(line)) + for line in ["#", " ", " #", "# ", " # ", ""]: + assert cydifflib.IS_LINE_JUNK(line), repr(line) def test_is_line_junk_false(self): - for line in ['##', ' ##', '## ', 'abc ', 'abc #', 'Mr. Moose is up!']: - self.assertFalse(cydifflib.IS_LINE_JUNK(line), repr(line)) + for line in ["##", " ##", "## ", "abc ", "abc #", "Mr. 
Moose is up!"]: + assert not cydifflib.IS_LINE_JUNK(line), repr(line) def test_is_line_junk_REDOS(self): - evil_input = ('\t' * 1000000) + '##' - self.assertFalse(cydifflib.IS_LINE_JUNK(evil_input)) + evil_input = ("\t" * 1000000) + "##" + assert not cydifflib.IS_LINE_JUNK(evil_input) def test_is_character_junk_true(self): - for char in [' ', '\t']: - self.assertTrue(cydifflib.IS_CHARACTER_JUNK(char), repr(char)) + for char in [" ", "\t"]: + assert cydifflib.IS_CHARACTER_JUNK(char), repr(char) def test_is_character_junk_false(self): - for char in ['a', '#', '\n', '\f', '\r', '\v']: - self.assertFalse(cydifflib.IS_CHARACTER_JUNK(char), repr(char)) + for char in ["a", "#", "\n", "\f", "\r", "\v"]: + assert not cydifflib.IS_CHARACTER_JUNK(char), repr(char) + class TestFindLongest(unittest.TestCase): def longer_match_exists(self, a, b, n): - return any(b_part in a for b_part in - [b[i:i + n + 1] for i in range(0, len(b) - n - 1)]) + return any(b_part in a for b_part in [b[i : i + n + 1] for i in range(0, len(b) - n - 1)]) def test_default_args(self): - a = 'foo bar' - b = 'foo baz bar' + a = "foo bar" + b = "foo baz bar" sm = cydifflib.SequenceMatcher(a=a, b=b) match = sm.find_longest_match() - self.assertEqual(match.a, 0) - self.assertEqual(match.b, 0) - self.assertEqual(match.size, 6) - self.assertEqual(a[match.a: match.a + match.size], - b[match.b: match.b + match.size]) - self.assertFalse(self.longer_match_exists(a, b, match.size)) + assert match.a == 0 + assert match.b == 0 + assert match.size == 6 + assert a[match.a : match.a + match.size] == b[match.b : match.b + match.size] + assert not self.longer_match_exists(a, b, match.size) match = sm.find_longest_match(alo=2, blo=4) - self.assertEqual(match.a, 3) - self.assertEqual(match.b, 7) - self.assertEqual(match.size, 4) - self.assertEqual(a[match.a: match.a + match.size], - b[match.b: match.b + match.size]) - self.assertFalse(self.longer_match_exists(a[2:], b[4:], match.size)) + assert match.a == 3 + assert match.b 
== 7 + assert match.size == 4 + assert a[match.a : match.a + match.size] == b[match.b : match.b + match.size] + assert not self.longer_match_exists(a[2:], b[4:], match.size) match = sm.find_longest_match(bhi=5, blo=1) - self.assertEqual(match.a, 1) - self.assertEqual(match.b, 1) - self.assertEqual(match.size, 4) - self.assertEqual(a[match.a: match.a + match.size], - b[match.b: match.b + match.size]) - self.assertFalse(self.longer_match_exists(a, b[1:5], match.size)) + assert match.a == 1 + assert match.b == 1 + assert match.size == 4 + assert a[match.a : match.a + match.size] == b[match.b : match.b + match.size] + assert not self.longer_match_exists(a, b[1:5], match.size) def test_longest_match_with_popular_chars(self): - a = 'dabcd' - b = 'd'*100 + 'abc' + 'd'*100 # length over 200 so popular used + a = "dabcd" + b = "d" * 100 + "abc" + "d" * 100 # length over 200 so popular used sm = cydifflib.SequenceMatcher(a=a, b=b) match = sm.find_longest_match(0, len(a), 0, len(b)) - self.assertEqual(match.a, 0) - self.assertEqual(match.b, 99) - self.assertEqual(match.size, 5) - self.assertEqual(a[match.a: match.a + match.size], - b[match.b: match.b + match.size]) - self.assertFalse(self.longer_match_exists(a, b, match.size)) + assert match.a == 0 + assert match.b == 99 + assert match.size == 5 + assert a[match.a : match.a + match.size] == b[match.b : match.b + match.size] + assert not self.longer_match_exists(a, b, match.size) def setUpModule(): @@ -562,5 +584,5 @@ def load_tests(loader, tests, pattern): return tests -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/tests/test_cydifflib_expect.html b/tests/test_cydifflib_expect.html index 3e6a7b7..d47f958 100644 --- a/tests/test_cydifflib_expect.html +++ b/tests/test_cydifflib_expect.html @@ -20,7 +20,7 @@ - + @@ -112,7 +112,7 @@

Context (first diff within numlines=5(default))

- + @@ -129,7 +129,7 @@

Context (first diff within numlines=5(default))

- + @@ -171,7 +171,7 @@

Context (first diff after numlines=5(default))

- + @@ -188,7 +188,7 @@

Context (first diff after numlines=5(default))

- + @@ -274,14 +274,14 @@

Context (numlines=0)

- + - + @@ -384,7 +384,7 @@

tabsize=2

cellspacing="0" cellpadding="0" rules="groups" > - + @@ -400,7 +400,7 @@

tabsize=default

cellspacing="0" cellpadding="0" rules="groups" > - + @@ -416,11 +416,11 @@

Context (wrapcolumn=14,numlines=0)

cellspacing="0" cellpadding="0" rules="groups" > - + - + @@ -428,11 +428,11 @@

Context (wrapcolumn=14,numlines=0)

- + - + @@ -451,7 +451,7 @@

wrapcolumn=14,splitlines()

cellspacing="0" cellpadding="0" rules="groups" > - + @@ -489,7 +489,7 @@

wrapcolumn=14,splitlines(True)

cellspacing="0" cellpadding="0" rules="groups" > - + @@ -523,4 +523,4 @@

wrapcolumn=14,splitlines(True)

81238123
91239123
1012310123
1212312123
1312313123
2312323123
2412324123
2512325123
2712327123
2812328123
1812318123
1912319123
2012320123
2212322123
2312323123
3312333123
3412334123
3512335123
3712337123
3812338123
4   3. Simple is better than complex.3   3.   Simple is better than complex.
5   4. Complex is better than complicated.4   4. Complicated is better than complex.
5   5. Flat is better than nested.
n17   1. Beautiful is beTTer than ugly.n17   1. Beautiful is better than ugly.
18   2. Explicit is better than implicit.
19   3. Simple is better than complex.18   3.   Simple is better than complex.
20   4. Complex is better than complicated.19   4. Complicated is better than complex.
20   5. Flat is better than nested.
t32   1. Beautiful is beTTer than ugly.t32   1. Beautiful is better than ugly.
33   2. Explicit is better than implicit.
f1f1
t2    Line 1: preceded by from:[tt] to:[ssss]t2    Line 1: preceded by from:[tt] to:[ssss]
f1f1
t2                Line 1: preceded by from:[tt] to:[ssss]t2    Line 1: preceded by from:[tt] to:[ssss]
n4line 2n4line 2    adde
 >d
n6line 4   changn6line 4   chanG
>ed>Ed
>ed>ed
8line 6   chang8line 6a  chang
>ed>Ed
n10line 8  subtran10line 8
>cted 
t1212345678901234t121234567890
>56789012345689 
f1line 0f1line 0
212345678901234212345678901234
f1line 0f1line 0
212345678901234212345678901234
- \ No newline at end of file +