--- a/tests/coverage.py Sun Dec 02 23:26:40 2007 +0100
+++ b/tests/coverage.py Sun Dec 02 23:26:40 2007 +0100
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
#
# Perforce Defect Tracking Integration Project
# <http://www.ravenbrook.com/project/p4dti/>
@@ -22,15 +22,20 @@
# interface and limitations. See [GDR 2001-12-04b] for requirements and
# design.
-"""Usage:
+r"""Usage:
-coverage.py -x MODULE.py [ARG1 ARG2 ...]
+coverage.py -x [-p] MODULE.py [ARG1 ARG2 ...]
Execute module, passing the given command-line arguments, collecting
- coverage data.
+ coverage data. With the -p option, write the data to a temporary file whose
+ name includes the machine name and process ID.
coverage.py -e
Erase collected coverage data.
+coverage.py -c
+ Collect data from multiple coverage files (as created by the -p option above)
+ and store them in a single file representing the union of the coverage.
+
coverage.py -r [-m] [-o dir1,dir2,...] FILE1 FILE2 ...
Report on the statement coverage for the given files. With the -m
option, show line numbers of the statements that weren't executed.
@@ -49,16 +54,26 @@
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else."""
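# An illustrative parallel-mode session (a sketch based on the usage text
# above; the program name and argument are placeholders):
#
#     python coverage.py -e                    # erase old data
#     python coverage.py -x -p myprog.py arg   # each run writes .coverage.<host>.<pid>
#     python coverage.py -c                    # combine the per-process files
#     python coverage.py -r -m myprog.py       # report, listing missed line numbers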
-__version__ = "2.5.20051204" # see detailed history at the end of this file.
+__version__ = "2.77.20070729" # see detailed history at the end of this file.
import compiler
import compiler.visitor
+import glob
import os
import re
import string
+import symbol
import sys
import threading
+import token
import types
+from socket import gethostname
+
+# Python version compatibility
+try:
+ strclass = basestring # new to 2.3
+except:
+ strclass = str
# 2. IMPLEMENTATION
#
@@ -81,25 +96,29 @@
# names to increase speed.
class StatementFindingAstVisitor(compiler.visitor.ASTVisitor):
+ """ A visitor for a parsed Abstract Syntax Tree which finds executable
+ statements.
+ """
def __init__(self, statements, excluded, suite_spots):
compiler.visitor.ASTVisitor.__init__(self)
self.statements = statements
self.excluded = excluded
self.suite_spots = suite_spots
self.excluding_suite = 0
-
+
def doRecursive(self, node):
- self.recordNodeLine(node)
for n in node.getChildNodes():
self.dispatch(n)
visitStmt = visitModule = doRecursive
-
+
def doCode(self, node):
if hasattr(node, 'decorators') and node.decorators:
self.dispatch(node.decorators)
- self.doSuite(node, node.code)
-
+ self.recordAndDispatch(node.code)
+ else:
+ self.doSuite(node, node.code)
+
visitFunction = visitClass = doCode
def getFirstLine(self, node):
@@ -119,17 +138,40 @@
for n in node.getChildNodes():
lineno = max(lineno, self.getLastLine(n))
return lineno
-
+
def doStatement(self, node):
self.recordLine(self.getFirstLine(node))
- visitAssert = visitAssign = visitAssTuple = visitDiscard = visitPrint = \
+ visitAssert = visitAssign = visitAssTuple = visitPrint = \
visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
doStatement
+
+ def visitPass(self, node):
+ # Pass statements have weird interactions with docstrings. If this
+ # pass statement is part of one of those pairs, claim that the statement
+ # is on the later of the two lines.
+ l = node.lineno
+ if l:
+ lines = self.suite_spots.get(l, [l,l])
+ self.statements[lines[1]] = 1
+
+ def visitDiscard(self, node):
+ # Discard nodes are statements that execute an expression, but then
+ # discard the results. This includes function calls, so we can't
+ # ignore them all. But if the expression is a constant, the statement
+ # won't be "executed", so don't count it now.
+ if node.expr.__class__.__name__ != 'Const':
+ self.doStatement(node)
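# An illustrative case (a sketch, not part of the patch) for the two handlers
# above: in
#
#     def stub():
#         "placeholder"
#         pass
#
# different Python versions report execution on either the docstring line or
# the pass line, so the pair is credited to the later of the two lines, and
# the bare string constant itself is never counted as an executable statement.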
def recordNodeLine(self, node):
- return self.recordLine(node.lineno)
-
+ # Stmt nodes often have a lineno of None, but shouldn't claim the first
+ # line of their children (because the first child might be an ignorable
+ # line like "global a").
+ if node.__class__.__name__ != 'Stmt':
+ return self.recordLine(self.getFirstLine(node))
+ else:
+ return 0
+
def recordLine(self, lineno):
# Returns a bool, whether the line is included or excluded.
if lineno:
@@ -137,7 +179,7 @@
# keyword.
if lineno in self.suite_spots:
lineno = self.suite_spots[lineno][0]
- # If we're inside an exluded suite, record that this line was
+ # If we're inside an excluded suite, record that this line was
# excluded.
if self.excluding_suite:
self.excluded[lineno] = 1
@@ -153,9 +195,9 @@
self.statements[lineno] = 1
return 1
return 0
-
+
default = recordNodeLine
-
+
def recordAndDispatch(self, node):
self.recordNodeLine(node)
self.dispatch(node)
@@ -166,7 +208,7 @@
self.excluding_suite = 1
self.recordAndDispatch(body)
self.excluding_suite = exsuite
-
+
def doPlainWordSuite(self, prevsuite, suite):
# Finding the exclude lines for else's is tricky, because they aren't
# present in the compiler parse tree. Look at the previous suite,
@@ -180,15 +222,17 @@
break
else:
self.doSuite(None, suite)
-
+
def doElse(self, prevsuite, node):
if node.else_:
self.doPlainWordSuite(prevsuite, node.else_)
-
+
def visitFor(self, node):
self.doSuite(node, node.body)
self.doElse(node.body, node)
+ visitWhile = visitFor
+
def visitIf(self, node):
# The first test has to be handled separately from the rest.
# The first test is credited to the line with the "if", but the others
@@ -198,10 +242,6 @@
self.doSuite(t, n)
self.doElse(node.tests[-1][1], node)
- def visitWhile(self, node):
- self.doSuite(node, node.body)
- self.doElse(node.body, node)
-
def visitTryExcept(self, node):
self.doSuite(node, node.body)
for i in range(len(node.handlers)):
@@ -216,11 +256,14 @@
else:
self.doSuite(a, h)
self.doElse(node.handlers[-1][2], node)
-
+
def visitTryFinally(self, node):
self.doSuite(node, node.body)
self.doPlainWordSuite(node.body, node.final)
-
+
+ def visitWith(self, node):
+ self.doSuite(node, node.body)
+
def visitGlobal(self, node):
# "global" statements don't execute like others (they don't call the
# trace function), so don't record their line numbers.
@@ -228,9 +271,9 @@
the_coverage = None
+class CoverageException(Exception): pass
+
class coverage:
- error = "coverage error"
-
# Name of the cache file (unless environment variable is set).
cache_default = ".coverage"
@@ -240,7 +283,7 @@
# A dictionary with an entry for (Python source file name, line number
# in that file) if that line has been executed.
c = {}
-
+
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed.
@@ -257,53 +300,58 @@
def __init__(self):
global the_coverage
if the_coverage:
- raise self.error, "Only one coverage object allowed."
+ raise CoverageException, "Only one coverage object allowed."
self.usecache = 1
self.cache = None
+ self.parallel_mode = False
self.exclude_re = ''
self.nesting = 0
self.cstack = []
self.xstack = []
- self.relative_dir = os.path.normcase(os.path.abspath(os.curdir)+os.path.sep)
+ self.relative_dir = os.path.normcase(os.path.abspath(os.curdir)+os.sep)
+ self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')
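# For illustration (a sketch, not part of the patch), the default regex
# registered above excludes any line carrying the conventional pragma, along
# with the suite it introduces:
#
#     if debugging:   # pragma: no cover
#         dump_internal_state()
#
# Both names in the example are hypothetical; only the comment text matters.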
- # t(f, x, y). This method is passed to sys.settrace as a trace function.
- # See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
+ # t(f, x, y). This method is passed to sys.settrace as a trace function.
+ # See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
# the arguments and return value of the trace function.
# See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
# objects.
-
- def t(self, f, w, a): #pragma: no cover
- #print w, f.f_code.co_filename, f.f_lineno
+
+ def t(self, f, w, unused): #pragma: no cover
if w == 'line':
+ #print "Executing %s @ %d" % (f.f_code.co_filename, f.f_lineno)
self.c[(f.f_code.co_filename, f.f_lineno)] = 1
for c in self.cstack:
c[(f.f_code.co_filename, f.f_lineno)] = 1
return self.t
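# A commented sketch (not part of the patch) of the sys.settrace protocol that
# t() implements: the trace function receives (frame, event, arg) and returns
# itself so it keeps receiving 'line' events for the frame.  The names below
# are illustrative only.
#
#     import sys
#
#     def demo_trace(frame, event, arg):
#         if event == 'line':
#             print "%s:%d" % (frame.f_code.co_filename, frame.f_lineno)
#         return demo_trace
#
#     def demo():
#         a = 1
#         return a + 1
#
#     sys.settrace(demo_trace)
#     demo()
#     sys.settrace(None)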
-
- def help(self, error=None):
+
+ def help(self, error=None): #pragma: no cover
if error:
print error
print
print __doc__
sys.exit(1)
- def command_line(self):
+ def command_line(self, argv, help_fn=None):
import getopt
+ help_fn = help_fn or self.help
settings = {}
optmap = {
'-a': 'annotate',
+ '-c': 'collect',
'-d:': 'directory=',
'-e': 'erase',
'-h': 'help',
'-i': 'ignore-errors',
'-m': 'show-missing',
+ '-p': 'parallel-mode',
'-r': 'report',
'-x': 'execute',
- '-o': 'omit=',
+ '-o:': 'omit=',
}
short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
long_opts = optmap.values()
- options, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
+ options, args = getopt.getopt(argv, short_opts, long_opts)
for o, a in options:
if optmap.has_key(o):
settings[optmap[o]] = 1
@@ -312,69 +360,84 @@
elif o[2:] in long_opts:
settings[o[2:]] = 1
elif o[2:] + '=' in long_opts:
- settings[o[2:]] = a
- else:
- self.help("Unknown option: '%s'." % o)
+ settings[o[2:]+'='] = a
+ else: #pragma: no cover
+ pass # Can't get here, because getopt won't return anything unknown.
+
if settings.get('help'):
- self.help()
+ help_fn()
+
for i in ['erase', 'execute']:
- for j in ['annotate', 'report']:
+ for j in ['annotate', 'report', 'collect']:
if settings.get(i) and settings.get(j):
- self.help("You can't specify the '%s' and '%s' "
+ help_fn("You can't specify the '%s' and '%s' "
"options at the same time." % (i, j))
+
args_needed = (settings.get('execute')
or settings.get('annotate')
or settings.get('report'))
- action = settings.get('erase') or args_needed
+ action = (settings.get('erase')
+ or settings.get('collect')
+ or args_needed)
if not action:
- self.help("You must specify at least one of -e, -x, -r, or -a.")
+ help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")
if not args_needed and args:
- self.help("Unexpected arguments %s." % args)
-
+ help_fn("Unexpected arguments: %s" % " ".join(args))
+
+ self.parallel_mode = settings.get('parallel-mode')
self.get_ready()
- self.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
if settings.get('erase'):
self.erase()
if settings.get('execute'):
if not args:
- self.help("Nothing to do.")
+ help_fn("Nothing to do.")
sys.argv = args
self.start()
import __main__
sys.path[0] = os.path.dirname(sys.argv[0])
execfile(sys.argv[0], __main__.__dict__)
+ if settings.get('collect'):
+ self.collect()
if not args:
args = self.cexecuted.keys()
+
ignore_errors = settings.get('ignore-errors')
show_missing = settings.get('show-missing')
- directory = settings.get('directory')
- omit = filter(None, settings.get('omit', '').split(','))
- omit += ['/<'] # Always skip /<string> etc.
+ directory = settings.get('directory=')
+
+ omit = settings.get('omit=')
+ if omit is not None:
+ omit = omit.split(',')
+ else:
+ omit = []
if settings.get('report'):
self.report(args, show_missing, ignore_errors, omit_prefixes=omit)
if settings.get('annotate'):
self.annotate(args, directory, ignore_errors, omit_prefixes=omit)
- def use_cache(self, usecache):
+ def use_cache(self, usecache, cache_file=None):
self.usecache = usecache
-
- def get_ready(self):
+ if cache_file and not self.cache:
+ self.cache_default = cache_file
+
+ def get_ready(self, parallel_mode=False):
if self.usecache and not self.cache:
- self.cache = os.path.abspath(os.environ.get(self.cache_env,
- self.cache_default))
+ self.cache = os.environ.get(self.cache_env, self.cache_default)
+ if self.parallel_mode:
+ self.cache += "." + gethostname() + "." + str(os.getpid())
self.restore()
self.analysis_cache = {}
-
- def start(self):
+
+ def start(self, parallel_mode=False):
self.get_ready()
if self.nesting == 0: #pragma: no cover
sys.settrace(self.t)
if hasattr(threading, 'settrace'):
threading.settrace(self.t)
self.nesting += 1
-
+
def stop(self):
self.nesting -= 1
if self.nesting == 0: #pragma: no cover
@@ -383,12 +446,12 @@
threading.settrace(None)
def erase(self):
+ self.get_ready()
self.c = {}
self.analysis_cache = {}
self.cexecuted = {}
if self.cache and os.path.exists(self.cache):
os.remove(self.cache)
- self.exclude_re = ""
def exclude(self, re):
if self.exclude_re:
@@ -398,7 +461,7 @@
def begin_recursive(self):
self.cstack.append(self.c)
self.xstack.append(self.exclude_re)
-
+
def end_recursive(self):
self.c = self.cstack.pop()
self.exclude_re = self.xstack.pop()
@@ -406,8 +469,6 @@
# save(). Save coverage data to the coverage cache.
def save(self):
- # move to directory that must exist.
- os.chdir(os.sep)
if self.usecache and self.cache:
self.canonicalize_filenames()
cache = open(self.cache, 'wb')
@@ -421,17 +482,45 @@
self.c = {}
self.cexecuted = {}
assert self.usecache
- if not os.path.exists(self.cache):
- return
+ if os.path.exists(self.cache):
+ self.cexecuted = self.restore_file(self.cache)
+
+ def restore_file(self, file_name):
try:
- cache = open(self.cache, 'rb')
+ cache = open(file_name, 'rb')
import marshal
cexecuted = marshal.load(cache)
cache.close()
if isinstance(cexecuted, types.DictType):
- self.cexecuted = cexecuted
+ return cexecuted
+ else:
+ return {}
except:
- pass
+ return {}
+
+ # collect(). Collect data from multiple files produced by parallel mode.
+
+ def collect(self):
+ cache_dir, local = os.path.split(self.cache)
+ for f in os.listdir(cache_dir or '.'):
+ if not f.startswith(local):
+ continue
+
+ full_path = os.path.join(cache_dir, f)
+ cexecuted = self.restore_file(full_path)
+ self.merge_data(cexecuted)
+
+ def merge_data(self, new_data):
+ for file_name, file_data in new_data.items():
+ if self.cexecuted.has_key(file_name):
+ self.merge_file_data(self.cexecuted[file_name], file_data)
+ else:
+ self.cexecuted[file_name] = file_data
+
+ def merge_file_data(self, cache_data, new_data):
+ for line_number in new_data.keys():
+ if not cache_data.has_key(line_number):
+ cache_data[line_number] = new_data[line_number]
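# A sketch (not part of the patch) of inspecting one data file the way
# restore_file() does: each file is a marshal'd dictionary mapping canonical
# source filenames to {line_number: 1} dictionaries.  ".coverage" is the
# default cache name; use COVERAGE_FILE's value instead if it is set.
#
#     import marshal
#     f = open(".coverage", "rb")
#     data = marshal.load(f)
#     f.close()
#     for filename, lines in data.items():
#         print filename, len(lines), "lines executed"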
# canonical_filename(filename). Return a canonical filename for the
# file (that is, an absolute path with no redundant components and
@@ -452,11 +541,14 @@
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
- # canonicalize_filenames(). Copy results from "c" to "cexecuted",
+ # canonicalize_filenames(). Copy results from "c" to "cexecuted",
# canonicalizing filenames on the way. Clear the "c" map.
def canonicalize_filenames(self):
for filename, lineno in self.c.keys():
+ if filename == '<string>':
+ # Can't do anything useful with exec'd strings, so skip them.
+ continue
f = self.canonical_filename(filename)
if not self.cexecuted.has_key(f):
self.cexecuted[f] = {}
@@ -468,18 +560,20 @@
def morf_filename(self, morf):
if isinstance(morf, types.ModuleType):
if not hasattr(morf, '__file__'):
- raise self.error, "Module has no __file__ attribute."
- file = morf.__file__
+ raise CoverageException, "Module has no __file__ attribute."
+ f = morf.__file__
else:
- file = morf
- return self.canonical_filename(file)
+ f = morf
+ return self.canonical_filename(f)
# analyze_morf(morf). Analyze the module or filename passed as
# the argument. If the source code can't be found, raise an error.
# Otherwise, return a tuple of (1) the canonical filename of the
# source code for the module, (2) a list of lines of statements
- # in the source code, and (3) a list of lines of excluded statements.
-
+ # in the source code, (3) a list of lines of excluded statements,
+ # and (4), a map of line numbers to multi-line line number ranges, for
+ # statements that cross lines.
+
def analyze_morf(self, morf):
if self.analysis_cache.has_key(morf):
return self.analysis_cache[morf]
@@ -487,30 +581,69 @@
ext = os.path.splitext(filename)[1]
if ext == '.pyc':
if not os.path.exists(filename[0:-1]):
- raise self.error, ("No source for compiled code '%s'."
+ raise CoverageException, ("No source for compiled code '%s'."
% filename)
filename = filename[0:-1]
elif ext != '.py':
- raise self.error, "File '%s' not Python source." % filename
+ raise CoverageException, "File '%s' not Python source." % filename
source = open(filename, 'r')
- lines, excluded_lines = self.find_executable_statements(
+ lines, excluded_lines, line_map = self.find_executable_statements(
source.read(), exclude=self.exclude_re
)
source.close()
- result = filename, lines, excluded_lines
+ result = filename, lines, excluded_lines, line_map
self.analysis_cache[morf] = result
return result
+ def first_line_of_tree(self, tree):
+ while True:
+ if len(tree) == 3 and type(tree[2]) == type(1):
+ return tree[2]
+ tree = tree[1]
+
+ def last_line_of_tree(self, tree):
+ while True:
+ if len(tree) == 3 and type(tree[2]) == type(1):
+ return tree[2]
+ tree = tree[-1]
+
+ def find_docstring_pass_pair(self, tree, spots):
+ for i in range(1, len(tree)):
+ if self.is_string_constant(tree[i]) and self.is_pass_stmt(tree[i+1]):
+ first_line = self.first_line_of_tree(tree[i])
+ last_line = self.last_line_of_tree(tree[i+1])
+ self.record_multiline(spots, first_line, last_line)
+
+ def is_string_constant(self, tree):
+ try:
+ return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.expr_stmt
+ except:
+ return False
+
+ def is_pass_stmt(self, tree):
+ try:
+ return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.pass_stmt
+ except:
+ return False
+
+ def record_multiline(self, spots, i, j):
+ for l in range(i, j+1):
+ spots[l] = (i, j)
+
def get_suite_spots(self, tree, spots):
- import symbol, token
+ """ Analyze a parse tree to find suite introducers which span a number
+ of lines.
+ """
for i in range(1, len(tree)):
- if isinstance(tree[i], tuple):
+ if type(tree[i]) == type(()):
if tree[i][0] == symbol.suite:
# Found a suite, look back for the colon and keyword.
lineno_colon = lineno_word = None
for j in range(i-1, 0, -1):
if tree[j][0] == token.COLON:
- lineno_colon = tree[j][2]
+ # Colons are never executed themselves: we want the
+ # line number of the last token before the colon.
+ lineno_colon = self.last_line_of_tree(tree[j-1])
elif tree[j][0] == token.NAME:
if tree[j][1] == 'elif':
# Find the line number of the first non-terminal
@@ -532,8 +665,18 @@
if lineno_colon and lineno_word:
# Found colon and keyword, mark all the lines
# between the two with the two line numbers.
- for l in range(lineno_word, lineno_colon+1):
- spots[l] = (lineno_word, lineno_colon)
+ self.record_multiline(spots, lineno_word, lineno_colon)
+
+ # "pass" statements are tricky: different versions of Python
+ # treat them differently, especially in the common case of a
+ # function with a doc string and a single pass statement.
+ self.find_docstring_pass_pair(tree[i], spots)
+
+ elif tree[i][0] == symbol.simple_stmt:
+ first_line = self.first_line_of_tree(tree[i])
+ last_line = self.last_line_of_tree(tree[i])
+ if first_line != last_line:
+ self.record_multiline(spots, first_line, last_line)
self.get_suite_spots(tree[i], spots)
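# An illustration (a sketch, not part of the patch) of what ends up in the
# "spots" map: for a suite introducer that spans two lines, such as
#
#     if (total > limit and
#             not quiet):
#         report_overflow()
#
# every line of the introducer maps to the same (first, last) pair, so an
# execution event reported on either line is credited to the statement as a
# whole.  The names in the example are hypothetical.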
def find_executable_statements(self, text, exclude=None):
@@ -547,10 +690,13 @@
if reExclude.search(lines[i]):
excluded[i+1] = 1
+ # Parse the code and analyze the parse tree to find out which statements
+ # are multiline, and where suites begin and end.
import parser
tree = parser.suite(text+'\n\n').totuple(1)
self.get_suite_spots(tree, suite_spots)
-
+ #print "Suite spots:", suite_spots
+
# Use the compiler module to parse the text and find the executable
# statements. We add newlines to be impervious to final partial lines.
statements = {}
@@ -562,7 +708,7 @@
lines.sort()
excluded_lines = excluded.keys()
excluded_lines.sort()
- return lines, excluded_lines
+ return lines, excluded_lines, suite_spots
# format_lines(statements, lines). Format a list of line numbers
# for printing by coalescing groups of lines as long as the lines
@@ -595,7 +741,8 @@
return "%d" % start
else:
return "%d-%d" % (start, end)
- return string.join(map(stringify, pairs), ", ")
+ ret = string.join(map(stringify, pairs), ", ")
+ return ret
# Backward compatibility with version 1.
def analysis(self, morf):
@@ -603,13 +750,17 @@
return f, s, m, mf
def analysis2(self, morf):
- filename, statements, excluded = self.analyze_morf(morf)
+ filename, statements, excluded, line_map = self.analyze_morf(morf)
self.canonicalize_filenames()
if not self.cexecuted.has_key(filename):
self.cexecuted[filename] = {}
missing = []
for line in statements:
- if not self.cexecuted[filename].has_key(line):
+ lines = line_map.get(line, [line, line])
+ for l in range(lines[0], lines[1]+1):
+ if self.cexecuted[filename].has_key(l):
+ break
+ else:
missing.append(line)
return (filename, statements, excluded, missing,
self.format_lines(statements, missing))
@@ -647,6 +798,15 @@
def report(self, morfs, show_missing=1, ignore_errors=0, file=None, omit_prefixes=[]):
if not isinstance(morfs, types.ListType):
morfs = [morfs]
+ # On Windows, the shell doesn't expand wildcards. Do it here.
+ globbed = []
+ for morf in morfs:
+ if isinstance(morf, strclass):
+ globbed.extend(glob.glob(morf))
+ else:
+ globbed.append(morf)
+ morfs = globbed
+
morfs = self.filter_by_prefix(morfs, omit_prefixes)
morfs.sort(self.morf_name_compare)
@@ -684,8 +844,8 @@
raise
except:
if not ignore_errors:
- type, msg = sys.exc_info()[0:2]
- print >>file, fmt_err % (name, type, msg)
+ typ, msg = sys.exc_info()[0:2]
+ print >>file, fmt_err % (name, typ, msg)
if len(morfs) > 1:
print >>file, "-" * len(header)
if total_statements > 0:
@@ -713,7 +873,7 @@
except:
if not ignore_errors:
raise
-
+
def annotate_file(self, filename, statements, excluded, missing, directory=None):
source = open(filename, 'r')
if directory:
@@ -741,7 +901,7 @@
if self.blank_re.match(line):
dest.write(' ')
elif self.else_re.match(line):
- # Special logic for lines containing only 'else:'.
+ # Special logic for lines containing only 'else:'.
# See [GDR 2001-12-04b, 3.2].
if i >= len(statements) and j >= len(missing):
dest.write('! ')
@@ -765,18 +925,41 @@
the_coverage = coverage()
# Module functions call methods in the singleton object.
-def use_cache(*args, **kw): return the_coverage.use_cache(*args, **kw)
-def start(*args, **kw): return the_coverage.start(*args, **kw)
-def stop(*args, **kw): return the_coverage.stop(*args, **kw)
-def erase(*args, **kw): return the_coverage.erase(*args, **kw)
-def begin_recursive(*args, **kw): return the_coverage.begin_recursive(*args, **kw)
-def end_recursive(*args, **kw): return the_coverage.end_recursive(*args, **kw)
-def exclude(*args, **kw): return the_coverage.exclude(*args, **kw)
-def analysis(*args, **kw): return the_coverage.analysis(*args, **kw)
-def analysis2(*args, **kw): return the_coverage.analysis2(*args, **kw)
-def report(*args, **kw): return the_coverage.report(*args, **kw)
-def annotate(*args, **kw): return the_coverage.annotate(*args, **kw)
-def annotate_file(*args, **kw): return the_coverage.annotate_file(*args, **kw)
+def use_cache(*args, **kw):
+ return the_coverage.use_cache(*args, **kw)
+
+def start(*args, **kw):
+ return the_coverage.start(*args, **kw)
+
+def stop(*args, **kw):
+ return the_coverage.stop(*args, **kw)
+
+def erase(*args, **kw):
+ return the_coverage.erase(*args, **kw)
+
+def begin_recursive(*args, **kw):
+ return the_coverage.begin_recursive(*args, **kw)
+
+def end_recursive(*args, **kw):
+ return the_coverage.end_recursive(*args, **kw)
+
+def exclude(*args, **kw):
+ return the_coverage.exclude(*args, **kw)
+
+def analysis(*args, **kw):
+ return the_coverage.analysis(*args, **kw)
+
+def analysis2(*args, **kw):
+ return the_coverage.analysis2(*args, **kw)
+
+def report(*args, **kw):
+ return the_coverage.report(*args, **kw)
+
+def annotate(*args, **kw):
+ return the_coverage.annotate(*args, **kw)
+
+def annotate_file(*args, **kw):
+ return the_coverage.annotate_file(*args, **kw)
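# A hedged sketch (not part of the patch) of driving these module-level
# wrappers from another program; "mymodule" and its main() are hypothetical
# stand-ins for the code being measured.
#
#     import coverage
#     import mymodule
#
#     coverage.erase()
#     coverage.start()
#     mymodule.main()
#     coverage.stop()
#     coverage.report(mymodule, show_missing=1)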
# Save coverage data when Python exits. (The atexit module wasn't
# introduced until Python 2.0, so use sys.exitfunc when it's not
@@ -789,7 +972,7 @@
# Command-line interface.
if __name__ == '__main__':
- the_coverage.command_line()
+ the_coverage.command_line(sys.argv[1:])
# A. REFERENCES
@@ -850,7 +1033,7 @@
# Thanks, Allen.
#
# 2005-12-02 NMB Call threading.settrace so that all threads are measured.
-# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
+# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
# captured to a different destination.
#
# 2005-12-03 NMB coverage.py can now measure itself.
@@ -858,10 +1041,46 @@
# 2005-12-04 NMB Adapted Greg Rogers' patch for using relative filenames,
# and sorting and omitting files to report on.
#
+# 2006-07-23 NMB Applied Joseph Tate's patch for function decorators.
+#
+# 2006-08-21 NMB Applied Sigve Tjora and Mark van der Wal's fixes for argument
+# handling.
+#
+# 2006-08-22 NMB Applied Geoff Bache's parallel mode patch.
+#
+# 2006-08-23 NMB Refactorings to improve testability. Fixes to command-line
+# logic for parallel mode and collect.
+#
+# 2006-08-25 NMB "#pragma: nocover" is excluded by default.
+#
+# 2006-09-10 NMB Properly ignore docstrings and other constant expressions that
+# appear in the middle of a function, a problem reported by Tim Leslie.
+# Minor changes to avoid lint warnings.
+#
+# 2006-09-17 NMB coverage.erase() shouldn't clobber the exclude regex.
+# Change how parallel mode is invoked, and fix erase() so that it erases the
+# cache when called programmatically.
+#
+# 2007-07-21 NMB In reports, ignore code executed from strings, since we can't
+# do anything useful with it anyway.
+# Better file handling on Linux, thanks Guillaume Chazarain.
+# Better shell support on Windows, thanks Noel O'Boyle.
+# Python 2.2 support maintained, thanks Catherine Proulx.
+#
+# 2007-07-22 NMB Python 2.5 now fully supported. The method of dealing with
+# multi-line statements is now less sensitive to the exact line that Python
+# reports during execution. Pass statements are handled specially so that their
+# disappearance during execution won't throw off the measurement.
+#
+# 2007-07-23 NMB Now Python 2.5 is *really* fully supported: the body of the
+# new with statement is counted as executable.
+#
+# 2007-07-29 NMB Better packaging.
+
# C. COPYRIGHT AND LICENCE
#
# Copyright 2001 Gareth Rees. All rights reserved.
-# Copyright 2004-2005 Ned Batchelder. All rights reserved.
+# Copyright 2004-2007 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -888,4 +1107,4 @@
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
-# $Id: coverage.py 26 2005-12-04 18:42:44Z ned $
+# $Id: coverage.py 74 2007-07-29 22:28:35Z nedbat $