# HG changeset patch
# User FUJIWARA Katsunori
# Date 1490575474 -32400
# Node ID d5cbbee542eb51965ff11afb67641acf7335a3c3
# Parent  d3140fd322bfd74db8a7bec16fd20d8947190dae
largefiles: reuse hexsha1() to centralize hash calculation logic into it

This patch also renames the argument of hexsha1(), not only for
readability ("data" isn't a good name for a file-like object), but also
for reviewability (including the hexsha1() code helps reviewers to
confirm how these functions are similar).

BTW, copyandhash() also has similar logic, but it can't reuse hexsha1(),
because it writes read-in data into the specified fileobj simultaneously.

diff -r d3140fd322bf -r d5cbbee542eb hgext/largefiles/lfutil.py
--- a/hgext/largefiles/lfutil.py	Sun Mar 26 19:11:41 2017 +0900
+++ b/hgext/largefiles/lfutil.py	Mon Mar 27 09:44:34 2017 +0900
@@ -373,11 +373,8 @@
 def hashfile(file):
     if not os.path.exists(file):
         return ''
-    hasher = hashlib.sha1('')
     with open(file, 'rb') as fd:
-        for data in util.filechunkiter(fd):
-            hasher.update(data)
-        return hasher.hexdigest()
+        return hexsha1(fd)
 
 def getexecutable(filename):
     mode = os.stat(filename).st_mode
@@ -398,11 +395,11 @@
         url = join(url, a)
     return url
 
-def hexsha1(data):
+def hexsha1(fileobj):
     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
     object data"""
     h = hashlib.sha1()
-    for chunk in util.filechunkiter(data):
+    for chunk in util.filechunkiter(fileobj):
         h.update(chunk)
     return h.hexdigest()
 