def listsort(list, key):
    """Sort *list* in place by *key*, with a Python 2.3 fallback.

    Python 2.3's list.sort() has no key= argument and raises
    TypeError when given one; in that case emulate key-based
    sorting with an explicit comparison function.
    """
    def compare(l, r):
        return cmp(key(l), key(r))
    try:
        list.sort(key=key)
    except TypeError:
        list.sort(compare)
22 |
22 |
23 class logentry(object): |
23 class logentry(object): |
24 '''Class logentry has the following attributes: |
24 '''Class logentry has the following attributes: |
25 .author - author name as CVS knows it |
25 .author - author name as CVS knows it |
26 .branch - name of branch this revision is on |
26 .branch - name of branch this revision is on |
76 |
76 |
77 # Get the real directory in the repository |
77 # Get the real directory in the repository |
78 try: |
78 try: |
79 prefix = file(os.path.join('CVS','Repository')).read().strip() |
79 prefix = file(os.path.join('CVS','Repository')).read().strip() |
80 if prefix == ".": |
80 if prefix == ".": |
81 prefix="" |
81 prefix = "" |
82 directory = prefix |
82 directory = prefix |
83 except IOError: |
83 except IOError: |
84 raise logerror('Not a CVS sandbox') |
84 raise logerror('Not a CVS sandbox') |
85 |
85 |
86 if prefix and not prefix.endswith('/'): |
86 if prefix and not prefix.endswith('/'): |
87 prefix+='/' |
87 prefix += '/' |
88 |
88 |
89 # Use the Root file in the sandbox, if it exists |
89 # Use the Root file in the sandbox, if it exists |
90 try: |
90 try: |
91 root = file(os.path.join('CVS','Root')).read().strip() |
91 root = file(os.path.join('CVS','Root')).read().strip() |
92 except IOError: |
92 except IOError: |
111 # various components, so that |
111 # various components, so that |
112 # :pserver:user@server:/path |
112 # :pserver:user@server:/path |
113 # and |
113 # and |
114 # /pserver/user/server/path |
114 # /pserver/user/server/path |
115 # are mapped to different cache file names. |
115 # are mapped to different cache file names. |
116 cachefile = root.split(":")+[directory, "cache"] |
116 cachefile = root.split(":") + [directory, "cache"] |
117 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] |
117 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] |
118 cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) |
118 cachefile = os.path.join(cachedir, |
|
119 '.'.join([s for s in cachefile if s])) |
119 |
120 |
120 if cache == 'update': |
121 if cache == 'update': |
121 try: |
122 try: |
122 ui.note(_('reading cvs log cache %s\n') % cachefile) |
123 ui.note(_('reading cvs log cache %s\n') % cachefile) |
123 oldlog = pickle.load(file(cachefile)) |
124 oldlog = pickle.load(file(cachefile)) |
133 cmd = ['cvs', '-q'] |
134 cmd = ['cvs', '-q'] |
134 if root: |
135 if root: |
135 cmd.append('-d%s' % root) |
136 cmd.append('-d%s' % root) |
136 p = root.split(':')[-1] |
137 p = root.split(':')[-1] |
137 if not p.endswith('/'): |
138 if not p.endswith('/'): |
138 p+='/' |
139 p += '/' |
139 prefix = p+prefix |
140 prefix = p + prefix |
140 cmd.append(['log', 'rlog'][rlog]) |
141 cmd.append(['log', 'rlog'][rlog]) |
141 if date: |
142 if date: |
142 # no space between option and date string |
143 # no space between option and date string |
143 cmd.append('-d>%s' % date) |
144 cmd.append('-d>%s' % date) |
144 cmd.append(directory) |
145 cmd.append(directory) |
204 if match: |
205 if match: |
205 rev = [int(x) for x in match.group(2).split('.')] |
206 rev = [int(x) for x in match.group(2).split('.')] |
206 |
207 |
207 # Convert magic branch number to an odd-numbered one |
208 # Convert magic branch number to an odd-numbered one |
208 revn = len(rev) |
209 revn = len(rev) |
209 if revn>3 and (revn%2) == 0 and rev[-2] == 0: |
210 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0: |
210 rev = rev[:-2]+rev[-1:] |
211 rev = rev[:-2] + rev[-1:] |
211 rev = tuple(rev) |
212 rev = tuple(rev) |
212 |
213 |
213 if rev not in tags: |
214 if rev not in tags: |
214 tags[rev] = [] |
215 tags[rev] = [] |
215 tags[rev].append(match.group(1)) |
216 tags[rev].append(match.group(1)) |
242 match = re_60.match(line) |
243 match = re_60.match(line) |
243 assert match, _('revision must be followed by date line') |
244 assert match, _('revision must be followed by date line') |
244 d = match.group(1) |
245 d = match.group(1) |
245 if d[2] == '/': |
246 if d[2] == '/': |
246 # Y2K |
247 # Y2K |
247 d = '19'+d |
248 d = '19' + d |
248 |
249 |
249 if len(d.split()) != 3: |
250 if len(d.split()) != 3: |
250 # cvs log dates always in GMT |
251 # cvs log dates always in GMT |
251 d = d+' UTC' |
252 d = d + ' UTC' |
252 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S']) |
253 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S']) |
253 e.author = scache(match.group(2)) |
254 e.author = scache(match.group(2)) |
254 e.dead = match.group(3).lower() == 'dead' |
255 e.dead = match.group(3).lower() == 'dead' |
255 |
256 |
256 if match.group(5): |
257 if match.group(5): |
264 e.lines = None |
265 e.lines = None |
265 e.comment = [] |
266 e.comment = [] |
266 state = 7 |
267 state = 7 |
267 |
268 |
268 elif state == 7: |
269 elif state == 7: |
269 # read the revision numbers of branches that start at this revision, |
270 # read the revision numbers of branches that start at this revision |
270 # or store the commit log message otherwise |
271 # or store the commit log message otherwise |
271 m = re_70.match(line) |
272 m = re_70.match(line) |
272 if m: |
273 if m: |
273 e.branches = [tuple([int(y) for y in x.strip().split('.')]) |
274 e.branches = [tuple([int(y) for y in x.strip().split('.')]) |
274 for x in m.group(1).split(';')] |
275 for x in m.group(1).split(';')] |
299 e.tags = [scache(x) for x in tags.get(e.revision, [])] |
300 e.tags = [scache(x) for x in tags.get(e.revision, [])] |
300 e.tags.sort() |
301 e.tags.sort() |
301 e.comment = scache('\n'.join(e.comment)) |
302 e.comment = scache('\n'.join(e.comment)) |
302 |
303 |
303 revn = len(e.revision) |
304 revn = len(e.revision) |
304 if revn>3 and (revn%2) == 0: |
305 if revn > 3 and (revn % 2) == 0: |
305 e.branch = tags.get(e.revision[:-1], [None])[0] |
306 e.branch = tags.get(e.revision[:-1], [None])[0] |
306 else: |
307 else: |
307 e.branch = None |
308 e.branch = None |
308 |
309 |
309 log.append(e) |
310 log.append(e) |
310 |
311 |
311 if len(log)%100 == 0: |
312 if len(log) % 100 == 0: |
312 ui.status(util.ellipsis('%d %s'%(len(log), e.file), 80)+'\n') |
313 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n') |
313 |
314 |
314 listsort(log, key=lambda x:(x.rcs, x.revision)) |
315 listsort(log, key=lambda x:(x.rcs, x.revision)) |
315 |
316 |
316 # find parent revisions of individual files |
317 # find parent revisions of individual files |
317 versions = {} |
318 versions = {} |
328 if log: |
329 if log: |
329 # join up the old and new logs |
330 # join up the old and new logs |
330 listsort(log, key=lambda x:x.date) |
331 listsort(log, key=lambda x:x.date) |
331 |
332 |
332 if oldlog and oldlog[-1].date >= log[0].date: |
333 if oldlog and oldlog[-1].date >= log[0].date: |
333 raise logerror('Log cache overlaps with new log entries, re-run without cache.') |
334 raise logerror('Log cache overlaps with new log entries,' |
334 |
335 ' re-run without cache.') |
335 log = oldlog+log |
336 |
|
337 log = oldlog + log |
336 |
338 |
337 # write the new cachefile |
339 # write the new cachefile |
338 ui.note(_('writing cvs log cache %s\n') % cachefile) |
340 ui.note(_('writing cvs log cache %s\n') % cachefile) |
339 pickle.dump(log, file(cachefile, 'w')) |
341 pickle.dump(log, file(cachefile, 'w')) |
340 else: |
342 else: |
375 # Check if log entry belongs to the current changeset or not. |
377 # Check if log entry belongs to the current changeset or not. |
376 if not (c and |
378 if not (c and |
377 e.comment == c.comment and |
379 e.comment == c.comment and |
378 e.author == c.author and |
380 e.author == c.author and |
379 e.branch == c.branch and |
381 e.branch == c.branch and |
380 (c.date[0]+c.date[1]) <= (e.date[0]+e.date[1]) <= (c.date[0]+c.date[1])+fuzz and |
382 ((c.date[0] + c.date[1]) <= |
|
383 (e.date[0] + e.date[1]) <= |
|
384 (c.date[0] + c.date[1]) + fuzz) and |
381 e.file not in files): |
385 e.file not in files): |
382 c = changeset(comment=e.comment, author=e.author, |
386 c = changeset(comment=e.comment, author=e.author, |
383 branch=e.branch, date=e.date, entries=[]) |
387 branch=e.branch, date=e.date, entries=[]) |
384 changesets.append(c) |
388 changesets.append(c) |
385 files = {} |
389 files = {} |
386 if len(changesets)%100 == 0: |
390 if len(changesets) % 100 == 0: |
387 ui.status(util.ellipsis('%d %s'%(len(changesets), repr(e.comment)[1:-1]), 80)+'\n') |
391 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1]) |
|
392 ui.status(util.ellipsis(t, 80) + '\n') |
388 |
393 |
389 e.Changeset = c |
394 e.Changeset = c |
390 c.entries.append(e) |
395 c.entries.append(e) |
391 files[e.file] = True |
396 files[e.file] = True |
392 c.date = e.date # changeset date is date of latest commit in it |
397 c.date = e.date # changeset date is date of latest commit in it |
400 r = r.split('/') |
405 r = r.split('/') |
401 nl = len(l) |
406 nl = len(l) |
402 nr = len(r) |
407 nr = len(r) |
403 n = min(nl, nr) |
408 n = min(nl, nr) |
404 for i in range(n): |
409 for i in range(n): |
405 if i+1 == nl and nl<nr: |
410 if i + 1 == nl and nl < nr: |
406 return -1 |
411 return -1 |
407 elif i+1 == nr and nl>nr: |
412 elif i + 1 == nr and nl > nr: |
408 return +1 |
413 return +1 |
409 elif l[i]<r[i]: |
414 elif l[i] < r[i]: |
410 return -1 |
415 return -1 |
411 elif l[i]>r[i]: |
416 elif l[i] > r[i]: |
412 return +1 |
417 return +1 |
413 return 0 |
418 return 0 |
414 def entitycompare(l, r): |
419 def entitycompare(l, r): |
415 return pathcompare(l.file, r.file) |
420 return pathcompare(l.file, r.file) |
416 |
421 |
417 c.entries.sort(entitycompare) |
422 c.entries.sort(entitycompare) |
418 |
423 |
419 # Sort changesets by date |
424 # Sort changesets by date |
420 |
425 |
421 def cscmp(l, r): |
426 def cscmp(l, r): |
422 d = sum(l.date)-sum(r.date) |
427 d = sum(l.date) - sum(r.date) |
423 if d: |
428 if d: |
424 return d |
429 return d |
425 |
430 |
426 # detect vendor branches and initial commits on a branch |
431 # detect vendor branches and initial commits on a branch |
427 le = {} |
432 le = {} |
524 if m in branches and c.branch != m: |
529 if m in branches and c.branch != m: |
525 # insert empty changeset for merge |
530 # insert empty changeset for merge |
526 cc = changeset(author=c.author, branch=m, date=c.date, |
531 cc = changeset(author=c.author, branch=m, date=c.date, |
527 comment='convert-repo: CVS merge from branch %s' % c.branch, |
532 comment='convert-repo: CVS merge from branch %s' % c.branch, |
528 entries=[], tags=[], parents=[changesets[branches[m]], c]) |
533 entries=[], tags=[], parents=[changesets[branches[m]], c]) |
529 changesets.insert(i+1, cc) |
534 changesets.insert(i + 1, cc) |
530 branches[m] = i+1 |
535 branches[m] = i + 1 |
531 |
536 |
532 # adjust our loop counters now we have inserted a new entry |
537 # adjust our loop counters now we have inserted a new entry |
533 n += 1 |
538 n += 1 |
534 i += 2 |
539 i += 2 |
535 continue |
540 continue |