20 from . import shallowutil |
20 from . import shallowutil |
21 |
21 |
# Convenience alias used by the decorated cached properties below.
propertycache = util.propertycache

# Timeout (in seconds) for the fastlog fast path — TODO confirm against the
# code that consumes this constant; it is not used in the visible chunk.
FASTLOG_TIMEOUT_IN_SECS = 0.5
24 |
24 |
|
25 |
25 class remotefilectx(context.filectx): |
26 class remotefilectx(context.filectx): |
26 def __init__(self, repo, path, changeid=None, fileid=None, |
27 def __init__( |
27 filelog=None, changectx=None, ancestormap=None): |
28 self, |
|
29 repo, |
|
30 path, |
|
31 changeid=None, |
|
32 fileid=None, |
|
33 filelog=None, |
|
34 changectx=None, |
|
35 ancestormap=None, |
|
36 ): |
28 if fileid == nullrev: |
37 if fileid == nullrev: |
29 fileid = nullid |
38 fileid = nullid |
30 if fileid and len(fileid) == 40: |
39 if fileid and len(fileid) == 40: |
31 fileid = bin(fileid) |
40 fileid = bin(fileid) |
32 super(remotefilectx, self).__init__(repo, path, changeid, |
41 super(remotefilectx, self).__init__( |
33 fileid, filelog, changectx) |
42 repo, path, changeid, fileid, filelog, changectx |
|
43 ) |
34 self._ancestormap = ancestormap |
44 self._ancestormap = ancestormap |
35 |
45 |
36 def size(self): |
46 def size(self): |
37 return self._filelog.size(self._filenode) |
47 return self._filelog.size(self._filenode) |
38 |
48 |
43 elif r'_changectx' in self.__dict__: |
53 elif r'_changectx' in self.__dict__: |
44 return self._changectx.rev() |
54 return self._changectx.rev() |
45 elif r'_descendantrev' in self.__dict__: |
55 elif r'_descendantrev' in self.__dict__: |
46 # this file context was created from a revision with a known |
56 # this file context was created from a revision with a known |
47 # descendant, we can (lazily) correct for linkrev aliases |
57 # descendant, we can (lazily) correct for linkrev aliases |
48 linknode = self._adjustlinknode(self._path, self._filelog, |
58 linknode = self._adjustlinknode( |
49 self._filenode, self._descendantrev) |
59 self._path, self._filelog, self._filenode, self._descendantrev |
|
60 ) |
50 return self._repo.unfiltered().changelog.rev(linknode) |
61 return self._repo.unfiltered().changelog.rev(linknode) |
51 else: |
62 else: |
52 return self.linkrev() |
63 return self.linkrev() |
53 |
64 |
54 def filectx(self, fileid, changeid=None): |
65 def filectx(self, fileid, changeid=None): |
55 '''opens an arbitrary revision of the file without |
66 '''opens an arbitrary revision of the file without |
56 opening a new filelog''' |
67 opening a new filelog''' |
57 return remotefilectx(self._repo, self._path, fileid=fileid, |
68 return remotefilectx( |
58 filelog=self._filelog, changeid=changeid) |
69 self._repo, |
|
70 self._path, |
|
71 fileid=fileid, |
|
72 filelog=self._filelog, |
|
73 changeid=changeid, |
|
74 ) |
59 |
75 |
60 def linkrev(self): |
76 def linkrev(self): |
61 return self._linkrev |
77 return self._linkrev |
62 |
78 |
63 @propertycache |
79 @propertycache |
77 cl = self._repo.unfiltered().changelog |
93 cl = self._repo.unfiltered().changelog |
78 mfl = self._repo.manifestlog |
94 mfl = self._repo.manifestlog |
79 |
95 |
80 for rev in range(len(cl) - 1, 0, -1): |
96 for rev in range(len(cl) - 1, 0, -1): |
81 node = cl.node(rev) |
97 node = cl.node(rev) |
82 data = cl.read(node) # get changeset data (we avoid object creation) |
98 data = cl.read( |
83 if path in data[3]: # checking the 'files' field. |
99 node |
|
100 ) # get changeset data (we avoid object creation) |
|
101 if path in data[3]: # checking the 'files' field. |
84 # The file has been touched, check if the hash is what we're |
102 # The file has been touched, check if the hash is what we're |
85 # looking for. |
103 # looking for. |
86 if fileid == mfl[data[0]].readfast().get(path): |
104 if fileid == mfl[data[0]].readfast().get(path): |
87 return rev |
105 return rev |
88 |
106 |
102 lkr = self.linkrev() |
120 lkr = self.linkrev() |
103 attrs = vars(self) |
121 attrs = vars(self) |
104 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs) |
122 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs) |
105 if noctx or self.rev() == lkr: |
123 if noctx or self.rev() == lkr: |
106 return lkr |
124 return lkr |
107 linknode = self._adjustlinknode(self._path, self._filelog, |
125 linknode = self._adjustlinknode( |
108 self._filenode, self.rev(), |
126 self._path, |
109 inclusive=True) |
127 self._filelog, |
|
128 self._filenode, |
|
129 self.rev(), |
|
130 inclusive=True, |
|
131 ) |
110 return self._repo.changelog.rev(linknode) |
132 return self._repo.changelog.rev(linknode) |
111 |
133 |
112 def renamed(self): |
134 def renamed(self): |
113 """check if file was actually renamed in this changeset revision |
135 """check if file was actually renamed in this changeset revision |
114 |
136 |
153 p1, p2, linknode, copyfrom = ancestormap[self._filenode] |
175 p1, p2, linknode, copyfrom = ancestormap[self._filenode] |
154 results = [] |
176 results = [] |
155 if p1 != nullid: |
177 if p1 != nullid: |
156 path = copyfrom or self._path |
178 path = copyfrom or self._path |
157 flog = repo.file(path) |
179 flog = repo.file(path) |
158 p1ctx = remotefilectx(repo, path, fileid=p1, filelog=flog, |
180 p1ctx = remotefilectx( |
159 ancestormap=ancestormap) |
181 repo, path, fileid=p1, filelog=flog, ancestormap=ancestormap |
|
182 ) |
160 p1ctx._descendantrev = self.rev() |
183 p1ctx._descendantrev = self.rev() |
161 results.append(p1ctx) |
184 results.append(p1ctx) |
162 |
185 |
163 if p2 != nullid: |
186 if p2 != nullid: |
164 path = self._path |
187 path = self._path |
165 flog = repo.file(path) |
188 flog = repo.file(path) |
166 p2ctx = remotefilectx(repo, path, fileid=p2, filelog=flog, |
189 p2ctx = remotefilectx( |
167 ancestormap=ancestormap) |
190 repo, path, fileid=p2, filelog=flog, ancestormap=ancestormap |
|
191 ) |
168 p2ctx._descendantrev = self.rev() |
192 p2ctx._descendantrev = self.rev() |
169 results.append(p2ctx) |
193 results.append(p2ctx) |
170 |
194 |
171 return results |
195 return results |
172 |
196 |
173 def _nodefromancrev(self, ancrev, cl, mfl, path, fnode): |
197 def _nodefromancrev(self, ancrev, cl, mfl, path, fnode): |
174 """returns the node for <path> in <ancrev> if content matches <fnode>""" |
198 """returns the node for <path> in <ancrev> if content matches <fnode>""" |
175 ancctx = cl.read(ancrev) # This avoids object creation. |
199 ancctx = cl.read(ancrev) # This avoids object creation. |
176 manifestnode, files = ancctx[0], ancctx[3] |
200 manifestnode, files = ancctx[0], ancctx[3] |
177 # If the file was touched in this ancestor, and the content is similar |
201 # If the file was touched in this ancestor, and the content is similar |
178 # to the one we are searching for. |
202 # to the one we are searching for. |
179 if path in files and fnode == mfl[manifestnode].readfast().get(path): |
203 if path in files and fnode == mfl[manifestnode].readfast().get(path): |
180 return cl.node(ancrev) |
204 return cl.node(ancrev) |
212 linknode = ancestormap[fnode][2] |
236 linknode = ancestormap[fnode][2] |
213 |
237 |
214 if srcrev is None: |
238 if srcrev is None: |
215 # wctx case, used by workingfilectx during mergecopy |
239 # wctx case, used by workingfilectx during mergecopy |
216 revs = [p.rev() for p in self._repo[None].parents()] |
240 revs = [p.rev() for p in self._repo[None].parents()] |
217 inclusive = True # we skipped the real (revless) source |
241 inclusive = True # we skipped the real (revless) source |
218 else: |
242 else: |
219 revs = [srcrev] |
243 revs = [srcrev] |
220 |
244 |
221 if self._verifylinknode(revs, linknode): |
245 if self._verifylinknode(revs, linknode): |
222 return linknode |
246 return linknode |
245 # prefetch |
269 # prefetch |
246 if not seenpublic and pc.phase(repo, ancrev) == phases.public: |
270 if not seenpublic and pc.phase(repo, ancrev) == phases.public: |
247 # TODO: there used to be a codepath to fetch linknodes |
271 # TODO: there used to be a codepath to fetch linknodes |
248 # from a server as a fast path, but it appeared to |
272 # from a server as a fast path, but it appeared to |
249 # depend on an API FB added to their phabricator. |
273 # depend on an API FB added to their phabricator. |
250 lnode = self._forceprefetch(repo, path, fnode, revs, |
274 lnode = self._forceprefetch( |
251 commonlogkwargs) |
275 repo, path, fnode, revs, commonlogkwargs |
|
276 ) |
252 if lnode: |
277 if lnode: |
253 return lnode |
278 return lnode |
254 seenpublic = True |
279 seenpublic = True |
255 |
280 |
256 return linknode |
281 return linknode |
257 |
282 |
258 def _forceprefetch(self, repo, path, fnode, revs, |
283 def _forceprefetch(self, repo, path, fnode, revs, commonlogkwargs): |
259 commonlogkwargs): |
|
260 # This next part is super non-obvious, so big comment block time! |
284 # This next part is super non-obvious, so big comment block time! |
261 # |
285 # |
262 # It is possible to get extremely bad performance here when a fairly |
286 # It is possible to get extremely bad performance here when a fairly |
263 # common set of circumstances occur when this extension is combined |
287 # common set of circumstances occur when this extension is combined |
264 # with a server-side commit rewriting extension like pushrebase. |
288 # with a server-side commit rewriting extension like pushrebase. |
305 |
329 |
306 # Now that we've downloaded a new blob from the server, |
330 # Now that we've downloaded a new blob from the server, |
307 # we need to rebuild the ancestor map to recompute the |
331 # we need to rebuild the ancestor map to recompute the |
308 # linknodes. |
332 # linknodes. |
309 self._ancestormap = None |
333 self._ancestormap = None |
310 linknode = self.ancestormap()[fnode][2] # 2 is linknode |
334 linknode = self.ancestormap()[fnode][2] # 2 is linknode |
311 if self._verifylinknode(revs, linknode): |
335 if self._verifylinknode(revs, linknode): |
312 logmsg = 'remotefilelog prefetching succeeded' |
336 logmsg = 'remotefilelog prefetching succeeded' |
313 return linknode |
337 return linknode |
314 logmsg = 'remotefilelog prefetching not found' |
338 logmsg = 'remotefilelog prefetching not found' |
315 return None |
339 return None |
316 except Exception as e: |
340 except Exception as e: |
317 logmsg = 'remotefilelog prefetching failed (%s)' % e |
341 logmsg = 'remotefilelog prefetching failed (%s)' % e |
318 return None |
342 return None |
319 finally: |
343 finally: |
320 elapsed = time.time() - start |
344 elapsed = time.time() - start |
321 repo.ui.log('linkrevfixup', logmsg + '\n', elapsed=elapsed * 1000, |
345 repo.ui.log( |
322 **commonlogkwargs) |
346 'linkrevfixup', |
|
347 logmsg + '\n', |
|
348 elapsed=elapsed * 1000, |
|
349 **commonlogkwargs |
|
350 ) |
323 |
351 |
324 def _verifylinknode(self, revs, linknode): |
352 def _verifylinknode(self, revs, linknode): |
325 """ |
353 """ |
326 Check if a linknode is correct one for the current history. |
354 Check if a linknode is correct one for the current history. |
327 |
355 |
368 # Remove self |
396 # Remove self |
369 ancestors.pop(0) |
397 ancestors.pop(0) |
370 |
398 |
371 # Sort by linkrev |
399 # Sort by linkrev |
372 # The copy tracing algorithm depends on these coming out in order |
400 # The copy tracing algorithm depends on these coming out in order |
373 ancestors = sorted(ancestors, reverse=True, key=lambda x:x.linkrev()) |
401 ancestors = sorted(ancestors, reverse=True, key=lambda x: x.linkrev()) |
374 |
402 |
375 for ancestor in ancestors: |
403 for ancestor in ancestors: |
376 yield ancestor |
404 yield ancestor |
377 |
405 |
378 def ancestor(self, fc2, actx): |
406 def ancestor(self, fc2, actx): |
402 a = (self.path(), self.filenode()) |
430 a = (self.path(), self.filenode()) |
403 b = (fc2.path(), fc2.filenode()) |
431 b = (fc2.path(), fc2.filenode()) |
404 result = ancestor.genericancestor(a, b, parents) |
432 result = ancestor.genericancestor(a, b, parents) |
405 if result: |
433 if result: |
406 f, n = result |
434 f, n = result |
407 r = remotefilectx(self._repo, f, fileid=n, |
435 r = remotefilectx(self._repo, f, fileid=n, ancestormap=amap) |
408 ancestormap=amap) |
|
409 return r |
436 return r |
410 |
437 |
411 return None |
438 return None |
412 |
439 |
413 def annotate(self, *args, **kwargs): |
440 def annotate(self, *args, **kwargs): |
415 prefetchskip = kwargs.pop(r'prefetchskip', None) |
442 prefetchskip = kwargs.pop(r'prefetchskip', None) |
416 if prefetchskip: |
443 if prefetchskip: |
417 # use introrev so prefetchskip can be accurately tested |
444 # use introrev so prefetchskip can be accurately tested |
418 introrev = self.introrev() |
445 introrev = self.introrev() |
419 if self.rev() != introrev: |
446 if self.rev() != introrev: |
420 introctx = remotefilectx(self._repo, self._path, |
447 introctx = remotefilectx( |
421 changeid=introrev, |
448 self._repo, |
422 fileid=self._filenode, |
449 self._path, |
423 filelog=self._filelog, |
450 changeid=introrev, |
424 ancestormap=self._ancestormap) |
451 fileid=self._filenode, |
|
452 filelog=self._filelog, |
|
453 ancestormap=self._ancestormap, |
|
454 ) |
425 |
455 |
426 # like self.ancestors, but append to "fetch" and skip visiting parents |
456 # like self.ancestors, but append to "fetch" and skip visiting parents |
427 # of nodes in "prefetchskip". |
457 # of nodes in "prefetchskip". |
428 fetch = [] |
458 fetch = [] |
429 seen = set() |
459 seen = set() |
440 for parent in current.parents(): |
470 for parent in current.parents(): |
441 if parent.node() not in seen: |
471 if parent.node() not in seen: |
442 seen.add(parent.node()) |
472 seen.add(parent.node()) |
443 queue.append(parent) |
473 queue.append(parent) |
444 |
474 |
445 self._repo.ui.debug('remotefilelog: prefetching %d files ' |
475 self._repo.ui.debug( |
446 'for annotate\n' % len(fetch)) |
476 'remotefilelog: prefetching %d files ' 'for annotate\n' % len(fetch) |
|
477 ) |
447 if fetch: |
478 if fetch: |
448 self._repo.fileservice.prefetch(fetch) |
479 self._repo.fileservice.prefetch(fetch) |
449 return super(remotefilectx, self).annotate(*args, **kwargs) |
480 return super(remotefilectx, self).annotate(*args, **kwargs) |
450 |
481 |
451 # Return empty set so that the hg serve and thg don't stack trace |
482 # Return empty set so that the hg serve and thg don't stack trace |
452 def children(self): |
483 def children(self): |
453 return [] |
484 return [] |
454 |
485 |
|
486 |
455 class remoteworkingfilectx(context.workingfilectx, remotefilectx): |
487 class remoteworkingfilectx(context.workingfilectx, remotefilectx): |
456 def __init__(self, repo, path, filelog=None, workingctx=None): |
488 def __init__(self, repo, path, filelog=None, workingctx=None): |
457 self._ancestormap = None |
489 self._ancestormap = None |
458 super(remoteworkingfilectx, self).__init__(repo, path, filelog, |
490 super(remoteworkingfilectx, self).__init__( |
459 workingctx) |
491 repo, path, filelog, workingctx |
|
492 ) |
460 |
493 |
461 def parents(self): |
494 def parents(self): |
462 return remotefilectx.parents(self) |
495 return remotefilectx.parents(self) |
463 |
496 |
464 def ancestormap(self): |
497 def ancestormap(self): |