5 # This software may be used and distributed according to the terms of the |
5 # This software may be used and distributed according to the terms of the |
6 # GNU General Public License version 2 or any later version. |
6 # GNU General Public License version 2 or any later version. |
7 |
7 |
8 from node import bin, hex, nullid, nullrev, short |
8 from node import bin, hex, nullid, nullrev, short |
9 from i18n import _ |
9 from i18n import _ |
10 import repo, changegroup, subrepo |
10 import repo, changegroup, subrepo, discovery |
11 import changelog, dirstate, filelog, manifest, context |
11 import changelog, dirstate, filelog, manifest, context |
12 import lock, transaction, store, encoding |
12 import lock, transaction, store, encoding |
13 import util, extensions, hook, error |
13 import util, extensions, hook, error |
14 import match as matchmod |
14 import match as matchmod |
15 import merge as mergemod |
15 import merge as mergemod |
1270 |
1270 |
1271 r.append(l) |
1271 r.append(l) |
1272 |
1272 |
1273 return r |
1273 return r |
1274 |
1274 |
def findincoming(self, remote, base=None, heads=None, force=False):
    """Return the list of roots of the changeset subsets missing from remote.

    Thin convenience wrapper around findcommonincoming(): all arguments
    are forwarded unchanged (so a supplied ``base`` dict is updated in
    place exactly as documented there), and only the "missing roots"
    element of the resulting triple is returned.

    All the ancestors of base are in self and in remote.
    All the descendants of the list returned are missing in self.
    (and so we know that the rest of the nodes are missing in remote,
    see outgoing)
    """
    # findcommonincoming returns (common, missing roots, remote heads);
    # callers of this wrapper only care about the missing roots.
    _common, missingroots, _rheads = self.findcommonincoming(
        remote, base, heads, force)
    return missingroots
|
1292 |
|
def findcommonincoming(self, remote, base=None, heads=None, force=False):
    """Return a tuple (common, missing roots, heads) used to identify
    missing nodes from remote.

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side and that no child of a node of base exists
    in both remote and self.
    Furthermore base will be updated to include the nodes that exists
    in self and remote but no children exists in self and remote.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads.

    All the ancestors of base are in self and in remote.
    """
    m = self.changelog.nodemap  # local node membership/lookup table
    search = []         # (head, root) ranges scheduled for binary search
    fetch = set()       # earliest-unknown nodes: roots of the missing sets
    seen = set()        # branch heads already examined
    seenbranch = set()  # full branch tuples already examined
    if base is None:
        base = {}

    if not heads:
        heads = remote.heads()

    # Empty local repository: everything the remote has is missing.
    if self.changelog.tip() == nullid:
        base[nullid] = 1
        if heads != [nullid]:
            return [nullid], [nullid], list(heads)
        return [nullid], [], []

    # assume we're closer to the tip than the root
    # and start by examining the heads
    self.ui.status(_("searching for changes\n"))

    # Partition the remote heads: locally-known ones are common,
    # the rest seed the discovery below.
    unknown = []
    for h in heads:
        if h not in m:
            unknown.append(h)
        else:
            base[h] = 1

    heads = unknown
    if not unknown:
        # Every remote head is known locally: nothing is missing.
        return base.keys(), [], []

    req = set(unknown)  # nodes we have already queried the remote about
    reqcnt = 0          # round-trip counter for progress/debug output

    # search through remote branches
    # a 'branch' here is a linear segment of history, with four parts:
    # head, root, first parent, second parent
    # (a branch always has two parents (or none) by definition)
    unknown = remote.branches(unknown)
    while unknown:
        r = []  # parents to ask the remote about in the next batch
        while unknown:
            n = unknown.pop(0)
            if n[0] in seen:
                continue

            self.ui.debug("examining %s:%s\n"
                          % (short(n[0]), short(n[1])))
            if n[0] == nullid: # found the end of the branch
                pass
            elif n in seenbranch:
                self.ui.debug("branch already found\n")
                continue
            elif n[1] and n[1] in m: # do we know the base?
                self.ui.debug("found incomplete branch %s:%s\n"
                              % (short(n[0]), short(n[1])))
                search.append(n[0:2]) # schedule branch range for scanning
                seenbranch.add(n)
            else:
                if n[1] not in seen and n[1] not in fetch:
                    if n[2] in m and n[3] in m:
                        # Both parents known locally: n[1] is a root of
                        # a missing subset.
                        self.ui.debug("found new changeset %s\n" %
                                      short(n[1]))
                        fetch.add(n[1]) # earliest unknown
                    for p in n[2:4]:
                        if p in m:
                            base[p] = 1 # latest known

                # Unknown parents we have not asked about yet go into
                # the next batched branches() request.
                for p in n[2:4]:
                    if p not in req and p not in m:
                        r.append(p)
                        req.add(p)
            seen.add(n[0])

        if r:
            reqcnt += 1
            self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
            self.ui.debug("request %d: %s\n" %
                          (reqcnt, " ".join(map(short, r))))
            # Query in batches of 10 to bound request size.
            # (xrange: this module is Python 2 code.)
            for p in xrange(0, len(r), 10):
                for b in remote.branches(r[p:p + 10]):
                    self.ui.debug("received %s:%s\n" %
                                  (short(b[0]), short(b[1])))
                    unknown.append(b)

    # do binary search on the branches we found
    while search:
        newsearch = []
        reqcnt += 1
        self.ui.progress(_('searching'), reqcnt, unit=_('queries'))
        for n, l in zip(search, remote.between(search)):
            # l is the remote's list of sample nodes for the (head, root)
            # range (presumably exponentially spaced — see remote.between);
            # append the root so the scan below always hits a known node.
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # Gap between p and the known node i is tiny:
                        # treat p as an earliest-unknown changeset.
                        self.ui.debug("found new branch changeset %s\n" %
                                      short(p))
                        fetch.add(p)
                        base[i] = 1
                    else:
                        # Still a wide gap: narrow the range and retry
                        # in the next round.
                        self.ui.debug("narrowed branch search to %s:%s\n"
                                      % (short(p), short(i)))
                        newsearch.append((p, i))
                    break
                p, f = i, f * 2
        search = newsearch

    # sanity check our fetch list
    for f in fetch:
        if f in m:
            # NOTE(review): short() already truncates; f[:4] cuts the
            # binary node to 4 bytes first, so only 8 hex digits are
            # shown here — looks intentional-but-odd, confirm upstream.
            raise error.RepoError(_("already have changeset ")
                                  + short(f[:4]))

    # Only nullid in common means the repositories share no history.
    # (Python 2: dict.keys() returns a list, so == [nullid] works.)
    if base.keys() == [nullid]:
        if force:
            self.ui.warn(_("warning: repository is unrelated\n"))
        else:
            raise util.Abort(_("repository is unrelated"))

    self.ui.debug("found new changesets starting at " +
                  " ".join([short(f) for f in fetch]) + "\n")

    self.ui.progress(_('searching'), None)
    self.ui.debug("%d total queries\n" % reqcnt)

    return base.keys(), list(fetch), heads
|
1437 |
|
def findoutgoing(self, remote, base=None, heads=None, force=False):
    """Return list of nodes that are roots of subsets not in remote

    If base dict is specified, assume that these nodes and their parents
    exist on the remote side.
    If a list of heads is specified, return only nodes which are heads
    or ancestors of these heads, and return a second element which
    contains all remote heads which get new children.
    """
    if base is None:
        base = {}
        self.findincoming(remote, base, heads, force=force)

    self.ui.debug("common changesets up to "
                  + " ".join(map(short, base.keys())) + "\n")

    # Every local node except nullid starts out as a candidate.
    candidates = set(self.changelog.nodemap)
    candidates.remove(nullid)

    # Prune everything the remote already has: walk the ancestors of
    # the common nodes and drop them from the candidate set.  Whatever
    # survives exists locally but is missing remotely.
    pending = list(base.keys())
    while pending:
        node = pending.pop()
        if node in candidates:
            candidates.discard(node)
            pending.extend(self.changelog.parents(node))

    # A surviving node with both parents pruned is the root of a
    # subset we would have to push.
    roots = [n for n in candidates
             if not any(p in candidates
                        for p in self.changelog.parents(n))]

    if not heads:
        return roots

    # Remote heads that are a parent of some root will get new
    # children from the push.
    updated_heads = set()
    for n in roots:
        for p in self.changelog.parents(n):
            if p in heads:
                updated_heads.add(p)
    return roots, list(updated_heads)
|
1485 |
|
1486 def pull(self, remote, heads=None, force=False): |
1275 def pull(self, remote, heads=None, force=False): |
1487 lock = self.lock() |
1276 lock = self.lock() |
1488 try: |
1277 try: |
1489 common, fetch, rheads = self.findcommonincoming(remote, heads=heads, |
1278 tmp = discovery.findcommonincoming(self, remote, heads=heads, |
1490 force=force) |
1279 force=force) |
|
1280 common, fetch, rheads = tmp |
1491 if not fetch: |
1281 if not fetch: |
1492 self.ui.status(_("no changes found\n")) |
1282 self.ui.status(_("no changes found\n")) |
1493 return 0 |
1283 return 0 |
1494 |
1284 |
1495 if fetch == [nullid]: |
1285 if fetch == [nullid]: |
1528 |
1318 |
1529 if remote.capable('unbundle'): |
1319 if remote.capable('unbundle'): |
1530 return self.push_unbundle(remote, force, revs, newbranch) |
1320 return self.push_unbundle(remote, force, revs, newbranch) |
1531 return self.push_addchangegroup(remote, force, revs, newbranch) |
1321 return self.push_addchangegroup(remote, force, revs, newbranch) |
1532 |
1322 |
def prepush(self, remote, force, revs, newbranch):
    '''Analyze the local and remote repositories and determine which
    changesets need to be pushed to the remote. Return value depends
    on circumstances:

    If we are not going to push anything, return a tuple (None,
    outgoing) where outgoing is 0 if there are no outgoing
    changesets and 1 if there are, but we refuse to push them
    (e.g. would create new remote heads).

    Otherwise, return a tuple (changegroup, remoteheads), where
    changegroup is a readable file-like object whose read() returns
    successive changegroup chunks ready to be sent over the wire and
    remoteheads is the list of remote heads.'''
    # 'common' is filled in place by findincoming/findoutgoing with
    # the nodes known to both sides.
    common = {}
    remote_heads = remote.heads()
    # inc: truthy when the remote has changes we lack; only used to
    # pick the right hint message when refusing the push.
    inc = self.findincoming(remote, common, remote_heads, force=force)

    cl = self.changelog
    # update: roots of the outgoing subsets;
    # updated_heads: remote heads that would gain new children.
    update, updated_heads = self.findoutgoing(remote, common, remote_heads)
    # outg: the outgoing changesets themselves, limited to ancestors
    # of 'revs' when a rev list is given.
    outg, bases, heads = cl.nodesbetween(update, revs)

    if not bases:
        self.ui.status(_("no changes found\n"))
        return None, 1

    if not force and remote_heads != [nullid]:

        def fail_multiple_heads(unsynced, branch=None):
            # Warn that the push would create new remote heads
            # (optionally naming the branch), hint at pull/merge or
            # push -f, and return the "refused" result tuple.
            if branch:
                msg = _("abort: push creates new remote heads"
                        " on branch '%s'!\n") % branch
            else:
                msg = _("abort: push creates new remote heads!\n")
            self.ui.warn(msg)
            if unsynced:
                self.ui.status(_("(you should pull and merge or"
                                 " use push -f to force)\n"))
            else:
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
            return None, 0

        if remote.capable('branchmap'):
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            # 1. Create set of branches involved in the push.
            branches = set(self[n].branch() for n in outg)

            # 2. Check for new branches on the remote.
            remotemap = remote.branchmap()
            newbranches = branches - set(remotemap)
            if newbranches and not newbranch: # new branch requires --new-branch
                branchnames = ', '.join("%s" % b for b in newbranches)
                self.ui.warn(_("abort: push creates "
                               "new remote branches: %s!\n")
                             % branchnames)
                self.ui.status(_("(use 'hg push --new-branch' to create new "
                                 "remote branches)\n"))
                return None, 0
            # Only pre-existing remote branches take part in the
            # head-count comparison below.
            branches.difference_update(newbranches)

            # 3. Construct the initial oldmap and newmap dicts.
            # They contain information about the remote heads before and
            # after the push, respectively.
            # Heads not found locally are not included in either dict,
            # since they won't be affected by the push.
            # unsynced contains all branches with incoming changesets.
            oldmap = {}
            newmap = {}
            unsynced = set()
            for branch in branches:
                remoteheads = remotemap[branch]
                prunedheads = [h for h in remoteheads if h in cl.nodemap]
                oldmap[branch] = prunedheads
                # copy so _updatebranchcache can mutate newmap freely
                newmap[branch] = list(prunedheads)
                if len(remoteheads) > len(prunedheads):
                    unsynced.add(branch)

            # 4. Update newmap with outgoing changes.
            # This will possibly add new heads and remove existing ones.
            ctxgen = (self[n] for n in outg)
            self._updatebranchcache(newmap, ctxgen)

            # 5. Check for new heads.
            # If there are more heads after the push than before, a suitable
            # warning, depending on unsynced status, is displayed.
            for branch in branches:
                if len(newmap[branch]) > len(oldmap[branch]):
                    return fail_multiple_heads(branch in unsynced, branch)

            # 6. Check for unsynced changes on involved branches.
            if unsynced:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        else:
            # Old servers: Check for new topological heads.
            # Code based on _updatebranchcache.
            newheads = set(h for h in remote_heads if h in cl.nodemap)
            oldheadcnt = len(newheads)
            newheads.update(outg)
            if len(newheads) > 1:
                # Drop every head that is an ancestor of a later
                # outgoing changeset; what remains are real heads.
                for latest in reversed(outg):
                    if latest not in newheads:
                        continue
                    minhrev = min(cl.rev(h) for h in newheads)
                    reachable = cl.reachable(latest, cl.node(minhrev))
                    reachable.remove(latest)
                    newheads.difference_update(reachable)
            if len(newheads) > oldheadcnt:
                return fail_multiple_heads(inc)
            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

    if revs is None:
        # use the fast path, no race possible on push
        nodes = self.changelog.findmissing(common.keys())
        cg = self._changegroup(nodes, 'push')
    else:
        cg = self.changegroupsubset(update, revs, 'push')
    return cg, remote_heads
|
1661 |
|
1662 def push_addchangegroup(self, remote, force, revs, newbranch): |
1323 def push_addchangegroup(self, remote, force, revs, newbranch): |
1663 '''Push a changegroup by locking the remote and sending the |
1324 '''Push a changegroup by locking the remote and sending the |
1664 addchangegroup command to it. Used for local and old SSH repos. |
1325 addchangegroup command to it. Used for local and old SSH repos. |
1665 Return an integer: see push(). |
1326 Return an integer: see push(). |
1666 ''' |
1327 ''' |
1667 lock = remote.lock() |
1328 lock = remote.lock() |
1668 try: |
1329 try: |
1669 ret = self.prepush(remote, force, revs, newbranch) |
1330 ret = discovery.prepush(self, remote, force, revs, newbranch) |
1670 if ret[0] is not None: |
1331 if ret[0] is not None: |
1671 cg, remote_heads = ret |
1332 cg, remote_heads = ret |
1672 # here, we return an integer indicating remote head count change |
1333 # here, we return an integer indicating remote head count change |
1673 return remote.addchangegroup(cg, 'push', self.url()) |
1334 return remote.addchangegroup(cg, 'push', self.url()) |
1674 # and here we return 0 for "nothing to push" or 1 for |
1335 # and here we return 0 for "nothing to push" or 1 for |