@@ -152,41 +152,63 @@
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 
 configitem(
-    b'infinitepush', b'server', default=False,
+    b'infinitepush',
+    b'server',
+    default=False,
 )
 configitem(
-    b'infinitepush', b'storetype', default=b'',
+    b'infinitepush',
+    b'storetype',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'indextype', default=b'',
+    b'infinitepush',
+    b'indextype',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'indexpath', default=b'',
+    b'infinitepush',
+    b'indexpath',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'storeallparts', default=False,
+    b'infinitepush',
+    b'storeallparts',
+    default=False,
 )
 configitem(
-    b'infinitepush', b'reponame', default=b'',
+    b'infinitepush',
+    b'reponame',
+    default=b'',
 )
 configitem(
-    b'scratchbranch', b'storepath', default=b'',
+    b'scratchbranch',
+    b'storepath',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'branchpattern', default=b'',
+    b'infinitepush',
+    b'branchpattern',
+    default=b'',
 )
 configitem(
-    b'infinitepush', b'pushtobundlestore', default=False,
+    b'infinitepush',
+    b'pushtobundlestore',
+    default=False,
 )
 configitem(
-    b'experimental', b'server-bundlestore-bookmark', default=b'',
+    b'experimental',
+    b'server-bundlestore-bookmark',
+    default=b'',
 )
 configitem(
-    b'experimental', b'infinitepush-scratchpush', default=False,
+    b'experimental',
+    b'infinitepush-scratchpush',
+    default=False,
 )
 
 experimental = b'experimental'
 configbookmark = b'server-bundlestore-bookmark'
 configscratchpush = b'infinitepush-scratchpush'
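The hunk above only reflows the configitem() registrations; the option names and defaults are unchanged. For context, a minimal sketch of how options registered this way are read elsewhere, assuming a Mercurial ui object (the helper name is hypothetical, not part of the patch):

def _readserveroptions(ui):
    # ui.configbool/ui.config fall back to the defaults declared above.
    server = ui.configbool(b'infinitepush', b'server')      # False by default
    storetype = ui.config(b'infinitepush', b'storetype')    # b'' by default
    indextype = ui.config(b'infinitepush', b'indextype')    # b'' by default
    return server, storetype, indextype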
@@ -247,17 +269,17 @@
         raise error.Abort(_(b'invalid log level %s') % loglevel)
     return numeric_loglevel
 
 
 def _tryhoist(ui, remotebookmark):
-    '''returns a bookmarks with hoisted part removed
+    """returns a bookmarks with hoisted part removed
 
     Remotenames extension has a 'hoist' config that allows to use remote
     bookmarks without specifying remote path. For example, 'hg update master'
     works as well as 'hg update remote/master'. We want to allow the same in
     infinitepush.
-    '''
+    """
 
     if common.isremotebooksenabled(ui):
         hoist = ui.config(b'remotenames', b'hoistedpeer') + b'/'
         if remotebookmark.startswith(hoist):
            return remotebookmark[len(hoist) :]
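Only the docstring quoting changes in this hunk. As an aside, the hoist-stripping logic is simple enough to restate standalone; a sketch with hypothetical names and no Mercurial dependencies:

def strip_hoist(remotebookmark, hoistedpeer):
    # Mirrors _tryhoist: drop the 'hoistedpeer/' prefix when present,
    # otherwise return the bookmark unchanged.
    hoist = hoistedpeer + b'/'
    if remotebookmark.startswith(hoist):
        return remotebookmark[len(hoist):]
    return remotebookmark


assert strip_hoist(b'default/master', b'default') == b'master'
assert strip_hoist(b'master', b'default') == b'master'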
@@ -425,15 +447,15 @@
 def _readbundlerevs(bundlerepo):
     return list(bundlerepo.revs(b'bundle()'))
 
 
 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
-    '''Tells remotefilelog to include all changed files to the changegroup
+    """Tells remotefilelog to include all changed files to the changegroup
 
     By default remotefilelog doesn't include file content to the changegroup.
     But we need to include it if we are fetching from bundlestore.
-    '''
+    """
     changedfiles = set()
     cl = bundlerepo.changelog
     for r in bundlerevs:
         # [3] means changed files
         changedfiles.update(cl.read(r)[3])
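The "# [3] means changed files" comment refers to the tuple returned by changelog.read(); to the best of my knowledge that tuple is (manifest, user, (time, timezone), files, description, extra), so index 3 is the list of files touched by the revision. A small sketch of the same loop in isolation (helper name hypothetical):

def collect_changed_files(bundlerepo, bundlerevs):
    # changelog.read(rev)[3] is the per-revision list of changed files.
    changed = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        changed.update(cl.read(r)[3])
    return sorted(changed)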
@@ -455,15 +477,15 @@
 
     return newcaps
 
 
 def _rebundle(bundlerepo, bundleroots, unknownhead):
-    '''
+    """
     Bundle may include more revision then user requested. For example,
     if user asks for revision but bundle also consists its descendants.
     This function will filter out all revision that user is not requested.
-    '''
+    """
     parts = []
 
     version = b'02'
     outgoing = discovery.outgoing(
         bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
@@ -497,14 +519,14 @@
         len(bundleheads) == 1 and bundlerepo[bundleheads[0]].node() == head
     )
 
 
 def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
-    '''generates bundle that will be send to the user
+    """generates bundle that will be send to the user
 
     returns tuple with raw bundle string and bundle type
-    '''
+    """
     parts = []
     if not _needsrebundling(head, bundlerepo):
         with util.posixfile(bundlefile, b"rb") as f:
             unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
             if isinstance(unbundler, changegroup.cg1unpacker):
@@ -1020,11 +1042,16 @@
                         rpart.addparam(
                             b'in-reply-to', b'%d' % part.id, mandatory=False
                         )
                         rpart.addparam(b'return', b'1', mandatory=False)
 
-            op.records.add(part.type, {b'return': 1,})
+            op.records.add(
+                part.type,
+                {
+                    b'return': 1,
+                },
+            )
             if bundlepart:
                 bundler.addpart(bundlepart)
 
     # storing the bundle in the bundlestore
     buf = util.chunkbuffer(bundler.getchunks())
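For context on the last line of this hunk: bundler is a bundle2.bundle20, and util.chunkbuffer wraps the iterator returned by getchunks() in a file-like object, so the serialized bundle can be consumed in one read before being written to the bundlestore. A hedged sketch (hypothetical helper name):

def bundle_bytes(bundler):
    # chunkbuffer exposes read() over an iterator of byte chunks; calling
    # read() with no argument consumes everything, yielding the whole bundle.
    return util.chunkbuffer(bundler.getchunks()).read()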
@@ -1110,11 +1137,16 @@
                             rpart.addparam(b'return', b'1', mandatory=False)
             else:
                 bundle2._processpart(op, part)
 
             if handleallparts:
-                op.records.add(part.type, {b'return': 1,})
+                op.records.add(
+                    part.type,
+                    {
+                        b'return': 1,
+                    },
+                )
             if bundlepart:
                 bundler.addpart(bundlepart)
 
     # If commits were sent, store them
     if cgparams:
@@ -1282,18 +1314,18 @@
         b'pushkey', mandatoryparams=pycompat.iteritems(params)
     )
 
 
 def bundle2pushkey(orig, op, part):
-    '''Wrapper of bundle2.handlepushkey()
+    """Wrapper of bundle2.handlepushkey()
 
     The only goal is to skip calling the original function if flag is set.
     It's set if infinitepush push is happening.
-    '''
+    """
     if op.records[scratchbranchparttype + b'_skippushkey']:
         if op.reply is not None:
             rpart = op.reply.newpart(b'reply:pushkey')
             rpart.addparam(b'in-reply-to', str(part.id), mandatory=False)
             rpart.addparam(b'return', b'1', mandatory=False)
         return 1
 
     return orig(op, part)
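bundle2pushkey() consults the *_skippushkey flag that processparts() records via op.records (see the hunk around line 1137 above). The wiring that makes bundle2 call this wrapper is not part of the diff; below is a hedged sketch of one way to install it, by swapping the registered part handler. Names and placement are illustrative, not a claim about this extension's actual setup code:

from mercurial import bundle2

def install_pushkey_wrapper():
    # Replace the registered b'pushkey' part handler with a closure that
    # forwards to bundle2pushkey(), passing the original handler as `orig`.
    orighandler = bundle2.parthandlermapping[b'pushkey']

    def wrappedpushkey(*args, **kwargs):
        return bundle2pushkey(orighandler, *args, **kwargs)

    wrappedpushkey.params = orighandler.params
    bundle2.parthandlermapping[b'pushkey'] = wrappedpushkey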
@@ -1300,26 +1332,26 @@
 
 
 def bundle2handlephases(orig, op, part):
-    '''Wrapper of bundle2.handlephases()
+    """Wrapper of bundle2.handlephases()
 
     The only goal is to skip calling the original function if flag is set.
     It's set if infinitepush push is happening.
-    '''
+    """
 
     if op.records[scratchbranchparttype + b'_skipphaseheads']:
         return
 
     return orig(op, part)
 
 
 def _asyncsavemetadata(root, nodes):
-    '''starts a separate process that fills metadata for the nodes
+    """starts a separate process that fills metadata for the nodes
 
     This function creates a separate process and doesn't wait for it's
     completion. This was done to avoid slowing down pushes
-    '''
+    """
 
     maxnodes = 50
     if len(nodes) > maxnodes:
         return
     nodesargs = []
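_asyncsavemetadata() is cut off at this point in the diff; its docstring describes a fire-and-forget child process that fills in metadata without blocking the push. A minimal sketch of that pattern using only the standard library, with a hypothetical worker module (this is not the extension's actual command line):

import subprocess
import sys

def spawn_metadata_worker(root, nodes):
    # Start the worker and return immediately; the push does not wait for it.
    cmd = [sys.executable, '-m', 'metadata_worker', root]
    cmd += [n.decode('ascii') for n in nodes]
    subprocess.Popen(
        cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, close_fds=True
    )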