merge with stable
author Yuya Nishihara <yuya@tcha.org>
date Fri, 25 Jan 2019 18:55:23 +0900
changeset 41351 0ae3ddb4fbed
parent 41347 40787a96fda7 (diff)
parent 41350 ab0d762d89ef (current diff)
child 41352 73ccba60aaa1
merge with stable
--- a/contrib/chg/hgclient.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/contrib/chg/hgclient.c	Fri Jan 25 18:55:23 2019 +0900
@@ -84,8 +84,9 @@
 
 static void enlargecontext(context_t *ctx, size_t newsize)
 {
-	if (newsize <= ctx->maxdatasize)
+	if (newsize <= ctx->maxdatasize) {
 		return;
+	}
 
 	newsize = defaultdatasize *
 	          ((newsize + defaultdatasize - 1) / defaultdatasize);
@@ -117,22 +118,25 @@
 
 	uint32_t datasize_n;
 	rsize = recv(hgc->sockfd, &datasize_n, sizeof(datasize_n), 0);
-	if (rsize != sizeof(datasize_n))
+	if (rsize != sizeof(datasize_n)) {
 		abortmsg("failed to read data size");
+	}
 
 	/* datasize denotes the maximum size to write if input request */
 	hgc->ctx.datasize = ntohl(datasize_n);
 	enlargecontext(&hgc->ctx, hgc->ctx.datasize);
 
-	if (isupper(hgc->ctx.ch) && hgc->ctx.ch != 'S')
+	if (isupper(hgc->ctx.ch) && hgc->ctx.ch != 'S') {
 		return; /* assumes input request */
+	}
 
 	size_t cursize = 0;
 	while (cursize < hgc->ctx.datasize) {
 		rsize = recv(hgc->sockfd, hgc->ctx.data + cursize,
 		             hgc->ctx.datasize - cursize, 0);
-		if (rsize < 1)
+		if (rsize < 1) {
 			abortmsg("failed to read data block");
+		}
 		cursize += rsize;
 	}
 }
@@ -143,8 +147,9 @@
 	const char *const endp = p + datasize;
 	while (p < endp) {
 		ssize_t r = send(sockfd, p, endp - p, 0);
-		if (r < 0)
+		if (r < 0) {
 			abortmsgerrno("cannot communicate");
+		}
 		p += r;
 	}
 }
@@ -186,8 +191,9 @@
 		ctx->datasize += n;
 	}
 
-	if (ctx->datasize > 0)
+	if (ctx->datasize > 0) {
 		--ctx->datasize; /* strip last '\0' */
+	}
 }
 
 /* Extract '\0'-separated list of args to new buffer, terminated by NULL */
@@ -205,8 +211,9 @@
 		args[nargs] = s;
 		nargs++;
 		s = memchr(s, '\0', e - s);
-		if (!s)
+		if (!s) {
 			break;
+		}
 		s++;
 	}
 	args[nargs] = NULL;
@@ -225,8 +232,9 @@
 static void handlereadlinerequest(hgclient_t *hgc)
 {
 	context_t *ctx = &hgc->ctx;
-	if (!fgets(ctx->data, ctx->datasize, stdin))
+	if (!fgets(ctx->data, ctx->datasize, stdin)) {
 		ctx->data[0] = '\0';
+	}
 	ctx->datasize = strlen(ctx->data);
 	writeblock(hgc);
 }
@@ -239,8 +247,9 @@
 	ctx->data[ctx->datasize] = '\0'; /* terminate last string */
 
 	const char **args = unpackcmdargsnul(ctx);
-	if (!args[0] || !args[1] || !args[2])
+	if (!args[0] || !args[1] || !args[2]) {
 		abortmsg("missing type or command or cwd in system request");
+	}
 	if (strcmp(args[0], "system") == 0) {
 		debugmsg("run '%s' at '%s'", args[1], args[2]);
 		int32_t r = runshellcmd(args[1], args + 3, args[2]);
@@ -252,8 +261,9 @@
 		writeblock(hgc);
 	} else if (strcmp(args[0], "pager") == 0) {
 		setuppager(args[1], args + 3);
-		if (hgc->capflags & CAP_ATTACHIO)
+		if (hgc->capflags & CAP_ATTACHIO) {
 			attachio(hgc);
+		}
 		/* unblock the server */
 		static const char emptycmd[] = "\n";
 		sendall(hgc->sockfd, emptycmd, sizeof(emptycmd) - 1);
@@ -296,9 +306,10 @@
 			handlesystemrequest(hgc);
 			break;
 		default:
-			if (isupper(ctx->ch))
+			if (isupper(ctx->ch)) {
 				abortmsg("cannot handle response (ch = %c)",
 				         ctx->ch);
+			}
 		}
 	}
 }
@@ -308,8 +319,9 @@
 	unsigned int flags = 0;
 	while (s < e) {
 		const char *t = strchr(s, ' ');
-		if (!t || t > e)
+		if (!t || t > e) {
 			t = e;
+		}
 		const cappair_t *cap;
 		for (cap = captable; cap->flag; ++cap) {
 			size_t n = t - s;
@@ -346,11 +358,13 @@
 	const char *const dataend = ctx->data + ctx->datasize;
 	while (s < dataend) {
 		const char *t = strchr(s, ':');
-		if (!t || t[1] != ' ')
+		if (!t || t[1] != ' ') {
 			break;
+		}
 		const char *u = strchr(t + 2, '\n');
-		if (!u)
+		if (!u) {
 			u = dataend;
+		}
 		if (strncmp(s, "capabilities:", t - s + 1) == 0) {
 			hgc->capflags = parsecapabilities(t + 2, u);
 		} else if (strncmp(s, "pgid:", t - s + 1) == 0) {
@@ -367,8 +381,9 @@
 {
 	int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize, "chg[worker/%d]",
 	                 (int)getpid());
-	if (r < 0 || (size_t)r >= hgc->ctx.maxdatasize)
+	if (r < 0 || (size_t)r >= hgc->ctx.maxdatasize) {
 		abortmsg("insufficient buffer to write procname (r = %d)", r);
+	}
 	hgc->ctx.datasize = (size_t)r;
 	writeblockrequest(hgc, "setprocname");
 }
@@ -380,8 +395,9 @@
 	sendall(hgc->sockfd, chcmd, sizeof(chcmd) - 1);
 	readchannel(hgc);
 	context_t *ctx = &hgc->ctx;
-	if (ctx->ch != 'I')
+	if (ctx->ch != 'I') {
 		abortmsg("unexpected response for attachio (ch = %c)", ctx->ch);
+	}
 
 	static const int fds[3] = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO};
 	struct msghdr msgh;
@@ -399,23 +415,27 @@
 	memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));
 	msgh.msg_controllen = cmsg->cmsg_len;
 	ssize_t r = sendmsg(hgc->sockfd, &msgh, 0);
-	if (r < 0)
+	if (r < 0) {
 		abortmsgerrno("sendmsg failed");
+	}
 
 	handleresponse(hgc);
 	int32_t n;
-	if (ctx->datasize != sizeof(n))
+	if (ctx->datasize != sizeof(n)) {
 		abortmsg("unexpected size of attachio result");
+	}
 	memcpy(&n, ctx->data, sizeof(n));
 	n = ntohl(n);
-	if (n != sizeof(fds) / sizeof(fds[0]))
+	if (n != sizeof(fds) / sizeof(fds[0])) {
 		abortmsg("failed to send fds (n = %d)", n);
+	}
 }
 
 static void chdirtocwd(hgclient_t *hgc)
 {
-	if (!getcwd(hgc->ctx.data, hgc->ctx.maxdatasize))
+	if (!getcwd(hgc->ctx.data, hgc->ctx.maxdatasize)) {
 		abortmsgerrno("failed to getcwd");
+	}
 	hgc->ctx.datasize = strlen(hgc->ctx.data);
 	writeblockrequest(hgc, "chdir");
 }
@@ -440,8 +460,9 @@
 hgclient_t *hgc_open(const char *sockname)
 {
 	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
-	if (fd < 0)
+	if (fd < 0) {
 		abortmsgerrno("cannot create socket");
+	}
 
 	/* don't keep fd on fork(), so that it can be closed when the parent
 	 * process get terminated. */
@@ -456,34 +477,39 @@
 	{
 		const char *split = strrchr(sockname, '/');
 		if (split && split != sockname) {
-			if (split[1] == '\0')
+			if (split[1] == '\0') {
 				abortmsg("sockname cannot end with a slash");
+			}
 			size_t len = split - sockname;
 			char sockdir[len + 1];
 			memcpy(sockdir, sockname, len);
 			sockdir[len] = '\0';
 
 			bakfd = open(".", O_DIRECTORY);
-			if (bakfd == -1)
+			if (bakfd == -1) {
 				abortmsgerrno("cannot open cwd");
+			}
 
 			int r = chdir(sockdir);
-			if (r != 0)
+			if (r != 0) {
 				abortmsgerrno("cannot chdir %s", sockdir);
+			}
 
 			basename = split + 1;
 		}
 	}
-	if (strlen(basename) >= sizeof(addr.sun_path))
+	if (strlen(basename) >= sizeof(addr.sun_path)) {
 		abortmsg("sockname is too long: %s", basename);
+	}
 	strncpy(addr.sun_path, basename, sizeof(addr.sun_path));
 	addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
 
 	/* real connect */
 	int r = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 	if (r < 0) {
-		if (errno != ENOENT && errno != ECONNREFUSED)
+		if (errno != ENOENT && errno != ECONNREFUSED) {
 			abortmsgerrno("cannot connect to %s", sockname);
+		}
 	}
 	if (bakfd != -1) {
 		fchdirx(bakfd);
@@ -501,16 +527,21 @@
 	initcontext(&hgc->ctx);
 
 	readhello(hgc);
-	if (!(hgc->capflags & CAP_RUNCOMMAND))
+	if (!(hgc->capflags & CAP_RUNCOMMAND)) {
 		abortmsg("insufficient capability: runcommand");
-	if (hgc->capflags & CAP_SETPROCNAME)
+	}
+	if (hgc->capflags & CAP_SETPROCNAME) {
 		updateprocname(hgc);
-	if (hgc->capflags & CAP_ATTACHIO)
+	}
+	if (hgc->capflags & CAP_ATTACHIO) {
 		attachio(hgc);
-	if (hgc->capflags & CAP_CHDIR)
+	}
+	if (hgc->capflags & CAP_CHDIR) {
 		chdirtocwd(hgc);
-	if (hgc->capflags & CAP_SETUMASK2)
+	}
+	if (hgc->capflags & CAP_SETUMASK2) {
 		forwardumask(hgc);
+	}
 
 	return hgc;
 }
@@ -555,16 +586,18 @@
                           size_t argsize)
 {
 	assert(hgc);
-	if (!(hgc->capflags & CAP_VALIDATE))
+	if (!(hgc->capflags & CAP_VALIDATE)) {
 		return NULL;
+	}
 
 	packcmdargs(&hgc->ctx, args, argsize);
 	writeblockrequest(hgc, "validate");
 	handleresponse(hgc);
 
 	/* the server returns '\0' if it can handle our request */
-	if (hgc->ctx.datasize <= 1)
+	if (hgc->ctx.datasize <= 1) {
 		return NULL;
+	}
 
 	/* make sure the buffer is '\0' terminated */
 	enlargecontext(&hgc->ctx, hgc->ctx.datasize + 1);
@@ -599,8 +632,9 @@
 void hgc_attachio(hgclient_t *hgc)
 {
 	assert(hgc);
-	if (!(hgc->capflags & CAP_ATTACHIO))
+	if (!(hgc->capflags & CAP_ATTACHIO)) {
 		return;
+	}
 	attachio(hgc);
 }
 
@@ -613,8 +647,9 @@
 void hgc_setenv(hgclient_t *hgc, const char *const envp[])
 {
 	assert(hgc && envp);
-	if (!(hgc->capflags & CAP_SETENV))
+	if (!(hgc->capflags & CAP_SETENV)) {
 		return;
+	}
 	packcmdargs(&hgc->ctx, envp, /*argsize*/ -1);
 	writeblockrequest(hgc, "setenv");
 }
--- a/contrib/chg/procutil.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/contrib/chg/procutil.c	Fri Jan 25 18:55:23 2019 +0900
@@ -25,8 +25,9 @@
 static void forwardsignal(int sig)
 {
 	assert(peerpid > 0);
-	if (kill(peerpid, sig) < 0)
+	if (kill(peerpid, sig) < 0) {
 		abortmsgerrno("cannot kill %d", peerpid);
+	}
 	debugmsg("forward signal %d", sig);
 }
 
@@ -34,8 +35,9 @@
 {
 	/* prefer kill(-pgid, sig), fallback to pid if pgid is invalid */
 	pid_t killpid = peerpgid > 1 ? -peerpgid : peerpid;
-	if (kill(killpid, sig) < 0)
+	if (kill(killpid, sig) < 0) {
 		abortmsgerrno("cannot kill %d", killpid);
+	}
 	debugmsg("forward signal %d to %d", sig, killpid);
 }
 
@@ -43,28 +45,36 @@
 {
 	sigset_t unblockset, oldset;
 	struct sigaction sa, oldsa;
-	if (sigemptyset(&unblockset) < 0)
+	if (sigemptyset(&unblockset) < 0) {
 		goto error;
-	if (sigaddset(&unblockset, sig) < 0)
+	}
+	if (sigaddset(&unblockset, sig) < 0) {
 		goto error;
+	}
 	memset(&sa, 0, sizeof(sa));
 	sa.sa_handler = SIG_DFL;
 	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
+	if (sigemptyset(&sa.sa_mask) < 0) {
 		goto error;
+	}
 
 	forwardsignal(sig);
-	if (raise(sig) < 0) /* resend to self */
+	if (raise(sig) < 0) { /* resend to self */
 		goto error;
-	if (sigaction(sig, &sa, &oldsa) < 0)
+	}
+	if (sigaction(sig, &sa, &oldsa) < 0) {
 		goto error;
-	if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0)
+	}
+	if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0) {
 		goto error;
+	}
 	/* resent signal will be handled before sigprocmask() returns */
-	if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
+	if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0) {
 		goto error;
-	if (sigaction(sig, &oldsa, NULL) < 0)
+	}
+	if (sigaction(sig, &oldsa, NULL) < 0) {
 		goto error;
+	}
 	return;
 
 error:
@@ -73,19 +83,22 @@
 
 static void handlechildsignal(int sig UNUSED_)
 {
-	if (peerpid == 0 || pagerpid == 0)
+	if (peerpid == 0 || pagerpid == 0) {
 		return;
+	}
 	/* if pager exits, notify the server with SIGPIPE immediately.
 	 * otherwise the server won't get SIGPIPE if it does not write
 	 * anything. (issue5278) */
-	if (waitpid(pagerpid, NULL, WNOHANG) == pagerpid)
+	if (waitpid(pagerpid, NULL, WNOHANG) == pagerpid) {
 		kill(peerpid, SIGPIPE);
+	}
 }
 
 void setupsignalhandler(pid_t pid, pid_t pgid)
 {
-	if (pid <= 0)
+	if (pid <= 0) {
 		return;
+	}
 	peerpid = pid;
 	peerpgid = (pgid <= 1 ? 0 : pgid);
 
@@ -98,42 +111,52 @@
 	 * - SIGINT: usually generated by the terminal */
 	sa.sa_handler = forwardsignaltogroup;
 	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
+	if (sigemptyset(&sa.sa_mask) < 0) {
+		goto error;
+	}
+	if (sigaction(SIGHUP, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGHUP, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGINT, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGINT, &sa, NULL) < 0)
-		goto error;
+	}
 
 	/* terminate frontend by double SIGTERM in case of server freeze */
 	sa.sa_handler = forwardsignal;
 	sa.sa_flags |= SA_RESETHAND;
-	if (sigaction(SIGTERM, &sa, NULL) < 0)
+	if (sigaction(SIGTERM, &sa, NULL) < 0) {
 		goto error;
+	}
 
 	/* notify the worker about window resize events */
 	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+	if (sigaction(SIGWINCH, &sa, NULL) < 0) {
 		goto error;
+	}
 	/* forward user-defined signals */
-	if (sigaction(SIGUSR1, &sa, NULL) < 0)
+	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGUSR2, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGUSR2, &sa, NULL) < 0) {
 		goto error;
+	}
 	/* propagate job control requests to worker */
 	sa.sa_handler = forwardsignal;
 	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGCONT, &sa, NULL) < 0)
+	if (sigaction(SIGCONT, &sa, NULL) < 0) {
 		goto error;
+	}
 	sa.sa_handler = handlestopsignal;
 	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+	if (sigaction(SIGTSTP, &sa, NULL) < 0) {
 		goto error;
+	}
 	/* get notified when pager exits */
 	sa.sa_handler = handlechildsignal;
 	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGCHLD, &sa, NULL) < 0)
+	if (sigaction(SIGCHLD, &sa, NULL) < 0) {
 		goto error;
+	}
 
 	return;
 
@@ -147,26 +170,34 @@
 	memset(&sa, 0, sizeof(sa));
 	sa.sa_handler = SIG_DFL;
 	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
+	if (sigemptyset(&sa.sa_mask) < 0) {
 		goto error;
+	}
 
-	if (sigaction(SIGHUP, &sa, NULL) < 0)
+	if (sigaction(SIGHUP, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGTERM, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGTERM, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGWINCH, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGCONT, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGCONT, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGTSTP, &sa, NULL) < 0) {
 		goto error;
-	if (sigaction(SIGCHLD, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGCHLD, &sa, NULL) < 0) {
 		goto error;
+	}
 
 	/* ignore Ctrl+C while shutting down to make pager exits cleanly */
 	sa.sa_handler = SIG_IGN;
-	if (sigaction(SIGINT, &sa, NULL) < 0)
+	if (sigaction(SIGINT, &sa, NULL) < 0) {
 		goto error;
+	}
 
 	peerpid = 0;
 	return;
@@ -180,22 +211,27 @@
 pid_t setuppager(const char *pagercmd, const char *envp[])
 {
 	assert(pagerpid == 0);
-	if (!pagercmd)
+	if (!pagercmd) {
 		return 0;
+	}
 
 	int pipefds[2];
-	if (pipe(pipefds) < 0)
+	if (pipe(pipefds) < 0) {
 		return 0;
+	}
 	pid_t pid = fork();
-	if (pid < 0)
+	if (pid < 0) {
 		goto error;
+	}
 	if (pid > 0) {
 		close(pipefds[0]);
-		if (dup2(pipefds[1], fileno(stdout)) < 0)
+		if (dup2(pipefds[1], fileno(stdout)) < 0) {
 			goto error;
+		}
 		if (isatty(fileno(stderr))) {
-			if (dup2(pipefds[1], fileno(stderr)) < 0)
+			if (dup2(pipefds[1], fileno(stderr)) < 0) {
 				goto error;
+			}
 		}
 		close(pipefds[1]);
 		pagerpid = pid;
@@ -222,16 +258,18 @@
 
 void waitpager(void)
 {
-	if (pagerpid == 0)
+	if (pagerpid == 0) {
 		return;
+	}
 
 	/* close output streams to notify the pager its input ends */
 	fclose(stdout);
 	fclose(stderr);
 	while (1) {
 		pid_t ret = waitpid(pagerpid, NULL, 0);
-		if (ret == -1 && errno == EINTR)
+		if (ret == -1 && errno == EINTR) {
 			continue;
+		}
 		break;
 	}
 }
--- a/contrib/chg/util.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/contrib/chg/util.c	Fri Jan 25 18:55:23 2019 +0900
@@ -25,8 +25,9 @@
 
 static inline void fsetcolor(FILE *fp, const char *code)
 {
-	if (!colorenabled)
+	if (!colorenabled) {
 		return;
+	}
 	fprintf(fp, "\033[%sm", code);
 }
 
@@ -35,8 +36,9 @@
 	fsetcolor(stderr, "1;31");
 	fputs("chg: abort: ", stderr);
 	vfprintf(stderr, fmt, args);
-	if (no != 0)
+	if (no != 0) {
 		fprintf(stderr, " (errno = %d, %s)", no, strerror(no));
+	}
 	fsetcolor(stderr, "");
 	fputc('\n', stderr);
 	exit(255);
@@ -82,8 +84,9 @@
 
 void debugmsg(const char *fmt, ...)
 {
-	if (!debugmsgenabled)
+	if (!debugmsgenabled) {
 		return;
+	}
 
 	va_list args;
 	va_start(args, fmt);
@@ -98,32 +101,37 @@
 void fchdirx(int dirfd)
 {
 	int r = fchdir(dirfd);
-	if (r == -1)
+	if (r == -1) {
 		abortmsgerrno("failed to fchdir");
+	}
 }
 
 void fsetcloexec(int fd)
 {
 	int flags = fcntl(fd, F_GETFD);
-	if (flags < 0)
+	if (flags < 0) {
 		abortmsgerrno("cannot get flags of fd %d", fd);
-	if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0)
+	}
+	if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) {
 		abortmsgerrno("cannot set flags of fd %d", fd);
+	}
 }
 
 void *mallocx(size_t size)
 {
 	void *result = malloc(size);
-	if (!result)
+	if (!result) {
 		abortmsg("failed to malloc");
+	}
 	return result;
 }
 
 void *reallocx(void *ptr, size_t size)
 {
 	void *result = realloc(ptr, size);
-	if (!result)
+	if (!result) {
 		abortmsg("failed to realloc");
+	}
 	return result;
 }
 
@@ -144,30 +152,37 @@
 	memset(&newsa, 0, sizeof(newsa));
 	newsa.sa_handler = SIG_IGN;
 	newsa.sa_flags = 0;
-	if (sigemptyset(&newsa.sa_mask) < 0)
+	if (sigemptyset(&newsa.sa_mask) < 0) {
 		goto done;
-	if (sigaction(SIGINT, &newsa, &oldsaint) < 0)
+	}
+	if (sigaction(SIGINT, &newsa, &oldsaint) < 0) {
 		goto done;
+	}
 	doneflags |= F_SIGINT;
-	if (sigaction(SIGQUIT, &newsa, &oldsaquit) < 0)
+	if (sigaction(SIGQUIT, &newsa, &oldsaquit) < 0) {
 		goto done;
+	}
 	doneflags |= F_SIGQUIT;
 
-	if (sigaddset(&newsa.sa_mask, SIGCHLD) < 0)
+	if (sigaddset(&newsa.sa_mask, SIGCHLD) < 0) {
 		goto done;
-	if (sigprocmask(SIG_BLOCK, &newsa.sa_mask, &oldmask) < 0)
+	}
+	if (sigprocmask(SIG_BLOCK, &newsa.sa_mask, &oldmask) < 0) {
 		goto done;
+	}
 	doneflags |= F_SIGMASK;
 
 	pid_t pid = fork();
-	if (pid < 0)
+	if (pid < 0) {
 		goto done;
+	}
 	if (pid == 0) {
 		sigaction(SIGINT, &oldsaint, NULL);
 		sigaction(SIGQUIT, &oldsaquit, NULL);
 		sigprocmask(SIG_SETMASK, &oldmask, NULL);
-		if (cwd && chdir(cwd) < 0)
+		if (cwd && chdir(cwd) < 0) {
 			_exit(127);
+		}
 		const char *argv[] = {"sh", "-c", cmd, NULL};
 		if (envp) {
 			execve("/bin/sh", (char **)argv, (char **)envp);
@@ -176,25 +191,32 @@
 		}
 		_exit(127);
 	} else {
-		if (waitpid(pid, &status, 0) < 0)
+		if (waitpid(pid, &status, 0) < 0) {
 			goto done;
+		}
 		doneflags |= F_WAITPID;
 	}
 
 done:
-	if (doneflags & F_SIGINT)
+	if (doneflags & F_SIGINT) {
 		sigaction(SIGINT, &oldsaint, NULL);
-	if (doneflags & F_SIGQUIT)
+	}
+	if (doneflags & F_SIGQUIT) {
 		sigaction(SIGQUIT, &oldsaquit, NULL);
-	if (doneflags & F_SIGMASK)
+	}
+	if (doneflags & F_SIGMASK) {
 		sigprocmask(SIG_SETMASK, &oldmask, NULL);
+	}
 
 	/* no way to report other errors, use 127 (= shell termination) */
-	if (!(doneflags & F_WAITPID))
+	if (!(doneflags & F_WAITPID)) {
 		return 127;
-	if (WIFEXITED(status))
+	}
+	if (WIFEXITED(status)) {
 		return WEXITSTATUS(status);
-	if (WIFSIGNALED(status))
+	}
+	if (WIFSIGNALED(status)) {
 		return -WTERMSIG(status);
+	}
 	return 127;
 }
--- a/contrib/fuzz/manifest.cc	Wed Jan 23 07:49:36 2019 -0500
+++ b/contrib/fuzz/manifest.cc	Fri Jan 25 18:55:23 2019 +0900
@@ -20,11 +20,19 @@
   lm = lazymanifest(mdata)
   # iterate the whole thing, which causes the code to fully parse
   # every line in the manifest
-  list(lm.iterentries())
+  for e, _, _ in lm.iterentries():
+      # also exercise __getitem__ et al
+      lm[e]
+      e in lm
+      (e + 'nope') in lm
   lm[b'xyzzy'] = (b'\0' * 20, 'x')
   # do an insert, text should change
   assert lm.text() != mdata, "insert should change text and didn't: %r %r" % (lm.text(), mdata)
+  cloned = lm.filtercopy(lambda x: x != 'xyzzy')
+  assert cloned.text() == mdata, 'cloned text should equal mdata'
+  cloned.diff(lm)
   del lm[b'xyzzy']
+  cloned.diff(lm)
   # should be back to the same
   assert lm.text() == mdata, "delete should have restored text but didn't: %r %r" % (lm.text(), mdata)
 except Exception as e:
@@ -39,6 +47,11 @@
 
 int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
 {
+	// Don't allow fuzzer inputs larger than 100k, since we'll just bog
+	// down and not accomplish much.
+	if (Size > 100000) {
+		return 0;
+	}
 	PyObject *mtext =
 	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
 	PyObject *locals = PyDict_New();
--- a/contrib/fuzz/revlog.cc	Wed Jan 23 07:49:36 2019 -0500
+++ b/contrib/fuzz/revlog.cc	Fri Jan 25 18:55:23 2019 +0900
@@ -19,6 +19,11 @@
 for inline in (True, False):
     try:
         index, cache = parse_index2(data, inline)
+        index.slicechunktodensity(list(range(len(index))), 0.5, 262144)
+        for rev in range(len(index)):
+            node = index[rev][7]
+            partial = index.shortest(node)
+            index.partialmatch(node[:partial])
     except Exception as e:
         pass
         # uncomment this print if you're editing this Python code
@@ -31,6 +36,11 @@
 
 int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
 {
+	// Don't allow fuzzer inputs larger than 60k, since we'll just bog
+	// down and not accomplish much.
+	if (Size > 60000) {
+		return 0;
+	}
 	PyObject *text =
 	    PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
 	PyObject *locals = PyDict_New();
--- a/contrib/python3-whitelist	Wed Jan 23 07:49:36 2019 -0500
+++ b/contrib/python3-whitelist	Fri Jan 25 18:55:23 2019 +0900
@@ -14,6 +14,7 @@
 test-ancestor.py
 test-annotate.py
 test-annotate.t
+test-arbitraryfilectx.t
 test-archive-symlinks.t
 test-archive.t
 test-atomictempfile.py
@@ -25,6 +26,7 @@
 test-bad-extension.t
 test-bad-pull.t
 test-basic.t
+test-batching.py
 test-bdiff.py
 test-bheads.t
 test-bisect.t
@@ -42,6 +44,7 @@
 test-branch-option.t
 test-branch-tag-confict.t
 test-branches.t
+test-bugzilla.t
 test-bundle-phases.t
 test-bundle-r.t
 test-bundle-type.t
@@ -54,7 +57,9 @@
 test-bundle2-remote-changegroup.t
 test-cache-abuse.t
 test-cappedreader.py
+test-casecollision-merge.t
 test-casecollision.t
+test-casefolding.t
 test-cat.t
 test-cbor.py
 test-censor.t
@@ -115,6 +120,7 @@
 test-convert-hg-source.t
 test-convert-hg-startrev.t
 test-convert-splicemap.t
+test-convert-svn-sink.t
 test-convert-tagsbranch-topology.t
 test-copy-move-merge.t
 test-copy.t
@@ -159,6 +165,7 @@
 test-empty-group.t
 test-empty.t
 test-encode.t
+test-encoding-align.t
 test-encoding-func.py
 test-encoding-textwrap.t
 test-encoding.t
@@ -252,16 +259,21 @@
 test-hgrc.t
 test-hgweb-annotate-whitespace.t
 test-hgweb-bundle.t
+test-hgweb-commands.t
 test-hgweb-csp.t
 test-hgweb-descend-empties.t
 test-hgweb-diffs.t
 test-hgweb-empty.t
 test-hgweb-filelog.t
+test-hgweb-no-path-info.t
+test-hgweb-no-request-uri.t
 test-hgweb-non-interactive.t
 test-hgweb-raw.t
 test-hgweb-removed.t
+test-hgweb-symrev.t
 test-hgweb.t
 test-hgwebdir-paths.py
+test-hgwebdir.t
 test-hgwebdirsym.t
 test-histedit-arguments.t
 test-histedit-base.t
@@ -278,6 +290,7 @@
 test-histedit-obsolete.t
 test-histedit-outgoing.t
 test-histedit-templates.t
+test-http-api.t
 test-http-branchmap.t
 test-http-bundle1.t
 test-http-clone-r.t
@@ -300,6 +313,7 @@
 test-infinitepush.t
 test-inherit-mode.t
 test-init.t
+test-install.t
 test-issue1089.t
 test-issue1102.t
 test-issue1175.t
@@ -381,11 +395,13 @@
 test-merge9.t
 test-minifileset.py
 test-minirst.py
+test-missing-capability.t
 test-mq-git.t
 test-mq-guards.t
 test-mq-header-date.t
 test-mq-header-from.t
 test-mq-merge.t
+test-mq-missingfiles.t
 test-mq-pull-from-bundle.t
 test-mq-qclone-http.t
 test-mq-qdelete.t
@@ -393,6 +409,7 @@
 test-mq-qfold.t
 test-mq-qgoto.t
 test-mq-qimport-fail-cleanup.t
+test-mq-qimport.t
 test-mq-qnew.t
 test-mq-qpush-exact.t
 test-mq-qpush-fail.t
@@ -438,6 +455,7 @@
 test-narrow.t
 test-nested-repo.t
 test-newbranch.t
+test-newcgi.t
 test-newercgi.t
 test-nointerrupt.t
 test-obshistory.t
@@ -558,6 +576,7 @@
 test-remotefilelog-clone-tree.t
 test-remotefilelog-clone.t
 test-remotefilelog-gcrepack.t
+test-remotefilelog-histpack.py
 test-remotefilelog-http.t
 test-remotefilelog-keepset.t
 test-remotefilelog-local.t
@@ -658,6 +677,8 @@
 test-template-map.t
 test-tools.t
 test-transplant.t
+test-treediscovery-legacy.t
+test-treediscovery.t
 test-treemanifest.t
 test-ui-color.py
 test-ui-config.py
@@ -669,6 +690,7 @@
 test-unionrepo.t
 test-unrelated-pull.t
 test-up-local-change.t
+test-update-atomic.t
 test-update-branches.t
 test-update-dest.t
 test-update-issue1456.t
@@ -699,5 +721,6 @@
 test-wireproto-framing.py
 test-wireproto-serverreactor.py
 test-wireproto.py
+test-wireproto.t
 test-wsgirequest.py
 test-xdg.t
--- a/hgext/bugzilla.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/hgext/bugzilla.py	Fri Jan 25 18:55:23 2019 +0900
@@ -303,6 +303,7 @@
     error,
     logcmdutil,
     mail,
+    pycompat,
     registrar,
     url,
     util,
@@ -342,10 +343,10 @@
     default='bugs',
 )
 configitem('bugzilla', 'fixregexp',
-    default=(r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
-             r'(?:nos?\.?|num(?:ber)?s?)?\s*'
-             r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
-             r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
+    default=(br'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
+             br'(?:nos?\.?|num(?:ber)?s?)?\s*'
+             br'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
+             br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
 )
 configitem('bugzilla', 'fixresolution',
     default='FIXED',
@@ -363,9 +364,9 @@
     default=None,
 )
 configitem('bugzilla', 'regexp',
-    default=(r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
-             r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
-             r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
+    default=(br'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
+             br'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
+             br'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')
 )
 configitem('bugzilla', 'strip',
     default=0,
@@ -733,7 +734,7 @@
         c = self.bzproxy.Bug.comments({'ids': [id],
                                        'include_fields': ['text'],
                                        'token': self.bztoken})
-        return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])
+        return ''.join([t['text'] for t in c['bugs']['%d' % id]['comments']])
 
     def filter_real_bug_ids(self, bugs):
         probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
@@ -804,11 +805,11 @@
 
     def makecommandline(self, fieldname, value):
         if self.bzvermajor >= 4:
-            return "@%s %s" % (fieldname, str(value))
+            return "@%s %s" % (fieldname, pycompat.bytestr(value))
         else:
             if fieldname == "id":
                 fieldname = "bug_id"
-            return "@%s = %s" % (fieldname, str(value))
+            return "@%s = %s" % (fieldname, pycompat.bytestr(value))
 
     def send_bug_modify_email(self, bugid, commands, comment, committer):
         '''send modification message to Bugzilla bug via email.
@@ -873,7 +874,7 @@
         self.fixresolution = self.ui.config('bugzilla', 'fixresolution')
 
     def apiurl(self, targets, include_fields=None):
-        url = '/'.join([self.bzroot] + [str(t) for t in targets])
+        url = '/'.join([self.bzroot] + [pycompat.bytestr(t) for t in targets])
         qv = {}
         if self.apikey:
             qv['api_key'] = self.apikey
@@ -938,7 +939,7 @@
         for bugid in bugs.keys():
             burl = self.apiurl(('bug', bugid, 'comment'), include_fields='text')
             result = self._fetch(burl)
-            comments = result['bugs'][str(bugid)]['comments']
+            comments = result['bugs'][pycompat.bytestr(bugid)]['comments']
             if any(sn in c['text'] for c in comments):
                 self.ui.status(_('bug %d already knows about changeset %s\n') %
                                (bugid, sn))
@@ -1011,7 +1012,7 @@
             self.ui.config('bugzilla', 'regexp'), re.IGNORECASE)
         self.fix_re = re.compile(
             self.ui.config('bugzilla', 'fixregexp'), re.IGNORECASE)
-        self.split_re = re.compile(r'\D+')
+        self.split_re = re.compile(br'\D+')
 
     def find_bugs(self, ctx):
         '''return bugs dictionary created from commit comment.
@@ -1098,7 +1099,7 @@
         t = logcmdutil.changesettemplater(self.ui, self.repo, spec)
         self.ui.pushbuffer()
         t.show(ctx, changes=ctx.changeset(),
-               bug=str(bugid),
+               bug=pycompat.bytestr(bugid),
                hgweb=self.ui.config('web', 'baseurl'),
                root=self.repo.root,
                webroot=webroot(self.repo.root))
--- a/hgext/convert/subversion.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/hgext/convert/subversion.py	Fri Jan 25 18:55:23 2019 +0900
@@ -1183,12 +1183,12 @@
         m = set()
         output = self.run0('ls', recursive=True, xml=True)
         doc = xml.dom.minidom.parseString(output)
-        for e in doc.getElementsByTagName('entry'):
+        for e in doc.getElementsByTagName(r'entry'):
             for n in e.childNodes:
-                if n.nodeType != n.ELEMENT_NODE or n.tagName != 'name':
+                if n.nodeType != n.ELEMENT_NODE or n.tagName != r'name':
                     continue
-                name = ''.join(c.data for c in n.childNodes
-                               if c.nodeType == c.TEXT_NODE)
+                name = r''.join(c.data for c in n.childNodes
+                                if c.nodeType == c.TEXT_NODE)
                 # Entries are compared with names coming from
                 # mercurial, so bytes with undefined encoding. Our
                 # best bet is to assume they are in local
--- a/hgext/remotefilelog/basepack.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/hgext/remotefilelog/basepack.py	Fri Jan 25 18:55:23 2019 +0900
@@ -270,9 +270,9 @@
                 # only affect this instance
                 self.VERSION = version
             elif self.VERSION != version:
-                raise RuntimeError('inconsistent version: %s' % version)
+                raise RuntimeError('inconsistent version: %d' % version)
         else:
-            raise RuntimeError('unsupported version: %s' % version)
+            raise RuntimeError('unsupported version: %d' % version)
 
 class basepack(versionmixin):
     # The maximum amount we should read via mmap before remmaping so the old
--- a/hgext/uncommit.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/hgext/uncommit.py	Fri Jan 25 18:55:23 2019 +0900
@@ -25,7 +25,7 @@
     cmdutil,
     commands,
     context,
-    copies,
+    copies as copiesmod,
     error,
     node,
     obsutil,
@@ -70,7 +70,7 @@
         return ctx.parents()[0].node()
 
     # Filter copies
-    copied = copies.pathcopies(base, ctx)
+    copied = copiesmod.pathcopies(base, ctx)
     copied = dict((dst, src) for dst, src in copied.iteritems()
                   if dst in files)
     def filectxfn(repo, memctx, path, contentctx=ctx, redirect=()):
@@ -93,13 +93,14 @@
                          extra=ctx.extra())
     return repo.commitctx(new)
 
-def _fixdirstate(repo, oldctx, newctx, status):
+def _fixdirstate(repo, oldctx, newctx, match=None):
     """ fix the dirstate after switching the working directory from oldctx to
     newctx which can be result of either unamend or uncommit.
     """
     ds = repo.dirstate
+    ds.setparents(newctx.node(), node.nullid)
     copies = dict(ds.copies())
-    s = status
+    s = newctx.status(oldctx, match=match)
     for f in s.modified:
         if ds[f] == 'r':
             # modified + removed -> removed
@@ -121,11 +122,7 @@
             ds.remove(f)
 
     # Merge old parent and old working dir copies
-    oldcopies = {}
-    for f in (s.modified + s.added):
-        src = oldctx[f].renamed()
-        if src:
-            oldcopies[f] = src[0]
+    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
     oldcopies.update(copies)
     copies = dict((dst, oldcopies.get(src, src))
                   for dst, src in oldcopies.iteritems())
@@ -179,12 +176,10 @@
                 # Fully removed the old commit
                 mapping[old.node()] = ()
 
-            scmutil.cleanupnodes(repo, mapping, 'uncommit', fixphase=True)
+            with repo.dirstate.parentchange():
+                _fixdirstate(repo, old, repo[newid], match)
 
-            with repo.dirstate.parentchange():
-                repo.dirstate.setparents(newid, node.nullid)
-                s = old.p1().status(old, match=match)
-                _fixdirstate(repo, old, repo[newid], s)
+            scmutil.cleanupnodes(repo, mapping, 'uncommit', fixphase=True)
 
 def predecessormarkers(ctx):
     """yields the obsolete markers marking the given changeset as a successor"""
@@ -244,9 +239,7 @@
         dirstate = repo.dirstate
 
         with dirstate.parentchange():
-            dirstate.setparents(newprednode, node.nullid)
-            s = repo.status(predctx, curctx)
-            _fixdirstate(repo, curctx, newpredctx, s)
+            _fixdirstate(repo, curctx, newpredctx)
 
         mapping = {curctx.node(): (newprednode,)}
         scmutil.cleanupnodes(repo, mapping, 'unamend', fixphase=True)
--- a/mercurial/bdiff.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/bdiff.c	Fri Jan 25 18:55:23 2019 +0900
@@ -35,15 +35,19 @@
 
 	/* count the lines */
 	i = 1; /* extra line for sentinel */
-	for (p = a; p < plast; p++)
-		if (*p == '\n')
+	for (p = a; p < plast; p++) {
+		if (*p == '\n') {
 			i++;
-	if (p == plast)
+		}
+	}
+	if (p == plast) {
 		i++;
+	}
 
 	*lr = l = (struct bdiff_line *)calloc(i, sizeof(struct bdiff_line));
-	if (!l)
+	if (!l) {
 		return -1;
+	}
 
 	/* build the line array and calculate hashes */
 	hash = 0;
@@ -90,18 +94,21 @@
 	struct pos *h = NULL;
 
 	/* build a hash table of the next highest power of 2 */
-	while (buckets < bn + 1)
+	while (buckets < bn + 1) {
 		buckets *= 2;
+	}
 
 	/* try to allocate a large hash table to avoid collisions */
 	for (scale = 4; scale; scale /= 2) {
 		h = (struct pos *)calloc(buckets, scale * sizeof(struct pos));
-		if (h)
+		if (h) {
 			break;
+		}
 	}
 
-	if (!h)
+	if (!h) {
 		return 0;
+	}
 
 	buckets = buckets * scale - 1;
 
@@ -115,9 +122,11 @@
 	for (i = 0; i < bn; i++) {
 		/* find the equivalence class */
 		for (j = b[i].hash & buckets; h[j].pos != -1;
-		     j = (j + 1) & buckets)
-			if (!cmp(b + i, b + h[j].pos))
+		     j = (j + 1) & buckets) {
+			if (!cmp(b + i, b + h[j].pos)) {
 				break;
+			}
+		}
 
 		/* add to the head of the equivalence class */
 		b[i].n = h[j].pos;
@@ -133,15 +142,18 @@
 	for (i = 0; i < an; i++) {
 		/* find the equivalence class */
 		for (j = a[i].hash & buckets; h[j].pos != -1;
-		     j = (j + 1) & buckets)
-			if (!cmp(a + i, b + h[j].pos))
+		     j = (j + 1) & buckets) {
+			if (!cmp(a + i, b + h[j].pos)) {
 				break;
+			}
+		}
 
 		a[i].e = j; /* use equivalence class for quick compare */
-		if (h[j].len <= t)
+		if (h[j].len <= t) {
 			a[i].n = h[j].pos; /* point to head of match list */
-		else
+		} else {
 			a[i].n = -1; /* too popular */
+		}
 	}
 
 	/* discard hash tables */
@@ -158,16 +170,18 @@
 	/* window our search on large regions to better bound
 	   worst-case performance. by choosing a window at the end, we
 	   reduce skipping overhead on the b chains. */
-	if (a2 - a1 > 30000)
+	if (a2 - a1 > 30000) {
 		a1 = a2 - 30000;
+	}
 
 	half = (a1 + a2 - 1) / 2;
 	bhalf = (b1 + b2 - 1) / 2;
 
 	for (i = a1; i < a2; i++) {
 		/* skip all lines in b after the current block */
-		for (j = a[i].n; j >= b2; j = b[j].n)
+		for (j = a[i].n; j >= b2; j = b[j].n) {
 			;
+		}
 
 		/* loop through all lines match a[i] in b */
 		for (; j >= b1; j = b[j].n) {
@@ -179,8 +193,9 @@
 					break;
 				}
 				/* previous line mismatch? */
-				if (a[i - k].e != b[j - k].e)
+				if (a[i - k].e != b[j - k].e) {
 					break;
+				}
 			}
 
 			pos[j].pos = i;
@@ -212,8 +227,9 @@
 	}
 
 	/* expand match to include subsequent popular lines */
-	while (mi + mk < a2 && mj + mk < b2 && a[mi + mk].e == b[mj + mk].e)
+	while (mi + mk < a2 && mj + mk < b2 && a[mi + mk].e == b[mj + mk].e) {
 		mk++;
+	}
 
 	*omi = mi;
 	*omj = mj;
@@ -230,18 +246,21 @@
 	while (1) {
 		/* find the longest match in this chunk */
 		k = longest_match(a, b, pos, a1, a2, b1, b2, &i, &j);
-		if (!k)
+		if (!k) {
 			return l;
+		}
 
 		/* and recurse on the remaining chunks on either side */
 		l = recurse(a, b, pos, a1, i, b1, j, l);
-		if (!l)
+		if (!l) {
 			return NULL;
+		}
 
 		l->next =
 		    (struct bdiff_hunk *)malloc(sizeof(struct bdiff_hunk));
-		if (!l->next)
+		if (!l->next) {
 			return NULL;
+		}
 
 		l = l->next;
 		l->a1 = i;
@@ -271,14 +290,16 @@
 		/* generate the matching block list */
 
 		curr = recurse(a, b, pos, 0, an, 0, bn, base);
-		if (!curr)
+		if (!curr) {
 			return -1;
+		}
 
 		/* sentinel end hunk */
 		curr->next =
 		    (struct bdiff_hunk *)malloc(sizeof(struct bdiff_hunk));
-		if (!curr->next)
+		if (!curr->next) {
 			return -1;
+		}
 		curr = curr->next;
 		curr->a1 = curr->a2 = an;
 		curr->b1 = curr->b2 = bn;
@@ -291,10 +312,11 @@
 	for (curr = base->next; curr; curr = curr->next) {
 		struct bdiff_hunk *next = curr->next;
 
-		if (!next)
+		if (!next) {
 			break;
+		}
 
-		if (curr->a2 == next->a1 || curr->b2 == next->b1)
+		if (curr->a2 == next->a1 || curr->b2 == next->b1) {
 			while (curr->a2 < an && curr->b2 < bn &&
 			       next->a1 < next->a2 && next->b1 < next->b2 &&
 			       !cmp(a + curr->a2, b + curr->b2)) {
@@ -303,10 +325,12 @@
 				curr->b2++;
 				next->b1++;
 			}
+		}
 	}
 
-	for (curr = base->next; curr; curr = curr->next)
+	for (curr = base->next; curr; curr = curr->next) {
 		count++;
+	}
 	return count;
 }
 
--- a/mercurial/cext/base85.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/cext/base85.c	Fri Jan 25 18:55:23 2019 +0900
@@ -24,8 +24,9 @@
 	unsigned i;
 
 	memset(b85dec, 0, sizeof(b85dec));
-	for (i = 0; i < sizeof(b85chars); i++)
+	for (i = 0; i < sizeof(b85chars); i++) {
 		b85dec[(int)(b85chars[i])] = i + 1;
+	}
 }
 
 static PyObject *b85encode(PyObject *self, PyObject *args)
@@ -37,19 +38,22 @@
 	unsigned int acc, val, ch;
 	int pad = 0;
 
-	if (!PyArg_ParseTuple(args, PY23("s#|i", "y#|i"), &text, &len, &pad))
+	if (!PyArg_ParseTuple(args, PY23("s#|i", "y#|i"), &text, &len, &pad)) {
 		return NULL;
+	}
 
-	if (pad)
+	if (pad) {
 		olen = ((len + 3) / 4 * 5) - 3;
-	else {
+	} else {
 		olen = len % 4;
-		if (olen)
+		if (olen) {
 			olen++;
+		}
 		olen += len / 4 * 5;
 	}
-	if (!(out = PyBytes_FromStringAndSize(NULL, olen + 3)))
+	if (!(out = PyBytes_FromStringAndSize(NULL, olen + 3))) {
 		return NULL;
+	}
 
 	dst = PyBytes_AsString(out);
 
@@ -58,8 +62,9 @@
 		for (i = 24; i >= 0; i -= 8) {
 			ch = *text++;
 			acc |= ch << i;
-			if (--len == 0)
+			if (--len == 0) {
 				break;
+			}
 		}
 		for (i = 4; i >= 0; i--) {
 			val = acc % 85;
@@ -69,8 +74,9 @@
 		dst += 5;
 	}
 
-	if (!pad)
+	if (!pad) {
 		_PyBytes_Resize(&out, olen);
+	}
 
 	return out;
 }
@@ -84,15 +90,18 @@
 	int c;
 	unsigned int acc;
 
-	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &text, &len))
+	if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &text, &len)) {
 		return NULL;
+	}
 
 	olen = len / 5 * 4;
 	i = len % 5;
-	if (i)
+	if (i) {
 		olen += i - 1;
-	if (!(out = PyBytes_FromStringAndSize(NULL, olen)))
+	}
+	if (!(out = PyBytes_FromStringAndSize(NULL, olen))) {
 		return NULL;
+	}
 
 	dst = PyBytes_AsString(out);
 
@@ -100,8 +109,9 @@
 	while (i < len) {
 		acc = 0;
 		cap = len - i - 1;
-		if (cap > 4)
+		if (cap > 4) {
 			cap = 4;
+		}
 		for (j = 0; j < cap; i++, j++) {
 			c = b85dec[(int)*text++] - 1;
 			if (c < 0) {
@@ -136,10 +146,12 @@
 
 		cap = olen < 4 ? olen : 4;
 		olen -= cap;
-		for (j = 0; j < 4 - cap; j++)
+		for (j = 0; j < 4 - cap; j++) {
 			acc *= 85;
-		if (cap && cap < 4)
+		}
+		if (cap && cap < 4) {
 			acc += 0xffffff >> (cap - 1) * 8;
+		}
 		for (j = 0; j < cap; j++) {
 			acc = (acc << 8) | (acc >> 24);
 			*dst++ = acc;
--- a/mercurial/cext/bdiff.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/cext/bdiff.c	Fri Jan 25 18:55:23 2019 +0900
@@ -29,22 +29,26 @@
 
 	l.next = NULL;
 
-	if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb))
+	if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb)) {
 		return NULL;
+	}
 
 	an = bdiff_splitlines(PyBytes_AsString(sa), PyBytes_Size(sa), &a);
 	bn = bdiff_splitlines(PyBytes_AsString(sb), PyBytes_Size(sb), &b);
 
-	if (!a || !b)
+	if (!a || !b) {
 		goto nomem;
+	}
 
 	count = bdiff_diff(a, an, b, bn, &l);
-	if (count < 0)
+	if (count < 0) {
 		goto nomem;
+	}
 
 	rl = PyList_New(count);
-	if (!rl)
+	if (!rl) {
 		goto nomem;
+	}
 
 	for (h = l.next; h; h = h->next) {
 		m = Py_BuildValue("iiii", h->a1, h->a2, h->b1, h->b2);
@@ -72,8 +76,10 @@
 
 	l.next = NULL;
 
-	if (!PyArg_ParseTuple(args, PY23("s*s*:bdiff", "y*y*:bdiff"), &ba, &bb))
+	if (!PyArg_ParseTuple(args, PY23("s*s*:bdiff", "y*y*:bdiff"), &ba,
+	                      &bb)) {
 		return NULL;
+	}
 
 	if (!PyBuffer_IsContiguous(&ba, 'C') || ba.ndim > 1) {
 		PyErr_SetString(PyExc_ValueError, "bdiff input not contiguous");
@@ -98,8 +104,9 @@
 	lmax = la > lb ? lb : la;
 	for (ia = ba.buf, ib = bb.buf; li < lmax && *ia == *ib;
 	     ++li, ++ia, ++ib) {
-		if (*ia == '\n')
+		if (*ia == '\n') {
 			lcommon = li + 1;
+		}
 	}
 	/* we can almost add: if (li == lmax) lcommon = li; */
 
@@ -119,8 +126,9 @@
 	/* calculate length of output */
 	la = lb = 0;
 	for (h = l.next; h; h = h->next) {
-		if (h->a1 != la || h->b1 != lb)
+		if (h->a1 != la || h->b1 != lb) {
 			len += 12 + bl[h->b1].l - bl[lb].l;
+		}
 		la = h->a2;
 		lb = h->b2;
 	}
@@ -129,8 +137,9 @@
 
 	result = PyBytes_FromStringAndSize(NULL, len);
 
-	if (!result)
+	if (!result) {
 		goto cleanup;
+	}
 
 	/* build binary patch */
 	rb = PyBytes_AsString(result);
@@ -151,8 +160,9 @@
 	}
 
 cleanup:
-	if (_save)
+	if (_save) {
 		PyEval_RestoreThread(_save);
+	}
 	PyBuffer_Release(&ba);
 	PyBuffer_Release(&bb);
 	free(al);
@@ -174,20 +184,23 @@
 	Py_ssize_t i, rlen, wlen = 0;
 	char *w;
 
-	if (!PyArg_ParseTuple(args, "Sb:fixws", &s, &allws))
+	if (!PyArg_ParseTuple(args, "Sb:fixws", &s, &allws)) {
 		return NULL;
+	}
 	r = PyBytes_AsString(s);
 	rlen = PyBytes_Size(s);
 
 	w = (char *)PyMem_Malloc(rlen ? rlen : 1);
-	if (!w)
+	if (!w) {
 		goto nomem;
+	}
 
 	for (i = 0; i != rlen; i++) {
 		c = r[i];
 		if (c == ' ' || c == '\t' || c == '\r') {
-			if (!allws && (wlen == 0 || w[wlen - 1] != ' '))
+			if (!allws && (wlen == 0 || w[wlen - 1] != ' ')) {
 				w[wlen++] = ' ';
+			}
 		} else if (c == '\n' && !allws && wlen > 0 &&
 		           w[wlen - 1] == ' ') {
 			w[wlen - 1] = '\n';
@@ -207,8 +220,9 @@
                           const char *source, Py_ssize_t len)
 {
 	PyObject *sliced = PyBytes_FromStringAndSize(source, len);
-	if (sliced == NULL)
+	if (sliced == NULL) {
 		return false;
+	}
 	PyList_SET_ITEM(list, destidx, sliced);
 	return true;
 }
@@ -232,19 +246,22 @@
 			++nelts;
 		}
 	}
-	if ((result = PyList_New(nelts + 1)) == NULL)
+	if ((result = PyList_New(nelts + 1)) == NULL) {
 		goto abort;
+	}
 	nelts = 0;
 	for (i = 0; i < size - 1; ++i) {
 		if (text[i] == '\n') {
 			if (!sliceintolist(result, nelts++, text + start,
-			                   i - start + 1))
+			                   i - start + 1)) {
 				goto abort;
+			}
 			start = i + 1;
 		}
 	}
-	if (!sliceintolist(result, nelts++, text + start, size - start))
+	if (!sliceintolist(result, nelts++, text + start, size - start)) {
 		goto abort;
+	}
 	return result;
 abort:
 	Py_XDECREF(result);
@@ -257,8 +274,9 @@
 	PyObject *rl = (PyObject *)priv;
 	PyObject *m = Py_BuildValue("LLLL", a1, a2, b1, b2);
 	int r;
-	if (!m)
+	if (!m) {
 		return -1;
+	}
 	r = PyList_Append(rl, m);
 	Py_DECREF(m);
 	return r;
@@ -282,15 +300,17 @@
 	};
 
 	if (!PyArg_ParseTuple(args, PY23("s#s#", "y#y#"), &a.ptr, &la, &b.ptr,
-	                      &lb))
+	                      &lb)) {
 		return NULL;
+	}
 
 	a.size = la;
 	b.size = lb;
 
 	rl = PyList_New(0);
-	if (!rl)
+	if (!rl) {
 		return PyErr_NoMemory();
+	}
 
 	ecb.priv = rl;
 
--- a/mercurial/cext/charencode.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/cext/charencode.c	Fri Jan 25 18:55:23 2019 +0900
@@ -114,8 +114,9 @@
 
 	ret = PyBytes_FromStringAndSize(NULL, len / 2);
 
-	if (!ret)
+	if (!ret) {
 		return NULL;
+	}
 
 	d = PyBytes_AsString(ret);
 
@@ -133,21 +134,24 @@
 	const char *buf;
 	Py_ssize_t i, len;
 	if (!PyArg_ParseTuple(args, PY23("s#:isasciistr", "y#:isasciistr"),
-	                      &buf, &len))
+	                      &buf, &len)) {
 		return NULL;
+	}
 	i = 0;
 	/* char array in PyStringObject should be at least 4-byte aligned */
 	if (((uintptr_t)buf & 3) == 0) {
 		const uint32_t *p = (const uint32_t *)buf;
 		for (; i < len / 4; i++) {
-			if (p[i] & 0x80808080U)
+			if (p[i] & 0x80808080U) {
 				Py_RETURN_FALSE;
+			}
 		}
 		i *= 4;
 	}
 	for (; i < len; i++) {
-		if (buf[i] & 0x80)
+		if (buf[i] & 0x80) {
 			Py_RETURN_FALSE;
+		}
 	}
 	Py_RETURN_TRUE;
 }
@@ -164,8 +168,9 @@
 	len = PyBytes_GET_SIZE(str_obj);
 
 	newobj = PyBytes_FromStringAndSize(NULL, len);
-	if (!newobj)
+	if (!newobj) {
 		goto quit;
+	}
 
 	newstr = PyBytes_AS_STRING(newobj);
 
@@ -197,16 +202,18 @@
 PyObject *asciilower(PyObject *self, PyObject *args)
 {
 	PyObject *str_obj;
-	if (!PyArg_ParseTuple(args, "O!:asciilower", &PyBytes_Type, &str_obj))
+	if (!PyArg_ParseTuple(args, "O!:asciilower", &PyBytes_Type, &str_obj)) {
 		return NULL;
+	}
 	return _asciitransform(str_obj, lowertable, NULL);
 }
 
 PyObject *asciiupper(PyObject *self, PyObject *args)
 {
 	PyObject *str_obj;
-	if (!PyArg_ParseTuple(args, "O!:asciiupper", &PyBytes_Type, &str_obj))
+	if (!PyArg_ParseTuple(args, "O!:asciiupper", &PyBytes_Type, &str_obj)) {
 		return NULL;
+	}
 	return _asciitransform(str_obj, uppertable, NULL);
 }
 
@@ -222,8 +229,9 @@
 
 	if (!PyArg_ParseTuple(args, "O!O!O!:make_file_foldmap", &PyDict_Type,
 	                      &dmap, &PyInt_Type, &spec_obj, &PyFunction_Type,
-	                      &normcase_fallback))
+	                      &normcase_fallback)) {
 		goto quit;
+	}
 
 	spec = (int)PyInt_AS_LONG(spec_obj);
 	switch (spec) {
@@ -244,8 +252,9 @@
 	/* Add some more entries to deal with additions outside this
 	   function. */
 	file_foldmap = _dict_new_presized((PyDict_Size(dmap) / 10) * 11);
-	if (file_foldmap == NULL)
+	if (file_foldmap == NULL) {
 		goto quit;
+	}
 
 	while (PyDict_Next(dmap, &pos, &k, &v)) {
 		if (!dirstate_tuple_check(v)) {
@@ -265,8 +274,9 @@
 				    normcase_fallback, k, NULL);
 			}
 
-			if (normed == NULL)
+			if (normed == NULL) {
 				goto quit;
+			}
 			if (PyDict_SetItem(file_foldmap, normed, k) == -1) {
 				Py_DECREF(normed);
 				goto quit;
@@ -377,22 +387,25 @@
 	Py_ssize_t origlen, esclen;
 	int paranoid;
 	if (!PyArg_ParseTuple(args, "O!i:jsonescapeu8fast", &PyBytes_Type,
-	                      &origstr, &paranoid))
+	                      &origstr, &paranoid)) {
 		return NULL;
+	}
 
 	origbuf = PyBytes_AS_STRING(origstr);
 	origlen = PyBytes_GET_SIZE(origstr);
 	esclen = jsonescapelen(origbuf, origlen, paranoid);
-	if (esclen < 0)
+	if (esclen < 0) {
 		return NULL; /* unsupported char found or overflow */
+	}
 	if (origlen == esclen) {
 		Py_INCREF(origstr);
 		return origstr;
 	}
 
 	escstr = PyBytes_FromStringAndSize(NULL, esclen);
-	if (!escstr)
+	if (!escstr) {
 		return NULL;
+	}
 	encodejsonescape(PyBytes_AS_STRING(escstr), esclen, origbuf, origlen,
 	                 paranoid);
 
--- a/mercurial/cext/mpatch.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/cext/mpatch.c	Fri Jan 25 18:55:23 2019 +0900
@@ -55,13 +55,16 @@
 	int r;
 
 	PyObject *tmp = PyList_GetItem((PyObject *)bins, pos);
-	if (!tmp)
+	if (!tmp) {
 		return NULL;
-	if (PyObject_GetBuffer(tmp, &buffer, PyBUF_CONTIG_RO))
+	}
+	if (PyObject_GetBuffer(tmp, &buffer, PyBUF_CONTIG_RO)) {
 		return NULL;
+	}
 	if ((r = mpatch_decode(buffer.buf, buffer.len, &res)) < 0) {
-		if (!PyErr_Occurred())
+		if (!PyErr_Occurred()) {
 			setpyerr(r);
+		}
 		res = NULL;
 	}
 
@@ -78,8 +81,9 @@
 	char *out;
 	Py_ssize_t len, outlen;
 
-	if (!PyArg_ParseTuple(args, "OO:mpatch", &text, &bins))
+	if (!PyArg_ParseTuple(args, "OO:mpatch", &text, &bins)) {
 		return NULL;
+	}
 
 	len = PyList_Size(bins);
 	if (!len) {
@@ -94,8 +98,9 @@
 
 	patch = mpatch_fold(bins, cpygetitem, 0, len);
 	if (!patch) { /* error already set or memory error */
-		if (!PyErr_Occurred())
+		if (!PyErr_Occurred()) {
 			PyErr_NoMemory();
+		}
 		result = NULL;
 		goto cleanup;
 	}
@@ -126,8 +131,9 @@
 cleanup:
 	mpatch_lfree(patch);
 	PyBuffer_Release(&buffer);
-	if (!result && !PyErr_Occurred())
+	if (!result && !PyErr_Occurred()) {
 		setpyerr(r);
+	}
 	return result;
 }
 
@@ -138,15 +144,18 @@
 	Py_ssize_t patchlen;
 	char *bin;
 
-	if (!PyArg_ParseTuple(args, PY23("ls#", "ly#"), &orig, &bin, &patchlen))
+	if (!PyArg_ParseTuple(args, PY23("ls#", "ly#"), &orig, &bin,
+	                      &patchlen)) {
 		return NULL;
+	}
 
 	while (pos >= 0 && pos < patchlen) {
 		start = getbe32(bin + pos);
 		end = getbe32(bin + pos + 4);
 		len = getbe32(bin + pos + 8);
-		if (start > end)
+		if (start > end) {
 			break; /* sanity check */
+		}
 		pos += 12 + len;
 		outlen += start - last;
 		last = end;
@@ -154,9 +163,10 @@
 	}
 
 	if (pos != patchlen) {
-		if (!PyErr_Occurred())
+		if (!PyErr_Occurred()) {
 			PyErr_SetString(mpatch_Error,
 			                "patch cannot be decoded");
+		}
 		return NULL;
 	}
 
--- a/mercurial/cext/parsers.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/cext/parsers.c	Fri Jan 25 18:55:23 2019 +0900
@@ -32,8 +32,9 @@
 {
 	Py_ssize_t expected_size;
 
-	if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size))
+	if (!PyArg_ParseTuple(args, "n:make_presized_dict", &expected_size)) {
 		return NULL;
+	}
 
 	return _dict_new_presized(expected_size);
 }
@@ -43,8 +44,9 @@
 {
 	dirstateTupleObject *t =
 	    PyObject_New(dirstateTupleObject, &dirstateTupleType);
-	if (!t)
+	if (!t) {
 		return NULL;
+	}
 	t->state = state;
 	t->mode = mode;
 	t->size = size;
@@ -60,12 +62,14 @@
 	dirstateTupleObject *t;
 	char state;
 	int size, mode, mtime;
-	if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime))
+	if (!PyArg_ParseTuple(args, "ciii", &state, &mode, &size, &mtime)) {
 		return NULL;
+	}
 
 	t = (dirstateTupleObject *)subtype->tp_alloc(subtype, 1);
-	if (!t)
+	if (!t) {
 		return NULL;
+	}
 	t->state = state;
 	t->mode = mode;
 	t->size = size;
@@ -165,8 +169,9 @@
 
 	if (!PyArg_ParseTuple(
 	        args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"),
-	        &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen))
+	        &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) {
 		goto quit;
+	}
 
 	len = readlen;
 
@@ -178,8 +183,9 @@
 	}
 
 	parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, 20, str + 20, 20);
-	if (!parents)
+	if (!parents) {
 		goto quit;
+	}
 
 	/* read filenames */
 	while (pos >= 40 && pos < len) {
@@ -212,13 +218,16 @@
 			    cpos + 1, flen - (cpos - cur) - 1);
 			if (!fname || !cname ||
 			    PyDict_SetItem(cmap, fname, cname) == -1 ||
-			    PyDict_SetItem(dmap, fname, entry) == -1)
+			    PyDict_SetItem(dmap, fname, entry) == -1) {
 				goto quit;
+			}
 			Py_DECREF(cname);
 		} else {
 			fname = PyBytes_FromStringAndSize(cur, flen);
-			if (!fname || PyDict_SetItem(dmap, fname, entry) == -1)
+			if (!fname ||
+			    PyDict_SetItem(dmap, fname, entry) == -1) {
 				goto quit;
+			}
 		}
 		Py_DECREF(fname);
 		Py_DECREF(entry);
@@ -245,16 +254,20 @@
 	PyObject *nonnset = NULL, *otherpset = NULL, *result = NULL;
 	Py_ssize_t pos;
 
-	if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type, &dmap))
+	if (!PyArg_ParseTuple(args, "O!:nonnormalentries", &PyDict_Type,
+	                      &dmap)) {
 		goto bail;
+	}
 
 	nonnset = PySet_New(NULL);
-	if (nonnset == NULL)
+	if (nonnset == NULL) {
 		goto bail;
+	}
 
 	otherpset = PySet_New(NULL);
-	if (otherpset == NULL)
+	if (otherpset == NULL) {
 		goto bail;
+	}
 
 	pos = 0;
 	while (PyDict_Next(dmap, &pos, &fname, &v)) {
@@ -272,15 +285,18 @@
 			}
 		}
 
-		if (t->state == 'n' && t->mtime != -1)
+		if (t->state == 'n' && t->mtime != -1) {
 			continue;
-		if (PySet_Add(nonnset, fname) == -1)
+		}
+		if (PySet_Add(nonnset, fname) == -1) {
 			goto bail;
+		}
 	}
 
 	result = Py_BuildValue("(OO)", nonnset, otherpset);
-	if (result == NULL)
+	if (result == NULL) {
 		goto bail;
+	}
 	Py_DECREF(nonnset);
 	Py_DECREF(otherpset);
 	return result;
@@ -304,8 +320,10 @@
 	int now;
 
 	if (!PyArg_ParseTuple(args, "O!O!O!i:pack_dirstate", &PyDict_Type, &map,
-	                      &PyDict_Type, &copymap, &PyTuple_Type, &pl, &now))
+	                      &PyDict_Type, &copymap, &PyTuple_Type, &pl,
+	                      &now)) {
 		return NULL;
+	}
 
 	if (PyTuple_Size(pl) != 2) {
 		PyErr_SetString(PyExc_TypeError, "expected 2-element tuple");
@@ -332,8 +350,9 @@
 	}
 
 	packobj = PyBytes_FromStringAndSize(NULL, nbytes);
-	if (packobj == NULL)
+	if (packobj == NULL) {
 		goto bail;
+	}
 
 	p = PyBytes_AS_STRING(packobj);
 
@@ -377,10 +396,12 @@
 			mtime = -1;
 			mtime_unset = (PyObject *)make_dirstate_tuple(
 			    state, mode, size, mtime);
-			if (!mtime_unset)
+			if (!mtime_unset) {
 				goto bail;
-			if (PyDict_SetItem(map, k, mtime_unset) == -1)
+			}
+			if (PyDict_SetItem(map, k, mtime_unset) == -1) {
 				goto bail;
+			}
 			Py_DECREF(mtime_unset);
 			mtime_unset = NULL;
 		}
@@ -664,8 +685,9 @@
 	manifest_module_init(mod);
 	revlog_module_init(mod);
 
-	if (PyType_Ready(&dirstateTupleType) < 0)
+	if (PyType_Ready(&dirstateTupleType) < 0) {
 		return;
+	}
 	Py_INCREF(&dirstateTupleType);
 	PyModule_AddObject(mod, "dirstatetuple",
 	                   (PyObject *)&dirstateTupleType);
@@ -675,12 +697,14 @@
 {
 	PyObject *sys = PyImport_ImportModule("sys"), *ver;
 	long hexversion;
-	if (!sys)
+	if (!sys) {
 		return -1;
+	}
 	ver = PyObject_GetAttrString(sys, "hexversion");
 	Py_DECREF(sys);
-	if (!ver)
+	if (!ver) {
 		return -1;
+	}
 	hexversion = PyInt_AsLong(ver);
 	Py_DECREF(ver);
 	/* sys.hexversion is a 32-bit number by default, so the -1 case
@@ -720,8 +744,9 @@
 {
 	PyObject *mod;
 
-	if (check_python_version() == -1)
+	if (check_python_version() == -1) {
 		return;
+	}
 	mod = Py_InitModule3("parsers", methods, parsers_doc);
 	module_init(mod);
 }
--- a/mercurial/cext/pathencode.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/cext/pathencode.c	Fri Jan 25 18:55:23 2019 +0900
@@ -126,8 +126,9 @@
 			if (src[i] == 'g') {
 				state = DHGDI;
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DDEFAULT;
+			}
 			break;
 		case DHGDI:
 			if (src[i] == '/') {
@@ -137,8 +138,9 @@
 			state = DDEFAULT;
 			break;
 		case DDEFAULT:
-			if (src[i] == '.')
+			if (src[i] == '.') {
 				state = DDOT;
+			}
 			charcopy(dest, &destlen, destsize, src[i++]);
 			break;
 		}
@@ -153,8 +155,9 @@
 	PyObject *pathobj, *newobj;
 	char *path;
 
-	if (!PyArg_ParseTuple(args, "O:encodedir", &pathobj))
+	if (!PyArg_ParseTuple(args, "O:encodedir", &pathobj)) {
 		return NULL;
+	}
 
 	if (PyBytes_AsStringAndSize(pathobj, &path, &len) == -1) {
 		PyErr_SetString(PyExc_TypeError, "expected a string");
@@ -235,15 +238,17 @@
 			if (src[i] == 'u') {
 				state = AU;
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case AU:
 			if (src[i] == 'x') {
 				state = THIRD;
 				i++;
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case THIRD:
 			state = DEFAULT;
@@ -262,8 +267,9 @@
 			if (src[i] == 'o') {
 				state = CO;
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case CO:
 			if (src[i] == 'm') {
@@ -272,8 +278,9 @@
 			} else if (src[i] == 'n') {
 				state = THIRD;
 				i++;
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case COMLPT:
 			switch (src[i]) {
@@ -314,43 +321,49 @@
 			if (src[i] == 'p') {
 				state = LP;
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case LP:
 			if (src[i] == 't') {
 				state = COMLPT;
 				i++;
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case N:
 			if (src[i] == 'u') {
 				state = NU;
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case NU:
 			if (src[i] == 'l') {
 				state = THIRD;
 				i++;
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case P:
 			if (src[i] == 'r') {
 				state = PR;
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case PR:
 			if (src[i] == 'n') {
 				state = THIRD;
 				i++;
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case LDOT:
 			switch (src[i]) {
@@ -397,18 +410,21 @@
 			if (src[i] == 'g') {
 				state = HGDI;
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case HGDI:
 			if (src[i] == '/') {
 				state = START;
-				if (encodedir)
+				if (encodedir) {
 					memcopy(dest, &destlen, destsize, ".hg",
 					        3);
+				}
 				charcopy(dest, &destlen, destsize, src[i++]);
-			} else
+			} else {
 				state = DEFAULT;
+			}
 			break;
 		case SPACE:
 			switch (src[i]) {
@@ -427,8 +443,9 @@
 		case DEFAULT:
 			while (inset(onebyte, src[i])) {
 				charcopy(dest, &destlen, destsize, src[i++]);
-				if (i == len)
+				if (i == len) {
 					goto done;
+				}
 			}
 			switch (src[i]) {
 			case '.':
@@ -456,9 +473,10 @@
 					charcopy(dest, &destlen, destsize, '_');
 					charcopy(dest, &destlen, destsize,
 					         c == '_' ? '_' : c + 32);
-				} else
+				} else {
 					escape3(dest, &destlen, destsize,
 					        src[i++]);
+				}
 				break;
 			}
 			break;
@@ -498,12 +516,13 @@
 	Py_ssize_t i, destlen = 0;
 
 	for (i = 0; i < len; i++) {
-		if (inset(onebyte, src[i]))
+		if (inset(onebyte, src[i])) {
 			charcopy(dest, &destlen, destsize, src[i]);
-		else if (inset(lower, src[i]))
+		} else if (inset(lower, src[i])) {
 			charcopy(dest, &destlen, destsize, src[i] + 32);
-		else
+		} else {
 			escape3(dest, &destlen, destsize, src[i]);
+		}
 	}
 
 	return destlen;
@@ -516,13 +535,15 @@
 	PyObject *ret;
 
 	if (!PyArg_ParseTuple(args, PY23("s#:lowerencode", "y#:lowerencode"),
-	                      &path, &len))
+	                      &path, &len)) {
 		return NULL;
+	}
 
 	newlen = _lowerencode(NULL, 0, path, len);
 	ret = PyBytes_FromStringAndSize(NULL, newlen);
-	if (ret)
+	if (ret) {
 		_lowerencode(PyBytes_AS_STRING(ret), newlen, path, len);
+	}
 
 	return ret;
 }
@@ -551,8 +572,9 @@
 	Py_ssize_t destsize, destlen = 0, slop, used;
 
 	while (lastslash >= 0 && src[lastslash] != '/') {
-		if (src[lastslash] == '.' && lastdot == -1)
+		if (src[lastslash] == '.' && lastdot == -1) {
 			lastdot = lastslash;
+		}
 		lastslash--;
 	}
 
@@ -570,12 +592,14 @@
 	/* If src contains a suffix, we will append it to the end of
 	   the new string, so make room. */
 	destsize = 120;
-	if (lastdot >= 0)
+	if (lastdot >= 0) {
 		destsize += len - lastdot - 1;
+	}
 
 	ret = PyBytes_FromStringAndSize(NULL, destsize);
-	if (ret == NULL)
+	if (ret == NULL) {
 		return NULL;
+	}
 
 	dest = PyBytes_AS_STRING(ret);
 	memcopy(dest, &destlen, destsize, "dh/", 3);
@@ -587,30 +611,36 @@
 			char d = dest[destlen - 1];
 			/* After truncation, a directory name may end
 			   in a space or dot, which are unportable. */
-			if (d == '.' || d == ' ')
+			if (d == '.' || d == ' ') {
 				dest[destlen - 1] = '_';
-			/* The + 3 is to account for "dh/" in the beginning */
-			if (destlen > maxshortdirslen + 3)
+				/* The + 3 is to account for "dh/" in the
+				 * beginning */
+			}
+			if (destlen > maxshortdirslen + 3) {
 				break;
+			}
 			charcopy(dest, &destlen, destsize, src[i]);
 			p = -1;
-		} else if (p < dirprefixlen)
+		} else if (p < dirprefixlen) {
 			charcopy(dest, &destlen, destsize, src[i]);
+		}
 	}
 
 	/* Rewind to just before the last slash copied. */
-	if (destlen > maxshortdirslen + 3)
+	if (destlen > maxshortdirslen + 3) {
 		do {
 			destlen--;
 		} while (destlen > 0 && dest[destlen] != '/');
+	}
 
 	if (destlen > 3) {
 		if (lastslash > 0) {
 			char d = dest[destlen - 1];
 			/* The last directory component may be
 			   truncated, so make it safe. */
-			if (d == '.' || d == ' ')
+			if (d == '.' || d == ' ') {
 				dest[destlen - 1] = '_';
+			}
 		}
 
 		charcopy(dest, &destlen, destsize, '/');
@@ -620,27 +650,32 @@
 	   depends on the number of bytes left after accounting for
 	   hash and suffix. */
 	used = destlen + 40;
-	if (lastdot >= 0)
+	if (lastdot >= 0) {
 		used += len - lastdot - 1;
+	}
 	slop = maxstorepathlen - used;
 	if (slop > 0) {
 		Py_ssize_t basenamelen =
 		    lastslash >= 0 ? len - lastslash - 2 : len - 1;
 
-		if (basenamelen > slop)
+		if (basenamelen > slop) {
 			basenamelen = slop;
-		if (basenamelen > 0)
+		}
+		if (basenamelen > 0) {
 			memcopy(dest, &destlen, destsize, &src[lastslash + 1],
 			        basenamelen);
+		}
 	}
 
 	/* Add hash and suffix. */
-	for (i = 0; i < 20; i++)
+	for (i = 0; i < 20; i++) {
 		hexencode(dest, &destlen, destsize, sha[i]);
+	}
 
-	if (lastdot >= 0)
+	if (lastdot >= 0) {
 		memcopy(dest, &destlen, destsize, &src[lastdot],
 		        len - lastdot - 1);
+	}
 
 	assert(PyBytes_Check(ret));
 	Py_SIZE(ret) = destlen;
@@ -677,13 +712,15 @@
 
 	shaobj = PyObject_CallFunction(shafunc, PY23("s#", "y#"), str, len);
 
-	if (shaobj == NULL)
+	if (shaobj == NULL) {
 		return -1;
+	}
 
 	hashobj = PyObject_CallMethod(shaobj, "digest", "");
 	Py_DECREF(shaobj);
-	if (hashobj == NULL)
+	if (hashobj == NULL) {
 		return -1;
+	}
 
 	if (!PyBytes_Check(hashobj) || PyBytes_GET_SIZE(hashobj) != 20) {
 		PyErr_SetString(PyExc_TypeError,
@@ -714,8 +751,9 @@
 	}
 
 	dirlen = _encodedir(dired, baselen, src, len);
-	if (sha1hash(sha, dired, dirlen - 1) == -1)
+	if (sha1hash(sha, dired, dirlen - 1) == -1) {
 		return NULL;
+	}
 	lowerlen = _lowerencode(lowered, baselen, dired + 5, dirlen - 5);
 	auxlen = auxencode(auxed, baselen, lowered, lowerlen);
 	return hashmangle(auxed, auxlen, sha);
@@ -727,18 +765,20 @@
 	PyObject *pathobj, *newobj;
 	char *path;
 
-	if (!PyArg_ParseTuple(args, "O:pathencode", &pathobj))
+	if (!PyArg_ParseTuple(args, "O:pathencode", &pathobj)) {
 		return NULL;
+	}
 
 	if (PyBytes_AsStringAndSize(pathobj, &path, &len) == -1) {
 		PyErr_SetString(PyExc_TypeError, "expected a string");
 		return NULL;
 	}
 
-	if (len > maxstorepathlen)
+	if (len > maxstorepathlen) {
 		newlen = maxstorepathlen + 2;
-	else
+	} else {
 		newlen = len ? basicencode(NULL, 0, path, len + 1) : 1;
+	}
 
 	if (newlen <= maxstorepathlen + 1) {
 		if (newlen == len + 1) {
@@ -754,8 +794,9 @@
 			basicencode(PyBytes_AS_STRING(newobj), newlen, path,
 			            len + 1);
 		}
-	} else
+	} else {
 		newobj = hashencode(path, len + 1);
+	}
 
 	return newobj;
 }
--- a/mercurial/config.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/config.py	Fri Jan 25 18:55:23 2019 +0900
@@ -78,6 +78,10 @@
         return list(self._data.get(section, {}).iteritems())
     def set(self, section, item, value, source=""):
         if pycompat.ispy3:
+            assert not isinstance(section, str), (
+                'config section may not be unicode strings on Python 3')
+            assert not isinstance(item, str), (
+                'config item may not be unicode strings on Python 3')
             assert not isinstance(value, str), (
                 'config values may not be unicode strings on Python 3')
         if section not in self:
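
The two new assertions extend the existing check on values to the section and item names, so a str/bytes mix-up surfaces as an immediate AssertionError instead of a silent lookup miss later. A minimal standalone sketch of the guarded pattern (a plain dict-of-dicts stands in for Mercurial's real config object):

    import sys

    def set_config(store, section, item, value):
        """Store a config entry, insisting on byte-string keys and values on Python 3."""
        if sys.version_info[0] >= 3:
            assert not isinstance(section, str), (
                'config section may not be a unicode string on Python 3')
            assert not isinstance(item, str), (
                'config item may not be a unicode string on Python 3')
            assert not isinstance(value, str), (
                'config values may not be unicode strings on Python 3')
        store.setdefault(section, {})[item] = value

    store = {}
    set_config(store, b'ui', b'username', b'alice')    # fine
    # set_config(store, 'ui', b'username', b'alice')   # AssertionError on Python 3
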
--- a/mercurial/hgweb/hgwebdir_mod.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/hgweb/hgwebdir_mod.py	Fri Jan 25 18:55:23 2019 +0900
@@ -510,7 +510,7 @@
         if style == styles[0]:
             vars['style'] = style
 
-        sessionvars = webutil.sessionvars(vars, r'?')
+        sessionvars = webutil.sessionvars(vars, '?')
         logourl = config('web', 'logourl')
         logoimg = config('web', 'logoimg')
         staticurl = (config('web', 'staticurl')
--- a/mercurial/hgweb/webutil.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/hgweb/webutil.py	Fri Jan 25 18:55:23 2019 +0900
@@ -456,13 +456,13 @@
     files = listfilediffs(ctx.files(), n, web.maxfiles)
 
     entry = commonentry(repo, ctx)
-    entry.update(
-        allparents=_kwfunc(lambda context, mapping: parents(ctx)),
-        parent=_kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
-        child=_kwfunc(lambda context, mapping: children(ctx, rev + 1)),
-        changelogtag=showtags,
-        files=files,
-    )
+    entry.update({
+        'allparents': _kwfunc(lambda context, mapping: parents(ctx)),
+        'parent': _kwfunc(lambda context, mapping: parents(ctx, rev - 1)),
+        'child': _kwfunc(lambda context, mapping: children(ctx, rev + 1)),
+        'changelogtag': showtags,
+        'files': files,
+    })
     return entry
 
 def changelistentries(web, revs, maxcount, parityfn):
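
The dict.update() rewrite matters on Python 3 because keyword-argument names always arrive as native str keys, while the template mapping is keyed by byte strings; passing an explicit dict keeps the keys as bytes. A small illustration with a plain dict (not the real hgweb mapping):

    # Keyword arguments always produce native-str keys:
    d = {}
    d.update(parent='x')
    assert list(d) == ['parent']

    # Passing a dict literal preserves byte-string keys:
    e = {}
    e.update({b'parent': 'x'})
    assert list(e) == [b'parent']
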
--- a/mercurial/httppeer.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/httppeer.py	Fri Jan 25 18:55:23 2019 +0900
@@ -816,8 +816,8 @@
             return
 
         raise error.CapabilityError(
-            _('cannot %s; client or remote repository does not support the %r '
-              'capability') % (purpose, name))
+            _('cannot %s; client or remote repository does not support the '
+              '\'%s\' capability') % (purpose, name))
 
     # End of ipeercapabilities.
 
--- a/mercurial/mpatch.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/mpatch.c	Fri Jan 25 18:55:23 2019 +0900
@@ -41,8 +41,9 @@
 {
 	struct mpatch_flist *a = NULL;
 
-	if (size < 1)
+	if (size < 1) {
 		size = 1;
+	}
 
 	a = (struct mpatch_flist *)malloc(sizeof(struct mpatch_flist));
 	if (a) {
@@ -110,10 +111,12 @@
 
 	while (s != src->tail) {
 		int soffset = s->start;
-		if (!safeadd(offset, &soffset))
+		if (!safeadd(offset, &soffset)) {
 			break; /* add would overflow, oh well */
-		if (soffset >= cut)
+		}
+		if (soffset >= cut) {
 			break; /* we've gone far enough */
+		}
 
 		postend = offset;
 		if (!safeadd(s->start, &postend) ||
@@ -139,11 +142,13 @@
 			if (!safesub(offset, &c)) {
 				break;
 			}
-			if (s->end < c)
+			if (s->end < c) {
 				c = s->end;
+			}
 			l = cut - offset - s->start;
-			if (s->len < l)
+			if (s->len < l) {
 				l = s->len;
+			}
 
 			offset += s->start + l - c;
 
@@ -176,8 +181,9 @@
 		if (!safeadd(offset, &cmpcut)) {
 			break;
 		}
-		if (cmpcut >= cut)
+		if (cmpcut >= cut) {
 			break;
+		}
 
 		postend = offset;
 		if (!safeadd(s->start, &postend)) {
@@ -205,11 +211,13 @@
 			if (!safesub(offset, &c)) {
 				break;
 			}
-			if (s->end < c)
+			if (s->end < c) {
 				c = s->end;
+			}
 			l = cut - offset - s->start;
-			if (s->len < l)
+			if (s->len < l) {
 				l = s->len;
+			}
 
 			offset += s->start + l - c;
 			s->start = c;
@@ -233,8 +241,9 @@
 	struct mpatch_frag *bh, *ct;
 	int offset = 0, post;
 
-	if (a && b)
+	if (a && b) {
 		c = lalloc((lsize(a) + lsize(b)) * 2);
+	}
 
 	if (c) {
 
@@ -284,8 +293,9 @@
 
 	/* assume worst case size, we won't have many of these lists */
 	l = lalloc(len / 12 + 1);
-	if (!l)
+	if (!l) {
 		return MPATCH_ERR_NO_MEM;
+	}
 
 	lt = l->tail;
 
@@ -295,8 +305,9 @@
 		lt->start = getbe32(bin + pos);
 		lt->end = getbe32(bin + pos + 4);
 		lt->len = getbe32(bin + pos + 8);
-		if (lt->start < 0 || lt->start > lt->end || lt->len < 0)
+		if (lt->start < 0 || lt->start > lt->end || lt->len < 0) {
 			break; /* sanity check */
+		}
 		if (!safeadd(12, &pos)) {
 			break;
 		}
--- a/mercurial/repository.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/mercurial/repository.py	Fri Jan 25 18:55:23 2019 +0900
@@ -346,8 +346,8 @@
             return
 
         raise error.CapabilityError(
-            _('cannot %s; remote repository does not support the %r '
-              'capability') % (purpose, name))
+            _('cannot %s; remote repository does not support the '
+              '\'%s\' capability') % (purpose, name))
 
 class iverifyproblem(interfaceutil.Interface):
     """Represents a problem with the integrity of the repository.
--- a/rust/chg/src/sighandlers.c	Wed Jan 23 07:49:36 2019 -0500
+++ b/rust/chg/src/sighandlers.c	Fri Jan 25 18:55:23 2019 +0900
@@ -33,28 +33,36 @@
 {
 	sigset_t unblockset, oldset;
 	struct sigaction sa, oldsa;
-	if (sigemptyset(&unblockset) < 0)
+	if (sigemptyset(&unblockset) < 0) {
 		return;
-	if (sigaddset(&unblockset, sig) < 0)
+	}
+	if (sigaddset(&unblockset, sig) < 0) {
 		return;
+	}
 	memset(&sa, 0, sizeof(sa));
 	sa.sa_handler = SIG_DFL;
 	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
+	if (sigemptyset(&sa.sa_mask) < 0) {
 		return;
+	}
 
 	forwardsignal(sig);
-	if (raise(sig) < 0) /* resend to self */
+	if (raise(sig) < 0) { /* resend to self */
 		return;
-	if (sigaction(sig, &sa, &oldsa) < 0)
+	}
+	if (sigaction(sig, &sa, &oldsa) < 0) {
 		return;
-	if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0)
+	}
+	if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0) {
 		return;
+	}
 	/* resent signal will be handled before sigprocmask() returns */
-	if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
+	if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0) {
 		return;
-	if (sigaction(sig, &oldsa, NULL) < 0)
+	}
+	if (sigaction(sig, &oldsa, NULL) < 0) {
 		return;
+	}
 }
 
 /*
@@ -81,37 +89,46 @@
 	 * - SIGINT: usually generated by the terminal */
 	sa.sa_handler = forwardsignaltogroup;
 	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
+	if (sigemptyset(&sa.sa_mask) < 0) {
+		return -1;
+	}
+	if (sigaction(SIGHUP, &sa, NULL) < 0) {
 		return -1;
-	if (sigaction(SIGHUP, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGINT, &sa, NULL) < 0) {
 		return -1;
-	if (sigaction(SIGINT, &sa, NULL) < 0)
-		return -1;
+	}
 
 	/* terminate frontend by double SIGTERM in case of server freeze */
 	sa.sa_handler = forwardsignal;
 	sa.sa_flags |= SA_RESETHAND;
-	if (sigaction(SIGTERM, &sa, NULL) < 0)
+	if (sigaction(SIGTERM, &sa, NULL) < 0) {
 		return -1;
+	}
 
 	/* notify the worker about window resize events */
 	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+	if (sigaction(SIGWINCH, &sa, NULL) < 0) {
 		return -1;
+	}
 	/* forward user-defined signals */
-	if (sigaction(SIGUSR1, &sa, NULL) < 0)
+	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
 		return -1;
-	if (sigaction(SIGUSR2, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGUSR2, &sa, NULL) < 0) {
 		return -1;
+	}
 	/* propagate job control requests to worker */
 	sa.sa_handler = forwardsignal;
 	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGCONT, &sa, NULL) < 0)
+	if (sigaction(SIGCONT, &sa, NULL) < 0) {
 		return -1;
+	}
 	sa.sa_handler = handlestopsignal;
 	sa.sa_flags = SA_RESTART;
-	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+	if (sigaction(SIGTSTP, &sa, NULL) < 0) {
 		return -1;
+	}
 
 	return 0;
 }
@@ -127,24 +144,31 @@
 	memset(&sa, 0, sizeof(sa));
 	sa.sa_handler = SIG_DFL;
 	sa.sa_flags = SA_RESTART;
-	if (sigemptyset(&sa.sa_mask) < 0)
+	if (sigemptyset(&sa.sa_mask) < 0) {
 		return -1;
+	}
 
-	if (sigaction(SIGHUP, &sa, NULL) < 0)
+	if (sigaction(SIGHUP, &sa, NULL) < 0) {
 		return -1;
-	if (sigaction(SIGTERM, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGTERM, &sa, NULL) < 0) {
 		return -1;
-	if (sigaction(SIGWINCH, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGWINCH, &sa, NULL) < 0) {
 		return -1;
-	if (sigaction(SIGCONT, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGCONT, &sa, NULL) < 0) {
 		return -1;
-	if (sigaction(SIGTSTP, &sa, NULL) < 0)
+	}
+	if (sigaction(SIGTSTP, &sa, NULL) < 0) {
 		return -1;
+	}
 
 	/* ignore Ctrl+C while shutting down to make pager exits cleanly */
 	sa.sa_handler = SIG_IGN;
-	if (sigaction(SIGINT, &sa, NULL) < 0)
+	if (sigaction(SIGINT, &sa, NULL) < 0) {
 		return -1;
+	}
 
 	peerpid = 0;
 	return 0;
--- a/tests/artifacts/scripts/generate-churning-bundle.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/artifacts/scripts/generate-churning-bundle.py	Fri Jan 25 18:55:23 2019 +0900
@@ -42,7 +42,6 @@
 FILENAME='SPARSE-REVLOG-TEST-FILE'
 NB_LINES = 10500
 ALWAYS_CHANGE_LINES = 500
-FILENAME = 'SPARSE-REVLOG-TEST-FILE'
 OTHER_CHANGES = 300
 
 def nextcontent(previous_content):
--- a/tests/notcapable	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/notcapable	Fri Jan 25 18:55:23 2019 +0900
@@ -11,7 +11,7 @@
     extensions.wrapfunction(repository.peer, 'capable', wrapcapable)
     extensions.wrapfunction(localrepo.localrepository, 'peer', wrappeer)
 def wrapcapable(orig, self, name, *args, **kwargs):
-    if name in '$CAP'.split(' '):
+    if name in b'$CAP'.split(b' '):
         return False
     return orig(self, name, *args, **kwargs)
 def wrappeer(orig, self):
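
The $CAP literal and its separator become byte strings because capability names are bytes inside Mercurial on Python 3; against a native-string list the membership test would quietly return False. A self-contained illustration (the capability names are placeholders):

    name = b'lookup'

    # bytes needle vs. str haystack: never matches on Python 3
    print(name in 'lookup bundle2'.split(' '))    # Python 3 -> False

    # bytes on both sides behaves as intended
    print(name in b'lookup bundle2'.split(b' '))  # -> True
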
--- a/tests/svnxml.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/svnxml.py	Fri Jan 25 18:55:23 2019 +0900
@@ -20,10 +20,10 @@
     if paths:
         paths = paths[0]
         for p in paths.getElementsByTagName('path'):
-            action = p.getAttribute('action')
-            path = xmltext(p)
-            frompath = p.getAttribute('copyfrom-path')
-            fromrev = p.getAttribute('copyfrom-rev')
+            action = p.getAttribute('action').encode('utf-8')
+            path = xmltext(p).encode('utf-8')
+            frompath = p.getAttribute('copyfrom-path').encode('utf-8')
+            fromrev = p.getAttribute('copyfrom-rev').encode('utf-8')
             e['paths'].append((path, action, frompath, fromrev))
     return e
 
@@ -43,11 +43,11 @@
         for k in ('revision', 'author', 'msg'):
             fp.write(('%s: %s\n' % (k, e[k])).encode('utf-8'))
         for path, action, fpath, frev in sorted(e['paths']):
-            frominfo = ''
+            frominfo = b''
             if frev:
-                frominfo = ' (from %s@%s)' % (fpath, frev)
-            p = ' %s %s%s\n' % (action, path, frominfo)
-            fp.write(p.encode('utf-8'))
+                frominfo = b' (from %s@%s)' % (fpath, frev)
+            p = b' %s %s%s\n' % (action, path, frominfo)
+            fp.write(p)
 
 if __name__ == '__main__':
     data = sys.stdin.read()
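
minidom hands attribute values back as unicode strings, so the hunk encodes them to UTF-8 bytes up front and then assembles each log line with bytes %-formatting (available since Python 3.5), writing the result straight to the binary stream. A standalone sketch of that pattern; the helper name and sample values are illustrative, not part of svnxml.py:

    import io

    def format_path_entry(action, path, frompath=None, fromrev=None):
        """Render one changed-path line as UTF-8 bytes."""
        action = action.encode('utf-8')
        path = path.encode('utf-8')
        frominfo = b''
        if fromrev:
            frominfo = b' (from %s@%s)' % (frompath.encode('utf-8'),
                                           fromrev.encode('utf-8'))
        return b' %s %s%s\n' % (action, path, frominfo)

    out = io.BytesIO()
    out.write(format_path_entry('A', 'trunk/file.txt'))
    out.write(format_path_entry('R', 'trunk/new.txt', 'trunk/old.txt', '12'))
    print(out.getvalue().decode('utf-8'))
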
--- a/tests/test-acl.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-acl.t	Fri Jan 25 18:55:23 2019 +0900
@@ -39,7 +39,7 @@
   >     try:
   >         return acl._getusersorig(ui, group)
   >     except:
-  >         return ["fred", "betty"]
+  >         return [b"fred", b"betty"]
   > acl._getusersorig = acl._getusers
   > acl._getusers = fakegetusers
   > EOF
--- a/tests/test-arbitraryfilectx.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-arbitraryfilectx.t	Fri Jan 25 18:55:23 2019 +0900
@@ -72,30 +72,30 @@
 These files are different and should return True (different):
 (Note that filecmp.cmp's return semantics are inverted from ours, so we invert
 for simplicity):
-  $ hg eval "context.arbitraryfilectx('A', repo).cmp(repo[None]['real_A'])"
+  $ hg eval "context.arbitraryfilectx(b'A', repo).cmp(repo[None][b'real_A'])"
   True (no-eol)
-  $ hg eval "not filecmp.cmp('A', 'real_A')"
+  $ hg eval "not filecmp.cmp(b'A', b'real_A')"
   True (no-eol)
 
 These files are identical and should return False (same):
-  $ hg eval "context.arbitraryfilectx('A', repo).cmp(repo[None]['A'])"
+  $ hg eval "context.arbitraryfilectx(b'A', repo).cmp(repo[None][b'A'])"
   False (no-eol)
-  $ hg eval "context.arbitraryfilectx('A', repo).cmp(repo[None]['B'])"
+  $ hg eval "context.arbitraryfilectx(b'A', repo).cmp(repo[None][b'B'])"
   False (no-eol)
-  $ hg eval "not filecmp.cmp('A', 'B')"
+  $ hg eval "not filecmp.cmp(b'A', b'B')"
   False (no-eol)
 
 This comparison should also return False, since A and sym_A are substantially
 the same in the eyes of ``filectx.cmp``, which looks at data only.
-  $ hg eval "context.arbitraryfilectx('real_A', repo).cmp(repo[None]['sym_A'])"
+  $ hg eval "context.arbitraryfilectx(b'real_A', repo).cmp(repo[None][b'sym_A'])"
   False (no-eol)
 
 A naive use of filecmp on those two would wrongly return True, since it follows
 the symlink to "A", which has different contents.
 #if symlink
-  $ hg eval "not filecmp.cmp('real_A', 'sym_A')"
+  $ hg eval "not filecmp.cmp(b'real_A', b'sym_A')"
   True (no-eol)
 #else
-  $ hg eval "not filecmp.cmp('real_A', 'sym_A')"
+  $ hg eval "not filecmp.cmp(b'real_A', b'sym_A')"
   False (no-eol)
 #endif
--- a/tests/test-archive.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-archive.t	Fri Jan 25 18:55:23 2019 +0900
@@ -187,7 +187,7 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=(1377|1461), sha1=(677b14d3d048778d5eb5552c14a67e6192068650|be6d3983aa13dfe930361b2569291cdedd02b537) (re)
+  body: size=(1377|1461|1489), sha1=(677b14d3d048778d5eb5552c14a67e6192068650|be6d3983aa13dfe930361b2569291cdedd02b537|1897e496871aa89ad685a92b936f5fa0d008b9e8) (re)
   % tar.gz and tar.bz2 disallowed should both give 403
   403 Archive type not allowed: gz
   content-type: text/html; charset=ascii
@@ -274,7 +274,7 @@
   server: testing stub value
   transfer-encoding: chunked
   
-  body: size=(1377|1461), sha1=(677b14d3d048778d5eb5552c14a67e6192068650|be6d3983aa13dfe930361b2569291cdedd02b537) (re)
+  body: size=(1377|1461|1489), sha1=(677b14d3d048778d5eb5552c14a67e6192068650|be6d3983aa13dfe930361b2569291cdedd02b537|1897e496871aa89ad685a92b936f5fa0d008b9e8) (re)
   % tar.gz and tar.bz2 disallowed should both give 403
   403 Archive type not allowed: gz
   content-type: text/html; charset=ascii
--- a/tests/test-batching.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-batching.py	Fri Jan 25 18:55:23 2019 +0900
@@ -11,25 +11,28 @@
 
 from mercurial import (
     localrepo,
+    pycompat,
     wireprotov1peer,
+)
 
-)
+def bprint(*bs):
+    print(*[pycompat.sysstr(b) for b in bs])
 
 # equivalent of repo.repository
 class thing(object):
     def hello(self):
-        return "Ready."
+        return b"Ready."
 
 # equivalent of localrepo.localrepository
 class localthing(thing):
     def foo(self, one, two=None):
         if one:
-            return "%s and %s" % (one, two,)
-        return "Nope"
+            return b"%s and %s" % (one, two,)
+        return b"Nope"
     def bar(self, b, a):
-        return "%s und %s" % (b, a,)
+        return b"%s und %s" % (b, a,)
     def greet(self, name=None):
-        return "Hello, %s" % name
+        return b"Hello, %s" % name
 
     @contextlib.contextmanager
     def commandexecutor(self):
@@ -43,27 +46,27 @@
 def use(it):
 
     # Direct call to base method shared between client and server.
-    print(it.hello())
+    bprint(it.hello())
 
     # Direct calls to proxied methods. They cause individual roundtrips.
-    print(it.foo("Un", two="Deux"))
-    print(it.bar("Eins", "Zwei"))
+    bprint(it.foo(b"Un", two=b"Deux"))
+    bprint(it.bar(b"Eins", b"Zwei"))
 
     # Batched call to a couple of proxied methods.
 
     with it.commandexecutor() as e:
-        ffoo = e.callcommand('foo', {'one': 'One', 'two': 'Two'})
-        fbar = e.callcommand('bar', {'b': 'Eins', 'a': 'Zwei'})
-        fbar2 = e.callcommand('bar', {'b': 'Uno', 'a': 'Due'})
+        ffoo = e.callcommand(b'foo', {b'one': b'One', b'two': b'Two'})
+        fbar = e.callcommand(b'bar', {b'b': b'Eins', b'a': b'Zwei'})
+        fbar2 = e.callcommand(b'bar', {b'b': b'Uno', b'a': b'Due'})
 
-    print(ffoo.result())
-    print(fbar.result())
-    print(fbar2.result())
+    bprint(ffoo.result())
+    bprint(fbar.result())
+    bprint(fbar2.result())
 
 # local usage
 mylocal = localthing()
 print()
-print("== Local")
+bprint(b"== Local")
 use(mylocal)
 
 # demo remoting; mimicks what wireproto and HTTP/SSH do
@@ -72,16 +75,16 @@
 
 def escapearg(plain):
     return (plain
-            .replace(':', '::')
-            .replace(',', ':,')
-            .replace(';', ':;')
-            .replace('=', ':='))
+            .replace(b':', b'::')
+            .replace(b',', b':,')
+            .replace(b';', b':;')
+            .replace(b'=', b':='))
 def unescapearg(escaped):
     return (escaped
-            .replace(':=', '=')
-            .replace(':;', ';')
-            .replace(':,', ',')
-            .replace('::', ':'))
+            .replace(b':=', b'=')
+            .replace(b':;', b';')
+            .replace(b':,', b',')
+            .replace(b'::', b':'))
 
 # server side
 
@@ -90,27 +93,28 @@
     def __init__(self, local):
         self.local = local
     def _call(self, name, args):
-        args = dict(arg.split('=', 1) for arg in args)
+        args = dict(arg.split(b'=', 1) for arg in args)
         return getattr(self, name)(**args)
     def perform(self, req):
-        print("REQ:", req)
-        name, args = req.split('?', 1)
-        args = args.split('&')
-        vals = dict(arg.split('=', 1) for arg in args)
-        res = getattr(self, name)(**vals)
-        print("  ->", res)
+        bprint(b"REQ:", req)
+        name, args = req.split(b'?', 1)
+        args = args.split(b'&')
+        vals = dict(arg.split(b'=', 1) for arg in args)
+        res = getattr(self, pycompat.sysstr(name))(**pycompat.strkwargs(vals))
+        bprint(b"  ->", res)
         return res
     def batch(self, cmds):
         res = []
-        for pair in cmds.split(';'):
-            name, args = pair.split(':', 1)
+        for pair in cmds.split(b';'):
+            name, args = pair.split(b':', 1)
             vals = {}
-            for a in args.split(','):
+            for a in args.split(b','):
                 if a:
-                    n, v = a.split('=')
+                    n, v = a.split(b'=')
                     vals[n] = unescapearg(v)
-            res.append(escapearg(getattr(self, name)(**vals)))
-        return ';'.join(res)
+            res.append(escapearg(getattr(self, pycompat.sysstr(name))(
+                **pycompat.strkwargs(vals))))
+        return b';'.join(res)
     def foo(self, one, two):
         return mangle(self.local.foo(unmangle(one), unmangle(two)))
     def bar(self, b, a):
@@ -124,25 +128,25 @@
 # equivalent of wireproto.encode/decodelist, that is, type-specific marshalling
 # here we just transform the strings a bit to check we're properly en-/decoding
 def mangle(s):
-    return ''.join(chr(ord(c) + 1) for c in s)
+    return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s))
 def unmangle(s):
-    return ''.join(chr(ord(c) - 1) for c in s)
+    return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s))
 
 # equivalent of wireproto.wirerepository and something like http's wire format
 class remotething(thing):
     def __init__(self, server):
         self.server = server
     def _submitone(self, name, args):
-        req = name + '?' + '&'.join(['%s=%s' % (n, v) for n, v in args])
+        req = name + b'?' + b'&'.join([b'%s=%s' % (n, v) for n, v in args])
         return self.server.perform(req)
     def _submitbatch(self, cmds):
         req = []
         for name, args in cmds:
-            args = ','.join(n + '=' + escapearg(v) for n, v in args)
-            req.append(name + ':' + args)
-        req = ';'.join(req)
-        res = self._submitone('batch', [('cmds', req,)])
-        for r in res.split(';'):
+            args = b','.join(n + b'=' + escapearg(v) for n, v in args)
+            req.append(name + b':' + args)
+        req = b';'.join(req)
+        res = self._submitone(b'batch', [(b'cmds', req,)])
+        for r in res.split(b';'):
             yield r
 
     @contextlib.contextmanager
@@ -155,7 +159,7 @@
 
     @wireprotov1peer.batchable
     def foo(self, one, two=None):
-        encargs = [('one', mangle(one),), ('two', mangle(two),)]
+        encargs = [(b'one', mangle(one),), (b'two', mangle(two),)]
         encresref = wireprotov1peer.future()
         yield encargs, encresref
         yield unmangle(encresref.value)
@@ -163,18 +167,18 @@
     @wireprotov1peer.batchable
     def bar(self, b, a):
         encresref = wireprotov1peer.future()
-        yield [('b', mangle(b),), ('a', mangle(a),)], encresref
+        yield [(b'b', mangle(b),), (b'a', mangle(a),)], encresref
         yield unmangle(encresref.value)
 
     # greet is coded directly. It therefore does not support batching. If it
     # does appear in a batch, the batch is split around greet, and the call to
     # greet is done in its own roundtrip.
     def greet(self, name=None):
-        return unmangle(self._submitone('greet', [('name', mangle(name),)]))
+        return unmangle(self._submitone(b'greet', [(b'name', mangle(name),)]))
 
 # demo remote usage
 
 myproxy = remotething(myserver)
 print()
-print("== Remote")
+bprint(b"== Remote")
 use(myproxy)
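
With the protocol strings byteified, printing them directly on Python 3 would show b'...' reprs, so output now goes through bprint(), which converts byte strings back to native strings via pycompat.sysstr. A standalone sketch of the same helper, using a local stand-in so the snippet does not depend on Mercurial:

    from __future__ import print_function
    import sys

    def sysstr(s):
        """Stand-in for pycompat.sysstr: bytes -> native str (identity on Python 2)."""
        if sys.version_info[0] >= 3 and isinstance(s, bytes):
            return s.decode('latin-1')
        return s

    def bprint(*bs):
        # Print byte strings without b'...' reprs leaking into the output.
        print(*[sysstr(b) for b in bs])

    bprint(b"== Local")                       # prints: == Local
    bprint(b"REQ:", b"foo?one=Pof&two=Uxp")   # prints: REQ: foo?one=Pof&two=Uxp
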
--- a/tests/test-bugzilla.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-bugzilla.t	Fri Jan 25 18:55:23 2019 +0900
@@ -3,7 +3,9 @@
   $ cat <<EOF > bzmock.py
   > from __future__ import absolute_import
   > from mercurial import extensions
+  > from mercurial import pycompat
   > from mercurial import registrar
+  > from mercurial.utils import stringutil
   > 
   > configtable = {}
   > configitem = registrar.configitem(configtable)
@@ -18,14 +20,17 @@
   >             super(bzmock, self).__init__(ui)
   >             self._logfile = ui.config(b'bugzilla', b'mocklog')
   >         def updatebug(self, bugid, newstate, text, committer):
-  >             with open(self._logfile, 'a') as f:
-  >                 f.write('update bugid=%r, newstate=%r, committer=%r\n'
-  >                         % (bugid, newstate, committer))
-  >                 f.write('----\n' + text + '\n----\n')
+  >             with open(pycompat.fsdecode(self._logfile), 'ab') as f:
+  >                 f.write(b'update bugid=%s, newstate=%s, committer=%s\n'
+  >                         % (stringutil.pprint(bugid),
+  >                            stringutil.pprint(newstate),
+  >                            stringutil.pprint(committer)))
+  >                 f.write(b'----\n' + text + b'\n----\n')
   >         def notify(self, bugs, committer):
-  >             with open(self._logfile, 'a') as f:
-  >                 f.write('notify bugs=%r, committer=%r\n'
-  >                         % (bugs, committer))
+  >             with open(pycompat.fsdecode(self._logfile), 'ab') as f:
+  >                 f.write(b'notify bugs=%s, committer=%s\n'
+  >                         % (stringutil.pprint(bugs),
+  >                            stringutil.pprint(committer)))
   >     bugzilla.bugzilla._versions[b'mock'] = bzmock
   > EOF
 
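
The mock now opens its log via pycompat.fsdecode (byte path to a native path that open() accepts on Python 3) and renders values with stringutil.pprint, which keeps the logged reprs stable across Python versions where a plain %r would differ. A rough standalone sketch with local stand-ins for those helpers (the real stringutil.pprint handles many more types):

    import os
    import sys

    def fsdecode(path):
        """Stand-in for pycompat.fsdecode: byte path -> native str path."""
        if sys.version_info[0] >= 3 and isinstance(path, bytes):
            return os.fsdecode(path)
        return path

    def bpprint(value):
        """Tiny stand-in for stringutil.pprint: a bytes rendering of a value."""
        if isinstance(value, bytes):
            return b"'" + value + b"'"
        return repr(value).encode('ascii')

    logfile = b'mocklog.txt'   # hypothetical log path
    with open(fsdecode(logfile), 'ab') as f:
        f.write(b'update bugid=%s, newstate=%s, committer=%s\n'
                % (bpprint(1234), bpprint(False), bpprint(b'test')))
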
--- a/tests/test-encoding-align.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-encoding-align.t	Fri Jan 25 18:55:23 2019 +0900
@@ -5,6 +5,7 @@
   $ hg init t
   $ cd t
   $ "$PYTHON" << EOF
+  > from mercurial import pycompat
   > # (byte, width) = (6, 4)
   > s = b"\xe7\x9f\xad\xe5\x90\x8d"
   > # (byte, width) = (7, 7): odd width is good for alignment test
@@ -21,14 +22,17 @@
   > command = registrar.command(cmdtable)
   > 
   > @command(b'showoptlist',
-  >     [('s', 'opt1', '', 'short width'  + ' %(s)s' * 8, '%(s)s'),
-  >     ('m', 'opt2', '', 'middle width' + ' %(m)s' * 8, '%(m)s'),
-  >     ('l', 'opt3', '', 'long width'   + ' %(l)s' * 8, '%(l)s')],
-  >     '')
+  >     [(b's', b'opt1', b'', b'short width'  + (b' ' +%(s)s) * 8, %(s)s),
+  >     (b'm', b'opt2', b'', b'middle width' + (b' ' + %(m)s) * 8, %(m)s),
+  >     (b'l', b'opt3', b'', b'long width'   + (b' ' + %(l)s) * 8, %(l)s)],
+  >     b'')
   > def showoptlist(ui, repo, *pats, **opts):
   >     '''dummy command to show option descriptions'''
   >     return 0
-  > """ % globals())
+  > """ % {b's': pycompat.byterepr(s),
+  >        b'm': pycompat.byterepr(m),
+  >        b'l': pycompat.byterepr(l),
+  >       })
   > f.close()
   > EOF
   $ S=`cat s`
--- a/tests/test-http-api.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-http-api.t	Fri Jan 25 18:55:23 2019 +0900
@@ -156,6 +156,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
+  s> setsockopt(6, 1, 1) -> None (py3 !)
   s>     GET /api HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
@@ -177,6 +178,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
+  s> setsockopt(6, 1, 1) -> None (py3 !)
   s>     GET /api/ HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
@@ -200,6 +202,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
+  s> setsockopt(6, 1, 1) -> None (py3 !)
   s>     GET /api/unknown HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
@@ -222,6 +225,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
+  s> setsockopt(6, 1, 1) -> None (py3 !)
   s>     GET /api/exp-http-v2-0003 HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
@@ -255,6 +259,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
+  s> setsockopt(6, 1, 1) -> None (py3 !)
   s>     GET /api HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
@@ -276,6 +281,7 @@
   >     user-agent: test
   > EOF
   using raw connection to peer
+  s> setsockopt(6, 1, 1) -> None (py3 !)
   s>     GET /api/ HTTP/1.1\r\n
   s>     Accept-Encoding: identity\r\n
   s>     user-agent: test\r\n
--- a/tests/test-install.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-install.t	Fri Jan 25 18:55:23 2019 +0900
@@ -161,6 +161,7 @@
   > import subprocess
   > import sys
   > import xml.etree.ElementTree as ET
+  > from mercurial import pycompat
   > 
   > # MSYS mangles the path if it expands $TESTDIR
   > testdir = os.environ['TESTDIR']
@@ -177,7 +178,7 @@
   >     files = node.findall('./{%(wix)s}Component/{%(wix)s}File' % ns)
   > 
   >     for f in files:
-  >         yield relpath + f.attrib['Name']
+  >         yield pycompat.sysbytes(relpath + f.attrib['Name'])
   > 
   > def hgdirectory(relpath):
   >     '''generator of tracked files, rooted at relpath'''
@@ -204,11 +205,11 @@
   > 
   > print('Not installed:')
   > for f in sorted(set(tracked) - set(installed)):
-  >     print('  %s' % f)
+  >     print('  %s' % pycompat.sysstr(f))
   > 
   > print('Not tracked:')
   > for f in sorted(set(installed) - set(tracked)):
-  >     print('  %s' % f)
+  >     print('  %s' % pycompat.sysstr(f))
   > EOF
 
   $ ( testrepohgenv; "$PYTHON" wixxml.py help )
--- a/tests/test-missing-capability.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-missing-capability.t	Fri Jan 25 18:55:23 2019 +0900
@@ -15,7 +15,7 @@
   > from mercurial import extensions, wireprotov1server
   > def wcapabilities(orig, *args, **kwargs):
   >   cap = orig(*args, **kwargs)
-  >   cap.remove('$1')
+  >   cap.remove(b'$1')
   >   return cap
   > extensions.wrapfunction(wireprotov1server, '_capabilities', wcapabilities)
   > EOF
--- a/tests/test-mq-eol.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-mq-eol.t	Fri Jan 25 18:55:23 2019 +0900
@@ -30,10 +30,14 @@
 
   $ cat > cateol.py <<EOF
   > import sys
+  > try:
+  >     stdout = sys.stdout.buffer
+  > except AttributeError:
+  >     stdout = sys.stdout
   > for line in open(sys.argv[1], 'rb'):
   >     line = line.replace(b'\r', b'<CR>')
   >     line = line.replace(b'\n', b'<LF>')
-  >     print(line)
+  >     stdout.write(line + b'\n')
   > EOF
 
   $ hg init repo
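
print(line) on a byte string shows its b'...' repr on Python 3, so the helper writes raw bytes through sys.stdout.buffer, falling back to sys.stdout on Python 2 where no buffer attribute exists. The same fallback in isolation:

    import sys

    # Binary stdout: sys.stdout.buffer on Python 3, sys.stdout itself on Python 2.
    try:
        stdout = sys.stdout.buffer
    except AttributeError:
        stdout = sys.stdout

    line = b'hello\r\n'
    line = line.replace(b'\r', b'<CR>')
    line = line.replace(b'\n', b'<LF>')
    stdout.write(line + b'\n')   # prints: hello<CR><LF>
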
--- a/tests/test-mq-missingfiles.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-mq-missingfiles.t	Fri Jan 25 18:55:23 2019 +0900
@@ -5,6 +5,10 @@
 
   $ cat > writelines.py <<EOF
   > import sys
+  > if sys.version_info[0] >= 3:
+  >     encode = lambda x: x.encode('utf-8').decode('unicode_escape').encode('utf-8')
+  > else:
+  >     encode = lambda x: x.decode('string_escape')
   > path = sys.argv[1]
   > args = sys.argv[2:]
   > assert (len(args) % 2) == 0
@@ -13,7 +17,7 @@
   > for i in range(len(args) // 2):
   >    count, s = args[2*i:2*i+2]
   >    count = int(count)
-  >    s = s.decode('string_escape')
+  >    s = encode(s)
   >    f.write(s*count)
   > f.close()
   > EOF
--- a/tests/test-mq-qimport.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-mq-qimport.t	Fri Jan 25 18:55:23 2019 +0900
@@ -1,5 +1,9 @@
   $ cat > writelines.py <<EOF
   > import sys
+  > if sys.version_info[0] >= 3:
+  >     encode = lambda x: x.encode('utf-8').decode('unicode_escape').encode('utf-8')
+  > else:
+  >     encode = lambda x: x.decode('string_escape')
   > path = sys.argv[1]
   > args = sys.argv[2:]
   > assert (len(args) % 2) == 0
@@ -8,7 +12,7 @@
   > for i in range(len(args)//2):
   >    count, s = args[2*i:2*i+2]
   >    count = int(count)
-  >    s = s.decode('string_escape')
+  >    s = encode(s)
   >    f.write(s*count)
   > f.close()
   > 
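
str.decode('string_escape') no longer exists on Python 3, so both writelines.py helpers interpret the backslash escapes in their arguments through unicode_escape instead, re-encoding to bytes around it. The shim in isolation; note that unicode_escape reads the intermediate text as Latin-1, which is adequate for the ASCII escape sequences these tests pass:

    import sys

    if sys.version_info[0] >= 3:
        # str argument -> bytes with backslash escapes interpreted
        encode = lambda x: x.encode('utf-8').decode('unicode_escape').encode('utf-8')
    else:
        encode = lambda x: x.decode('string_escape')

    assert encode(r'a\nb') == b'a\nb'
    assert encode(r'\t') == b'\t'
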
--- a/tests/test-mq-subrepo-svn.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-mq-subrepo-svn.t	Fri Jan 25 18:55:23 2019 +0900
@@ -23,10 +23,17 @@
   $ svnadmin create svn-repo-2499
 
   $ SVNREPOPATH=`pwd`/svn-repo-2499/project
+
+#if py3
+  $ pathquoted=`"$PYTHON" -c "import sys, urllib.parse; sys.stdout.write(urllib.parse.quote(sys.argv[1]))" "$SVNREPOPATH"`
+#else
+  $ pathquoted=`"$PYTHON" -c "import sys, urllib; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+#endif
+
 #if windows
-  $ SVNREPOURL=file:///`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file:///$pathquoted
 #else
-  $ SVNREPOURL=file://`"$PYTHON" -c "import urllib, sys; sys.stdout.write(urllib.quote(sys.argv[1]))" "$SVNREPOPATH"`
+  $ SVNREPOURL=file://$pathquoted
 #endif
 
   $ mkdir -p svn-project-2499/trunk
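
urllib.quote moved to urllib.parse.quote in Python 3, so the test percent-quotes the repository path once under the matching #if py3 / #else branch and splices the result into both URL forms. The same dispatch as a plain Python sketch (the path below is hypothetical):

    import sys

    if sys.version_info[0] >= 3:
        from urllib.parse import quote
    else:
        from urllib import quote

    path = '/tmp/svn repo-2499/project'
    sys.stdout.write('file://' + quote(path))
    # -> file:///tmp/svn%20repo-2499/project
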
--- a/tests/test-newcgi.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-newcgi.t	Fri Jan 25 18:55:23 2019 +0900
@@ -18,7 +18,7 @@
   > from mercurial.hgweb.request import wsgiapplication
   > 
   > def make_web_app():
-  >     return hgweb("test", "Empty test repository")
+  >     return hgweb(b"test", b"Empty test repository")
   > 
   > wsgicgi.launch(wsgiapplication(make_web_app))
   > HGWEB
@@ -44,7 +44,7 @@
   > from mercurial.hgweb.request import wsgiapplication
   > 
   > def make_web_app():
-  >     return hgwebdir("hgweb.config")
+  >     return hgwebdir(b"hgweb.config")
   > 
   > wsgicgi.launch(wsgiapplication(make_web_app))
   > HGWEBDIR
--- a/tests/test-remotefilelog-datapack.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-remotefilelog-datapack.py	Fri Jan 25 18:55:23 2019 +0900
@@ -79,11 +79,11 @@
         revisions = [(filename, node, nullid, content)]
         pack = self.createPack(revisions)
         if self.paramsavailable:
-            self.assertEquals(pack.params.fanoutprefix,
-                              basepack.SMALLFANOUTPREFIX)
+            self.assertEqual(pack.params.fanoutprefix,
+                             basepack.SMALLFANOUTPREFIX)
 
         chain = pack.getdeltachain(filename, node)
-        self.assertEquals(content, chain[0][4])
+        self.assertEqual(content, chain[0][4])
 
     def testAddSingle(self):
         self._testAddSingle('')
@@ -106,10 +106,10 @@
 
         for filename, node, base, content in revisions:
             entry = pack.getdelta(filename, node)
-            self.assertEquals((content, filename, base, {}), entry)
+            self.assertEqual((content, filename, base, {}), entry)
 
             chain = pack.getdeltachain(filename, node)
-            self.assertEquals(content, chain[0][4])
+            self.assertEqual(content, chain[0][4])
 
     def testAddDeltas(self):
         """Test putting multiple delta blobs into a pack and read the chain.
@@ -127,13 +127,13 @@
 
         entry = pack.getdelta(filename, revisions[0][1])
         realvalue = (revisions[0][3], filename, revisions[0][2], {})
-        self.assertEquals(entry, realvalue)
+        self.assertEqual(entry, realvalue)
 
         # Test that the chain for the final entry has all the others
         chain = pack.getdeltachain(filename, node)
         for i in range(10):
             content = "abcdef%s" % i
-            self.assertEquals(content, chain[-i - 1][4])
+            self.assertEqual(content, chain[-i - 1][4])
 
     def testPackMany(self):
         """Pack many related and unrelated objects.
@@ -162,7 +162,7 @@
             chain = pack.getdeltachain(filename, node)
             for entry in chain:
                 expectedcontent = blobs[(entry[0], entry[1], entry[3])]
-                self.assertEquals(entry[4], expectedcontent)
+                self.assertEqual(entry[4], expectedcontent)
 
     def testPackMetadata(self):
         revisions = []
@@ -181,7 +181,7 @@
             # flag == 0 should be optimized out
             if origmeta[constants.METAKEYFLAG] == 0:
                 del origmeta[constants.METAKEYFLAG]
-            self.assertEquals(parsedmeta, origmeta)
+            self.assertEqual(parsedmeta, origmeta)
 
     def testGetMissing(self):
         """Test the getmissing() api.
@@ -206,7 +206,7 @@
 
         fakenode = self.getFakeHash()
         missing = pack.getmissing([("foo", revisions[0][1]), ("foo", fakenode)])
-        self.assertEquals(missing, [("foo", fakenode)])
+        self.assertEqual(missing, [("foo", fakenode)])
 
     def testAddThrows(self):
         pack = self.createPack()
@@ -238,7 +238,7 @@
         revisions = [("filename", fakenode, self.getFakeHash(), "content")]
         pack = self.createPack(revisions)
         chain = pack.getdeltachain("filename", fakenode)
-        self.assertEquals(len(chain), 1)
+        self.assertEqual(len(chain), 1)
 
     def testLargePack(self):
         """Test creating and reading from a large pack with over X entries.
@@ -255,12 +255,12 @@
 
         pack = self.createPack(revisions)
         if self.paramsavailable:
-            self.assertEquals(pack.params.fanoutprefix,
-                              basepack.LARGEFANOUTPREFIX)
+            self.assertEqual(pack.params.fanoutprefix,
+                             basepack.LARGEFANOUTPREFIX)
 
         for (filename, node), content in blobs.iteritems():
             actualcontent = pack.getdeltachain(filename, node)[0][4]
-            self.assertEquals(actualcontent, content)
+            self.assertEqual(actualcontent, content)
 
     def testPacksCache(self):
         """Test that we remember the most recent packs while fetching the delta
@@ -300,12 +300,12 @@
             chain = store.getdeltachain(revision[0], revision[1])
 
             mostrecentpack = next(iter(store.packs), None)
-            self.assertEquals(
+            self.assertEqual(
                 mostrecentpack.getdeltachain(revision[0], revision[1]),
                 chain
             )
 
-            self.assertEquals(randomchain.index(revision) + 1, len(chain))
+            self.assertEqual(randomchain.index(revision) + 1, len(chain))
 
     # perf test off by default since it's slow
     def _testIndexPerf(self):
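
assertEquals is a long-deprecated alias of assertEqual and warns on current Pythons, hence the blanket rename throughout these tests. A minimal reminder of the canonical spelling:

    import unittest

    class ExampleTest(unittest.TestCase):
        def test_chain_length(self):
            chain = [('filename', b'\x00' * 20, None, b'content')]
            # assertEqual is the canonical name; assertEquals emits a
            # DeprecationWarning on modern Python.
            self.assertEqual(len(chain), 1)

    if __name__ == '__main__':
        unittest.main()
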
--- a/tests/test-remotefilelog-histpack.py	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-remotefilelog-histpack.py	Fri Jan 25 18:55:23 2019 +0900
@@ -52,7 +52,7 @@
         node, p1node, p2node, and linknode.
         """
         if revisions is None:
-            revisions = [("filename", self.getFakeHash(), nullid, nullid,
+            revisions = [(b"filename", self.getFakeHash(), nullid, nullid,
                           self.getFakeHash(), None)]
 
         packdir = pycompat.fsencode(self.makeTempDir())
@@ -68,7 +68,7 @@
     def testAddSingle(self):
         """Test putting a single entry into a pack and reading it out.
         """
-        filename = "foo"
+        filename = b"foo"
         node = self.getFakeHash()
         p1 = self.getFakeHash()
         p2 = self.getFakeHash()
@@ -78,9 +78,9 @@
         pack = self.createPack(revisions)
 
         actual = pack.getancestors(filename, node)[node]
-        self.assertEquals(p1, actual[0])
-        self.assertEquals(p2, actual[1])
-        self.assertEquals(linknode, actual[2])
+        self.assertEqual(p1, actual[0])
+        self.assertEqual(p2, actual[1])
+        self.assertEqual(linknode, actual[2])
 
     def testAddMultiple(self):
         """Test putting multiple unrelated revisions into a pack and reading
@@ -88,7 +88,7 @@
         """
         revisions = []
         for i in range(10):
-            filename = "foo-%s" % i
+            filename = b"foo-%d" % i
             node = self.getFakeHash()
             p1 = self.getFakeHash()
             p2 = self.getFakeHash()
@@ -99,10 +99,10 @@
 
         for filename, node, p1, p2, linknode, copyfrom in revisions:
             actual = pack.getancestors(filename, node)[node]
-            self.assertEquals(p1, actual[0])
-            self.assertEquals(p2, actual[1])
-            self.assertEquals(linknode, actual[2])
-            self.assertEquals(copyfrom, actual[3])
+            self.assertEqual(p1, actual[0])
+            self.assertEqual(p2, actual[1])
+            self.assertEqual(linknode, actual[2])
+            self.assertEqual(copyfrom, actual[3])
 
     def testAddAncestorChain(self):
         """Test putting multiple revisions in into a pack and read the ancestor
@@ -124,10 +124,10 @@
         ancestors = pack.getancestors(revisions[0][0], revisions[0][1])
         for filename, node, p1, p2, linknode, copyfrom in revisions:
             ap1, ap2, alinknode, acopyfrom = ancestors[node]
-            self.assertEquals(ap1, p1)
-            self.assertEquals(ap2, p2)
-            self.assertEquals(alinknode, linknode)
-            self.assertEquals(acopyfrom, copyfrom)
+            self.assertEqual(ap1, p1)
+            self.assertEqual(ap2, p2)
+            self.assertEqual(alinknode, linknode)
+            self.assertEqual(acopyfrom, copyfrom)
 
     def testPackMany(self):
         """Pack many related and unrelated ancestors.
@@ -163,14 +163,14 @@
         # Verify the pack contents
         for (filename, node), (p1, p2, lastnode) in allentries.items():
             ancestors = pack.getancestors(filename, node)
-            self.assertEquals(ancestorcounts[(filename, node)],
-                              len(ancestors))
+            self.assertEqual(ancestorcounts[(filename, node)],
+                             len(ancestors))
             for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.items():
                 ep1, ep2, elinknode = allentries[(filename, anode)]
-                self.assertEquals(ap1, ep1)
-                self.assertEquals(ap2, ep2)
-                self.assertEquals(alinknode, elinknode)
-                self.assertEquals(copyfrom, None)
+                self.assertEqual(ap1, ep1)
+                self.assertEqual(ap2, ep2)
+                self.assertEqual(alinknode, elinknode)
+                self.assertEqual(copyfrom, None)
 
     def testGetNodeInfo(self):
         revisions = []
@@ -186,10 +186,10 @@
         # Test that getnodeinfo returns the expected results
         for filename, node, p1, p2, linknode, copyfrom in revisions:
             ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node)
-            self.assertEquals(ap1, p1)
-            self.assertEquals(ap2, p2)
-            self.assertEquals(alinknode, linknode)
-            self.assertEquals(acopyfrom, copyfrom)
+            self.assertEqual(ap1, p1)
+            self.assertEqual(ap2, p2)
+            self.assertEqual(alinknode, linknode)
+            self.assertEqual(acopyfrom, copyfrom)
 
     def testGetMissing(self):
         """Test the getmissing() api.
@@ -215,11 +215,11 @@
         fakenode = self.getFakeHash()
         missing = pack.getmissing([(filename, revisions[0][1]),
                                    (filename, fakenode)])
-        self.assertEquals(missing, [(filename, fakenode)])
+        self.assertEqual(missing, [(filename, fakenode)])
 
         # Test getmissing on a non-existant filename
-        missing = pack.getmissing([("bar", fakenode)])
-        self.assertEquals(missing, [("bar", fakenode)])
+        missing = pack.getmissing([(b"bar", fakenode)])
+        self.assertEqual(missing, [(b"bar", fakenode)])
 
     def testAddThrows(self):
         pack = self.createPack()
@@ -232,12 +232,12 @@
 
     def testBadVersionThrows(self):
         pack = self.createPack()
-        path = pack.path + '.histpack'
-        with open(path) as f:
+        path = pack.path + b'.histpack'
+        with open(path, 'rb') as f:
             raw = f.read()
         raw = struct.pack('!B', 255) + raw[1:]
         os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
-        with open(path, 'w+') as f:
+        with open(path, 'wb+') as f:
             f.write(raw)
 
         try:
@@ -260,14 +260,14 @@
             revisions.append((filename, node, p1, p2, linknode, None))
 
         pack = self.createPack(revisions)
-        self.assertEquals(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX)
+        self.assertEqual(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX)
 
         for filename, node, p1, p2, linknode, copyfrom in revisions:
             actual = pack.getancestors(filename, node)[node]
-            self.assertEquals(p1, actual[0])
-            self.assertEquals(p2, actual[1])
-            self.assertEquals(linknode, actual[2])
-            self.assertEquals(copyfrom, actual[3])
+            self.assertEqual(p1, actual[0])
+            self.assertEqual(p2, actual[1])
+            self.assertEqual(linknode, actual[2])
+            self.assertEqual(copyfrom, actual[3])
 # TODO:
 # histpack store:
 # - repack two packs into one
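
testBadVersionThrows rewrites the first byte of a pack file, which is binary data; the explicit 'rb' and 'wb+' modes keep Python 3 from trying to decode it as text. The same read-patch-write cycle as a standalone sketch (the file name is made up):

    import struct

    # Write a tiny "pack" with version byte 1 followed by opaque payload.
    with open('example.histpack', 'wb+') as f:
        f.write(struct.pack('!B', 1) + b'payload')

    # Binary read, patch the version byte, binary write back.
    with open('example.histpack', 'rb') as f:
        raw = f.read()
    raw = struct.pack('!B', 255) + raw[1:]
    with open('example.histpack', 'wb+') as f:
        f.write(raw)
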
--- a/tests/test-unamend.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-unamend.t	Fri Jan 25 18:55:23 2019 +0900
@@ -232,6 +232,7 @@
 
   $ hg revert --all
   forgetting bar
+  $ rm bar
 
 Unamending in middle of a stack
 
@@ -302,7 +303,6 @@
 Testing whether unamend retains copies or not
 
   $ hg status
-  ? bar
 
   $ hg mv a foo
 
@@ -370,3 +370,42 @@
   diff --git a/c b/wat
   rename from c
   rename to wat
+  $ hg revert -qa
+  $ rm foobar wat
+
+Rename a->b, then amend b->c. After unamend, should look like b->c.
+
+  $ hg co -q 0
+  $ hg mv a b
+  $ hg ci -qm 'move to a b'
+  $ hg mv b c
+  $ hg amend
+  $ hg unamend
+  $ hg st --copies --change .
+  A b
+    a
+  R a
+  $ hg st --copies
+  A c
+    b
+  R b
+  $ hg revert -qa
+  $ rm c
+
+Rename a->b, then amend b->c, and working copy change c->d. After unamend, should look like b->d
+
+  $ hg co -q 0
+  $ hg mv a b
+  $ hg ci -qm 'move to a b'
+  $ hg mv b c
+  $ hg amend
+  $ hg mv c d
+  $ hg unamend
+  $ hg st --copies --change .
+  A b
+    a
+  R a
+  $ hg st --copies
+  A d
+    b
+  R b
--- a/tests/test-uncommit.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-uncommit.t	Fri Jan 25 18:55:23 2019 +0900
@@ -398,3 +398,43 @@
   |/
   o  0:ea4e33293d4d274a2ba73150733c2612231f398c a 1
   
+
+Rename a->b, then remove b in working copy. Result should remove a.
+
+  $ hg co -q 0
+  $ hg mv a b
+  $ hg ci -qm 'move a to b'
+  $ hg rm b
+  $ hg uncommit --config experimental.uncommitondirtywdir=True
+  $ hg st --copies
+  R a
+  $ hg revert a
+
+Rename a->b, then rename b->c in working copy. Result should rename a->c.
+
+  $ hg co -q 0
+  $ hg mv a b
+  $ hg ci -qm 'move a to b'
+  $ hg mv b c
+  $ hg uncommit --config experimental.uncommitondirtywdir=True
+  $ hg st --copies
+  A c
+    a
+  R a
+  $ hg revert a
+  $ hg forget c
+  $ rm c
+
+Copy a->b1 and a->b2, then rename b1->c in working copy. Result should copy a->b2 and a->c.
+
+  $ hg co -q 0
+  $ hg cp a b1
+  $ hg cp a b2
+  $ hg ci -qm 'move a to b1 and b2'
+  $ hg mv b1 c
+  $ hg uncommit --config experimental.uncommitondirtywdir=True
+  $ hg st --copies
+  A b2
+    a
+  A c
+    a
--- a/tests/test-update-atomic.t	Wed Jan 23 07:49:36 2019 -0500
+++ b/tests/test-update-atomic.t	Fri Jan 25 18:55:23 2019 +0900
@@ -10,7 +10,7 @@
   > 
   > for file_path in sys.argv[1:]:
   >     file_stat = os.stat(file_path)
-  >     octal_mode = oct(file_stat[ST_MODE] & 0o777)
+  >     octal_mode = oct(file_stat[ST_MODE] & 0o777).replace('o', '')
   >     print("%s:%s" % (file_path, octal_mode))
   > 
   > EOF
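
oct() formats differently across major versions: '0755' on Python 2 but '0o755' on Python 3. Stripping the 'o' makes the script's output identical on both. The normalization on its own:

    import os
    from stat import ST_MODE

    def portable_octal_mode(path):
        """Return permission bits as e.g. '0755' on both Python 2 and 3."""
        mode = os.stat(path)[ST_MODE] & 0o777
        # Python 2: oct(mode) -> '0755'; Python 3: '0o755'.
        return oct(mode).replace('o', '')

    print(portable_octal_mode('.'))
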
@@ -19,11 +19,15 @@
   $ cd repo
 
   $ cat > .hg/showwrites.py <<EOF
+  > from __future__ import print_function
+  > from mercurial import pycompat
+  > from mercurial.utils import stringutil
   > def uisetup(ui):
   >   from mercurial import vfs
   >   class newvfs(vfs.vfs):
   >     def __call__(self, *args, **kwargs):
-  >       print('vfs open', args, sorted(list(kwargs.items())))
+  >       print(pycompat.sysstr(stringutil.pprint(
+  >           ('vfs open', args, sorted(list(kwargs.items()))))))
   >       return super(newvfs, self).__call__(*args, **kwargs)
   >   vfs.vfs = newvfs
   > EOF