Fix memory leak when using hg commands over http repositories
When running hg commands against an http repository in a long-running process,
an httphandler instance is leaked for each command, because of a reference cycle
handler.parent -> OpenerDirector and OpenerDirector.handlers -> handler that
Python's garbage collector will not collect while the handler defines a __del__
method. Discussion on #mercurial concluded that removing the __del__ method
solves the problem.
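
The leaked pair can be reproduced outside Mercurial. Below is a minimal sketch,
assuming hypothetical Opener/Handler classes that stand in for urllib2's
OpenerDirector and the httphandler, and relying on CPython 2's rule that
unreachable cycles containing objects with __del__ are moved to gc.garbage
instead of being freed:

    # Hypothetical names; not the real httphandler/OpenerDirector code.
    import gc

    class Opener(object):
        def __init__(self):
            self.handlers = []

    class Handler(object):
        def __init__(self, parent):
            self.parent = parent          # handler.parent -> opener
            parent.handlers.append(self)  # opener.handlers -> handler (the cycle)
        def __del__(self):                # __del__ makes the cycle uncollectable on Python 2
            pass

    def run_command():
        opener = Opener()
        Handler(opener)

    run_command()
    gc.collect()
    print gc.garbage  # the Handler lingers here and keeps the Opener alive;
                      # with no __del__, gc.collect() frees the whole cycle

Python 3.4 (PEP 442) later made such cycles collectable even with __del__, but
the affected code ran on Python 2, where dropping __del__ is the fix.
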
#!/bin/sh
# Test that qpush cleans things up if it doesn't complete
echo "[extensions]" >> $HGRCPATH
echo "mq=" >> $HGRCPATH
hg init repo
cd repo
echo foo > foo
hg ci -Am 'add foo'
touch untracked-file
echo 'syntax: glob' > .hgignore
echo '.hgignore' >> .hgignore
hg qinit
echo '% test qpush on empty series'
hg qpush
hg qnew patch1
echo >> foo
hg qrefresh -m 'patch 1'
hg qnew patch2
echo bar > bar
hg add bar
hg qrefresh -m 'patch 2'
hg qnew --config 'mq.plain=true' bad-patch
echo >> foo
hg qrefresh
hg qpop -a
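# prepend a non-ASCII (latin-1) byte to bad-patch's message so qpush aborts
# while applying it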
python -c 'print "\xe9"' > message
cat .hg/patches/bad-patch >> message
mv message .hg/patches/bad-patch
hg qpush -a && echo 'qpush succeeded?!'
hg parents
echo '% bar should be gone; other unknown/ignored files should still be around'
hg status -A
echo '% preparing qpush of a missing patch'
hg qpop -a
hg qpush
rm .hg/patches/patch2
echo '% now we expect the push to fail, but it should NOT complain about patch1'
hg qpush
echo '% preparing qpush of missing patch with no patch applied'
hg qpop -a
rm .hg/patches/patch1
echo '% qpush should fail the same way as above'
hg qpush
true # happy ending