2009-03-26 05:55:24 +01:00
|
|
|
#include "cache.h"
|
2017-06-14 20:07:36 +02:00
|
|
|
#include "config.h"
|
2009-03-26 05:55:24 +01:00
|
|
|
#include "commit.h"
|
|
|
|
#include "diff.h"
|
|
|
|
#include "revision.h"
|
2009-03-26 05:55:54 +01:00
|
|
|
#include "refs.h"
|
|
|
|
#include "list-objects.h"
|
2009-03-26 05:55:59 +01:00
|
|
|
#include "quote.h"
|
2020-12-31 12:56:23 +01:00
|
|
|
#include "hash-lookup.h"
|
2009-04-19 11:56:07 +02:00
|
|
|
#include "run-command.h"
|
2009-05-28 23:21:16 +02:00
|
|
|
#include "log-tree.h"
|
2009-03-26 05:55:24 +01:00
|
|
|
#include "bisect.h"
|
2020-03-30 16:03:46 +02:00
|
|
|
#include "oid-array.h"
|
2020-07-28 22:23:39 +02:00
|
|
|
#include "strvec.h"
|
2018-05-19 07:28:25 +02:00
|
|
|
#include "commit-slab.h"
|
2018-07-20 18:33:04 +02:00
|
|
|
#include "commit-reach.h"
|
2018-09-02 09:42:50 +02:00
|
|
|
#include "object-store.h"
|
2020-08-07 23:58:37 +02:00
|
|
|
#include "dir.h"
|
2009-05-09 17:55:38 +02:00
|
|
|
|
2017-03-31 03:40:00 +02:00
|
|
|
static struct oid_array good_revs;
|
|
|
|
static struct oid_array skipped_revs;
|
2009-03-26 05:55:54 +01:00
|
|
|
|
2015-03-14 00:39:29 +01:00
|
|
|
static struct object_id *current_bad_oid;
|
2009-04-19 11:56:07 +02:00
|
|
|
|
|
|
|
static const char *argv_checkout[] = {"checkout", "-q", NULL, "--", NULL};
|
|
|
|
|
2015-06-29 17:40:29 +02:00
|
|
|
static const char *term_bad;
|
|
|
|
static const char *term_good;
|
|
|
|
|
2014-03-25 14:23:26 +01:00
|
|
|
/* Remember to update object flag allocation in object.h */
|
2009-03-26 05:55:24 +01:00
|
|
|
#define COUNTED (1u<<16)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is a truly stupid algorithm, but it's only
|
|
|
|
* used for bisection, and we just don't care enough.
|
|
|
|
*
|
|
|
|
* We care just barely enough to avoid recursing for
|
|
|
|
* non-merge entries.
|
|
|
|
*/
|
|
|
|
static int count_distance(struct commit_list *entry)
|
|
|
|
{
|
|
|
|
int nr = 0;
|
|
|
|
|
|
|
|
while (entry) {
|
|
|
|
struct commit *commit = entry->item;
|
|
|
|
struct commit_list *p;
|
|
|
|
|
|
|
|
if (commit->object.flags & (UNINTERESTING | COUNTED))
|
|
|
|
break;
|
|
|
|
if (!(commit->object.flags & TREESAME))
|
|
|
|
nr++;
|
|
|
|
commit->object.flags |= COUNTED;
|
|
|
|
p = commit->parents;
|
|
|
|
entry = p;
|
|
|
|
if (p) {
|
|
|
|
p = p->next;
|
|
|
|
while (p) {
|
|
|
|
nr += count_distance(p);
|
|
|
|
p = p->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void clear_distance(struct commit_list *list)
|
|
|
|
{
|
|
|
|
while (list) {
|
|
|
|
struct commit *commit = list->item;
|
|
|
|
commit->object.flags &= ~COUNTED;
|
|
|
|
list = list->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-19 07:28:25 +02:00
|
|
|
define_commit_slab(commit_weight, int *);
|
|
|
|
static struct commit_weight commit_weight;
|
|
|
|
|
2009-03-26 05:55:24 +01:00
|
|
|
#define DEBUG_BISECT 0
|
|
|
|
|
|
|
|
static inline int weight(struct commit_list *elem)
|
|
|
|
{
|
2018-05-19 07:28:25 +02:00
|
|
|
return **commit_weight_at(&commit_weight, elem->item);
|
2009-03-26 05:55:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void weight_set(struct commit_list *elem, int weight)
|
|
|
|
{
|
2018-05-19 07:28:25 +02:00
|
|
|
**commit_weight_at(&commit_weight, elem->item) = weight;
|
2009-03-26 05:55:24 +01:00
|
|
|
}
|
|
|
|
|
2020-08-07 23:58:38 +02:00
|
|
|
static int count_interesting_parents(struct commit *commit, unsigned bisect_flags)
|
2009-03-26 05:55:24 +01:00
|
|
|
{
|
|
|
|
struct commit_list *p;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
for (count = 0, p = commit->parents; p; p = p->next) {
|
2020-08-07 23:58:35 +02:00
|
|
|
if (!(p->item->object.flags & UNINTERESTING))
|
|
|
|
count++;
|
2020-08-07 23:58:38 +02:00
|
|
|
if (bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY)
|
2020-08-07 23:58:35 +02:00
|
|
|
break;
|
2009-03-26 05:55:24 +01:00
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
bisect: loosen halfway() check for a large number of commits
'git bisect start ...' and subsequent 'git bisect (good|bad)' commands
can take quite a while when the given/remaining revision range between
good and bad commits is big and contains a lot of merge commits, e.g.
in git.git:
$ git rev-list --count v1.6.0..v2.28.0
44284
$ time git bisect start v2.28.0 v1.6.0
Bisecting: 22141 revisions left to test after this (roughly 15 steps)
[e197c21807dacadc8305250baa0b9228819189d4] unable_to_lock_die(): rename function from unable_to_lock_index_die()
real 0m15.472s
user 0m15.220s
sys 0m0.255s
The majority of the runtime is spent in do_find_bisection(), where we
try to find a commit as close as possible to the halfway point between
the bad and good revisions, i.e. a commit from which the number of
reachable commits that are in the good-bad range is half the total
number of commits in that range. So we count how many commits are
reachable in the good-bad range for each commit in that range, which
is quick and easy for a linear history, even over 300k commits in a
linear range are handled in ~0.3s on my machine. Alas, handling merge
commits is non-trivial and quite expensive as the algorithm used seems
to be quadratic, causing the long runtime shown above.
Interestingly, look at what a big difference one additional commit
can make:
$ git rev-list --count v1.6.0^..v2.28.0
44285
$ time git bisect start v2.28.0 v1.6.0^
Bisecting: 22142 revisions left to test after this (roughly 15 steps)
[565301e41670825ceedf75220f2918ae76831240] Sync with 2.1.2
real 0m5.848s
user 0m5.600s
sys 0m0.252s
The difference is caused by one of the optimizations attempting to cut
down the runtime added in 1c4fea3a40 (git-rev-list --bisect:
optimization, 2007-03-21):
Another small optimization is whenever we find a half-way commit
(that is, a commit that can reach exactly half of the commits),
we stop giving counts to remaining commits, as we will not find
any better commit than we just found.
In this second 'git bisect start' command we happen to find a commit
exactly at the halfway point and can return early, but in the first
case there is no such commit, so we can't return early and end up
counting the number of reachable commits from all commits in the
good-bad range.
However, when we have thousands of commits it's not all that important
to find the _exact_ halfway point, a few commits more or less doesn't
make any real difference for the bisection.
So let's loosen the check in the halfway() helper to consider commits
within about 0.1% of the exact halfway point as halfway as well, and
rename the function to approx_halfway() accordingly. This will allow
us to return early on a bigger good-bad range, even when there is no
commit exactly at the halfway point, thereby reducing the runtime of
the first command above considerably, from ~15s to 4.901s.
Furthermore, even if there is a commit exactly at the halfway point,
we might still stumble upon a commit within that 0.1% range before
finding the exact halfway point, allowing us to return a bit earlier,
slightly reducing the runtime of the second command from 5.848s to
5.058s. Note that this change doesn't affect good-bad ranges
containing ~2000 commits or less, because that 0.1% tolerance becomes
zero due to integer arithmetic; however, if the range is that small
then counting the reachable commits for all commits is already fast
enough anyway.
Naturally, this will likely change which commits get picked at each
bisection step, and, in turn, might change how many bisection steps
are necessary to find the first bad commit. If the number of
necessary bisection steps were to increase often, then this change
could backfire, because building and testing at each step might take
much longer than the time spared. OTOH, if the number of steps were
to decrease, then it would be a double win.
So I ran some tests to see how often that happens: picked random good
and bad starting revisions at least 50k commits apart and a random
first bad commit in between in git.git, and used 'git bisect run git
merge-base --is-ancestor HEAD $first_bad_commit' to check the number
of necessary bisection steps. After repeating all this 1000 times
both with and without this patch I found that:
- 146 cases needed one more bisection step than before, 149 cases
needed one less step, while in the remaining 705 cases the number
of steps didn't change. So the number of bisection steps does
indeed change in a non-negligible number of cases, but it seems
that the average number of steps doesn't change in the long run.
- The first 'git bisect start' command got over 3x faster in 456
cases, so this "no commit at the exact halfway point" case seems
to be common enough to care about.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-11-12 17:19:38 +01:00
|
|
|
static inline int approx_halfway(struct commit_list *p, int nr)
|
2009-03-26 05:55:24 +01:00
|
|
|
{
|
bisect: loosen halfway() check for a large number of commits
'git bisect start ...' and subsequent 'git bisect (good|bad)' commands
can take quite a while when the given/remaining revision range between
good and bad commits is big and contains a lot of merge commits, e.g.
in git.git:
$ git rev-list --count v1.6.0..v2.28.0
44284
$ time git bisect start v2.28.0 v1.6.0
Bisecting: 22141 revisions left to test after this (roughly 15 steps)
[e197c21807dacadc8305250baa0b9228819189d4] unable_to_lock_die(): rename function from unable_to_lock_index_die()
real 0m15.472s
user 0m15.220s
sys 0m0.255s
The majority of the runtime is spent in do_find_bisection(), where we
try to find a commit as close as possible to the halfway point between
the bad and good revisions, i.e. a commit from which the number of
reachable commits that are in the good-bad range is half the total
number of commits in that range. So we count how many commits are
reachable in the good-bad range for each commit in that range, which
is quick and easy for a linear history, even over 300k commits in a
linear range are handled in ~0.3s on my machine. Alas, handling merge
commits is non-trivial and quite expensive as the algorithm used seems
to be quadratic, causing the long runtime shown above.
Interestingly, look at what a big difference one additional commit
can make:
$ git rev-list --count v1.6.0^..v2.28.0
44285
$ time git bisect start v2.28.0 v1.6.0^
Bisecting: 22142 revisions left to test after this (roughly 15 steps)
[565301e41670825ceedf75220f2918ae76831240] Sync with 2.1.2
real 0m5.848s
user 0m5.600s
sys 0m0.252s
The difference is caused by one of the optimizations attempting to cut
down the runtime added in 1c4fea3a40 (git-rev-list --bisect:
optimization, 2007-03-21):
Another small optimization is whenever we find a half-way commit
(that is, a commit that can reach exactly half of the commits),
we stop giving counts to remaining commits, as we will not find
any better commit than we just found.
In this second 'git bisect start' command we happen to find a commit
exactly at the halfway point and can return early, but in the first
case there is no such commit, so we can't return early and end up
counting the number of reachable commits from all commits in the
good-bad range.
However, when we have thousands of commits it's not all that important
to find the _exact_ halfway point, a few commits more or less doesn't
make any real difference for the bisection.
So let's loosen the check in the halfway() helper to consider commits
within about 0.1% of the exact halfway point as halfway as well, and
rename the function to approx_halfway() accordingly. This will allow
us to return early on a bigger good-bad range, even when there is no
commit exactly at the halfway point, thereby reducing the runtime of
the first command above considerably, from ~15s to 4.901s.
Furthermore, even if there is a commit exactly at the halfway point,
we might still stumble upon a commit within that 0.1% range before
finding the exact halfway point, allowing us to return a bit earlier,
slightly reducing the runtime of the second command from 5.848s to
5.058s. Note that this change doesn't affect good-bad ranges
containing ~2000 commits or less, because that 0.1% tolerance becomes
zero due to integer arithmetic; however, if the range is that small
then counting the reachable commits for all commits is already fast
enough anyway.
Naturally, this will likely change which commits get picked at each
bisection step, and, in turn, might change how many bisection steps
are necessary to find the first bad commit. If the number of
necessary bisection steps were to increase often, then this change
could backfire, because building and testing at each step might take
much longer than the time spared. OTOH, if the number of steps were
to decrease, then it would be a double win.
So I ran some tests to see how often that happens: picked random good
and bad starting revisions at least 50k commits apart and a random
first bad commit in between in git.git, and used 'git bisect run git
merge-base --is-ancestor HEAD $first_bad_commit' to check the number
of necessary bisection steps. After repeating all this 1000 times
both with and without this patch I found that:
- 146 cases needed one more bisection step than before, 149 cases
needed one less step, while in the remaining 705 cases the number
of steps didn't change. So the number of bisection steps does
indeed change in a non-negligible number of cases, but it seems
that the average number of steps doesn't change in the long run.
- The first 'git bisect start' command got over 3x faster in 456
cases, so this "no commit at the exact halfway point" case seems
to be common enough to care about.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-11-12 17:19:38 +01:00
|
|
|
int diff;
|
|
|
|
|
2009-03-26 05:55:24 +01:00
|
|
|
/*
|
|
|
|
* Don't short-cut something we are not going to return!
|
|
|
|
*/
|
|
|
|
if (p->item->object.flags & TREESAME)
|
|
|
|
return 0;
|
|
|
|
if (DEBUG_BISECT)
|
|
|
|
return 0;
|
|
|
|
/*
|
bisect: loosen halfway() check for a large number of commits
'git bisect start ...' and subsequent 'git bisect (good|bad)' commands
can take quite a while when the given/remaining revision range between
good and bad commits is big and contains a lot of merge commits, e.g.
in git.git:
$ git rev-list --count v1.6.0..v2.28.0
44284
$ time git bisect start v2.28.0 v1.6.0
Bisecting: 22141 revisions left to test after this (roughly 15 steps)
[e197c21807dacadc8305250baa0b9228819189d4] unable_to_lock_die(): rename function from unable_to_lock_index_die()
real 0m15.472s
user 0m15.220s
sys 0m0.255s
The majority of the runtime is spent in do_find_bisection(), where we
try to find a commit as close as possible to the halfway point between
the bad and good revisions, i.e. a commit from which the number of
reachable commits that are in the good-bad range is half the total
number of commits in that range. So we count how many commits are
reachable in the good-bad range for each commit in that range, which
is quick and easy for a linear history, even over 300k commits in a
linear range are handled in ~0.3s on my machine. Alas, handling merge
commits is non-trivial and quite expensive as the algorithm used seems
to be quadratic, causing the long runtime shown above.
Interestingly, look at what a big difference one additional commit
can make:
$ git rev-list --count v1.6.0^..v2.28.0
44285
$ time git bisect start v2.28.0 v1.6.0^
Bisecting: 22142 revisions left to test after this (roughly 15 steps)
[565301e41670825ceedf75220f2918ae76831240] Sync with 2.1.2
real 0m5.848s
user 0m5.600s
sys 0m0.252s
The difference is caused by one of the optimizations attempting to cut
down the runtime added in 1c4fea3a40 (git-rev-list --bisect:
optimization, 2007-03-21):
Another small optimization is whenever we find a half-way commit
(that is, a commit that can reach exactly half of the commits),
we stop giving counts to remaining commits, as we will not find
any better commit than we just found.
In this second 'git bisect start' command we happen to find a commit
exactly at the halfway point and can return early, but in the first
case there is no such commit, so we can't return early and end up
counting the number of reachable commits from all commits in the
good-bad range.
However, when we have thousands of commits it's not all that important
to find the _exact_ halfway point, a few commits more or less doesn't
make any real difference for the bisection.
So let's loosen the check in the halfway() helper to consider commits
within about 0.1% of the exact halfway point as halfway as well, and
rename the function to approx_halfway() accordingly. This will allow
us to return early on a bigger good-bad range, even when there is no
commit exactly at the halfway point, thereby reducing the runtime of
the first command above considerably, from ~15s to 4.901s.
Furthermore, even if there is a commit exactly at the halfway point,
we might still stumble upon a commit within that 0.1% range before
finding the exact halfway point, allowing us to return a bit earlier,
slightly reducing the runtime of the second command from 5.848s to
5.058s. Note that this change doesn't affect good-bad ranges
containing ~2000 commits or less, because that 0.1% tolerance becomes
zero due to integer arithmetic; however, if the range is that small
then counting the reachable commits for all commits is already fast
enough anyway.
Naturally, this will likely change which commits get picked at each
bisection step, and, in turn, might change how many bisection steps
are necessary to find the first bad commit. If the number of
necessary bisection steps were to increase often, then this change
could backfire, because building and testing at each step might take
much longer than the time spared. OTOH, if the number of steps were
to decrease, then it would be a double win.
So I ran some tests to see how often that happens: picked random good
and bad starting revisions at least 50k commits apart and a random
first bad commit in between in git.git, and used 'git bisect run git
merge-base --is-ancestor HEAD $first_bad_commit' to check the number
of necessary bisection steps. After repeating all this 1000 times
both with and without this patch I found that:
- 146 cases needed one more bisection step than before, 149 cases
needed one less step, while in the remaining 705 cases the number
of steps didn't change. So the number of bisection steps does
indeed change in a non-negligible number of cases, but it seems
that the average number of steps doesn't change in the long run.
- The first 'git bisect start' command got over 3x faster in 456
cases, so this "no commit at the exact halfway point" case seems
to be common enough to care about.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-11-12 17:19:38 +01:00
|
|
|
* For small number of commits 2 and 3 are halfway of 5, and
|
2009-03-26 05:55:24 +01:00
|
|
|
* 3 is halfway of 6 but 2 and 4 are not.
|
|
|
|
*/
|
bisect: loosen halfway() check for a large number of commits
'git bisect start ...' and subsequent 'git bisect (good|bad)' commands
can take quite a while when the given/remaining revision range between
good and bad commits is big and contains a lot of merge commits, e.g.
in git.git:
$ git rev-list --count v1.6.0..v2.28.0
44284
$ time git bisect start v2.28.0 v1.6.0
Bisecting: 22141 revisions left to test after this (roughly 15 steps)
[e197c21807dacadc8305250baa0b9228819189d4] unable_to_lock_die(): rename function from unable_to_lock_index_die()
real 0m15.472s
user 0m15.220s
sys 0m0.255s
The majority of the runtime is spent in do_find_bisection(), where we
try to find a commit as close as possible to the halfway point between
the bad and good revisions, i.e. a commit from which the number of
reachable commits that are in the good-bad range is half the total
number of commits in that range. So we count how many commits are
reachable in the good-bad range for each commit in that range, which
is quick and easy for a linear history, even over 300k commits in a
linear range are handled in ~0.3s on my machine. Alas, handling merge
commits is non-trivial and quite expensive as the algorithm used seems
to be quadratic, causing the long runtime shown above.
Interestingly, look at what a big difference one additional commit
can make:
$ git rev-list --count v1.6.0^..v2.28.0
44285
$ time git bisect start v2.28.0 v1.6.0^
Bisecting: 22142 revisions left to test after this (roughly 15 steps)
[565301e41670825ceedf75220f2918ae76831240] Sync with 2.1.2
real 0m5.848s
user 0m5.600s
sys 0m0.252s
The difference is caused by one of the optimizations attempting to cut
down the runtime added in 1c4fea3a40 (git-rev-list --bisect:
optimization, 2007-03-21):
Another small optimization is whenever we find a half-way commit
(that is, a commit that can reach exactly half of the commits),
we stop giving counts to remaining commits, as we will not find
any better commit than we just found.
In this second 'git bisect start' command we happen to find a commit
exactly at the halfway point and can return early, but in the first
case there is no such commit, so we can't return early and end up
counting the number of reachable commits from all commits in the
good-bad range.
However, when we have thousands of commits it's not all that important
to find the _exact_ halfway point, a few commits more or less doesn't
make any real difference for the bisection.
So let's loosen the check in the halfway() helper to consider commits
within about 0.1% of the exact halfway point as halfway as well, and
rename the function to approx_halfway() accordingly. This will allow
us to return early on a bigger good-bad range, even when there is no
commit exactly at the halfway point, thereby reducing the runtime of
the first command above considerably, from ~15s to 4.901s.
Furthermore, even if there is a commit exactly at the halfway point,
we might still stumble upon a commit within that 0.1% range before
finding the exact halfway point, allowing us to return a bit earlier,
slightly reducing the runtime of the second command from 5.848s to
5.058s. Note that this change doesn't affect good-bad ranges
containing ~2000 commits or less, because that 0.1% tolerance becomes
zero due to integer arithmetic; however, if the range is that small
then counting the reachable commits for all commits is already fast
enough anyway.
Naturally, this will likely change which commits get picked at each
bisection step, and, in turn, might change how many bisection steps
are necessary to find the first bad commit. If the number of
necessary bisection steps were to increase often, then this change
could backfire, because building and testing at each step might take
much longer than the time spared. OTOH, if the number of steps were
to decrease, then it would be a double win.
So I ran some tests to see how often that happens: picked random good
and bad starting revisions at least 50k commits apart and a random
first bad commit in between in git.git, and used 'git bisect run git
merge-base --is-ancestor HEAD $first_bad_commit' to check the number
of necessary bisection steps. After repeating all this 1000 times
both with and without this patch I found that:
- 146 cases needed one more bisection step than before, 149 cases
needed one less step, while in the remaining 705 cases the number
of steps didn't change. So the number of bisection steps does
indeed change in a non-negligible number of cases, but it seems
that the average number of steps doesn't change in the long run.
- The first 'git bisect start' command got over 3x faster in 456
cases, so this "no commit at the exact halfway point" case seems
to be common enough to care about.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-11-12 17:19:38 +01:00
|
|
|
diff = 2 * weight(p) - nr;
|
|
|
|
switch (diff) {
|
2009-03-26 05:55:24 +01:00
|
|
|
case -1: case 0: case 1:
|
|
|
|
return 1;
|
|
|
|
default:
|
bisect: loosen halfway() check for a large number of commits
'git bisect start ...' and subsequent 'git bisect (good|bad)' commands
can take quite a while when the given/remaining revision range between
good and bad commits is big and contains a lot of merge commits, e.g.
in git.git:
$ git rev-list --count v1.6.0..v2.28.0
44284
$ time git bisect start v2.28.0 v1.6.0
Bisecting: 22141 revisions left to test after this (roughly 15 steps)
[e197c21807dacadc8305250baa0b9228819189d4] unable_to_lock_die(): rename function from unable_to_lock_index_die()
real 0m15.472s
user 0m15.220s
sys 0m0.255s
The majority of the runtime is spent in do_find_bisection(), where we
try to find a commit as close as possible to the halfway point between
the bad and good revisions, i.e. a commit from which the number of
reachable commits that are in the good-bad range is half the total
number of commits in that range. So we count how many commits are
reachable in the good-bad range for each commit in that range, which
is quick and easy for a linear history, even over 300k commits in a
linear range are handled in ~0.3s on my machine. Alas, handling merge
commits is non-trivial and quite expensive as the algorithm used seems
to be quadratic, causing the long runtime shown above.
Interestingly, look at what a big difference one additional commit
can make:
$ git rev-list --count v1.6.0^..v2.28.0
44285
$ time git bisect start v2.28.0 v1.6.0^
Bisecting: 22142 revisions left to test after this (roughly 15 steps)
[565301e41670825ceedf75220f2918ae76831240] Sync with 2.1.2
real 0m5.848s
user 0m5.600s
sys 0m0.252s
The difference is caused by one of the optimizations attempting to cut
down the runtime added in 1c4fea3a40 (git-rev-list --bisect:
optimization, 2007-03-21):
Another small optimization is whenever we find a half-way commit
(that is, a commit that can reach exactly half of the commits),
we stop giving counts to remaining commits, as we will not find
any better commit than we just found.
In this second 'git bisect start' command we happen to find a commit
exactly at the halfway point and can return early, but in the first
case there is no such commit, so we can't return early and end up
counting the number of reachable commits from all commits in the
good-bad range.
However, when we have thousands of commits it's not all that important
to find the _exact_ halfway point, a few commits more or less doesn't
make any real difference for the bisection.
So let's loosen the check in the halfway() helper to consider commits
within about 0.1% of the exact halfway point as halfway as well, and
rename the function to approx_halfway() accordingly. This will allow
us to return early on a bigger good-bad range, even when there is no
commit exactly at the halfway point, thereby reducing the runtime of
the first command above considerably, from ~15s to 4.901s.
Furthermore, even if there is a commit exactly at the halfway point,
we might still stumble upon a commit within that 0.1% range before
finding the exact halfway point, allowing us to return a bit earlier,
slightly reducing the runtime of the second command from 5.848s to
5.058s. Note that this change doesn't affect good-bad ranges
containing ~2000 commits or less, because that 0.1% tolerance becomes
zero due to integer arithmetic; however, if the range is that small
then counting the reachable commits for all commits is already fast
enough anyway.
Naturally, this will likely change which commits get picked at each
bisection step, and, in turn, might change how many bisection steps
are necessary to find the first bad commit. If the number of
necessary bisection steps were to increase often, then this change
could backfire, because building and testing at each step might take
much longer than the time spared. OTOH, if the number of steps were
to decrease, then it would be a double win.
So I ran some tests to see how often that happens: picked random good
and bad starting revisions at least 50k commits apart and a random
first bad commit in between in git.git, and used 'git bisect run git
merge-base --is-ancestor HEAD $first_bad_commit' to check the number
of necessary bisection steps. After repeating all this 1000 times
both with and without this patch I found that:
- 146 cases needed one more bisection step than before, 149 cases
needed one less step, while in the remaining 705 cases the number
of steps didn't change. So the number of bisection steps does
indeed change in a non-negligible number of cases, but it seems
that the average number of steps doesn't change in the long run.
- The first 'git bisect start' command got over 3x faster in 456
cases, so this "no commit at the exact halfway point" case seems
to be common enough to care about.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-11-12 17:19:38 +01:00
|
|
|
/*
|
|
|
|
* For large number of commits we are not so strict, it's
|
|
|
|
* good enough if it's within ~0.1% of the halfway point,
|
|
|
|
* e.g. 5000 is exactly halfway of 10000, but we consider
|
|
|
|
* the values [4996, 5004] as halfway as well.
|
|
|
|
*/
|
|
|
|
if (abs(diff) < nr / 1024)
|
|
|
|
return 1;
|
2009-03-26 05:55:24 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void show_list(const char *debug, int counted, int nr,
|
|
|
|
struct commit_list *list)
|
|
|
|
{
|
|
|
|
struct commit_list *p;
|
|
|
|
|
2018-09-02 09:42:50 +02:00
|
|
|
if (!DEBUG_BISECT)
|
|
|
|
return;
|
|
|
|
|
2009-03-26 05:55:24 +01:00
|
|
|
fprintf(stderr, "%s (%d/%d)\n", debug, counted, nr);
|
|
|
|
|
|
|
|
for (p = list; p; p = p->next) {
|
|
|
|
struct commit_list *pp;
|
|
|
|
struct commit *commit = p->item;
|
2020-08-07 23:58:38 +02:00
|
|
|
unsigned commit_flags = commit->object.flags;
|
2009-03-26 05:55:24 +01:00
|
|
|
enum object_type type;
|
|
|
|
unsigned long size;
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
char *buf = read_object_file(&commit->object.oid, &type,
|
|
|
|
&size);
|
2010-07-22 15:18:33 +02:00
|
|
|
const char *subject_start;
|
|
|
|
int subject_len;
|
2009-03-26 05:55:24 +01:00
|
|
|
|
|
|
|
fprintf(stderr, "%c%c%c ",
|
2020-08-07 23:58:38 +02:00
|
|
|
(commit_flags & TREESAME) ? ' ' : 'T',
|
|
|
|
(commit_flags & UNINTERESTING) ? 'U' : ' ',
|
|
|
|
(commit_flags & COUNTED) ? 'C' : ' ');
|
2018-09-02 09:42:50 +02:00
|
|
|
if (*commit_weight_at(&commit_weight, p->item))
|
2009-03-26 05:55:24 +01:00
|
|
|
fprintf(stderr, "%3d", weight(p));
|
|
|
|
else
|
|
|
|
fprintf(stderr, "---");
|
2018-03-25 12:57:36 +02:00
|
|
|
fprintf(stderr, " %.*s", 8, oid_to_hex(&commit->object.oid));
|
2009-03-26 05:55:24 +01:00
|
|
|
for (pp = commit->parents; pp; pp = pp->next)
|
|
|
|
fprintf(stderr, " %.*s", 8,
|
2018-03-25 12:57:36 +02:00
|
|
|
oid_to_hex(&pp->item->object.oid));
|
2009-03-26 05:55:24 +01:00
|
|
|
|
2010-07-22 15:18:33 +02:00
|
|
|
subject_len = find_commit_subject(buf, &subject_start);
|
|
|
|
if (subject_len)
|
|
|
|
fprintf(stderr, " %.*s", subject_len, subject_start);
|
2009-03-26 05:55:24 +01:00
|
|
|
fprintf(stderr, "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct commit_list *best_bisection(struct commit_list *list, int nr)
|
|
|
|
{
|
|
|
|
struct commit_list *p, *best;
|
|
|
|
int best_distance = -1;
|
|
|
|
|
|
|
|
best = list;
|
|
|
|
for (p = list; p; p = p->next) {
|
|
|
|
int distance;
|
2020-08-07 23:58:38 +02:00
|
|
|
unsigned commit_flags = p->item->object.flags;
|
2009-03-26 05:55:24 +01:00
|
|
|
|
2020-08-07 23:58:38 +02:00
|
|
|
if (commit_flags & TREESAME)
|
2009-03-26 05:55:24 +01:00
|
|
|
continue;
|
|
|
|
distance = weight(p);
|
|
|
|
if (nr - distance < distance)
|
|
|
|
distance = nr - distance;
|
|
|
|
if (distance > best_distance) {
|
|
|
|
best = p;
|
|
|
|
best_distance = distance;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return best;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct commit_dist {
|
|
|
|
struct commit *commit;
|
|
|
|
int distance;
|
|
|
|
};
|
|
|
|
|
|
|
|
static int compare_commit_dist(const void *a_, const void *b_)
|
|
|
|
{
|
|
|
|
struct commit_dist *a, *b;
|
|
|
|
|
|
|
|
a = (struct commit_dist *)a_;
|
|
|
|
b = (struct commit_dist *)b_;
|
|
|
|
if (a->distance != b->distance)
|
|
|
|
return b->distance - a->distance; /* desc sort */
|
2015-11-10 03:22:28 +01:00
|
|
|
return oidcmp(&a->commit->object.oid, &b->commit->object.oid);
|
2009-03-26 05:55:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Rewrite "list" so it holds every non-TREESAME candidate sorted by
 * descending bisection distance (see compare_commit_dist()), annotating
 * each commit with a "dist=<n>" name decoration.  Called from
 * do_find_bisection() when FIND_BISECTION_ALL is set in bisect_flags.
 */
static struct commit_list *best_bisection_sorted(struct commit_list *list, int nr)
{
	struct commit_list *p;
	struct commit_dist *array = xcalloc(nr, sizeof(*array));
	struct strbuf buf = STRBUF_INIT;
	int cnt, i;

	/* Collect candidates; TREESAME commits are never tested. */
	for (p = list, cnt = 0; p; p = p->next) {
		int distance;
		unsigned commit_flags = p->item->object.flags;

		if (commit_flags & TREESAME)
			continue;
		distance = weight(p);
		/* Fold the weight so it measures distance from either end. */
		if (nr - distance < distance)
			distance = nr - distance;
		array[cnt].commit = p->item;
		array[cnt].distance = distance;
		cnt++;
	}
	QSORT(array, cnt, compare_commit_dist);
	/* Overwrite the first cnt list nodes with the sorted candidates. */
	for (p = list, i = 0; i < cnt; i++) {
		struct object *obj = &(array[i].commit->object);

		strbuf_reset(&buf);
		strbuf_addf(&buf, "dist=%d", array[i].distance);
		add_name_decoration(DECORATION_NONE, buf.buf, obj);

		p->item = array[i].commit;
		/*
		 * Do not advance past the last rewritten node, so that
		 * p->next below is the (freeable) remainder of the list.
		 */
		if (i < cnt - 1)
			p = p->next;
	}
	/* Drop and free any leftover nodes beyond the cnt we kept. */
	if (p) {
		free_commit_list(p->next);
		p->next = NULL;
	}
	strbuf_release(&buf);
	free(array);
	return list;
}
|
|
|
|
|
|
|
|
/*
 * zero or positive weight is the number of interesting commits it can
 * reach, including itself.  Especially, weight = 0 means it does not
 * reach any tree-changing commits (e.g. just above uninteresting one
 * but traversal is with pathspec).
 *
 * weight = -1 means it has one parent and its distance is yet to
 * be computed.
 *
 * weight = -2 means it has more than one parent and its distance is
 * unknown.  After running count_distance() first, they will get zero
 * or positive distance.
 */
static struct commit_list *do_find_bisection(struct commit_list *list,
					     int nr, int *weights,
					     unsigned bisect_flags)
{
	int n, counted;
	struct commit_list *p;

	counted = 0;

	/*
	 * Attach one slot of "weights" to each commit and seed the
	 * weights according to the parent count (see comment above).
	 */
	for (n = 0, p = list; p; p = p->next) {
		struct commit *commit = p->item;
		unsigned commit_flags = commit->object.flags;

		*commit_weight_at(&commit_weight, p->item) = &weights[n++];
		switch (count_interesting_parents(commit, bisect_flags)) {
		case 0:
			if (!(commit_flags & TREESAME)) {
				weight_set(p, 1);
				counted++;
				show_list("bisection 2 count one",
					  counted, nr, list);
			}
			/*
			 * otherwise, it is known not to reach any
			 * tree-changing commit and gets weight 0.
			 */
			break;
		case 1:
			weight_set(p, -1);
			break;
		default:
			weight_set(p, -2);
			break;
		}
	}

	show_list("bisection 2 initialize", counted, nr, list);

	/*
	 * If you have only one parent in the resulting set
	 * then you can reach one commit more than that parent
	 * can reach.  So we do not have to run the expensive
	 * count_distance() for single strand of pearls.
	 *
	 * However, if you have more than one parents, you cannot
	 * just add their distance and one for yourself, since
	 * they usually reach the same ancestor and you would
	 * end up counting them twice that way.
	 *
	 * So we will first count distance of merges the usual
	 * way, and then fill the blanks using cheaper algorithm.
	 */
	for (p = list; p; p = p->next) {
		if (p->item->object.flags & UNINTERESTING)
			continue;
		if (weight(p) != -2)
			continue;
		/* In first-parent mode no commit has more than one parent. */
		if (bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY)
			BUG("shouldn't be calling count-distance in fp mode");
		weight_set(p, count_distance(p));
		clear_distance(list);

		/* Does it happen to be at half-way? */
		if (!(bisect_flags & FIND_BISECTION_ALL) &&
		      approx_halfway(p, nr))
			return p;
		counted++;
	}

	show_list("bisection 2 count_distance", counted, nr, list);

	/*
	 * Propagate weights along single-parent chains until every
	 * commit has a known (non-negative) weight.
	 */
	while (counted < nr) {
		for (p = list; p; p = p->next) {
			struct commit_list *q;
			unsigned commit_flags = p->item->object.flags;

			if (0 <= weight(p))
				continue;

			/* Find a parent whose weight is already known. */
			for (q = p->item->parents;
			     q;
			     q = bisect_flags & FIND_BISECTION_FIRST_PARENT_ONLY ? NULL : q->next) {
				if (q->item->object.flags & UNINTERESTING)
					continue;
				if (0 <= weight(q))
					break;
			}
			if (!q)
				continue;

			/*
			 * weight for p is unknown but q is known.
			 * add one for p itself if p is to be counted,
			 * otherwise inherit it from q directly.
			 */
			if (!(commit_flags & TREESAME)) {
				weight_set(p, weight(q)+1);
				counted++;
				show_list("bisection 2 count one",
					  counted, nr, list);
			}
			else
				weight_set(p, weight(q));

			/* Does it happen to be at half-way? */
			if (!(bisect_flags & FIND_BISECTION_ALL) &&
			      approx_halfway(p, nr))
				return p;
		}
	}

	show_list("bisection 2 counted all", counted, nr, list);

	if (!(bisect_flags & FIND_BISECTION_ALL))
		return best_bisection(list, nr);
	else
		return best_bisection_sorted(list, nr);
}
|
|
|
|
|
2017-11-05 21:24:28 +01:00
|
|
|
/*
 * Find the best commit(s) to test next.
 *
 * On input, *commit_list holds the candidate commits.  On output it is
 * replaced by the result: a single best commit (or NULL if there was
 * none), or — when FIND_BISECTION_ALL is set in bisect_flags — the full
 * sorted candidate list from best_bisection_sorted().  UNINTERESTING
 * list nodes are freed along the way.
 *
 * *all is set to the number of tree-changing (non-TREESAME) candidates;
 * *reaches is set to the weight of the chosen commit, when one is found.
 */
void find_bisection(struct commit_list **commit_list, int *reaches,
		    int *all, unsigned bisect_flags)
{
	int nr, on_list;
	struct commit_list *list, *p, *best, *next, *last;
	int *weights;

	show_list("bisection 2 entry", 0, 0, *commit_list);
	init_commit_weight(&commit_weight);

	/*
	 * Count the number of total and tree-changing items on the
	 * list, while reversing the list.
	 */
	for (nr = on_list = 0, last = NULL, p = *commit_list;
	     p;
	     p = next) {
		unsigned commit_flags = p->item->object.flags;

		next = p->next;
		if (commit_flags & UNINTERESTING) {
			/* Drop this node entirely (free only the node). */
			free(p);
			continue;
		}
		p->next = last;
		last = p;
		if (!(commit_flags & TREESAME))
			nr++;
		on_list++;
	}
	list = last;
	show_list("bisection 2 sorted", 0, nr, list);

	*all = nr;
	CALLOC_ARRAY(weights, on_list);

	/* Do the real work of finding bisection commit. */
	best = do_find_bisection(list, nr, weights, bisect_flags);
	if (best) {
		if (!(bisect_flags & FIND_BISECTION_ALL)) {
			/* Collapse the list to a single node holding best. */
			list->item = best->item;
			free_commit_list(list->next);
			best = list;
			best->next = NULL;
		}
		*reaches = weight(best);
	}
	free(weights);
	*commit_list = best;
	clear_commit_weight(&commit_weight);
}
|
|
|
|
|
2015-05-25 20:38:31 +02:00
|
|
|
static int register_ref(const char *refname, const struct object_id *oid,
|
2009-03-26 05:55:54 +01:00
|
|
|
int flags, void *cb_data)
|
|
|
|
{
|
2015-06-29 17:40:29 +02:00
|
|
|
struct strbuf good_prefix = STRBUF_INIT;
|
|
|
|
strbuf_addstr(&good_prefix, term_good);
|
|
|
|
strbuf_addstr(&good_prefix, "-");
|
|
|
|
|
|
|
|
if (!strcmp(refname, term_bad)) {
|
2015-03-14 00:39:29 +01:00
|
|
|
current_bad_oid = xmalloc(sizeof(*current_bad_oid));
|
2015-05-25 20:38:31 +02:00
|
|
|
oidcpy(current_bad_oid, oid);
|
2015-06-29 17:40:29 +02:00
|
|
|
} else if (starts_with(refname, good_prefix.buf)) {
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_append(&good_revs, oid);
|
2013-11-30 21:55:40 +01:00
|
|
|
} else if (starts_with(refname, "skip-")) {
|
2017-03-31 03:40:00 +02:00
|
|
|
oid_array_append(&skipped_revs, oid);
|
2009-03-26 05:55:54 +01:00
|
|
|
}
|
|
|
|
|
2015-06-29 17:40:29 +02:00
|
|
|
strbuf_release(&good_prefix);
|
|
|
|
|
2009-03-26 05:55:54 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Load all "refs/bisect/*" refs (bad, good-*, skip-*) into the module
 * globals via register_ref().  Returns what for_each_ref_in() returns.
 */
static int read_bisect_refs(void)
{
	return for_each_ref_in("refs/bisect/", register_ref, NULL);
}
|
|
|
|
|
memoize common git-path "constant" files
One of the most common uses of git_path() is to pass a
constant, like git_path("MERGE_MSG"). This has two
drawbacks:
1. The return value is a static buffer, and the lifetime
is dependent on other calls to git_path, etc.
2. There's no compile-time checking of the pathname. This
is OK for a one-off (after all, we have to spell it
correctly at least once), but many of these constant
strings appear throughout the code.
This patch introduces a series of functions to "memoize"
these strings, which are essentially globals for the
lifetime of the program. We compute the value once, take
ownership of the buffer, and return the cached value for
subsequent calls. cache.h provides a helper macro for
defining these functions as one-liners, and defines a few
common ones for global use.
Using a macro is a little bit gross, but it does nicely
document the purpose of the functions. If we need to touch
them all later (e.g., because we learned how to change the
git_dir variable at runtime, and need to invalidate all of
the stored values), it will be much easier to have the
complete list.
Note that the shared-global functions have separate, manual
declarations. We could do something clever with the macros
(e.g., expand it to a declaration in some places, and a
declaration _and_ a definition in path.c). But there aren't
that many, and it's probably better to stay away from
too-magical macros.
Likewise, if we abandon the C preprocessor in favor of
generating these with a script, we could get much fancier.
E.g., normalizing "FOO/BAR-BAZ" into "git_path_foo_bar_baz".
But the small amount of saved typing is probably not worth
the resulting confusion to readers who want to grep for the
function's definition.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-08-10 11:38:57 +02:00
|
|
|
/* Memoized git-path helpers for the bisect state files. */
static GIT_PATH_FUNC(git_path_bisect_names, "BISECT_NAMES")
static GIT_PATH_FUNC(git_path_bisect_expected_rev, "BISECT_EXPECTED_REV")
static GIT_PATH_FUNC(git_path_bisect_ancestors_ok, "BISECT_ANCESTORS_OK")
static GIT_PATH_FUNC(git_path_bisect_run, "BISECT_RUN")
static GIT_PATH_FUNC(git_path_bisect_start, "BISECT_START")
static GIT_PATH_FUNC(git_path_bisect_log, "BISECT_LOG")
static GIT_PATH_FUNC(git_path_bisect_terms, "BISECT_TERMS")
static GIT_PATH_FUNC(git_path_bisect_first_parent, "BISECT_FIRST_PARENT")
static GIT_PATH_FUNC(git_path_head_name, "head-name")
|
memoize common git-path "constant" files
One of the most common uses of git_path() is to pass a
constant, like git_path("MERGE_MSG"). This has two
drawbacks:
1. The return value is a static buffer, and the lifetime
is dependent on other calls to git_path, etc.
2. There's no compile-time checking of the pathname. This
is OK for a one-off (after all, we have to spell it
correctly at least once), but many of these constant
strings appear throughout the code.
This patch introduces a series of functions to "memoize"
these strings, which are essentially globals for the
lifetime of the program. We compute the value once, take
ownership of the buffer, and return the cached value for
subsequent calls. cache.h provides a helper macro for
defining these functions as one-liners, and defines a few
common ones for global use.
Using a macro is a little bit gross, but it does nicely
document the purpose of the functions. If we need to touch
them all later (e.g., because we learned how to change the
git_dir variable at runtime, and need to invalidate all of
the stored values), it will be much easier to have the
complete list.
Note that the shared-global functions have separate, manual
declarations. We could do something clever with the macros
(e.g., expand it to a declaration in some places, and a
declaration _and_ a definition in path.c). But there aren't
that many, and it's probably better to stay away from
too-magical macros.
Likewise, if we abandon the C preprocessor in favor of
generating these with a script, we could get much fancier.
E.g., normalizing "FOO/BAR-BAZ" into "git_path_foo_bar_baz".
But the small amount of saved typing is probably not worth
the resulting confusion to readers who want to grep for the
function's definition.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-08-10 11:38:57 +02:00
|
|
|
|
2020-07-28 22:24:53 +02:00
|
|
|
/*
 * Read the BISECT_NAMES file line by line, sq-dequoting each line and
 * appending the resulting words to "array".  Dies if the file cannot
 * be opened or if a line is badly quoted.
 */
static void read_bisect_paths(struct strvec *array)
{
	struct strbuf str = STRBUF_INIT;
	const char *filename = git_path_bisect_names();
	FILE *fp = xfopen(filename, "r");

	while (strbuf_getline_lf(&str, fp) != EOF) {
		strbuf_trim(&str);
		if (sq_dequote_to_strvec(str.buf, array))
			die(_("Badly quoted content in file '%s': %s"),
			    filename, str.buf);
	}

	strbuf_release(&str);
	fclose(fp);
}
|
|
|
|
|
2020-03-30 16:04:06 +02:00
|
|
|
static char *join_oid_array_hex(struct oid_array *array, char delim)
|
2009-05-09 17:55:45 +02:00
|
|
|
{
|
|
|
|
struct strbuf joined_hexs = STRBUF_INIT;
|
|
|
|
int i;
|
|
|
|
|
2011-05-19 23:34:33 +02:00
|
|
|
for (i = 0; i < array->nr; i++) {
|
2017-03-26 18:01:37 +02:00
|
|
|
strbuf_addstr(&joined_hexs, oid_to_hex(array->oid + i));
|
2011-05-19 23:34:33 +02:00
|
|
|
if (i + 1 < array->nr)
|
2009-05-09 17:55:45 +02:00
|
|
|
strbuf_addch(&joined_hexs, delim);
|
|
|
|
}
|
|
|
|
|
|
|
|
return strbuf_detach(&joined_hexs, NULL);
|
|
|
|
}
|
|
|
|
|
2009-06-06 06:41:33 +02:00
|
|
|
/*
 * Split "list" into commits that were marked to be skipped (appended to
 * *tried) and the rest (returned).  If skipped_revs is empty the input
 * list is returned untouched.  When show_all is false, the function may
 * return early with the unfiltered tail at the first non-skipped commit
 * (shortcut), subject to the skipped_first protocol below.  If count is
 * not NULL it receives the number of non-skipped commits kept.
 *
 * In this function, passing a not NULL skipped_first is very special.
 * It means that we want to know if the first commit in the list is
 * skipped because we will want to test a commit away from it if it is
 * indeed skipped.
 * So if the first commit is skipped, we cannot take the shortcut to
 * just "return list" when we find the first non skipped commit, we
 * have to return a fully filtered list.
 *
 * We use (*skipped_first == -1) to mean "it has been found that the
 * first commit is not skipped". In this case *skipped_first is set back
 * to 0 just before the function returns.
 */
struct commit_list *filter_skipped(struct commit_list *list,
				   struct commit_list **tried,
				   int show_all,
				   int *count,
				   int *skipped_first)
{
	struct commit_list *filtered = NULL, **f = &filtered;

	*tried = NULL;

	if (skipped_first)
		*skipped_first = 0;
	if (count)
		*count = 0;

	/* Nothing was skipped: the list needs no filtering at all. */
	if (!skipped_revs.nr)
		return list;

	while (list) {
		struct commit_list *next = list->next;
		list->next = NULL;
		if (0 <= oid_array_lookup(&skipped_revs, &list->item->object.oid)) {
			if (skipped_first && !*skipped_first)
				*skipped_first = 1;
			/* Move current to tried list */
			*tried = list;
			tried = &list->next;
		} else {
			if (!show_all) {
				/* Shortcut: only valid if head wasn't skipped. */
				if (!skipped_first || !*skipped_first)
					return list;
			} else if (skipped_first && !*skipped_first) {
				/* This means we know it's not skipped */
				*skipped_first = -1;
			}
			/* Move current to filtered list */
			*f = list;
			f = &list->next;
			if (count)
				(*count)++;
		}
		list = next;
	}

	/* Normalize the internal -1 sentinel back to "not skipped" (0). */
	if (skipped_first && *skipped_first == -1)
		*skipped_first = 0;

	return filtered;
}
|
2009-03-26 05:55:54 +01:00
|
|
|
|
2009-06-13 07:21:06 +02:00
|
|
|
#define PRN_MODULO 32768
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is a pseudo random number generator based on "man 3 rand".
|
|
|
|
* It is not used properly because the seed is the argument and it
|
|
|
|
* is increased by one between each call, but that should not matter
|
|
|
|
* for this application.
|
|
|
|
*/
|
2018-12-09 11:25:21 +01:00
|
|
|
static unsigned get_prn(unsigned count)
|
|
|
|
{
|
2009-06-13 07:21:06 +02:00
|
|
|
count = count * 1103515245 + 12345;
|
2013-04-03 21:17:55 +02:00
|
|
|
return (count/65536) % PRN_MODULO;
|
2009-06-13 07:21:06 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Custom integer square root from
 * https://en.wikipedia.org/wiki/Integer_square_root
 *
 * Newton's method on floats, iterated until two successive estimates
 * differ by less than 0.5; the final estimate is truncated to int.
 */
static int sqrti(int val)
{
	float cur = val;
	float delta;

	if (!val)
		return 0;

	do {
		float next = (cur + (float)val / cur) / 2;
		delta = (next > cur) ? next - cur : cur - next;
		cur = next;
	} while (delta >= 0.5);

	return (int)cur;
}
|
|
|
|
|
|
|
|
static struct commit_list *skip_away(struct commit_list *list, int count)
|
2009-06-06 06:41:34 +02:00
|
|
|
{
|
|
|
|
struct commit_list *cur, *previous;
|
|