#
#
# patch "ChangeLog"
# from [c043bd85a2e9679b923704de459b832585dff231]
# to [e130fcd32c009c5f7ad8b31d5736aed3e9f67c09]
#
# patch "enumerator.cc"
# from [31a5efd62fdaaada910d5b15b7fe31648a0ad17a]
# to [9cd3afa7ef779cb7b0c53a053a9567b57b59f660]
#
# patch "enumerator.hh"
# from [b2662b4dea76a8d832663ed48342920248449f70]
# to [c0e45bd4447e10c0cbf3ca1e3ed54ef91f23c6e5]
#
============================================================
--- ChangeLog c043bd85a2e9679b923704de459b832585dff231
+++ ChangeLog e130fcd32c009c5f7ad8b31d5736aed3e9f67c09
@@ -1,3 +1,10 @@
+2006-02-23 Matt Johnston
+
+ * enumerator.{cc,hh}: avoid transferring deltas on both sides of merge
+ revisions, and prefer deltas to data when both are available.
+ See
+ https://savannah.nongnu.org/bugs/?func=detailitem&item_id=15846
+
2006-02-21 Nathaniel Smith
* work.cc (detach_node): This time for sure!
============================================================
--- enumerator.cc 31a5efd62fdaaada910d5b15b7fe31648a0ad17a
+++ enumerator.cc 9cd3afa7ef779cb7b0c53a053a9567b57b59f660
@@ -75,6 +75,89 @@
return revs.empty() && items.empty();
}
+void
+revision_enumerator::files_for_revision(revision_id const & r,
+ set<file_id> & full_files,
+ set<pair<file_id, file_id> > & del_files)
+{
+ // when we're sending a merge, we have to be careful if we
+ // want to send as little data as possible. see bug #15846
+ //
+ // njs's solution: "when sending the files for a revision,
+ // look at both csets. If a given hash is not listed as new
+ // in _both_ csets, throw it out. Now, for everything left
+ // over, if one side says "add" and the other says "delta",
+ // do a delta. If both sides say "add", do a data."
+
+ set<file_id> file_adds;
+ // map<dst, src>. src is arbitrary.
+ map<file_id, file_id> file_deltas;
+ map<file_id, size_t> file_edge_counts;
+
+ revision_set rs;
+ MM(rs);
+ app.db.get_revision(r, rs);
+
+ for (edge_map::const_iterator i = rs.edges.begin();
+ i != rs.edges.end(); ++i)
+ {
+ set<file_id> file_dsts;
+ cset const & cs = edge_changes(i);
+
+ // Queue up all the file-adds
+ for (map<split_path, file_id>::const_iterator fa = cs.files_added.begin();
+ fa != cs.files_added.end(); ++fa)
+ {
+ file_adds.insert(fa->second);
+ file_dsts.insert(fa->second);
+ }
+
+ // Queue up all the file-deltas
+ for (map<split_path, pair<file_id, file_id> >::const_iterator fd
+ = cs.deltas_applied.begin();
+ fd != cs.deltas_applied.end(); ++fd)
+ {
+ file_deltas[fd->second.second] = fd->second.first;
+ file_dsts.insert(fd->second.second);
+ }
+
+ // we don't want to be counting files twice in a single edge
+ for (set<file_id>::const_iterator i = file_dsts.begin();
+ i != file_dsts.end(); i++)
+ file_edge_counts[*i]++;
+ }
+
+ del_files.clear();
+ full_files.clear();
+ size_t num_edges = rs.edges.size();
+
+ for (map<file_id, size_t>::const_iterator i = file_edge_counts.begin();
+ i != file_edge_counts.end(); i++)
+ {
+ MM(i->first);
+ if (i->second < num_edges)
+ continue;
+
+ // first preference is to send as a delta...
+ map<file_id, file_id>::const_iterator fd = file_deltas.find(i->first);
+ if (fd != file_deltas.end())
+ {
+ del_files.insert(make_pair(fd->second, fd->first));
+ continue;
+ }
+
+ // ... otherwise as a full file.
+ set<file_id>::const_iterator f = file_adds.find(i->first);
+ if (f != file_adds.end())
+ {
+ full_files.insert(*f);
+ continue;
+ }
+
+ I(false);
+ }
+}
+
void
revision_enumerator::step()
{
@@ -118,42 +201,38 @@
L(FL("revision_enumerator::step expanding "
"contents of rev '%d'\n") % r);
- revision_set rs;
- app.db.get_revision(r, rs);
- for (edge_map::const_iterator i = rs.edges.begin();
- i != rs.edges.end(); ++i)
- {
- cset const & cs = edge_changes(i);
-
- // Queue up all the file-adds
- for (map<split_path, file_id>::const_iterator fa = cs.files_added.begin();
- fa != cs.files_added.end(); ++fa)
- {
- if (cb.queue_this_file(fa->second.inner()))
- {
- enumerator_item item;
- item.tag = enumerator_item::fdata;
- item.ident_a = fa->second.inner();
- items.push_back(item);
- }
- }
-
- // Queue up all the file-deltas
- for (map<split_path, pair<file_id, file_id> >::const_iterator fd
- = cs.deltas_applied.begin();
- fd != cs.deltas_applied.end(); ++fd)
- {
- if (cb.queue_this_file(fd->second.second.inner()))
- {
- enumerator_item item;
- item.tag = enumerator_item::fdelta;
- item.ident_a = fd->second.first.inner();
- item.ident_b = fd->second.second.inner();
- items.push_back(item);
- }
- }
- }
-
+ // The rev's files and fdeltas
+ {
+ set<file_id> full_files;
+ set<pair<file_id, file_id> > del_files;
+ files_for_revision(r, full_files, del_files);
+
+ for (set<file_id>::const_iterator f = full_files.begin();
+ f != full_files.end(); f++)
+ {
+ if (cb.queue_this_file(f->inner()))
+ {
+ enumerator_item item;
+ item.tag = enumerator_item::fdata;
+ item.ident_a = f->inner();
+ items.push_back(item);
+ }
+ }
+
+ for (set<pair<file_id, file_id> >::const_iterator fd = del_files.begin();
+ fd != del_files.end(); fd++)
+ {
+ if (cb.queue_this_file(fd->second.inner()))
+ {
+ enumerator_item item;
+ item.tag = enumerator_item::fdelta;
+ item.ident_a = fd->first.inner();
+ item.ident_b = fd->second.inner();
+ items.push_back(item);
+ }
+ }
+ }
+
// Queue up the rev itself
{
enumerator_item item;
============================================================
--- enumerator.hh b2662b4dea76a8d832663ed48342920248449f70
+++ enumerator.hh c0e45bd4447e10c0cbf3ca1e3ed54ef91f23c6e5
@@ -63,6 +63,9 @@
app_state & app);
void load_graphs();
bool all_parents_enumerated(revision_id const & child);
+ void files_for_revision(revision_id const & r,
+                         std::set<file_id> & full_files,
+                         std::set<std::pair<file_id, file_id> > & del_files);
void step();
bool done();
};