# # # patch "HACKING" # from [7c3e0d63c56fdfa54662ff237122e30d460440b6] # to [b05dd82114101d138b4d85bd2cb401f8e9c98572] # # patch "annotate.cc" # from [d359129ebda4db42123b8b92f9effdfdadb2f8f4] # to [c53402defbc6caabca1cc2af8439d6dc93b78c42] # # patch "asciik.cc" # from [48e1e8f7b0fb36535ccf5b043b34d43c0f284bfc] # to [3da3edbd264cbf8932a700816e7228d93781d629] # # patch "automate.cc" # from [0446feeafe34cea813cda4b135c3439f562d903e] # to [f391cb466a1a2e64c65b10a6d92a4a7ecf3c2dec] # # patch "cert.cc" # from [468eea09a888a9d13318db7c7476010f1f6e1ae4] # to [b9f4a223c51537671b6dbcc7814a6069244504f3] # # patch "cert.hh" # from [8bca058362ed2b204d8bf6cd799a7d77ebbad464] # to [6f92a79897c7ab18113e593bc45f1afc8fbf7fc5] # # patch "cmd_db.cc" # from [d10b58e1e26edc1c8794f4034ecd95c38a4e839b] # to [d27033cf306a7fff9098b06061a13699914f4032] # # patch "cmd_diff_log.cc" # from [edadb0edb91f959484f2b26bf17f17dfa9625d52] # to [0a948c2b416b4c325c8702ebe824a8b007ddc28f] # # patch "cmd_files.cc" # from [c8196d8cd2834fdb9d12b3d8af67c974ab8d9ca9] # to [68eafab3955008cebf7ad4f22e2f1d4dd6bad55a] # # patch "cmd_key_cert.cc" # from [3f2e18f03e88934ae3ef032a2f5892ecb24ae201] # to [e9e4fa74701942d70c5ac1ca5d5139e6decc3ee2] # # patch "cmd_list.cc" # from [2f535360b35d1e2851b6862286548bacc198bd5a] # to [079dd89658dd5b56622e60256d797e0d02b02f22] # # patch "cmd_merging.cc" # from [9422eb65503503e2d252471de548a64ddfe6ba6f] # to [fe110559e7d83804c10fb36352b9ea22de724861] # # patch "cmd_netsync.cc" # from [1c92b50b791690633a9979dfdf5b9e552ee3bb09] # to [68b7aa1513248fa388336d5def0d12e6d515453f] # # patch "cmd_othervcs.cc" # from [805ca2c1f8f3ec3a05a2b5a9cfb50ca7cbb7fba5] # to [9d46c385ed7557d720aa71c99da22e40b6549304] # # patch "cmd_ws_commit.cc" # from [f702e00ec483e72dc1fcfe58dbf88e20a4f9f907] # to [7f18334033c144ff6d9027e0bbedc92ebbb9de5c] # # patch "database.cc" # from [f2b3da8910f19eaaa8af403cc7b41429866c0f68] # to [74c819293bca360a8df5a82776edfc5f12c22bdd] # # patch "database.hh" # from [db3eba0e6cb68d5fa4da20c346bebd2bc6c6ef51] # to [eb927e49718ef1e00b7b6130b01b4abfaaae387e] # # patch "diff_patch.cc" # from [bf0138e49ccc702cb042fa0f95e272e3a072ea96] # to [863137e97631b7f95879bb4baef1644d2435ec68] # # patch "enumerator.cc" # from [9d7d81efbfa8befe3a150399b2bde472d9e5e3b9] # to [ca9940ca45aa35f45aa5a09a75b57b9c813fea3c] # # patch "enumerator.hh" # from [c911c8622932915c90e96b53668df5bdd5317c7a] # to [32f0295cfac6f68ed5c345d06af5e9a20548f117] # # patch "keys.cc" # from [b818f882c867150822c9826b8b78e33adee46541] # to [d96a6b057ee9ba109e52e6cf2d7a5e8785435b85] # # patch "keys.hh" # from [d4c34c10f4c29dbdbc6b7471011ee57aa438fb5c] # to [20459245774ad7410220fe28dd5c9025987bf287] # # patch "merge.cc" # from [cc1fbab1643cd6188637296b9806c637134c1f78] # to [f9f44792ab7eeb5b8637b49099fc26030b0eceae] # # patch "merge.hh" # from [e37513afabd41ebafa5bc2b42a42121900c23269] # to [f3613e195ad378f7666d003b959b322abbce97d0] # # patch "monotone.cc" # from [2c9baedc7ac7d288f62bfdfb1bb44d711c6ea943] # to [3602b1d8a957547153408a9a13c44d3a33803ecb] # # patch "netcmd.hh" # from [7a72dc722db614fd522f07c040cad8be2de3ecfb] # to [15406c426ad26a06900fd8fd6b171e746b1ec1ee] # # patch "netsync.cc" # from [43d0a7da662099a6f8912fe5615172343049975e] # to [80b2879ab8f06fd0b93b8774b69671debba79c40] # # patch "project.cc" # from [8f4a3f95b006e6c824bfdd32b9dfd75f3e85f824] # to [919ba0487836de6e7520f368e202b6e146683297] # # patch "rcs_import.cc" # from [0f51bc2b23658e9ef7477a1ba9c7f4f8eff691a5] # to [b32cdb110fdf01057d6afac6b1d91263b0d74551] # # patch 
"rcs_import.hh" # from [a1bdda69e06ea25a4b50cfea9c8e1dbf5f61edd5] # to [a8e67b44e7960ed7ef21eb46abac7d658ce03689] # # patch "restrictions.cc" # from [60b8793d468dfdcd60f0c9f5844a01d4e059435f] # to [a4bb673571007d154c6f40c87305348acc2b6fb4] # # patch "restrictions.hh" # from [587fd1930120667fa026468bfa7c7ab58fcd29fc] # to [0b2c8a2c94abcc8d9d84e6684430f4324ce29f7a] # # patch "revision.cc" # from [515d7f7cf392614ea54f70bc9a58bf91495c6d7f] # to [86c0d8a6aacc484acae33d7d192dcc294cbe9080] # # patch "revision.hh" # from [8a83775b59c469630040469f6a8ad0c5711f28f9] # to [40fdeded2fc3bf25dd780a6aaa583326084b7811] # # patch "roster.cc" # from [0ab37af195f9be17a9a4ffe1db7a2c6b2b84440d] # to [5250b2f5913c0bd144a9828a1317384956729fc9] # # patch "roster.hh" # from [06321f820ddcd3415431c1d1fae35d9a637e8568] # to [944e852bb147760f804ceb6063a1934b016d4c86] # # patch "schema_migration.cc" # from [5e346221c35a8797dd6255b834ec06f6bfa70006] # to [f5bb54455a2fb596c5091b3ec2677f7c350395ec] # # patch "schema_migration.hh" # from [c02720141fbd073e228691f9aa049a5a8309ded6] # to [2c414db665de886231c317fb5292415c51b63fae] # # patch "selectors.cc" # from [9682d2ded09a6d978251d9e251d67e174297deb7] # to [acbef6eec0355e5d85a3a775de4fcb2cecb7e252] # # patch "update.cc" # from [4ff426265325f41011fa8460cd261ca645646d5a] # to [e4e1f8d71ba2133af8d22537033ed7f1bb350cbd] # # patch "update.hh" # from [af0e12d08bd51ce590f3f7de9e6ac7bf252712cc] # to [182e0764bd0c84327c2b1f073fc3eaf3642ad4d2] # # patch "work.cc" # from [7855294ba0c0228c5c0c19c15dc974f03e6ecf05] # to [f7da6e8d4930dfe92ba826ba76033b70513fdaf0] # # patch "work.hh" # from [17520ad35c3888799b82336286f832c535039261] # to [f86a3eb582bbf5f33572dac57d4281cb81076dbd] # ============================================================ --- HACKING 7c3e0d63c56fdfa54662ff237122e30d460440b6 +++ HACKING b05dd82114101d138b4d85bd2cb401f8e9c98572 @@ -273,7 +273,69 @@ bare capital letters scattered around. function. You can easily add an overload to "dump" to support new types. +"Application state" objects +--------------------------- +There are nine object types which hold a substantial portion of the +overall state of the program. You will see them frequently in +argument lists. Most, but not all, of these are allocated only once +for the entire program. + +Because many functions take some of these objects as arguments, we +have a convention for their position and order: all such arguments +appear first within the overall argument list, and in the order of +the list below. + + * "app_state" is being phased out; it used to be an umbrella object + carrying almost all the state of the program, with sub-objects of + the types listed below. Most of those are now allocated + separately, but the options and lua_hooks objects are still under + the umbrella. Also, there are a very few operations that are still + app_state methods. Do not introduce new functions which take an + app_state argument, unless there is no alternative. + + * "options" holds information from all of the command-line options. + It does *not* record the non-option command line arguments. Some + of its fields may default to other information sources as well. + + To the maximum extent practical, "options" objects should not + appear in function arguments. Instead, pass down specific fields + that are relevant to the lower-level code. + + * "lua_hooks" holds the Lua interpreter handle and all the associated + state, in particular all the hook functions that the user may + override. 
It is, unfortunately, not possible to pass around single + hook functions, so any C++ function that (transitively) calls some + hook must get the lua_hooks object somehow. + + * There are three types that encapsulate the database of revisions at + different levels of abstraction. No function should take more than + one of the following types. + + - "project_t" represents a development project within the + database, that is, a database plus a set of branch names and + trust decisions. + + - "database" represents the database as a whole, at a level where + trust decisions are irrelevant. At present, the database + object does do some trust checking and has responsibility for + all public key operations (signature checks and nonce + encryption); these may be moved to the project object in the + future. + + - "sqlite3" is the raw SQLite library handle. Some very + low-level internal functions use this instead of a database + object. Introducing more of them is to be avoided. + + * "node_id_source" is not really a top-level state object, but if a + function takes one of them, it goes right after the database in the + argument list. + + * "key_store" holds the user's private keys, and is responsible for all + private key operations (creating signatures and decrypting nonces). + + * "workspace" is responsible for manipulating checked-out source trees. + Reporting errors to the user ---------------------------- ============================================================ --- annotate.cc d359129ebda4db42123b8b92f9effdfdadb2f8f4 +++ annotate.cc c53402defbc6caabca1cc2af8439d6dc93b78c42 @@ -58,7 +58,7 @@ public: class annotate_context { public: - annotate_context(file_id fid, project_t & project); + annotate_context(project_t & project, file_id fid); shared_ptr initial_lineage() const; @@ -206,7 +206,7 @@ typedef multi_index_container< > work_units; -annotate_context::annotate_context(file_id fid, project_t & project) +annotate_context::annotate_context(project_t & project, file_id fid) : project(project), annotated_lines_completed(0) { // initialize file_lines @@ -393,7 +393,7 @@ annotate_context::build_revisions_to_ann { vector< revision > certs; project.get_revision_certs(*i, certs); - erase_bogus_certs(certs, project.db); + erase_bogus_certs(project.db, certs); string author(cert_string_value(certs, author_cert_name, true, false, "@< ")); @@ -695,8 +695,8 @@ static void } static void -do_annotate_node(annotate_node_work const & work_unit, - database & db, +do_annotate_node(database & db, + annotate_node_work const & work_unit, work_units & work_units) { L(FL("do_annotate_node for node %s") % work_unit.revision); @@ -823,7 +823,7 @@ do_annotate (project_t & project, file_t % file_node->self % file_node->content % rid); shared_ptr - acp(new annotate_context(file_node->content, project)); + acp(new annotate_context(project, file_node->content)); shared_ptr lineage = acp->initial_lineage(); @@ -858,7 +858,7 @@ do_annotate (project_t & project, file_t annotate_node_work work = *w; work_units.erase(w); - do_annotate_node(work, project.db, work_units); + do_annotate_node(project.db, work, work_units); } acp->annotate_equivalent_lines(); ============================================================ --- asciik.cc 48e1e8f7b0fb36535ccf5b043b34d43c0f284bfc +++ asciik.cc 3da3edbd264cbf8932a700816e7228d93781d629 @@ -380,7 +380,7 @@ CMD(asciik, "asciik", "", CMD_REF(debug) complete(app, project, idx(args, 0)(), revs); vector sorted; - toposort(revs, sorted, app.db); + toposort(app.db, revs, sorted); 
reverse(sorted.begin(), sorted.end()); asciik graph(std::cout, 10); ============================================================ --- automate.cc 0446feeafe34cea813cda4b135c3439f562d903e +++ automate.cc f391cb466a1a2e64c65b10a6d92a4a7ecf3c2dec @@ -223,7 +223,7 @@ CMD_AUTOMATE(erase_ancestors, N_("[REV1 N(db.revision_exists(rid), F("no such revision '%s'") % rid); revs.insert(rid); } - erase_ancestors(revs, db); + erase_ancestors(db, revs); for (set::const_iterator i = revs.begin(); i != revs.end(); ++i) output << (*i).inner()() << '\n'; } @@ -253,7 +253,7 @@ CMD_AUTOMATE(toposort, N_("[REV1 [REV2 [ revs.insert(rid); } vector sorted; - toposort(revs, sorted, db); + toposort(db, revs, sorted); for (vector::const_iterator i = sorted.begin(); i != sorted.end(); ++i) output << (*i).inner()() << '\n'; @@ -298,10 +298,10 @@ CMD_AUTOMATE(ancestry_difference, N_("NE bs.insert(b); } set ancestors; - ancestry_difference(a, bs, ancestors, db); + ancestry_difference(db, a, bs, ancestors); vector sorted; - toposort(ancestors, sorted, db); + toposort(db, ancestors, sorted); for (vector::const_iterator i = sorted.begin(); i != sorted.end(); ++i) output << (*i).inner()() << '\n'; @@ -751,9 +751,10 @@ struct inventory_itemizer : public tree_ inodeprint_map ipm; workspace & work; - inventory_itemizer(path_restriction const & m, inventory_map & i, - workspace & work) : - mask(m), inventory(i), work(work) + inventory_itemizer(workspace & work, + path_restriction const & m, + inventory_map & i) + : mask(m), inventory(i), work(work) { if (work.in_inodeprints_mode()) { @@ -797,10 +798,11 @@ static void } static void -inventory_filesystem(path_restriction const & mask, inventory_map & inventory, - workspace & work) +inventory_filesystem(workspace & work, + path_restriction const & mask, + inventory_map & inventory) { - inventory_itemizer itemizer(mask, inventory, work); + inventory_itemizer itemizer(work, mask, inventory); file_path const root; // The constructor file_path() returns ""; the root directory. 
walk_tree // does not visit that node, so set fs_type now, if it meets the @@ -980,7 +982,7 @@ CMD_AUTOMATE(inventory, N_("[PATH]...") CMD_REQUIRES_WORKSPACE(app); parent_map parents; - work.get_parent_rosters(parents, app.db); + work.get_parent_rosters(app.db, parents); // for now, until we've figured out what the format could look like // and what conceptional model we can implement // see: http://www.venge.net/mtn-wiki/MultiParentWorkspaceFallout @@ -990,7 +992,7 @@ CMD_AUTOMATE(inventory, N_("[PATH]...") roster_t new_roster, old_roster = parent_roster(parents.begin()); temp_node_id_source nis; - work.get_current_roster_shape(new_roster, app.db, nis); + work.get_current_roster_shape(app.db, nis, new_roster); inventory_map inventory; vector includes = args_to_paths(args); @@ -1010,14 +1012,14 @@ CMD_AUTOMATE(inventory, N_("[PATH]...") inserter(excludes, excludes.end())); } - node_restriction nmask(includes, excludes, app.opts.depth, old_roster, new_roster, app.work); + node_restriction nmask(app.work, includes, excludes, app.opts.depth, old_roster, new_roster); // skip the check of the workspace paths because some of them might // be missing and the user might want to query the recorded structure // of them anyways - path_restriction pmask(includes, excludes, app.opts.depth, app.work, path_restriction::skip_check); + path_restriction pmask(app.work, includes, excludes, app.opts.depth, path_restriction::skip_check); inventory_rosters(old_roster, new_roster, nmask, pmask, inventory); - inventory_filesystem(pmask, inventory, app.work); + inventory_filesystem(app.work, pmask, inventory); basic_io::printer pr; @@ -1202,8 +1204,8 @@ CMD_AUTOMATE(get_revision, N_("[REVID]") parent_map old_rosters; revision_t rev; - work.get_parent_rosters(old_rosters, db); - work.get_current_roster_shape(new_roster, db, nis); + work.get_parent_rosters(db, old_rosters); + work.get_current_roster_shape(db, nis, new_roster); work.update_current_roster_from_filesystem(new_roster); make_revision(old_rosters, new_roster, rev); @@ -1240,7 +1242,7 @@ CMD_AUTOMATE(get_base_revision_id, "", CMD_REQUIRES_WORKSPACE(app); parent_map parents; - work.get_parent_rosters(parents, app.db); + work.get_parent_rosters(app.db, parents); N(parents.size() == 1, F("this command can only be used in a single-parent workspace")); @@ -1273,10 +1275,10 @@ CMD_AUTOMATE(get_current_revision_id, "" revision_t rev; temp_node_id_source nis; - work.get_current_roster_shape(new_roster, db, nis); + work.get_current_roster_shape(db, nis, new_roster); work.update_current_roster_from_filesystem(new_roster); - work.get_parent_rosters(parents, db); + work.get_parent_rosters(db, parents); make_revision(parents, new_roster, rev); calculate_ident(rev, new_revision_id); @@ -1345,7 +1347,7 @@ CMD_AUTOMATE(get_manifest_of, N_("[REVID temp_node_id_source nis; - work.get_current_roster_shape(new_roster, db, nis); + work.get_current_roster_shape(db, nis, new_roster); work.update_current_roster_from_filesystem(new_roster); } else @@ -2075,9 +2077,9 @@ CMD_AUTOMATE(cert, N_("REVISION-ID NAME N(db.revision_exists(rid), F("no such revision '%s'") % rid); - cache_user_key(app.opts, app.lua, keys, db); - put_simple_revision_cert(rid, cert_name(idx(args, 1)()), - cert_value(idx(args, 2)()), db, keys); + cache_user_key(app.opts, app.lua, db, keys); + put_simple_revision_cert(db, keys, rid, cert_name(idx(args, 1)()), + cert_value(idx(args, 2)())); } // Name: get_db_variables ============================================================ --- cert.cc 
468eea09a888a9d13318db7c7476010f1f6e1ae4 +++ cert.cc b9f4a223c51537671b6dbcc7814a6069244504f3 @@ -105,8 +105,8 @@ void void -erase_bogus_certs(vector< manifest > & certs, - database & db) +erase_bogus_certs(database & db, + vector< manifest > & certs) { typedef vector< manifest >::iterator it; it e = remove_if(certs.begin(), certs.end(), bogus_cert_p(db)); @@ -161,8 +161,8 @@ void } void -erase_bogus_certs(vector< revision > & certs, - database & db) +erase_bogus_certs(database & db, + vector< revision > & certs) { typedef vector< revision >::iterator it; it e = remove_if(certs.begin(), certs.end(), bogus_cert_p(db)); @@ -370,11 +370,11 @@ bool } bool -put_simple_revision_cert(revision_id const & id, +put_simple_revision_cert(database & db, + key_store & keys, + revision_id const & id, cert_name const & nm, - cert_value const & val, - database & db, - key_store & keys) + cert_value const & val) { I(!keys.signing_key().empty()); @@ -397,8 +397,8 @@ void // OPTS may override. Branch name is returned in BRANCHNAME. // Does not modify branch state in OPTS. void -guess_branch(revision_id const & ident, options & opts, - project_t & project, branch_name & branchname) +guess_branch(options & opts, project_t & project, + revision_id const & ident, branch_name & branchname) { if (opts.branch_given && !opts.branchname().empty()) branchname = opts.branchname; @@ -428,86 +428,91 @@ void // As above, but set the branch name in the options // if it wasn't already set. void -guess_branch(revision_id const & ident, options & opts, project_t & project) +guess_branch(options & opts, project_t & project, revision_id const & ident) { branch_name branchname; - guess_branch(ident, opts, project, branchname); + guess_branch(opts, project, ident, branchname); opts.branchname = branchname; } void -cert_revision_in_branch(revision_id const & rev, - branch_name const & branch, - database & db, - key_store & keys) +cert_revision_in_branch(database & db, + key_store & keys, + revision_id const & rev, + branch_name const & branch) { - put_simple_revision_cert(rev, branch_cert_name, cert_value(branch()), - db, keys); + put_simple_revision_cert(db, keys, rev, branch_cert_name, + cert_value(branch())); } void -cert_revision_suspended_in_branch(revision_id const & rev, - branch_name const & branch, - database & db, - key_store & keys) +cert_revision_suspended_in_branch(database & db, + key_store & keys, + revision_id const & rev, + branch_name const & branch) { - put_simple_revision_cert (rev, suspend_cert_name, cert_value(branch()), - db, keys); + put_simple_revision_cert(db, keys, rev, suspend_cert_name, + cert_value(branch())); } // "standard certs" void -cert_revision_date_time(revision_id const & m, - date_t const & t, - database & db, - key_store & keys) +cert_revision_date_time(database & db, + key_store & keys, + revision_id const & rev, + date_t const & t) { cert_value val = cert_value(t.as_iso_8601_extended()); - put_simple_revision_cert(m, date_cert_name, val, db, keys); + put_simple_revision_cert(db, keys, rev, date_cert_name, val); } void -cert_revision_author(revision_id const & m, - string const & author, - database & db, - key_store & keys) +cert_revision_author(database & db, + key_store & keys, + revision_id const & rev, + string const & author) { - put_simple_revision_cert(m, author_cert_name, cert_value(author), db, keys); + put_simple_revision_cert(db, keys, rev, author_cert_name, + cert_value(author)); } void -cert_revision_tag(revision_id const & m, - string const & tagname, - database & db, 
key_store & keys) +cert_revision_tag(database & db, + key_store & keys, + revision_id const & rev, + string const & tagname) { - put_simple_revision_cert(m, tag_cert_name, cert_value(tagname), db, keys); + put_simple_revision_cert(db, keys, rev, tag_cert_name, + cert_value(tagname)); } - void -cert_revision_changelog(revision_id const & m, - utf8 const & log, - database & db, key_store & keys) +cert_revision_changelog(database & db, + key_store & keys, + revision_id const & rev, + utf8 const & log) { - put_simple_revision_cert(m, changelog_cert_name, cert_value(log()), - db, keys); + put_simple_revision_cert(db, keys, rev, changelog_cert_name, + cert_value(log())); } void -cert_revision_comment(revision_id const & m, - utf8 const & comment, - database & db, key_store & keys) +cert_revision_comment(database & db, + key_store & keys, + revision_id const & rev, + utf8 const & comment) { - put_simple_revision_cert(m, comment_cert_name, cert_value(comment()), - db, keys); + put_simple_revision_cert(db, keys, rev, comment_cert_name, + cert_value(comment())); } void -cert_revision_testresult(revision_id const & r, - string const & results, - database & db, key_store & keys) +cert_revision_testresult(database & db, + key_store & keys, + revision_id const & rev, + string const & results) { bool passed = false; if (lowercase(results) == "true" || @@ -525,8 +530,8 @@ cert_revision_testresult(revision_id con "tried '0/1' 'yes/no', 'true/false', " "'pass/fail'"); - put_simple_revision_cert(r, testresult_cert_name, - cert_value(lexical_cast(passed)), db, keys); + put_simple_revision_cert(db, keys, rev, testresult_cert_name, + cert_value(lexical_cast(passed))); } // Local Variables: ============================================================ --- cert.hh 8bca058362ed2b204d8bf6cd799a7d77ebbad464 +++ cert.hh 6f92a79897c7ab18113e593bc45f1afc8fbf7fc5 @@ -67,26 +67,23 @@ cert_status check_cert(database & db, ce void cert_signable_text(cert const & t,std::string & out); cert_status check_cert(database & db, cert const & t); -bool put_simple_revision_cert(revision_id const & id, +bool put_simple_revision_cert(database & db, + key_store & keys, + revision_id const & id, cert_name const & nm, - cert_value const & val, - database & db, - key_store & keys); + cert_value const & val); -void erase_bogus_certs(std::vector< revision > & certs, - database & db); +void erase_bogus_certs(database & db, std::vector< revision > & certs); +void erase_bogus_certs(database & db, std::vector< manifest > & certs); -void erase_bogus_certs(std::vector< manifest > & certs, - database & db); - // Special certs -- system won't work without them. #define branch_cert_name cert_name("branch") void -cert_revision_in_branch(revision_id const & ctx, - branch_name const & branchname, - database & db, key_store & keys); +cert_revision_in_branch(database & db, key_store & keys, + revision_id const & rev, + branch_name const & branchname); // We also define some common cert types, to help establish useful @@ -94,10 +91,10 @@ void // reason not to. 
void -guess_branch(revision_id const & id, options & opts, project_t & project, +guess_branch(options & opts, project_t & project, revision_id const & rev, branch_name & branchname); void -guess_branch(revision_id const & id, options & opts, project_t & project); +guess_branch(options & opts, project_t & project, revision_id const & rev); #define date_cert_name cert_name("date") #define author_cert_name cert_name("author") @@ -108,39 +105,39 @@ void #define suspend_cert_name cert_name("suspend") void -cert_revision_suspended_in_branch(revision_id const & ctx, - branch_name const & branchname, - database & db, key_store & keys); +cert_revision_suspended_in_branch(database & db, key_store & keys, + revision_id const & rev, + branch_name const & branchname); void -cert_revision_date_time(revision_id const & m, - date_t const & t, - database & db, key_store & keys); +cert_revision_date_time(database & db, key_store & keys, + revision_id const & rev, + date_t const & t); void -cert_revision_author(revision_id const & m, - std::string const & author, - database & db, key_store & keys); +cert_revision_author(database & db, key_store & keys, + revision_id const & m, + std::string const & author); void -cert_revision_tag(revision_id const & m, - std::string const & tagname, - database & db, key_store & keys); +cert_revision_tag(database & db, key_store & keys, + revision_id const & rev, + std::string const & tagname); void -cert_revision_changelog(revision_id const & m, - utf8 const & changelog, - database & db, key_store & keys); +cert_revision_changelog(database & db, key_store & keys, + revision_id const & rev, + utf8 const & changelog); void -cert_revision_comment(revision_id const & m, - utf8 const & comment, - database & db, key_store & keys); +cert_revision_comment(database & db, key_store & keys, + revision_id const & m, + utf8 const & comment); void -cert_revision_testresult(revision_id const & m, - std::string const & results, - database & db, key_store & keys); +cert_revision_testresult(database & db, key_store & keys, + revision_id const & m, + std::string const & results); // Local Variables: ============================================================ --- cmd_db.cc d10b58e1e26edc1c8794f4034ecd95c38a4e839b +++ cmd_db.cc d27033cf306a7fff9098b06061a13699914f4032 @@ -233,7 +233,7 @@ CMD(db_changesetify, "changesetify", "", app.db.check_is_not_rosterified(); // early short-circuit to avoid failure after lots of work - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); build_changesets_from_manifest_ancestry(app.db, keys, set()); } @@ -252,7 +252,7 @@ CMD(db_rosterify, "rosterify", "", CMD_R app.db.check_is_not_rosterified(); // early short-circuit to avoid failure after lots of work - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); build_roster_style_revs_from_manifest_style_revs(app.db, keys, app.opts.attrs_to_drop); @@ -395,7 +395,7 @@ CMD_HIDDEN(test_migration_step, "test_mi if (args.size() != 1) throw usage(execid); - app.db.test_migration_step(idx(args,0)(), keys); + app.db.test_migration_step(keys, idx(args,0)()); } CMD_HIDDEN(rev_height, "rev_height", "", CMD_REF(informative), N_("REV"), ============================================================ --- cmd_diff_log.cc edadb0edb91f959484f2b26bf17f17dfa9625d52 +++ cmd_diff_log.cc 0a948c2b416b4c325c8702ebe824a8b007ddc28f @@ -367,7 +367,7 @@ prepare_diff(cset & included, revision_id old_rid; parent_map parents; - 
app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); // With no arguments, which parent should we diff against? N(parents.size() == 1, @@ -376,12 +376,12 @@ prepare_diff(cset & included, old_rid = parent_id(parents.begin()); old_roster = parent_roster(parents.begin()); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - old_roster, new_roster, app.work); + old_roster, new_roster); app.work.update_current_roster_from_filesystem(new_roster, mask); @@ -402,12 +402,12 @@ prepare_diff(cset & included, complete(app, project, idx(app.opts.revision_selectors, 0)(), r_old_id); app.db.get_roster(r_old_id, old_roster); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - old_roster, new_roster, app.work); + old_roster, new_roster); app.work.update_current_roster_from_filesystem(new_roster, mask); @@ -431,10 +431,10 @@ prepare_diff(cset & included, app.db.get_roster(r_old_id, old_roster); app.db.get_roster(r_new_id, new_roster); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - old_roster, new_roster, app.work); + old_roster, new_roster); // FIXME: this is *possibly* a UI bug, insofar as we // look at the restriction name(s) you provided on the command @@ -555,7 +555,7 @@ static void static void -log_certs(ostream & os, project_t & project, revision_id id, cert_name name, +log_certs(project_t & project, ostream & os, revision_id id, cert_name name, string label, string separator, bool multiline, bool newline) { vector< revision > certs; @@ -587,16 +587,16 @@ static void } static void -log_certs(ostream & os, project_t & project, revision_id id, cert_name name, +log_certs(project_t & project, ostream & os, revision_id id, cert_name name, string label, bool multiline) { - log_certs(os, project, id, name, label, label, multiline, true); + log_certs(project, os, id, name, label, label, multiline, true); } static void -log_certs(ostream & os, project_t & project, revision_id id, cert_name name) +log_certs(project_t & project, ostream & os, revision_id id, cert_name name) { - log_certs(os, project, id, name, " ", ",", false, false); + log_certs(project, os, id, name, " ", ",", false, false); } @@ -682,13 +682,12 @@ CMD(log, "log", "", CMD_REF(informative) parent_map parents; temp_node_id_source nis; - app.work.get_parent_rosters(parents, app.db); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_parent_rosters(app.db, parents); + app.work.get_current_roster_shape(app.db, nis, new_roster); - mask = node_restriction(args_to_paths(args), + mask = node_restriction(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), - app.opts.depth, parents, new_roster, - app.work); + app.opts.depth, parents, new_roster); } else { @@ -697,9 +696,9 @@ CMD(log, "log", "", CMD_REF(informative) roster_t roster; app.db.get_roster(first_rid, roster); - mask = node_restriction(args_to_paths(args), + mask = node_restriction(app.work, args_to_paths(args), 
args_to_paths(app.opts.exclude_patterns), - app.opts.depth, roster, app.work); + app.opts.depth, roster); } } @@ -820,9 +819,8 @@ CMD(log, "log", "", CMD_REF(informative) if (!use_markings || marked_revs.find(rid) != marked_revs.end()) { set nodes_modified; - select_nodes_modified_by_rev(rev, roster, - nodes_modified, - app.db); + select_nodes_modified_by_rev(app.db, rev, roster, + nodes_modified); for (set::const_iterator n = nodes_modified.begin(); n != nodes_modified.end(); ++n) @@ -868,16 +866,16 @@ CMD(log, "log", "", CMD_REF(informative) if (app.opts.brief) { out << rid; - log_certs(out, project, rid, author_name); + log_certs(project, out, rid, author_name); if (app.opts.no_graph) - log_certs(out, project, rid, date_name); + log_certs(project, out, rid, date_name); else { out << '\n'; - log_certs(out, project, rid, date_name, + log_certs(project, out, rid, date_name, string(), string(), false, false); } - log_certs(out, project, rid, branch_name); + log_certs(project, out, rid, branch_name); out << '\n'; } else @@ -900,10 +898,10 @@ CMD(log, "log", "", CMD_REF(informative) anc != ancestors.end(); ++anc) out << "Ancestor: " << *anc << '\n'; - log_certs(out, project, rid, author_name, "Author: ", false); - log_certs(out, project, rid, date_name, "Date: ", false); - log_certs(out, project, rid, branch_name, "Branch: ", false); - log_certs(out, project, rid, tag_name, "Tag: ", false); + log_certs(project, out, rid, author_name, "Author: ", false); + log_certs(project, out, rid, date_name, "Date: ", false); + log_certs(project, out, rid, branch_name, "Branch: ", false); + log_certs(project, out, rid, tag_name, "Tag: ", false); if (!app.opts.no_files && !csum.cs.empty()) { @@ -912,8 +910,8 @@ CMD(log, "log", "", CMD_REF(informative) out << '\n'; } - log_certs(out, project, rid, changelog_name, "ChangeLog: ", true); - log_certs(out, project, rid, comment_name, "Comments: ", true); + log_certs(project, out, rid, changelog_name, "ChangeLog: ", true); + log_certs(project, out, rid, comment_name, "Comments: ", true); } if (app.opts.diffs) ============================================================ --- cmd_files.cc c8196d8cd2834fdb9d12b3d8af67c974ab8d9ca9 +++ cmd_files.cc 68eafab3955008cebf7ad4f22e2f1d4dd6bad55a @@ -244,7 +244,7 @@ static void } static void -dump_file(std::ostream & output, database & db, file_id & ident) +dump_file(database & db, std::ostream & output, file_id & ident) { N(db.file_version_exists(ident), F("no file version %s found in database") % ident); @@ -256,7 +256,7 @@ static void } static void -dump_file(std::ostream & output, database & db, revision_id rid, utf8 filename) +dump_file(database & db, std::ostream & output, revision_id rid, utf8 filename) { N(db.revision_exists(rid), F("no such revision '%s'") % rid); @@ -276,7 +276,7 @@ dump_file(std::ostream & output, databas F("no file '%s' found in revision '%s'") % fp % rid); file_t file_node = downcast_to_file_t(node); - dump_file(output, db, file_node->content); + dump_file(db, output, file_node->content); } CMD(cat, "cat", "", CMD_REF(informative), @@ -295,7 +295,7 @@ CMD(cat, "cat", "", CMD_REF(informative) app.require_workspace(); parent_map parents; - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); N(parents.size() == 1, F("this command can only be used in a single-parent workspace")); rid = parent_id(parents.begin()); @@ -306,7 +306,7 @@ CMD(cat, "cat", "", CMD_REF(informative) complete(app, project, idx(app.opts.revision_selectors, 0)(), rid); } - dump_file(cout, 
app.db, rid, idx(args, 0)); + dump_file(app.db, cout, rid, idx(args, 0)); } // Name: get_file @@ -328,7 +328,7 @@ CMD_AUTOMATE(get_file, N_("FILEID"), F("wrong argument count")); file_id ident(idx(args, 0)()); - dump_file(output, app.db, ident); + dump_file(app.db, output, ident); } // Name: get_file_of @@ -361,7 +361,7 @@ CMD_AUTOMATE(get_file_of, N_("FILENAME") CMD_REQUIRES_WORKSPACE(app); parent_map parents; - work.get_parent_rosters(parents, db); + work.get_parent_rosters(db, parents); N(parents.size() == 1, F("this command can only be used in a single-parent workspace")); rid = parent_id(parents.begin()); @@ -372,7 +372,7 @@ CMD_AUTOMATE(get_file_of, N_("FILENAME") complete(app, project, idx(app.opts.revision_selectors, 0)(), rid); } - dump_file(output, db, rid, idx(args, 0)); + dump_file(db, output, rid, idx(args, 0)); } // Local Variables: ============================================================ --- cmd_key_cert.cc 3f2e18f03e88934ae3ef032a2f5892ecb24ae201 +++ cmd_key_cert.cc e9e4fa74701942d70c5ac1ca5d5139e6decc3ee2 @@ -115,7 +115,7 @@ CMD(ssh_agent_export, "ssh_agent_export" throw usage(execid); rsa_keypair_id id; - get_user_key(id, app.opts, app.lua, keys, app.db); + get_user_key(app.opts, app.lua, app.db, keys, id); if (args.size() == 0) keys.export_key_for_agent(id, cout); @@ -138,7 +138,7 @@ CMD(ssh_agent_add, "ssh_agent_add", "", throw usage(execid); rsa_keypair_id id; - get_user_key(id, app.opts, app.lua, keys, app.db); + get_user_key(app.opts, app.lua, app.db, keys, id); keys.add_key_to_agent(id); } @@ -162,7 +162,7 @@ CMD(cert, "cert", "", CMD_REF(key_and_ce cert_name cname; internalize_cert_name(idx(args, 1), cname); - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); cert_value val; if (args.size() == 3) @@ -246,7 +246,7 @@ CMD(tag, "tag", "", CMD_REF(review), N_( revision_id r; complete(app, project, idx(args, 0)(), r); - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); project.put_tag(keys, r, idx(args, 1)()); } @@ -266,8 +266,8 @@ CMD(testresult, "testresult", "", CMD_RE revision_id r; complete(app, project, idx(args, 0)(), r); - cache_user_key(app.opts, app.lua, keys, app.db); - cert_revision_testresult(r, idx(args, 1)(), app.db, keys); + cache_user_key(app.opts, app.lua, app.db, keys); + cert_revision_testresult(app.db, keys, r, idx(args, 1)()); } @@ -284,10 +284,10 @@ CMD(approve, "approve", "", CMD_REF(revi revision_id r; complete(app, project, idx(args, 0)(), r); - guess_branch(r, app.opts, project); + guess_branch(app.opts, project, r); N(app.opts.branchname() != "", F("need --branch argument for approval")); - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); project.put_revision_in_branch(keys, r, app.opts.branchname); } @@ -304,10 +304,10 @@ CMD(suspend, "suspend", "", CMD_REF(revi revision_id r; complete(app, project, idx(args, 0)(), r); - guess_branch(r, app.opts, project); + guess_branch(app.opts, project, r); N(app.opts.branchname() != "", F("need --branch argument to suspend")); - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); project.suspend_revision_in_branch(keys, r, app.opts.branchname); } @@ -339,8 +339,8 @@ CMD(comment, "comment", "", CMD_REF(revi revision_id r; complete(app, project, idx(args, 0)(), r); - cache_user_key(app.opts, app.lua, keys, app.db); - cert_revision_comment(r, comment, app.db, keys); + cache_user_key(app.opts, app.lua, 
app.db, keys); + cert_revision_comment(app.db, keys, r, comment); } // Local Variables: ============================================================ --- cmd_list.cc 2f535360b35d1e2851b6862286548bacc198bd5a +++ cmd_list.cc 079dd89658dd5b56622e60256d797e0d02b02f22 @@ -380,12 +380,12 @@ CMD(known, "known", "", CMD_REF(list), " temp_node_id_source nis; app.require_workspace(); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - new_roster, app.work); + new_roster); // to be printed sorted vector print_paths; @@ -418,15 +418,15 @@ CMD(unknown, "unknown", "ignored", CMD_R app.require_workspace(); vector roots = args_to_paths(args); - path_restriction mask(roots, args_to_paths(app.opts.exclude_patterns), - app.opts.depth, app.work); + path_restriction mask(app.work, roots, args_to_paths(app.opts.exclude_patterns), + app.opts.depth); set unknown, ignored; // if no starting paths have been specified use the workspace root if (roots.empty()) roots.push_back(file_path()); - app.work.find_unknown_and_ignored(mask, roots, unknown, ignored, app.db); + app.work.find_unknown_and_ignored(app.db, mask, roots, unknown, ignored); utf8 const & realname = execid[execid.size() - 1]; if (realname() == "ignored") @@ -447,11 +447,11 @@ CMD(missing, "missing", "", CMD_REF(list { temp_node_id_source nis; roster_t current_roster_shape; - app.work.get_current_roster_shape(current_roster_shape, app.db, nis); - node_restriction mask(args_to_paths(args), + app.work.get_current_roster_shape(app.db, nis, current_roster_shape); + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - current_roster_shape, app.work); + current_roster_shape); set missing; app.work.find_missing(current_roster_shape, mask, missing); @@ -472,15 +472,15 @@ CMD(changed, "changed", "", CMD_REF(list app.require_workspace(); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); app.work.update_current_roster_from_filesystem(new_roster); - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - parents, new_roster, app.work); + parents, new_roster); revision_t rrev; make_restricted_revision(parents, new_roster, mask, rrev); ============================================================ --- cmd_merging.cc 9422eb65503503e2d252471de548a64ddfe6ba6f +++ cmd_merging.cc fe110559e7d83804c10fb36352b9ea22de724861 @@ -93,7 +93,7 @@ pick_branch_for_update(revision_id chose // figure out which branches the target is in vector< revision > certs; app.db.get_revision_certs(chosen_rid, branch_cert_name, certs); - erase_bogus_certs(certs, app.db); + erase_bogus_certs(app.db, certs); set< branch_name > branches; for (vector< revision >::const_iterator i = certs.begin(); @@ -157,7 +157,7 @@ CMD(update, "update", "", CMD_REF(worksp // Figure out where we are parent_map parents; - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); N(parents.size() == 1, F("this command can only be used in a single-parent workspace")); @@ -175,8 +175,9 @@ CMD(update, "update", "", CMD_REF(worksp { 
P(F("updating along branch '%s'") % app.opts.branchname); set candidates; - pick_update_candidates(candidates, old_rid, app.opts.branchname, - project, app.opts.ignore_suspend_certs, app.lua); + pick_update_candidates(app.lua, project, candidates, old_rid, + app.opts.branchname, + app.opts.ignore_suspend_certs); N(!candidates.empty(), F("your request matches no descendents of the current revision\n" "in fact, it doesn't even match the current revision\n" @@ -252,7 +253,7 @@ CMD(update, "update", "", CMD_REF(worksp shared_ptr working_roster = shared_ptr(new roster_t()); MM(*working_roster); - app.work.get_current_roster_shape(*working_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, *working_roster); app.work.update_current_roster_from_filesystem(*working_roster); revision_t working_rev; @@ -281,8 +282,8 @@ CMD(update, "update", "", CMD_REF(worksp content_merge_workspace_adaptor wca(app.db, old_rid, old_roster, left_markings, right_markings, paths); wca.cache_roster(working_rid, working_roster); - resolve_merge_conflicts(*working_roster, chosen_roster, - result, wca, app.lua); + resolve_merge_conflicts(app.lua, *working_roster, chosen_roster, + result, wca); // Make sure it worked... I(result.is_clean()); @@ -291,7 +292,7 @@ CMD(update, "update", "", CMD_REF(worksp // Now finally modify the workspace cset update; make_cset(*working_roster, merged_roster, update); - app.work.perform_content_update(update, wca, app.db); + app.work.perform_content_update(app.db, update, wca); revision_t remaining; make_revision_for_workspace(chosen_rid, chosen_roster, @@ -313,11 +314,11 @@ static void // placing results onto BRANCH. Note that interactive_merge_and_store may // bomb out, and therefore so may this. static void -merge_two(revision_id const & left, revision_id const & right, +merge_two(options & opts, lua_hooks & lua, project_t & project, + key_store & keys, + revision_id const & left, revision_id const & right, branch_name const & branch, string const & caller, - std::ostream & output, bool automate, - project_t & project, options & opts, lua_hooks & lua, - key_store & keys) + std::ostream & output, bool automate) { // The following mess constructs a neatly formatted log message that looks // like this: @@ -356,7 +357,7 @@ merge_two(revision_id const & left, revi revision_id merged; transaction_guard guard(project.db); - interactive_merge_and_store(left, right, merged, project.db, lua); + interactive_merge_and_store(lua, project.db, left, right, merged); project.put_standard_certs_from_options(opts, lua, keys, merged, branch, utf8(log.str())); @@ -404,7 +405,7 @@ CMD(merge, "merge", "", CMD_REF(tree), " % heads.size() % app.opts.branchname); // avoid failure after lots of work - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); map heads_for_ancestor; set ancestors; @@ -438,7 +439,7 @@ CMD(merge, "merge", "", CMD_REF(tree), " continue; revision_id ancestor; - find_common_ancestor_for_merge(*i, *j, ancestor, app.db); + find_common_ancestor_for_merge(app.db, *i, *j, ancestor); // More than one pair might have the same ancestor (e.g. if we // have three heads all with the same parent); as this table @@ -451,16 +452,16 @@ CMD(merge, "merge", "", CMD_REF(tree), " // Erasing ancestors from ANCESTORS will now produce a set of merge // ancestors each of which is not itself an ancestor of any other // merge ancestor. 
- erase_ancestors(ancestors, app.db); + erase_ancestors(app.db, ancestors); I(ancestors.size() > 0); // Take the first ancestor from the above set and merge its // corresponding pair of heads. revpair p = heads_for_ancestor[*ancestors.begin()]; - merge_two(p.first, p.second, app.opts.branchname, string("merge"), - std::cout, false, - project, app.opts, app.lua, keys); + merge_two(app.opts, app.lua, project, keys, + p.first, p.second, app.opts.branchname, string("merge"), + std::cout, false); ancestors.clear(); heads_for_ancestor.clear(); @@ -479,8 +480,9 @@ CMD(merge, "merge", "", CMD_REF(tree), " revision_id right = *i++; I(i == heads.end()); - merge_two(left, right, app.opts.branchname, string("merge"), - std::cout, false, project, app.opts, app.lua, keys); + merge_two(app.opts, app.lua, project, keys, + left, right, app.opts.branchname, string("merge"), + std::cout, false); P(F("note: your workspaces have not been updated")); } @@ -550,7 +552,7 @@ CMD(merge_into_dir, "merge_into_dir", "" set::const_iterator src_i = src_heads.begin(); set::const_iterator dst_i = dst_heads.begin(); - if (*src_i == *dst_i || is_ancestor(*src_i, *dst_i, app.db)) + if (*src_i == *dst_i || is_ancestor(app.db, *src_i, *dst_i)) { P(F("branch '%s' is up-to-date with respect to branch '%s'") % idx(args, 1)() % idx(args, 0)()); @@ -558,14 +560,14 @@ CMD(merge_into_dir, "merge_into_dir", "" return; } - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); P(F("propagating %s -> %s") % idx(args,0) % idx(args,1)); P(F("[left] %s") % *src_i); P(F("[right] %s") % *dst_i); // check for special cases - if (is_ancestor(*dst_i, *src_i, app.db)) + if (is_ancestor(app.db, *dst_i, *src_i)) { P(F("no merge necessary; putting %s in branch '%s'") % (*src_i) % idx(args, 1)()); @@ -628,8 +630,8 @@ CMD(merge_into_dir, "merge_into_dir", "" content_merge_database_adaptor dba(app.db, left_rid, right_rid, left_marking_map, right_marking_map); - resolve_merge_conflicts(left_roster, right_roster, - result, dba, app.lua); + resolve_merge_conflicts(app.lua, left_roster, right_roster, + result, dba); { dir_t moved_root = left_roster.root(); @@ -638,9 +640,8 @@ CMD(merge_into_dir, "merge_into_dir", "" } // Write new files into the db. 
- store_roster_merge_result(left_roster, right_roster, result, - left_rid, right_rid, merged, - app.db); + store_roster_merge_result(app.db, left_roster, right_roster, result, + left_rid, right_rid, merged); } bool log_message_given; @@ -691,12 +692,12 @@ CMD(merge_into_workspace, "merge_into_wo { parent_map parents; - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); N(parents.size() == 1, F("this command can only be used in a single-parent workspace")); temp_node_id_source nis; - app.work.get_current_roster_shape(*working_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, *working_roster); app.work.update_current_roster_from_filesystem(*working_roster); N(parent_roster(parents.begin()) == *working_roster, @@ -731,7 +732,7 @@ CMD(merge_into_workspace, "merge_into_wo revision_id lca_id; database::cached_roster lca; - find_common_ancestor_for_merge(left_id, right_id, lca_id, app.db); + find_common_ancestor_for_merge(app.db, left_id, right_id, lca_id); app.db.get_roster(lca_id, lca); map paths; @@ -740,7 +741,7 @@ CMD(merge_into_workspace, "merge_into_wo content_merge_workspace_adaptor wca(app.db, lca_id, lca.first, *left.second, *right.second, paths); wca.cache_roster(working_rid, working_roster); - resolve_merge_conflicts(*left.first, *right.first, merge_result, wca, app.lua); + resolve_merge_conflicts(app.lua, *left.first, *right.first, merge_result, wca); // Make sure it worked... I(merge_result.is_clean()); @@ -760,7 +761,7 @@ CMD(merge_into_workspace, "merge_into_wo make_cset(*left.first, merge_result.roster, update); // small race condition here... - app.work.perform_content_update(update, wca, app.db); + app.work.perform_content_update(app.db, update, wca); app.work.put_work_rev(merged_rev); app.work.update_any_attrs(app.db); app.work.maybe_update_inodeprints(app.db); @@ -791,15 +792,16 @@ CMD(explicit_merge, "explicit_merge", "" N(!(left == right), F("%s and %s are the same revision, aborting") % left % right); - N(!is_ancestor(left, right, app.db), + N(!is_ancestor(app.db, left, right), F("%s is already an ancestor of %s") % left % right); - N(!is_ancestor(right, left, app.db), + N(!is_ancestor(app.db, right, left), F("%s is already an ancestor of %s") % right % left); // avoid failure after lots of work - cache_user_key(app.opts, app.lua, keys, app.db); - merge_two(left, right, branch, string("explicit merge"), - std::cout, false, project, app.opts, app.lua, keys); + cache_user_key(app.opts, app.lua, app.db, keys); + merge_two(app.opts, app.lua, project, keys, + left, right, branch, string("explicit merge"), + std::cout, false); } CMD(show_conflicts, "show_conflicts", "", CMD_REF(informative), N_("REV REV"), @@ -815,9 +817,9 @@ CMD(show_conflicts, "show_conflicts", "" revision_id l_id, r_id; complete(app, project, idx(args,0)(), l_id); complete(app, project, idx(args,1)(), r_id); - N(!is_ancestor(l_id, r_id, app.db), + N(!is_ancestor(app.db, l_id, r_id), F("%s is an ancestor of %s; no merge is needed.") % l_id % r_id); - N(!is_ancestor(r_id, l_id, app.db), + N(!is_ancestor(app.db, r_id, l_id), F("%s is an ancestor of %s; no merge is needed.") % r_id % l_id); roster_t l_roster, r_roster; marking_map l_marking, r_marking; @@ -941,7 +943,7 @@ CMD(pluck, "pluck", "", CMD_REF(workspac // Get the WORKING roster shared_ptr working_roster = shared_ptr(new roster_t()); MM(*working_roster); - app.work.get_current_roster_shape(*working_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, *working_roster); 
app.work.update_current_roster_from_filesystem(*working_roster); @@ -951,10 +953,10 @@ CMD(pluck, "pluck", "", CMD_REF(workspac { roster_t to_true_roster; app.db.get_roster(to_rid, to_true_roster); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - *from_roster, to_true_roster, app.work); + *from_roster, to_true_roster); roster_t restricted_roster; make_restricted_roster(*from_roster, to_true_roster, @@ -974,7 +976,7 @@ CMD(pluck, "pluck", "", CMD_REF(workspac } parent_map parents; - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); revision_t working_rev; revision_id working_rid; @@ -1002,8 +1004,8 @@ CMD(pluck, "pluck", "", CMD_REF(workspac // to_roster is not fetched from the db which does not have temporary nids wca.cache_roster(to_rid, to_roster); - resolve_merge_conflicts(*working_roster, *to_roster, - result, wca, app.lua); + resolve_merge_conflicts(app.lua, *working_roster, *to_roster, + result, wca); I(result.is_clean()); // temporary node ids may appear @@ -1014,7 +1016,7 @@ CMD(pluck, "pluck", "", CMD_REF(workspac MM(update); make_cset(*working_roster, merged_roster, update); E(!update.empty(), F("no changes were applied")); - app.work.perform_content_update(update, wca, app.db); + app.work.perform_content_update(app.db, update, wca); P(F("applied changes to workspace")); @@ -1090,8 +1092,8 @@ CMD(get_roster, "get_roster", "", CMD_RE revision_id rid(fake_id()); app.require_workspace(); - app.work.get_parent_rosters(parents, app.db); - app.work.get_current_roster_shape(roster, app.db, nis); + app.work.get_parent_rosters(app.db, parents); + app.work.get_current_roster_shape(app.db, nis, roster); app.work.update_current_roster_from_filesystem(roster); if (parents.size() == 0) ============================================================ --- cmd_netsync.cc 1c92b50b791690633a9979dfdf5b9e552ee3bb09 +++ cmd_netsync.cc 68b7aa1513248fa388336d5def0d12e6d515453f @@ -36,9 +36,8 @@ static void static char const ws_internal_db_file_name[] = "mtn.db"; static void -extract_address(args_vector const & args, - utf8 & addr, - options & opts, database & db) +extract_address(options & opts, database & db, + args_vector const & args, utf8 & addr) { if (args.size() >= 1) { @@ -61,13 +60,13 @@ static void } static void -find_key(utf8 const & addr, - globish const & include, - globish const & exclude, - options & opts, +find_key(options & opts, lua_hooks & lua, + database & db, key_store & keys, - database & db, + utf8 const & addr, + globish const & include, + globish const & exclude, bool needed = true) { if (!opts.signing_key().empty()) @@ -83,32 +82,31 @@ find_key(utf8 const & addr, if (needed && (key().empty() || !lua.hook_get_netsync_key(host, include, exclude, key))) - get_user_key(key, opts, lua, keys, db); + get_user_key(opts, lua, db, keys, key); opts.signing_key = key; } static void -find_key_if_needed(utf8 const & addr, - globish const & include, - globish const & exclude, - options & opts, +find_key_if_needed(options & opts, lua_hooks & lua, + database & db, key_store & keys, - database & db, + utf8 const & addr, + globish const & include, + globish const & exclude, bool needed = true) { uri u; parse_uri(addr(), u); if (lua.hook_use_transport_auth(u)) - find_key(addr, include, exclude, opts, lua, keys, db, needed); + find_key(opts, lua, db, keys, addr, include, exclude, needed); } static void -extract_patterns(args_vector const & args, - 
globish & include_pattern, globish & exclude_pattern, - options & opts, database & db) +extract_patterns(options & opts, database & db, args_vector const & args, + globish & include_pattern, globish & exclude_pattern) { if (args.size() >= 2 || opts.exclude_given) { @@ -162,17 +160,17 @@ CMD(push, "push", "", CMD_REF(network), utf8 addr; globish include_pattern, exclude_pattern; - extract_address(args, addr, app.opts, app.db); - extract_patterns(args, include_pattern, exclude_pattern, app.opts, app.db); - find_key_if_needed(addr, include_pattern, exclude_pattern, - app.opts, app.lua, keys, app.db); + extract_address(app.opts, app.db, args, addr); + extract_patterns(app.opts, app.db, args, include_pattern, exclude_pattern); + find_key_if_needed(app.opts, app.lua, app.db, keys, + addr, include_pattern, exclude_pattern); std::list uris; uris.push_back(addr); - run_netsync_protocol(client_voice, source_role, uris, - include_pattern, exclude_pattern, - project, keys, app.lua, app.opts); + run_netsync_protocol(app.opts, app.lua, project, keys, + client_voice, source_role, uris, + include_pattern, exclude_pattern); } CMD(pull, "pull", "", CMD_REF(network), @@ -187,10 +185,10 @@ CMD(pull, "pull", "", CMD_REF(network), utf8 addr; globish include_pattern, exclude_pattern; - extract_address(args, addr, app.opts, app.db); - extract_patterns(args, include_pattern, exclude_pattern, app.opts, app.db); - find_key_if_needed(addr, include_pattern, exclude_pattern, - app.opts, app.lua, keys, app.db, false); + extract_address(app.opts, app.db, args, addr); + extract_patterns(app.opts, app.db, args, include_pattern, exclude_pattern); + find_key_if_needed(app.opts, app.lua, app.db, keys, + addr, include_pattern, exclude_pattern, false); if (app.opts.signing_key() == "") P(F("doing anonymous pull; use -kKEYNAME if you need authentication")); @@ -198,9 +196,9 @@ CMD(pull, "pull", "", CMD_REF(network), std::list uris; uris.push_back(addr); - run_netsync_protocol(client_voice, sink_role, uris, - include_pattern, exclude_pattern, - project, keys, app.lua, app.opts); + run_netsync_protocol(app.opts, app.lua, project, keys, + client_voice, sink_role, uris, + include_pattern, exclude_pattern); } CMD(sync, "sync", "", CMD_REF(network), @@ -216,17 +214,17 @@ CMD(sync, "sync", "", CMD_REF(network), utf8 addr; globish include_pattern, exclude_pattern; - extract_address(args, addr, app.opts, app.db); - extract_patterns(args, include_pattern, exclude_pattern, app.opts, app.db); - find_key_if_needed(addr, include_pattern, exclude_pattern, - app.opts, app.lua, keys, app.db, false); + extract_address(app.opts, app.db, args, addr); + extract_patterns(app.opts, app.db, args, include_pattern, exclude_pattern); + find_key_if_needed(app.opts, app.lua, app.db, keys, + addr, include_pattern, exclude_pattern, false); std::list uris; uris.push_back(addr); - run_netsync_protocol(client_voice, source_and_sink_role, uris, - include_pattern, exclude_pattern, - project, keys, app.lua, app.opts); + run_netsync_protocol(app.opts, app.lua, project, keys, + client_voice, source_and_sink_role, uris, + include_pattern, exclude_pattern); } class dir_cleanup_helper @@ -317,8 +315,8 @@ CMD(clone, "clone", "", CMD_REF(network) globish include_pattern(app.opts.branchname()); globish exclude_pattern(app.opts.exclude_patterns); - find_key_if_needed(addr, include_pattern, exclude_pattern, - app.opts, app.lua, keys, app.db, false); + find_key_if_needed(app.opts, app.lua, app.db, keys, + addr, include_pattern, exclude_pattern, false); if 
(app.opts.signing_key() == "") P(F("doing anonymous pull; use -kKEYNAME if you need authentication")); @@ -346,9 +344,9 @@ CMD(clone, "clone", "", CMD_REF(network) std::list uris; uris.push_back(addr); - run_netsync_protocol(client_voice, sink_role, uris, - include_pattern, exclude_pattern, - project, keys, app.lua, app.opts); + run_netsync_protocol(app.opts, app.lua, project, keys, + client_voice, sink_role, uris, + include_pattern, exclude_pattern); change_current_working_dir(workspace_dir); @@ -381,7 +379,7 @@ CMD(clone, "clone", "", CMD_REF(network) // use specified revision complete(app, project, idx(app.opts.revision_selectors, 0)(), ident); - guess_branch(ident, app.opts, project); + guess_branch(app.opts, project, ident); I(!app.opts.branchname().empty()); N(project.revision_is_in_branch(ident, app.opts.branchname), @@ -404,7 +402,7 @@ CMD(clone, "clone", "", CMD_REF(network) content_merge_checkout_adaptor wca(app.db); - app.work.perform_content_update(checkout, wca, app.db, false); + app.work.perform_content_update(app.db, checkout, wca, false); app.work.update_any_attrs(app.db); app.work.maybe_update_inodeprints(app.db); @@ -465,19 +463,19 @@ CMD_NO_WORKSPACE(serve, "serve", "", CMD "(see hook persist_phrase_ok())")); if (!app.opts.bind_uris.empty()) - find_key(*app.opts.bind_uris.begin(), globish("*"), globish(""), - app.opts, app.lua, keys, app.db); + find_key(app.opts, app.lua, app.db, keys, + *app.opts.bind_uris.begin(), globish("*"), globish("")); else - find_key(utf8(), globish("*"), globish(""), - app.opts, app.lua, keys, app.db); + find_key(app.opts, app.lua, app.db, keys, + utf8(), globish("*"), globish("")); } else if (!app.opts.bind_stdio) W(F("The --no-transport-auth option is usually only used " "in combination with --stdio")); - run_netsync_protocol(server_voice, source_and_sink_role, app.opts.bind_uris, - globish("*"), globish(""), - project, keys, app.lua, app.opts); + run_netsync_protocol(app.opts, app.lua, project, keys, + server_voice, source_and_sink_role, app.opts.bind_uris, + globish("*"), globish("")); } // Local Variables: ============================================================ --- cmd_othervcs.cc 805ca2c1f8f3ec3a05a2b5a9cfb50ca7cbb7fba5 +++ cmd_othervcs.cc 9d46c385ed7557d720aa71c99da22e40b6549304 @@ -28,9 +28,7 @@ CMD(rcs_import, "rcs_import", "", CMD_RE for (args_vector::const_iterator i = args.begin(); i != args.end(); ++i) - { - test_parse_rcs_file(system_path((*i)()), app.db); - } + test_parse_rcs_file(system_path((*i)())); } @@ -56,9 +54,9 @@ CMD(cvs_import, "cvs_import", "", CMD_RE // make sure we can sign certs using the selected key; also requests // the password (if necessary) up front rather than after some arbitrary // amount of work - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); - import_cvs_repo(cvsroot, keys, project, app.opts.branchname); + import_cvs_repo(project, keys, cvsroot, app.opts.branchname); } ============================================================ --- cmd_ws_commit.cc f702e00ec483e72dc1fcfe58dbf88e20a4f9f907 +++ cmd_ws_commit.cc 7f18334033c144ff6d9027e0bbedc92ebbb9de5c @@ -161,20 +161,20 @@ CMD(revert, "revert", "", CMD_REF(worksp app.require_workspace(); parent_map parents; - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); N(parents.size() == 1, F("this command can only be used in a single-parent workspace")); old_roster = parent_roster(parents.begin()); { temp_node_id_source nis; - 
app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); } - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - old_roster, new_roster, app.work); + old_roster, new_roster); if (app.opts.missing) { @@ -198,9 +198,9 @@ CMD(revert, "revert", "", CMD_REF(worksp missing_files.push_back(*i); } // replace the original mask with a more restricted one - mask = node_restriction(missing_files, std::vector(), - app.opts.depth, - old_roster, new_roster, app.work); + mask = node_restriction(app.work, missing_files, + std::vector(), app.opts.depth, + old_roster, new_roster); } // We want the restricted roster to include all the changes @@ -353,13 +353,13 @@ CMD(disapprove, "disapprove", "", CMD_RE N(rev.edges.size() == 1, F("revision %s has %d changesets, cannot invert") % r % rev.edges.size()); - guess_branch(r, app.opts, project); + guess_branch(app.opts, project, r); N(app.opts.branchname() != "", F("need --branch argument for disapproval")); process_commit_message_args(log_message_given, log_message, app, utf8((FL("disapproval of revision '%s'") % r).str())); - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); edge_entry const & old_edge (*rev.edges.begin()); app.db.get_revision_manifest(edge_old_revision(old_edge), @@ -423,7 +423,7 @@ CMD(mkdir, "mkdir", "", CMD_REF(workspac for (set::const_iterator i = paths.begin(); i != paths.end(); ++i) mkdir_p(*i); - app.work.perform_additions(paths, app.db, false, !app.opts.no_ignore); + app.work.perform_additions(app.db, paths, false, !app.opts.no_ignore); } CMD(add, "add", "", CMD_REF(workspace), N_("[PATH]..."), @@ -443,23 +443,24 @@ CMD(add, "add", "", CMD_REF(workspace), bool add_recursive = app.opts.recursive; if (app.opts.unknown) { - path_restriction mask(roots, args_to_paths(app.opts.exclude_patterns), - app.opts.depth, app.work); + path_restriction mask(app.work, roots, + args_to_paths(app.opts.exclude_patterns), + app.opts.depth); set ignored; // if no starting paths have been specified use the workspace root if (roots.empty()) roots.push_back(file_path()); - app.work.find_unknown_and_ignored(mask, roots, paths, ignored, app.db); + app.work.find_unknown_and_ignored(app.db, mask, roots, paths, ignored); - app.work.perform_additions(ignored, app.db, + app.work.perform_additions(app.db, ignored, add_recursive, !app.opts.no_ignore); } else paths = set(roots.begin(), roots.end()); - app.work.perform_additions(paths, app.db, add_recursive, !app.opts.no_ignore); + app.work.perform_additions(app.db, paths, add_recursive, !app.opts.no_ignore); } CMD(drop, "drop", "rm", CMD_REF(workspace), N_("[PATH]..."), @@ -477,11 +478,11 @@ CMD(drop, "drop", "rm", CMD_REF(workspac { temp_node_id_source nis; roster_t current_roster_shape; - app.work.get_current_roster_shape(current_roster_shape, app.db, nis); - node_restriction mask(args_to_paths(args), + app.work.get_current_roster_shape(app.db, nis, current_roster_shape); + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - current_roster_shape, app.work); + current_roster_shape); app.work.find_missing(current_roster_shape, mask, paths); } else @@ -490,7 +491,7 @@ CMD(drop, "drop", "rm", CMD_REF(workspac paths = set(roots.begin(), roots.end()); } - app.work.perform_deletions(paths, app.db, + app.work.perform_deletions(app.db, paths, 
app.opts.recursive, app.opts.bookkeep_only); } @@ -525,7 +526,7 @@ CMD(rename, "rename", "mv", CMD_REF(work N(get_path_status(dst_path) == path::directory, F(_("The specified target directory %s/ doesn't exist.")) % dst_path); - app.work.perform_rename(src_paths, dst_path, app.db, app.opts.bookkeep_only); + app.work.perform_rename(app.db, src_paths, dst_path, app.opts.bookkeep_only); } @@ -545,7 +546,7 @@ CMD(pivot_root, "pivot_root", "", CMD_RE app.require_workspace(); file_path new_root = file_path_external(idx(args, 0)); file_path put_old = file_path_external(idx(args, 1)); - app.work.perform_pivot_root(new_root, put_old, app.db, + app.work.perform_pivot_root(app.db, new_root, put_old, app.opts.bookkeep_only); } @@ -560,13 +561,13 @@ CMD(status, "status", "", CMD_REF(inform temp_node_id_source nis; app.require_workspace(); - app.work.get_parent_rosters(old_rosters, app.db); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_parent_rosters(app.db, old_rosters); + app.work.get_current_roster_shape(app.db, nis, new_roster); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - old_rosters, new_roster, app.work); + old_rosters, new_roster); app.work.update_current_roster_from_filesystem(new_roster, mask); make_restricted_revision(old_rosters, new_roster, mask, rev); @@ -621,7 +622,7 @@ CMD(checkout, "checkout", "co", CMD_REF( // use specified revision complete(app, project, idx(app.opts.revision_selectors, 0)(), revid); - guess_branch(revid, app.opts, project); + guess_branch(app.opts, project, revid); I(!app.opts.branchname().empty()); @@ -676,7 +677,7 @@ CMD(checkout, "checkout", "co", CMD_REF( content_merge_checkout_adaptor wca(app.db); - app.work.perform_content_update(checkout, wca, app.db, false); + app.work.perform_content_update(app.db, checkout, wca, false); app.work.update_any_attrs(app.db); app.work.maybe_update_inodeprints(app.db); @@ -701,7 +702,7 @@ CMD(attr_drop, "drop", "", CMD_REF(attr) temp_node_id_source nis; app.require_workspace(); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); file_path path = file_path_external(idx(args, 0)); @@ -726,7 +727,7 @@ CMD(attr_drop, "drop", "", CMD_REF(attr) } parent_map parents; - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); revision_t new_work; make_revision_for_workspace(parents, new_roster, new_work); @@ -748,7 +749,7 @@ CMD(attr_get, "get", "", CMD_REF(attr), temp_node_id_source nis; app.require_workspace(); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); file_path path = file_path_external(idx(args, 0)); @@ -798,7 +799,7 @@ CMD(attr_set, "set", "", CMD_REF(attr), temp_node_id_source nis; app.require_workspace(); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_current_roster_shape(app.db, nis, new_roster); file_path path = file_path_external(idx(args, 0)); @@ -811,7 +812,7 @@ CMD(attr_set, "set", "", CMD_REF(attr), node->attrs[a_key] = make_pair(true, a_value); parent_map parents; - app.work.get_parent_rosters(parents, app.db); + app.work.get_parent_rosters(app.db, parents); revision_t new_work; make_revision_for_workspace(parents, new_roster, new_work); @@ -856,8 +857,8 @@ CMD_AUTOMATE(get_attributes, N_("PATH"), temp_node_id_source nis; // get the base and the current roster of 
this workspace - work.get_current_roster_shape(current, app.db, nis); - work.get_parent_rosters(parents, app.db); + work.get_current_roster_shape(app.db, nis, current); + work.get_parent_rosters(app.db, parents); N(parents.size() == 1, F("this command can only be used in a single-parent workspace")); base = parent_roster(parents.begin()); @@ -968,7 +969,7 @@ CMD_AUTOMATE(set_attribute, N_("PATH KEY roster_t new_roster; temp_node_id_source nis; - work.get_current_roster_shape(new_roster, app.db, nis); + work.get_current_roster_shape(app.db, nis, new_roster); file_path path = file_path_external(idx(args,0)); @@ -981,7 +982,7 @@ CMD_AUTOMATE(set_attribute, N_("PATH KEY node->attrs[a_key] = make_pair(true, a_value); parent_map parents; - work.get_parent_rosters(parents, app.db); + work.get_parent_rosters(app.db, parents); revision_t new_work; make_revision_for_workspace(parents, new_roster, new_work); @@ -1013,7 +1014,7 @@ CMD_AUTOMATE(drop_attribute, N_("PATH [K roster_t new_roster; temp_node_id_source nis; - work.get_current_roster_shape(new_roster, app.db, nis); + work.get_current_roster_shape(app.db, nis, new_roster); file_path path = file_path_external(idx(args,0)); @@ -1037,7 +1038,7 @@ CMD_AUTOMATE(drop_attribute, N_("PATH [K } parent_map parents; - work.get_parent_rosters(parents, app.db); + work.get_parent_rosters(app.db, parents); revision_t new_work; make_revision_for_workspace(parents, new_roster, new_work); @@ -1065,13 +1066,13 @@ CMD(commit, "commit", "ci", CMD_REF(work app.require_workspace(); app.make_branch_sticky(); - app.work.get_parent_rosters(old_rosters, app.db); - app.work.get_current_roster_shape(new_roster, app.db, nis); + app.work.get_parent_rosters(app.db, old_rosters); + app.work.get_current_roster_shape(app.db, nis, new_roster); - node_restriction mask(args_to_paths(args), + node_restriction mask(app.work, args_to_paths(args), args_to_paths(app.opts.exclude_patterns), app.opts.depth, - old_rosters, new_roster, app.work); + old_rosters, new_roster); app.work.update_current_roster_from_filesystem(new_roster, mask); make_restricted_revision(old_rosters, new_roster, mask, restricted_rev, @@ -1092,7 +1093,7 @@ CMD(commit, "commit", "ci", CMD_REF(work i++) { // this will prefer --branch if it was set - guess_branch(edge_old_revision(i), app.opts, project, + guess_branch(app.opts, project, edge_old_revision(i), bn_candidate); N(branchname() == "" || branchname == bn_candidate, F("parent revisions of this commit are in different branches:\n" @@ -1154,7 +1155,7 @@ CMD(commit, "commit", "ci", CMD_REF(work message_validated, reason); N(message_validated, F("log message rejected by hook: %s") % reason); - cache_user_key(app.opts, app.lua, keys, app.db); + cache_user_key(app.opts, app.lua, app.db, keys); // for the divergence check, below set heads; @@ -1340,7 +1341,7 @@ CMD_NO_WORKSPACE(import, "import", "", C // use specified revision complete(app, project, idx(app.opts.revision_selectors, 0)(), ident); - guess_branch(ident, app.opts, project); + guess_branch(app.opts, project, ident); I(!app.opts.branchname().empty()); ============================================================ --- database.cc f2b3da8910f19eaaa8af403cc7b41429866c0f68 +++ database.cc 74c819293bca360a8df5a82776edfc5f12c22bdd @@ -981,14 +981,14 @@ database::migrate(key_store & keys) database::migrate(key_store & keys) { ensure_open_for_maintenance(); - migrate_sql_schema(imp->__sql, get_filename(), keys); + migrate_sql_schema(imp->__sql, keys, get_filename()); } void -database::test_migration_step(string const 
& schema, key_store & keys) +database::test_migration_step(key_store & keys, string const & schema) { ensure_open_for_maintenance(); - ::test_migration_step(imp->__sql, get_filename(), keys, schema); + ::test_migration_step(imp->__sql, keys, get_filename(), schema); } void @@ -2449,7 +2449,7 @@ database::put_roster_for_revision(revisi shared_ptr mm_writeable(new marking_map); MM(*mm_writeable); manifest_id roster_manifest_id; MM(roster_manifest_id); - make_roster_for_revision(rev, new_id, *ros_writeable, *mm_writeable, *this); + make_roster_for_revision(*this, rev, new_id, *ros_writeable, *mm_writeable); calculate_ident(*ros_writeable, roster_manifest_id); I(rev.new_manifest == roster_manifest_id); // const'ify the objects, suitable for caching etc. ============================================================ --- database.hh db3eba0e6cb68d5fa4da20c346bebd2bc6c6ef51 +++ database.hh eb927e49718ef1e00b7b6130b01b4abfaaae387e @@ -147,10 +147,10 @@ private: file_id const & base, file_delta const & del); - friend void rcs_put_raw_file_edge(hexenc const & old_id, + friend void rcs_put_raw_file_edge(database & db, + hexenc const & old_id, hexenc const & new_id, - delta const & del, - database & db); + delta const & del); // @@ -390,7 +390,7 @@ public: void info(std::ostream &); void version(std::ostream &); void migrate(key_store &); - void test_migration_step(std::string const &, key_store &); + void test_migration_step(key_store &, std::string const &); // for kill_rev_locally: void delete_existing_rev_and_certs(revision_id const & rid); // for kill_branch_certs_locally: ============================================================ --- diff_patch.cc bf0138e49ccc702cb042fa0f95e272e3a072ea96 +++ diff_patch.cc 863137e97631b7f95879bb4baef1644d2435ec68 @@ -498,7 +498,7 @@ content_merge_database_adaptor::content_ // FIXME: possibly refactor to run this lazily, as we don't // need to find common ancestors if we're never actually // called on to do content merging. - find_common_ancestor_for_merge(left, right, lca, db); + find_common_ancestor_for_merge(db, left, right, lca); } void @@ -530,10 +530,9 @@ static void } static void -load_and_cache_roster(revision_id const & rid, +load_and_cache_roster(database & db, revision_id const & rid, map > & rmap, - shared_ptr & rout, - database & db) + shared_ptr & rout) { map >::const_iterator i = rmap.find(rid); if (i != rmap.end()) @@ -559,7 +558,7 @@ content_merge_database_adaptor::get_ance // Begin by loading any non-empty file lca roster rid = lca; if (!lca.inner()().empty()) - load_and_cache_roster(lca, rosters, anc, db); + load_and_cache_roster(db, lca, rosters, anc); // If there is no LCA, or the LCA's roster doesn't contain the file, // then use the file's birth roster. 
@@ -587,7 +586,7 @@ content_merge_database_adaptor::get_ance rid = lmm->second.birth_revision; } - load_and_cache_roster(rid, rosters, anc, db); + load_and_cache_roster(db, rid, rosters, anc); } I(anc); } @@ -662,7 +661,7 @@ content_merge_workspace_adaptor::get_anc rid = lmm->second.birth_revision; } - load_and_cache_roster(rid, rosters, anc, db); + load_and_cache_roster(db, rid, rosters, anc); } I(anc); } ============================================================ --- enumerator.cc 9d7d81efbfa8befe3a150399b2bde472d9e5e3b9 +++ enumerator.cc ca9940ca45aa35f45aa5a09a75b57b9c813fea3c @@ -27,9 +27,9 @@ using std::vector; using std::set; using std::vector; -revision_enumerator::revision_enumerator(enumerator_callbacks & cb, - project_t & project) - : cb(cb), project(project) +revision_enumerator::revision_enumerator(project_t & project, + enumerator_callbacks & cb) + : project(project), cb(cb) { revision_id root; revs.push_back(root); ============================================================ --- enumerator.hh c911c8622932915c90e96b53668df5bdd5317c7a +++ enumerator.hh 32f0295cfac6f68ed5c345d06af5e9a20548f117 @@ -52,8 +52,8 @@ revision_enumerator class revision_enumerator { + project_t & project; enumerator_callbacks & cb; - project_t & project; std::set terminal_nodes; std::set enumerated_nodes; std::deque revs; @@ -70,8 +70,8 @@ public: std::vector > & certs); public: - revision_enumerator(enumerator_callbacks & cb, - project_t & project); + revision_enumerator(project_t & project, + enumerator_callbacks & cb); void get_revision_parents(revision_id const & rid, std::vector & parents); void note_cert(revision_id const & rid, ============================================================ --- keys.cc b818f882c867150822c9826b8b78e33adee46541 +++ keys.cc d96a6b057ee9ba109e52e6cf2d7a5e8785435b85 @@ -112,9 +112,8 @@ void // form, so as not to bother the user for their passphrase later. void -get_user_key(rsa_keypair_id & key, - options const & opts, lua_hooks & lua, - key_store & keys, database & db) +get_user_key(options const & opts, lua_hooks & lua, + database & db, key_store & keys, rsa_keypair_id & key) { if (!keys.signing_key().empty()) { @@ -169,10 +168,10 @@ cache_user_key(options const & opts, lua // the important thing is to have selected one and cached the decrypted key. 
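(Not part of the patch: a minimal usage sketch of the reordered key helpers shown in this keys.cc hunk and in keys.hh below. It assumes the usual monotone command context -- app.opts, app.lua, app.db and a key_store named keys, as at the call sites elsewhere in this patch -- and the variable name signing_key is purely illustrative.)

    // Sketch: select the user's key with the new argument order
    // (options, lua_hooks, database, key_store, then the output).
    rsa_keypair_id signing_key;
    get_user_key(app.opts, app.lua, app.db, keys, signing_key);

    // When only the cached, decrypted key matters:
    cache_user_key(app.opts, app.lua, app.db, keys);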
void cache_user_key(options const & opts, lua_hooks & lua, - key_store & keys, database & db) + database & db, key_store & keys) { rsa_keypair_id key; - get_user_key(key, opts, lua, keys, db); + get_user_key(opts, lua, db, keys, key); } void ============================================================ --- keys.hh d4c34c10f4c29dbdbc6b7471011ee57aa438fb5c +++ keys.hh 20459245774ad7410220fe28dd5c9025987bf287 @@ -29,11 +29,12 @@ get_passphrase(utf8 & phrase, bool generating_key); // N()'s out if there is no unique key for us to use -void get_user_key(rsa_keypair_id & key, options const & opts, lua_hooks & lua, - key_store & keys, database & db); +void get_user_key(options const & opts, lua_hooks & lua, + database & db, key_store & keys, + rsa_keypair_id & key); void cache_user_key(options const & opts, lua_hooks & lua, - key_store & keys, database & db); + database & db, key_store & keys); void load_key_pair(key_store & keys, rsa_keypair_id const & id); ============================================================ --- merge.cc cc1fbab1643cd6188637296b9806c637134c1f78 +++ merge.cc f9f44792ab7eeb5b8637b49099fc26030b0eceae @@ -123,11 +123,11 @@ void } void -resolve_merge_conflicts(roster_t const & left_roster, +resolve_merge_conflicts(lua_hooks & lua, + roster_t const & left_roster, roster_t const & right_roster, roster_merge_result & result, - content_merge_adaptor & adaptor, - lua_hooks & lua) + content_merge_adaptor & adaptor) { // FIXME_ROSTERS: we only have code (below) to invoke the // line-merger on content conflicts. Other classes of conflict will @@ -177,10 +177,10 @@ void } void -interactive_merge_and_store(revision_id const & left_rid, +interactive_merge_and_store(lua_hooks & lua, database & db, + revision_id const & left_rid, revision_id const & right_rid, - revision_id & merged_rid, - database & db, lua_hooks & lua) + revision_id & merged_rid) { roster_t left_roster, right_roster; marking_map left_marking_map, right_marking_map; @@ -193,37 +193,29 @@ interactive_merge_and_store(revision_id roster_merge_result result; -// { -// data tmp; -// write_roster_and_marking(left_roster, left_marking_map, tmp); -// P(F("merge left roster: [[[\n%s\n]]]") % tmp); -// write_roster_and_marking(right_roster, right_marking_map, tmp); -// P(F("merge right roster: [[[\n%s\n]]]") % tmp); -// } - roster_merge(left_roster, left_marking_map, left_uncommon_ancestors, right_roster, right_marking_map, right_uncommon_ancestors, result); content_merge_database_adaptor dba(db, left_rid, right_rid, left_marking_map, right_marking_map); - resolve_merge_conflicts(left_roster, right_roster, - result, dba, lua); + resolve_merge_conflicts(lua, left_roster, right_roster, + result, dba); // write new files into the db - store_roster_merge_result(left_roster, right_roster, result, - left_rid, right_rid, merged_rid, - db); + store_roster_merge_result(db, + left_roster, right_roster, result, + left_rid, right_rid, merged_rid); } void -store_roster_merge_result(roster_t const & left_roster, +store_roster_merge_result(database & db, + roster_t const & left_roster, roster_t const & right_roster, roster_merge_result & result, revision_id const & left_rid, revision_id const & right_rid, - revision_id & merged_rid, - database & db) + revision_id & merged_rid) { I(result.is_clean()); roster_t & merged_roster = result.roster; ============================================================ --- merge.hh e37513afabd41ebafa5bc2b42a42121900c23269 +++ merge.hh f3613e195ad378f7666d003b959b322abbce97d0 @@ -25,11 +25,11 @@ void struct 
content_merge_adaptor; void -resolve_merge_conflicts(roster_t const & left_roster, +resolve_merge_conflicts(lua_hooks & lua, + roster_t const & left_roster, roster_t const & right_roster, roster_merge_result & result, - content_merge_adaptor & adaptor, - lua_hooks & lua); + content_merge_adaptor & adaptor); // traditional resolve-all-conflicts-as-you-go style merging with 3-way merge // for file texts @@ -43,17 +43,19 @@ void // around the revision and its files not being in the db, and the resulting // revision and its merged files not being written back to the db void -interactive_merge_and_store(revision_id const & left, revision_id const & right, - revision_id & merged, database & db, lua_hooks & lua); +interactive_merge_and_store(lua_hooks & lua, database & db, + revision_id const & left, + revision_id const & right, + revision_id & merged); void -store_roster_merge_result(roster_t const & left_roster, +store_roster_merge_result(database & db, + roster_t const & left_roster, roster_t const & right_roster, roster_merge_result & result, revision_id const & left_rid, revision_id const & right_rid, - revision_id & merged_rid, - database & db); + revision_id & merged_rid); // Local Variables: // mode: C++ ============================================================ --- monotone.cc 2c9baedc7ac7d288f62bfdfb1bb44d711c6ea943 +++ monotone.cc 3602b1d8a957547153408a9a13c44d3a33803ecb @@ -109,7 +109,7 @@ read_global_options(options & opts, args } // read command-line options and return the command name -commands::command_id read_options(option::concrete_option_set & optset, options & opts, args_vector & args) +commands::command_id read_options(options & opts, option::concrete_option_set & optset, args_vector & args) { commands::command_id cmd; @@ -222,7 +222,7 @@ cpp_main(int argc, char ** argv) // now grab any command specific options and parse the command // this needs to happen after the monotonercs have been read - commands::command_id cmd = read_options(optset, app.opts, opt_args); + commands::command_id cmd = read_options(app.opts, optset, opt_args); if (!app.found_workspace) global_sanity.set_dump_path((app.opts.conf_dir / "dump") ============================================================ --- netcmd.hh 7a72dc722db614fd522f07c040cad8be2de3ecfb +++ netcmd.hh 15406c426ad26a06900fd8fd6b171e746b1ec1ee @@ -176,13 +176,13 @@ public: }; -void run_netsync_protocol(protocol_voice voice, +void run_netsync_protocol(options & opts, lua_hooks & lua, + project_t & project, key_store & keys, + protocol_voice voice, protocol_role role, std::list const & addrs, globish const & include_pattern, - globish const & exclude_pattern, - project_t & project, - key_store & keys, lua_hooks & lua, options & opts); + globish const & exclude_pattern); // Local Variables: // mode: C++ ============================================================ --- netsync.cc 43d0a7da662099a6f8912fe5615172343049975e +++ netsync.cc 80b2879ab8f06fd0b93b8774b69671debba79c40 @@ -421,14 +421,14 @@ session: void note_rev(revision_id const & rev); void note_cert(hexenc const & c); - session(protocol_role role, + session(options & opts, + lua_hooks & lua, + project_t & project, + key_store & keys, + protocol_role role, protocol_voice voice, globish const & our_include_pattern, globish const & our_exclude_pattern, - project_t & project, - key_store & keys, - lua_hooks & lua, - options & opts, string const & peer, shared_ptr sock, bool initiated_by_server = false); @@ -538,14 +538,14 @@ size_t session::session_count = 0; }; size_t 
session::session_count = 0; -session::session(protocol_role role, +session::session(options & opts, + lua_hooks & lua, + project_t & project, + key_store & keys, + protocol_role role, protocol_voice voice, globish const & our_include_pattern, globish const & our_exclude_pattern, - project_t & project, - key_store & keys, - lua_hooks & lua, - options & opts, string const & peer, shared_ptr sock, bool initiated_by_server) : @@ -594,7 +594,7 @@ session::session(protocol_role role, key_refiner(key_item, voice, *this), cert_refiner(cert_item, voice, *this), rev_refiner(revision_item, voice, *this), - rev_enumerator(*this, project), + rev_enumerator(project, *this), initiated_by_server(initiated_by_server) {} @@ -2373,7 +2373,7 @@ static shared_ptr static shared_ptr -build_stream_to_server(lua_hooks & lua, options & opts, +build_stream_to_server(options & opts, lua_hooks & lua, globish const & include_pattern, globish const & exclude_pattern, utf8 const & address, @@ -2414,13 +2414,13 @@ static void } static void -call_server(protocol_role role, - globish const & include_pattern, - globish const & exclude_pattern, +call_server(options & opts, + lua_hooks & lua, project_t & project, key_store & keys, - lua_hooks & lua, - options & opts, + protocol_role role, + globish const & include_pattern, + globish const & exclude_pattern, std::list const & addresses, Netxx::port_type default_port, unsigned long timeout_seconds) @@ -2435,7 +2435,7 @@ call_server(protocol_role role, P(F("connecting to %s") % address); shared_ptr server - = build_stream_to_server(lua, opts, + = build_stream_to_server(opts, lua, include_pattern, exclude_pattern, address, default_port, @@ -2447,10 +2447,11 @@ call_server(protocol_role role, Netxx::SockOpt socket_options(server->get_socketfd(), false); socket_options.set_non_blocking(); - session sess(role, client_voice, + session sess(opts, lua, project, keys, + role, client_voice, include_pattern, exclude_pattern, - project, keys, lua, opts, address(), server); + address(), server); while (true) { @@ -2608,15 +2609,17 @@ static void } static void -handle_new_connection(Netxx::Address & addr, +handle_new_connection(options & opts, + lua_hooks & lua, + project_t & project, + key_store & keys, + Netxx::Address & addr, Netxx::StreamServer & server, Netxx::Timeout & timeout, protocol_role role, globish const & include_pattern, globish const & exclude_pattern, - map > & sessions, - project_t & project, key_store & keys, - lua_hooks & lua, options & opts) + map > & sessions) { L(FL("accepting new connection on %s : %s") % (addr.get_name()?addr.get_name():"") % lexical_cast(addr.get_port())); @@ -2640,9 +2643,9 @@ handle_new_connection(Netxx::Address & a shared_ptr (new Netxx::Stream(client.get_socketfd(), timeout)); - shared_ptr sess(new session(role, server_voice, + shared_ptr sess(new session(opts, lua, project, keys, + role, server_voice, include_pattern, exclude_pattern, - project, keys, lua, opts, lexical_cast(client), str)); sess->begin_service(); sessions.insert(make_pair(client.get_socketfd(), sess)); @@ -2781,13 +2784,13 @@ static void } static void -serve_connections(protocol_role role, - globish const & include_pattern, - globish const & exclude_pattern, +serve_connections(options & opts, + lua_hooks & lua, project_t & project, key_store & keys, - lua_hooks & lua, - options & opts, + protocol_role role, + globish const & include_pattern, + globish const & exclude_pattern, std::list const & addresses, Netxx::port_type default_port, unsigned long timeout_seconds, @@ -2897,7 
+2900,7 @@ serve_connections(protocol_role role, { P(F("connecting to %s") % addr()); shared_ptr server - = build_stream_to_server(lua, opts, + = build_stream_to_server(opts, lua, inc, exc, addr, default_port, timeout); @@ -2915,10 +2918,10 @@ serve_connections(protocol_role role, else if (request.what == "pull") role = sink_role; - shared_ptr sess(new session(role, client_voice, - inc, exc, + shared_ptr sess(new session(opts, lua, project, keys, - lua, opts, + role, client_voice, + inc, exc, addr(), server, true)); sessions.insert(make_pair(server->get_socketfd(), sess)); @@ -2956,10 +2959,10 @@ serve_connections(protocol_role role, // we either got a new connection else if (fd == server) - handle_new_connection(addr, server, timeout, role, + handle_new_connection(opts, lua, project, keys, + addr, server, timeout, role, include_pattern, exclude_pattern, - sessions, project, keys, - lua, opts); + sessions); // or an existing session woke up else @@ -3309,13 +3312,13 @@ void } void -run_netsync_protocol(protocol_voice voice, +run_netsync_protocol(options & opts, lua_hooks & lua, + project_t & project, key_store & keys, + protocol_voice voice, protocol_role role, std::list const & addrs, globish const & include_pattern, - globish const & exclude_pattern, - project_t & project, key_store & keys, - lua_hooks & lua, options & opts) + globish const & exclude_pattern) { if (include_pattern().find_first_of("'\"") != string::npos) { @@ -3339,15 +3342,15 @@ run_netsync_protocol(protocol_voice voic if (opts.bind_stdio) { shared_ptr str(new Netxx::PipeStream(0,1)); - shared_ptr sess(new session(role, server_voice, + shared_ptr sess(new session(opts, lua, project, keys, + role, server_voice, include_pattern, exclude_pattern, - project, keys, lua, opts, "stdio", str)); serve_single_connection(sess,constants::netsync_timeout_seconds); } else - serve_connections(role, include_pattern, exclude_pattern, - project, keys, lua, opts, + serve_connections(opts, lua, project, keys, + role, include_pattern, exclude_pattern, addrs, static_cast(constants::netsync_default_port), static_cast(constants::netsync_timeout_seconds), static_cast(constants::netsync_connection_limit)); @@ -3355,8 +3358,8 @@ run_netsync_protocol(protocol_voice voic else { I(voice == client_voice); - call_server(role, include_pattern, exclude_pattern, - project, keys, lua, opts, + call_server(opts, lua, project, keys, + role, include_pattern, exclude_pattern, addrs, static_cast(constants::netsync_default_port), static_cast(constants::netsync_timeout_seconds)); } ============================================================ --- project.cc 8f4a3f95b006e6c824bfdd32b9dfd75f3e85f824 +++ project.cc 919ba0487836de6e7520f368e202b6e146683297 @@ -93,7 +93,7 @@ namespace cert_name(branch_cert_name), branch_encoded, certs); - erase_bogus_certs(certs, db); + erase_bogus_certs(db, certs); return certs.empty(); } }; @@ -113,7 +113,7 @@ namespace cert_name(suspend_cert_name), branch_encoded, certs); - erase_bogus_certs(certs, db); + erase_bogus_certs(db, certs); return !certs.empty(); } }; @@ -141,7 +141,7 @@ project_t::get_branch_heads(branch_name branch.second); not_in_branch p(db, branch_encoded); - erase_ancestors_and_failures(branch.second, p, db, + erase_ancestors_and_failures(db, branch.second, p, inverse_graph_cache_ptr); if (!ignore_suspend_certs) @@ -173,7 +173,7 @@ project_t::revision_is_in_branch(revisio int num = certs.size(); - erase_bogus_certs(certs, db); + erase_bogus_certs(db, certs); L(FL("found %d (%d valid) %s branch certs on revision %s") 
% num @@ -189,7 +189,7 @@ project_t::put_revision_in_branch(key_st revision_id const & id, branch_name const & branch) { - cert_revision_in_branch(id, branch, db, keys); + cert_revision_in_branch(db, keys, id, branch); } bool @@ -204,7 +204,7 @@ project_t::revision_is_suspended_in_bran int num = certs.size(); - erase_bogus_certs(certs, db); + erase_bogus_certs(db, certs); L(FL("found %d (%d valid) %s suspend certs on revision %s") % num @@ -220,7 +220,7 @@ project_t::suspend_revision_in_branch(ke revision_id const & id, branch_name const & branch) { - cert_revision_suspended_in_branch(id, branch, db, keys); + cert_revision_suspended_in_branch(db, keys, id, branch); } @@ -244,7 +244,7 @@ project_t::get_revision_certs_by_name(re std::vector > & certs) { outdated_indicator i = db.get_revision_certs(id, name, certs); - erase_bogus_certs(certs, db); + erase_bogus_certs(db, certs); return i; } @@ -304,7 +304,7 @@ project_t::get_tags(set & tags) { std::vector > certs; outdated_indicator i = db.get_revision_certs(tag_cert_name, certs); - erase_bogus_certs(certs, db); + erase_bogus_certs(db, certs); tags.clear(); for (std::vector >::const_iterator i = certs.begin(); i != certs.end(); ++i) @@ -321,7 +321,7 @@ project_t::put_tag(key_store & keys, revision_id const & id, string const & name) { - cert_revision_tag(id, name, db, keys); + cert_revision_tag(db, keys, id, name); } @@ -338,10 +338,10 @@ project_t::put_standard_certs(key_store I(time.valid()); I(!author.empty()); - cert_revision_in_branch(id, branch, db, keys); - cert_revision_changelog(id, changelog, db, keys); - cert_revision_date_time(id, time, db, keys); - cert_revision_author(id, author, db, keys); + cert_revision_in_branch(db, keys, id, branch); + cert_revision_changelog(db, keys, id, changelog); + cert_revision_date_time(db, keys, id, time); + cert_revision_author(db, keys, id, author); } void @@ -362,7 +362,7 @@ project_t::put_standard_certs_from_optio if (author.empty()) { rsa_keypair_id key; - get_user_key(key, opts, lua, keys, db); + get_user_key(opts, lua, db, keys, key); if (!lua.hook_get_author(branch, key, author)) author = key(); @@ -377,7 +377,7 @@ project_t::put_cert(key_store & keys, cert_name const & name, cert_value const & value) { - put_simple_revision_cert(id, name, value, db, keys); + put_simple_revision_cert(db, keys, id, name, value); } ============================================================ --- rcs_import.cc 0f51bc2b23658e9ef7477a1ba9c7f4f8eff691a5 +++ rcs_import.cc b32cdb110fdf01057d6afac6b1d91263b0d74551 @@ -449,10 +449,10 @@ void // DB is stupid, but it's also stupid to put raw edge insert methods on the // DB itself. or is it? hmm.. encapsulation vs. usage guidance.. 
void -rcs_put_raw_file_edge(hexenc const & old_id, +rcs_put_raw_file_edge(database & db, + hexenc const & old_id, hexenc const & new_id, - delta const & del, - database & db) + delta const & del) { if (old_id == new_id) { @@ -476,12 +476,11 @@ static void static void -insert_into_db(data const & curr_data, +insert_into_db(database & db, data const & curr_data, hexenc const & curr_id, vector< piece > const & next_lines, data & next_data, - hexenc & next_id, - database & db) + hexenc & next_id) { // inserting into the DB // note: curr_lines is a "new" (base) version @@ -495,7 +494,7 @@ insert_into_db(data const & curr_data, delta del; diff(curr_data, next_data, del); calculate_ident(next_data, next_id); - rcs_put_raw_file_edge(next_id, curr_id, del, db); + rcs_put_raw_file_edge(db, next_id, curr_id, del); } @@ -558,12 +557,12 @@ static void static void -process_branch(string const & begin_version, +process_branch(database & db, + string const & begin_version, vector< piece > const & begin_lines, data const & begin_data, hexenc const & begin_id, rcs_file const & r, - database & db, cvs_history & cvs) { string curr_version = begin_version; @@ -595,8 +594,8 @@ process_branch(string const & begin_vers L(FL("constructed RCS version %s, inserting into database") % next_version); - insert_into_db(curr_data, curr_id, - *next_lines, next_data, next_id, db); + insert_into_db(db, curr_data, curr_id, + *next_lines, next_data, next_id); } // mark the beginning-of-branch time and state of this file if @@ -638,11 +637,11 @@ process_branch(string const & begin_vers L(FL("following RCS branch %s = '%s'") % (*i) % branch); construct_version(*curr_lines, *i, branch_lines, r); - insert_into_db(curr_data, curr_id, - branch_lines, branch_data, branch_id, db); + insert_into_db(db, curr_data, curr_id, + branch_lines, branch_data, branch_id); cvs.push_branch(branch, priv); - process_branch(*i, branch_lines, branch_data, branch_id, r, db, cvs); + process_branch(db, *i, branch_lines, branch_data, branch_id, r, cvs); cvs.pop_branch(); L(FL("finished RCS branch %s = '%s'") % (*i) % branch); @@ -663,7 +662,7 @@ static void static void -import_rcs_file_with_cvs(string const & filename, database & db, +import_rcs_file_with_cvs(database & db, string const & filename, cvs_history & cvs) { rcs_file r; @@ -695,16 +694,15 @@ import_rcs_file_with_cvs(string const & global_pieces.reset(); global_pieces.index_deltatext(r.deltatexts.find(r.admin.head)->second, head_lines); - process_branch(r.admin.head, head_lines, dat, id, r, db, cvs); + process_branch(db, r.admin.head, head_lines, dat, id, r, cvs); global_pieces.reset(); } ui.set_tick_trailer(""); } - void -test_parse_rcs_file(system_path const & filename, database & db) +test_parse_rcs_file(system_path const & filename) { cvs_history cvs; @@ -899,7 +897,7 @@ public: { try { - import_rcs_file_with_cvs(file, db, cvs); + import_rcs_file_with_cvs(db, file, cvs); } catch (oops const & o) { @@ -1037,9 +1035,9 @@ cluster_consumer editable_roster_base editable_ros; revision_id parent_rid, child_rid; - cluster_consumer(cvs_history & cvs, + cluster_consumer(project_t & project, key_store & keys, - project_t & project, + cvs_history & cvs, string const & branchname, cvs_branch const & branch, ticker & n_revs); @@ -1068,15 +1066,15 @@ void cluster_set; void -import_branch(cvs_history & cvs, +import_branch(project_t & project, key_store & keys, - project_t & project, + cvs_history & cvs, string const & branchname, shared_ptr const & branch, ticker & n_revs) { cluster_set clusters; - 
cluster_consumer cons(cvs, keys, project, branchname, *branch, n_revs); + cluster_consumer cons(project, keys, cvs, branchname, *branch, n_revs); unsigned long commits_remaining = branch->lineage.size(); // step 1: sort the lineage @@ -1200,9 +1198,9 @@ void } void -import_cvs_repo(system_path const & cvsroot, +import_cvs_repo(project_t & project, key_store & keys, - project_t & project, + system_path const & cvsroot, branch_name const & branchname) { @@ -1240,7 +1238,7 @@ import_cvs_repo(system_path const & cvsr string branchname = i->first; shared_ptr branch = i->second; L(FL("branch %s has %d entries") % branchname % branch->lineage.size()); - import_branch(cvs, keys, project, branchname, branch, n_revs); + import_branch(project, keys, cvs, branchname, branch, n_revs); // free up some memory cvs.branches.erase(branchname); @@ -1250,7 +1248,7 @@ import_cvs_repo(system_path const & cvsr { transaction_guard guard(project.db); L(FL("trunk has %d entries") % cvs.trunk->lineage.size()); - import_branch(cvs, keys, project, cvs.base_branch, cvs.trunk, n_revs); + import_branch(project, keys, cvs, cvs.base_branch, cvs.trunk, n_revs); guard.commit(); } @@ -1270,9 +1268,9 @@ import_cvs_repo(system_path const & cvsr } } -cluster_consumer::cluster_consumer(cvs_history & cvs, +cluster_consumer::cluster_consumer(project_t & project, key_store & keys, - project_t & project, + cvs_history & cvs, string const & branchname, cvs_branch const & branch, ticker & n_revs) ============================================================ --- rcs_import.hh a1bdda69e06ea25a4b50cfea9c8e1dbf5f61edd5 +++ rcs_import.hh a8e67b44e7960ed7ef21eb46abac7d658ce03689 @@ -16,9 +16,11 @@ class branch_name; class project_t; class branch_name; -void test_parse_rcs_file(system_path const & filename, database & db); -void import_cvs_repo(system_path const & cvsroot, key_store & keys, - project_t & project, branch_name const & branchname); +void test_parse_rcs_file(system_path const & filename); +void import_cvs_repo(project_t & project, + key_store & keys, + system_path const & cvsroot, + branch_name const & branchname); // Local Variables: // mode: C++ ============================================================ --- restrictions.cc 60b8793d468dfdcd60f0c9f5844a01d4e059435f +++ restrictions.cc a4bb673571007d154c6f40c87305348acc2b6fb4 @@ -82,10 +82,10 @@ static void } static void -validate_roster_paths(set const & included_paths, +validate_roster_paths(workspace & work, + set const & included_paths, set const & excluded_paths, - set const & known_paths, - workspace & work) + set const & known_paths) { int bad = 0; @@ -119,9 +119,9 @@ void } void -validate_workspace_paths(set const & included_paths, - set const & excluded_paths, - workspace & work) +validate_workspace_paths(workspace & work, + set const & included_paths, + set const & excluded_paths) { int bad = 0; @@ -165,11 +165,11 @@ restriction::restriction(std::vector const & includes, +node_restriction::node_restriction(workspace & w, + std::vector const & includes, std::vector const & excludes, long depth, - roster_t const & roster, - workspace & w) : + roster_t const & roster) : restriction(includes, excludes, depth) { map_nodes(node_map, roster, included_paths, known_paths, @@ -177,15 +177,15 @@ node_restriction::node_restriction(std:: map_nodes(node_map, roster, excluded_paths, known_paths, restricted_path::excluded); - validate_roster_paths(included_paths, excluded_paths, known_paths, w); + validate_roster_paths(w, included_paths, excluded_paths, known_paths); } 
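(Not part of the patch: a sketch of constructing restrictions with the workspace argument leading, matching the updated unit tests further down in restrictions.cc. The element type of the include/exclude vectors is assumed to be file_path, which the flattened hunks no longer show explicitly, and roster stands for some roster_t already in hand.)

    // Sketch: workspace first, then includes/excludes/depth,
    // then any rosters the restriction is evaluated against.
    std::vector<file_path> includes, excludes;
    node_restriction nmask(app.work, includes, excludes, -1, roster);
    path_restriction pmask(app.work, includes, excludes, -1);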
-node_restriction::node_restriction(std::vector const & includes, +node_restriction::node_restriction(workspace & w, + std::vector const & includes, std::vector const & excludes, long depth, roster_t const & roster1, - roster_t const & roster2, - workspace & w) : + roster_t const & roster2) : restriction(includes, excludes, depth) { map_nodes(node_map, roster1, included_paths, known_paths, @@ -198,15 +198,15 @@ node_restriction::node_restriction(std:: map_nodes(node_map, roster2, excluded_paths, known_paths, restricted_path::excluded); - validate_roster_paths(included_paths, excluded_paths, known_paths, w); + validate_roster_paths(w, included_paths, excluded_paths, known_paths); } -node_restriction::node_restriction(std::vector const & includes, +node_restriction::node_restriction(workspace & w, + std::vector const & includes, std::vector const & excludes, long depth, parent_map const & rosters1, - roster_t const & roster2, - workspace & w) : + roster_t const & roster2) : restriction(includes, excludes, depth) { for (parent_map::const_iterator i = rosters1.begin(); @@ -224,14 +224,14 @@ node_restriction::node_restriction(std:: map_nodes(node_map, roster2, excluded_paths, known_paths, restricted_path::excluded); - validate_roster_paths(included_paths, excluded_paths, known_paths, w); + validate_roster_paths(w, included_paths, excluded_paths, known_paths); } -path_restriction::path_restriction(std::vector const & includes, +path_restriction::path_restriction(workspace & w, + std::vector const & includes, std::vector const & excludes, long depth, - workspace & w, validity_check vc) : restriction(includes, excludes, depth) { @@ -240,7 +240,7 @@ path_restriction::path_restriction(std:: if (vc == check_paths) { - validate_workspace_paths(included_paths, excluded_paths, w); + validate_workspace_paths(w, included_paths, excluded_paths); } } @@ -620,7 +620,7 @@ UNIT_TEST(restrictions, simple_include) // check restricted nodes - node_restriction nmask(includes, excludes, -1, roster, app.work); + node_restriction nmask(app.work, includes, excludes, -1, roster); UNIT_TEST_CHECK(!nmask.empty()); @@ -650,7 +650,7 @@ UNIT_TEST(restrictions, simple_include) // check restricted paths - path_restriction pmask(includes, excludes, -1, app.work); + path_restriction pmask(app.work, includes, excludes, -1); UNIT_TEST_CHECK(!pmask.empty()); @@ -692,7 +692,7 @@ UNIT_TEST(restrictions, simple_exclude) // check restricted nodes - node_restriction nmask(includes, excludes, -1, roster, app.work); + node_restriction nmask(app.work, includes, excludes, -1, roster); UNIT_TEST_CHECK(!nmask.empty()); @@ -722,7 +722,7 @@ UNIT_TEST(restrictions, simple_exclude) // check restricted paths - path_restriction pmask(includes, excludes, -1, app.work); + path_restriction pmask(app.work, includes, excludes, -1); UNIT_TEST_CHECK(!pmask.empty()); @@ -766,7 +766,7 @@ UNIT_TEST(restrictions, include_exclude) // check restricted nodes - node_restriction nmask(includes, excludes, -1, roster, app.work); + node_restriction nmask(app.work, includes, excludes, -1, roster); UNIT_TEST_CHECK(!nmask.empty()); @@ -796,7 +796,7 @@ UNIT_TEST(restrictions, include_exclude) // check restricted paths - path_restriction pmask(includes, excludes, -1, app.work); + path_restriction pmask(app.work, includes, excludes, -1); UNIT_TEST_CHECK(!pmask.empty()); @@ -843,7 +843,7 @@ UNIT_TEST(restrictions, exclude_include) // check restricted nodes - node_restriction nmask(includes, excludes, -1, roster, app.work); + node_restriction nmask(app.work, includes, 
excludes, -1, roster); UNIT_TEST_CHECK(!nmask.empty()); @@ -873,7 +873,7 @@ UNIT_TEST(restrictions, exclude_include) // check restricted paths - path_restriction pmask(includes, excludes, -1, app.work); + path_restriction pmask(app.work, includes, excludes, -1); UNIT_TEST_CHECK(!pmask.empty()); @@ -912,9 +912,8 @@ UNIT_TEST(restrictions, invalid_roster_p excludes.push_back(file_path_internal("bar")); app_state app; - UNIT_TEST_CHECK_THROW(node_restriction(includes, excludes, -1, roster, - app.work), - informative_failure); + UNIT_TEST_CHECK_THROW(node_restriction(app.work, includes, excludes, -1, roster), + informative_failure); } UNIT_TEST(restrictions, invalid_workspace_paths) @@ -927,7 +926,7 @@ UNIT_TEST(restrictions, invalid_workspac excludes.push_back(file_path_internal("bar")); app_state app; - UNIT_TEST_CHECK_THROW(path_restriction(includes, excludes, -1, app.work), + UNIT_TEST_CHECK_THROW(path_restriction(app.work, includes, excludes, -1), informative_failure); } @@ -941,7 +940,7 @@ UNIT_TEST(restrictions, ignored_invalid_ excludes.push_back(file_path_internal("bar")); app_state app; - path_restriction pmask(includes, excludes, -1, app.work, + path_restriction pmask(app.work, includes, excludes, -1, path_restriction::skip_check); UNIT_TEST_CHECK( pmask.includes(file_path_internal("foo"))); @@ -965,7 +964,7 @@ UNIT_TEST(restrictions, include_depth_0) // check restricted nodes - node_restriction nmask(includes, excludes, depth, roster, app.work); + node_restriction nmask(app.work, includes, excludes, depth, roster); UNIT_TEST_CHECK(!nmask.empty()); @@ -995,7 +994,7 @@ UNIT_TEST(restrictions, include_depth_0) // check restricted paths - path_restriction pmask(includes, excludes, depth, app.work); + path_restriction pmask(app.work, includes, excludes, depth); UNIT_TEST_CHECK(!pmask.empty()); @@ -1039,7 +1038,7 @@ UNIT_TEST(restrictions, include_depth_0_ // check restricted nodes - node_restriction nmask(includes, excludes, depth, roster, app.work); + node_restriction nmask(app.work, includes, excludes, depth, roster); UNIT_TEST_CHECK( nmask.empty()); @@ -1069,7 +1068,7 @@ UNIT_TEST(restrictions, include_depth_0_ // check restricted paths - path_restriction pmask(includes, excludes, depth, app.work); + path_restriction pmask(app.work, includes, excludes, depth); UNIT_TEST_CHECK( pmask.empty()); @@ -1115,7 +1114,7 @@ UNIT_TEST(restrictions, include_depth_1) // check restricted nodes - node_restriction nmask(includes, excludes, depth, roster, app.work); + node_restriction nmask(app.work, includes, excludes, depth, roster); UNIT_TEST_CHECK(!nmask.empty()); @@ -1145,7 +1144,7 @@ UNIT_TEST(restrictions, include_depth_1) // check restricted paths - path_restriction pmask(includes, excludes, depth, app.work); + path_restriction pmask(app.work, includes, excludes, depth); UNIT_TEST_CHECK(!pmask.empty()); ============================================================ --- restrictions.hh 587fd1930120667fa026468bfa7c7ab58fcd29fc +++ restrictions.hh 0b2c8a2c94abcc8d9d84e6684430f4324ce29f7a @@ -77,25 +77,25 @@ class node_restriction : public restrict public: node_restriction() : restriction() {} - node_restriction(std::vector const & includes, + node_restriction(workspace & work, + std::vector const & includes, std::vector const & excludes, long depth, - roster_t const & roster, - workspace & work); + roster_t const & roster); - node_restriction(std::vector const & includes, + node_restriction(workspace & work, + std::vector const & includes, std::vector const & excludes, long depth, roster_t const & 
roster1, - roster_t const & roster2, - workspace & work); + roster_t const & roster2); - node_restriction(std::vector const & includes, + node_restriction(workspace & work, + std::vector const & includes, std::vector const & excludes, long depth, parent_map const & rosters1, - roster_t const & roster2, - workspace & work); + roster_t const & roster2); bool includes(roster_t const & roster, node_id nid) const; @@ -121,10 +121,10 @@ class path_restriction : public restrict path_restriction() : restriction() {} - path_restriction(std::vector const & includes, + path_restriction(workspace & work, + std::vector const & includes, std::vector const & excludes, long depth, - workspace & work, validity_check vc = check_paths); bool includes(file_path const & sp) const; ============================================================ --- revision.cc 515d7f7cf392614ea54f70bc9a58bf91495c6d7f +++ revision.cc 86c0d8a6aacc484acae33d7d192dcc294cbe9080 @@ -172,10 +172,10 @@ void shared_bitmap & total_union); void -find_common_ancestor_for_merge(revision_id const & left, +find_common_ancestor_for_merge(database & db, + revision_id const & left, revision_id const & right, - revision_id & anc, - database & db) + revision_id & anc) { interner intern; set leaves; @@ -268,7 +268,6 @@ is_ancestor(T const & ancestor_id, T const & descendent_id, multimap const & graph) { - set visited; queue queue; @@ -299,9 +298,9 @@ bool } bool -is_ancestor(revision_id const & ancestor_id, - revision_id const & descendent_id, - database & db) +is_ancestor(database & db, + revision_id const & ancestor_id, + revision_id const & descendent_id) { L(FL("checking whether %s is an ancestor of %s") % ancestor_id % descendent_id); @@ -387,9 +386,9 @@ void } void -toposort(set const & revisions, - vector & sorted, - database & db) +toposort(database & db, + set const & revisions, + vector & sorted) { map work; @@ -411,10 +410,10 @@ static void } static void -accumulate_strict_ancestors(revision_id const & start, +accumulate_strict_ancestors(database & db, + revision_id const & start, set & all_ancestors, multimap const & inverse_graph, - database & db, rev_height const & min_height) { typedef multimap::const_iterator gi; @@ -452,9 +451,9 @@ void // many fewer calls to the predicate, which can be a significant speed win. void -erase_ancestors_and_failures(std::set & candidates, +erase_ancestors_and_failures(database & db, + std::set & candidates, is_failure & p, - database & db, multimap *inverse_graph_cache_ptr) { // Load up the ancestry graph @@ -508,7 +507,7 @@ erase_ancestors_and_failures(std::set & revisions, database & db) +erase_ancestors(database & db, set & revisions) { no_failures p; - erase_ancestors_and_failures(revisions, p, db); + erase_ancestors_and_failures(db, revisions, p); } // This function takes a revision A and a set of revision Bs, calculates the @@ -545,9 +544,9 @@ void // that's not in the Bs. If the output set if non-empty, then A will // certainly be in it; but the output set might be empty. 
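(Not part of the patch: a sketch of the ancestry helpers after the database was moved to the front, per the revision.hh hunk below. The revision_id element types and the names frontier, left_rid and right_rid are assumptions for illustration only.)

    // Sketch: the database is now the leading argument.
    std::set<revision_id> frontier;           // some candidate set (hypothetical)
    erase_ancestors(db, frontier);            // drop members that have a descendant in the set
    std::vector<revision_id> sorted;
    toposort(db, frontier, sorted);           // ancestry order
    bool related = is_ancestor(db, left_rid, right_rid);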
void -ancestry_difference(revision_id const & a, set const & bs, - set & new_stuff, - database & db) +ancestry_difference(database & db, revision_id const & a, + set const & bs, + set & new_stuff) { new_stuff.clear(); typedef multimap::const_iterator gi; @@ -599,10 +598,10 @@ void } void -select_nodes_modified_by_rev(revision_t const & rev, +select_nodes_modified_by_rev(database & db, + revision_t const & rev, roster_t const new_roster, - set & nodes_modified, - database & db) + set & nodes_modified) { nodes_modified.clear(); @@ -942,7 +941,7 @@ void anc_graph::write_certs() cert_name name(j->second.first); cert_value val(j->second.second); - if (put_simple_revision_cert(rev, name, val, db, keys)) + if (put_simple_revision_cert(db, keys, rev, name, val)) ++n_certs_out; } } @@ -1061,7 +1060,7 @@ anc_graph::add_node_for_old_manifest(man // load certs vector< manifest > mcerts; db.get_manifest_certs(man, mcerts); - erase_bogus_certs(mcerts, db); + erase_bogus_certs(db, mcerts); for(vector< manifest >::const_iterator i = mcerts.begin(); i != mcerts.end(); ++i) { @@ -1103,7 +1102,7 @@ u64 anc_graph::add_node_for_oldstyle_rev // load certs vector< revision > rcerts; db.get_revision_certs(rev, rcerts); - erase_bogus_certs(rcerts, db); + erase_bogus_certs(db, rcerts); for(vector< revision >::const_iterator i = rcerts.begin(); i != rcerts.end(); ++i) { @@ -1692,7 +1691,7 @@ build_changesets_from_manifest_ancestry( vector< manifest > tmp; db.get_manifest_certs(cert_name("ancestor"), tmp); - erase_bogus_certs(tmp, db); + erase_bogus_certs(db, tmp); for (vector< manifest >::const_iterator i = tmp.begin(); i != tmp.end(); ++i) @@ -1716,8 +1715,8 @@ static void // must work even when caches (especially, the height cache!) do not exist. // For all other purposes, use toposort above. 
static void -allrevs_toposorted(vector & revisions, - database & db) +allrevs_toposorted(database & db, + vector & revisions) { // get the complete ancestry rev_ancestry_map graph; @@ -1738,7 +1737,7 @@ regenerate_caches(database & db) db.delete_existing_heights(); vector sorted_ids; - allrevs_toposorted(sorted_ids, db); + allrevs_toposorted(db, sorted_ids); ticker done(_("regenerated"), "r", 5); done.set_total(sorted_ids.size()); ============================================================ --- revision.hh 8a83775b59c469630040469f6a8ad0c5711f28f9 +++ revision.hh 40fdeded2fc3bf25dd780a6aaa583326084b7811 @@ -123,23 +123,22 @@ void // sanity checking void -find_common_ancestor_for_merge(revision_id const & left, +find_common_ancestor_for_merge(database & db, + revision_id const & left, revision_id const & right, - revision_id & anc, - database & db); + revision_id & anc); bool -is_ancestor(revision_id const & ancestor, - revision_id const & descendent, - database & db); +is_ancestor(database & db, revision_id const & ancestor, + revision_id const & descendent); void -toposort(std::set const & revisions, - std::vector & sorted, - database & db); +toposort(database & db, + std::set const & revisions, + std::vector & sorted); void -erase_ancestors(std::set & revisions, database & db); +erase_ancestors(database & db, std::set & revisions); struct is_failure { @@ -147,24 +146,24 @@ void virtual ~is_failure() {}; }; void -erase_ancestors_and_failures(std::set & revisions, +erase_ancestors_and_failures(database & db, + std::set & revisions, is_failure & p, - database & db, std::multimap *inverse_graph_cache_ptr = NULL); void -ancestry_difference(revision_id const & a, std::set const & bs, - std::set & new_stuff, - database & db); +ancestry_difference(database & db, revision_id const & a, + std::set const & bs, + std::set & new_stuff); // FIXME: can probably optimize this passing a lookaside cache of the active // frontier set of shared_ptrs, while traversing history. void -select_nodes_modified_by_rev(revision_t const & rev, +select_nodes_modified_by_rev(database & db, + revision_t const & rev, roster_t const roster, - std::set & nodes_modified, - database & db); + std::set & nodes_modified); void make_revision(revision_id const & old_rev_id, ============================================================ --- roster.cc 0ab37af195f9be17a9a4ffe1db7a2c6b2b84440d +++ roster.cc 5250b2f5913c0bd144a9828a1317384956729fc9 @@ -1992,9 +1992,9 @@ void // WARNING: this function is not tested directly (no unit tests). Do not put // real logic in it. 
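(Not part of the patch: a sketch of the two make_roster_for_revision overloads after the reordering recorded in roster.hh below. rev and rid stand for an already-loaded revision_t and its revision_id; both names are illustrative.)

    // Sketch: database (and, for the second overload, the node_id_source)
    // now lead the argument list; the outputs stay at the end.
    roster_t ros;
    marking_map mm;
    make_roster_for_revision(db, rev, rid, ros, mm);       // assigns permanent node ids

    temp_node_id_source nis;
    make_roster_for_revision(db, nis, rev, rid, ros, mm);  // caller-chosen node id source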
void -make_roster_for_revision(revision_t const & rev, revision_id const & new_rid, - roster_t & new_roster, marking_map & new_markings, - database & db, node_id_source & nis) +make_roster_for_revision(database & db, node_id_source & nis, + revision_t const & rev, revision_id const & new_rid, + roster_t & new_roster, marking_map & new_markings) { MM(rev); MM(new_rid); @@ -2015,12 +2015,12 @@ void } void -make_roster_for_revision(revision_t const & rev, revision_id const & new_rid, - roster_t & new_roster, marking_map & new_markings, - database & db) +make_roster_for_revision(database & db, + revision_t const & rev, revision_id const & new_rid, + roster_t & new_roster, marking_map & new_markings) { true_node_id_source nis(db); - make_roster_for_revision(rev, new_rid, new_roster, new_markings, db, nis); + make_roster_for_revision(db, nis, rev, new_rid, new_roster, new_markings); } ============================================================ --- roster.hh 06321f820ddcd3415431c1d1fae35d9a637e8568 +++ roster.hh 944e852bb147760f804ceb6063a1934b016d4c86 @@ -407,21 +407,21 @@ void // This is for revisions that are being written to the db, only. It assigns // permanent node ids. void -make_roster_for_revision(revision_t const & rev, +make_roster_for_revision(database & db, + revision_t const & rev, revision_id const & rid, roster_t & result, - marking_map & marking, - database & db); + marking_map & marking); // This is for revisions that are not necessarily going to be written to the // db; you can specify your own node_id_source. void -make_roster_for_revision(revision_t const & rev, +make_roster_for_revision(database & db, + node_id_source & nis, + revision_t const & rev, revision_id const & rid, roster_t & result, - marking_map & marking, - database & db, - node_id_source & nis); + marking_map & marking); void read_roster_and_marking(roster_data const & dat, ============================================================ --- schema_migration.cc 5e346221c35a8797dd6255b834ec06f6bfa70006 +++ schema_migration.cc f5bb54455a2fb596c5091b3ec2677f7c350395ec @@ -913,8 +913,8 @@ void } void -migrate_sql_schema(sqlite3 * db, system_path const & filename, - key_store & keys) +migrate_sql_schema(sqlite3 * db, key_store & keys, + system_path const & filename) { I(db != NULL); @@ -1013,8 +1013,9 @@ void // conformance check will reject them). 
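(Not part of the patch: a sketch of the migration entry points after key_store was moved ahead of the path and schema arguments, as in the schema_migration.hh hunk below. sql_handle, db_filename and schema_id are illustrative names for the sqlite3 handle, the database path and the schema string.)

    // Sketch: sqlite3 handle first, then the key store, then path/schema.
    migrate_sql_schema(sql_handle, keys, db_filename);
    test_migration_step(sql_handle, keys, db_filename, schema_id);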
void -test_migration_step(sqlite3 * db, system_path const & filename, - key_store & keys, string const & schema) +test_migration_step(sqlite3 * db, key_store & keys, + system_path const & filename, + string const & schema) { I(db != NULL); sql::create_function(db, "sha1", sqlite_sha1_fn); ============================================================ --- schema_migration.hh c02720141fbd073e228691f9aa049a5a8309ded6 +++ schema_migration.hh 2c414db665de886231c317fb5292415c51b63fae @@ -24,15 +24,16 @@ void check_sql_schema(sqlite3 * db, syst std::string describe_sql_schema(sqlite3 * db); void check_sql_schema(sqlite3 * db, system_path const & filename); -void migrate_sql_schema(sqlite3 * db, system_path const & filename, - key_store & keys); +void migrate_sql_schema(sqlite3 * db, key_store & keys, + system_path const & filename); // utility routine shared with database.cc void assert_sqlite3_ok(sqlite3 * db); // debugging -void test_migration_step(sqlite3 * db, system_path const & filename, - key_store & keys, std::string const & schema); +void test_migration_step(sqlite3 * db, key_store & keys, + system_path const & filename, + std::string const & schema); // this constant is part of the database schema, but it is not in schema.sql // because sqlite expressions can't do arithmetic on character values. it ============================================================ --- selectors.cc 9682d2ded09a6d978251d9e251d67e174297deb7 +++ selectors.cc acbef6eec0355e5d85a3a775de4fcb2cecb7e252 @@ -209,9 +209,9 @@ static void } static void -complete_one_selector(selector_type ty, string const & value, - set & completions, - project_t & project) +complete_one_selector(project_t & project, + selector_type ty, string const & value, + set & completions) { switch (ty) { @@ -299,9 +299,9 @@ static void } static void -complete_selector(selector_list const & limit, - set & completions, - project_t & project) +complete_selector(project_t & project, + selector_list const & limit, + set & completions) { if (limit.empty()) // all the ids in the database { @@ -310,14 +310,14 @@ complete_selector(selector_list const & } selector_list::const_iterator i = limit.begin(); - complete_one_selector(i->first, i->second, completions, project); + complete_one_selector(project, i->first, i->second, completions); i++; while (i != limit.end()) { set candidates; set intersection; - complete_one_selector(i->first, i->second, candidates, project); + complete_one_selector(project, i->first, i->second, candidates); intersection.clear(); set_intersection(completions.begin(), completions.end(), @@ -350,7 +350,7 @@ complete(app_state & app, } P(F("expanding selection '%s'") % str); - complete_selector(sels, completions, project); + complete_selector(project, sels, completions); N(completions.size() != 0, F("no match for selection '%s'") % str); @@ -402,7 +402,7 @@ expand_selector(app_state & app, return; } - complete_selector(sels, completions, project); + complete_selector(project, sels, completions); } void ============================================================ --- update.cc 4ff426265325f41011fa8460cd261ca645646d5a +++ update.cc e4e1f8d71ba2133af8d22537033ed7f1bb350cbd @@ -53,9 +53,9 @@ static void using boost::lexical_cast; static void -get_test_results_for_revision(revision_id const & id, - map & results, - project_t & project) +get_test_results_for_revision(project_t & project, + revision_id const & id, + map & results) { vector< revision > certs; project.get_revision_certs_by_name(id, cert_name(testresult_cert_name), @@ -78,12 +78,12 
 }
 
 static bool
-acceptable_descendent(branch_name const & branch,
+acceptable_descendent(lua_hooks & lua,
+                      project_t & project,
+                      branch_name const & branch,
                       revision_id const & base,
                       map & base_results,
-                      revision_id const & target,
-                      project_t & project,
-                      lua_hooks & lua)
+                      revision_id const & target)
 {
   L(FL("Considering update target %s") % target);
 
@@ -96,7 +96,7 @@ acceptable_descendent(branch_name const
 
   // step 2: check the testresults
   map target_results;
-  get_test_results_for_revision(target, target_results, project);
+  get_test_results_for_revision(project, target, target_results);
   if (lua.hook_accept_testresult_change(base_results, target_results))
     {
       L(FL("%s is acceptable update candidate") % target);
@@ -110,24 +110,24 @@ void
 }
 
 void
-pick_update_candidates(set & candidates,
+pick_update_candidates(lua_hooks & lua,
+                       project_t & project,
+                       set & candidates,
                        revision_id const & base,
                        branch_name const & branch,
-                       project_t & project,
-                       bool ignore_suspend_certs,
-                       lua_hooks & lua)
+                       bool ignore_suspend_certs)
 {
   I(!null_id(base));
   I(!branch().empty());
 
   map base_results;
-  get_test_results_for_revision(base, base_results, project);
+  get_test_results_for_revision(project, base, base_results);
 
   candidates.clear();
   // we possibly insert base into the candidate set as well; returning a set
   // containing just it means that we are up to date; returning an empty set
   // means that there is no acceptable update.
-  if (acceptable_descendent(branch, base, base_results, base, project, lua))
+  if (acceptable_descendent(lua, project, branch, base, base_results, base))
     candidates.insert(base);
 
   // keep a visited set to avoid repeating work
@@ -149,8 +149,8 @@ pick_update_candidates(set
       visited.insert(target);
 
       // then, possibly insert this revision as a candidate
-      if (acceptable_descendent(branch, base, base_results,
-                                target, project, lua))
+      if (acceptable_descendent(lua, project, branch, base, base_results,
+                                target))
         candidates.insert(target);
 
       // and traverse its children as well
@@ -158,7 +158,7 @@ pick_update_candidates(set
       copy(children.begin(), children.end(), back_inserter(to_traverse));
     }
 
-  erase_ancestors(candidates, project.db);
+  erase_ancestors(project.db, candidates);
 
   if (ignore_suspend_certs)
     return;
============================================================
--- update.hh af0e12d08bd51ce590f3f7de9e6ac7bf252712cc
+++ update.hh 182e0764bd0c84327c2b1f073fc3eaf3642ad4d2
@@ -25,12 +25,12 @@ class lua_hooks;
 // returned in 'candidates'. if no revisions are better than the current
 // revision, then 'candidates' will contain exactly the current revision.
 
-void pick_update_candidates(std::set & candidates,
+void pick_update_candidates(lua_hooks & lua,
+                            project_t & project,
+                            std::set & candidates,
                             revision_id const & base_ident,
                             branch_name const & branchname,
-                            project_t & project,
-                            bool ignore_suspend_certs,
-                            lua_hooks & lua);
+                            bool ignore_suspend_certs);
 
 // Local Variables:
 // mode: C++
============================================================
--- work.cc 7855294ba0c0228c5c0c19c15dc974f03e6ecf05
+++ work.cc f7da6e8d4930dfe92ba826ba76033b70513fdaf0
@@ -122,9 +122,9 @@ static void
 // the workspace
 
 static void
-get_roster_for_rid(revision_id const & rid,
-                   database::cached_roster & cr,
-                   database & db)
+get_roster_for_rid(database & db,
+                   revision_id const & rid,
+                   database::cached_roster & cr)
 {
   // We may be asked for a roster corresponding to the null rid, which
   // is not in the database. In this situation, what is wanted is an empty
@@ -144,7 +144,7 @@ void
 }
 
 void
-workspace::get_parent_rosters(parent_map & parents, database & db)
+workspace::get_parent_rosters(database & db, parent_map & parents)
 {
   revision_t rev;
   get_work_rev(rev);
@@ -154,14 +154,15 @@ workspace::get_parent_rosters(parent_map
        i != rev.edges.end(); i++)
     {
       database::cached_roster cr;
-      get_roster_for_rid(edge_old_revision(i), cr, db);
+      get_roster_for_rid(db, edge_old_revision(i), cr);
       safe_insert(parents, make_pair(edge_old_revision(i), cr));
     }
 }
 
 void
-workspace::get_current_roster_shape(roster_t & ros, database & db,
-                                    node_id_source & nis)
+workspace::get_current_roster_shape(database & db,
+                                    node_id_source & nis,
+                                    roster_t & ros)
 {
   revision_t rev;
   get_work_rev(rev);
@@ -178,7 +179,7 @@ workspace::get_current_roster_shape(rost
   else
     {
       marking_map dummy;
-      make_roster_for_revision(rev, new_rid, ros, dummy, db, nis);
+      make_roster_for_revision(db, nis, rev, new_rid, ros, dummy);
     }
 }
 
@@ -186,7 +187,7 @@ workspace::has_changes(database & db)
 workspace::has_changes(database & db)
 {
   parent_map parents;
-  get_parent_rosters(parents, db);
+  get_parent_rosters(db, parents);
 
   // if we have more than one parent roster then this workspace contains
   // a merge which means this is always a committable change
@@ -196,7 +197,7 @@ workspace::has_changes(database & db)
   temp_node_id_source nis;
   roster_t new_roster, old_roster = parent_roster(parents.begin());
 
-  get_current_roster_shape(new_roster, db, nis);
+  get_current_roster_shape(db, nis, new_roster);
   update_current_roster_from_filesystem(new_roster);
 
   return !(old_roster == new_roster);
@@ -440,11 +441,11 @@ workspace::maybe_update_inodeprints(data
   temp_node_id_source nis;
   roster_t new_roster;
-  get_current_roster_shape(new_roster, db, nis);
+  get_current_roster_shape(db, nis, new_roster);
   update_current_roster_from_filesystem(new_roster);
 
   parent_map parents;
-  get_parent_rosters(parents, db);
+  get_parent_rosters(db, parents);
 
   node_map const & new_nodes = new_roster.all_nodes();
   for (node_map::const_iterator i = new_nodes.begin();
        i != new_nodes.end(); ++i)
@@ -1050,8 +1051,8 @@ static void
 }; // anonymous namespace
 
 static void
-add_parent_dirs(file_path const & dst, roster_t & ros, node_id_source & nis,
-                database & db, workspace & work)
+add_parent_dirs(database & db, node_id_source & nis, workspace & work,
+                file_path const & dst, roster_t & ros)
 {
   editable_roster_base er(ros, nis);
   addition_builder build(db, work, ros, er);
@@ -1176,17 +1177,17 @@ void
 }
 
 void
-workspace::find_unknown_and_ignored(path_restriction const & mask,
+workspace::find_unknown_and_ignored(database & db,
+                                    path_restriction const & mask,
                                     vector const & roots,
                                     set & unknown,
-                                    set & ignored,
-                                    database & db)
+                                    set & ignored)
 {
   set known;
   roster_t new_roster;
   temp_node_id_source nis;
-  get_current_roster_shape(new_roster, db, nis);
+  get_current_roster_shape(db, nis, new_roster);
   new_roster.extract_path_set(known);
 
   file_itemizer u(db, *this, known, unknown, ignored, mask);
@@ -1198,7 +1199,7 @@ void
 }
 
 void
-workspace::perform_additions(set const & paths, database & db,
+workspace::perform_additions(database & db, set const & paths,
                              bool recursive, bool respect_ignore)
 {
   if (paths.empty())
@@ -1207,7 +1208,7 @@ workspace::perform_additions(set
 }
 
 void
-workspace::perform_deletions(set const & paths,
-                             database & db,
+workspace::perform_deletions(database & db,
+                             set const & paths,
                              bool recursive, bool bookkeep_only)
 {
   if (paths.empty())
@@ -1279,10 +1280,10 @@ workspace::perform_deletions(set is lexicographically
@@ -1362,9 +1363,9 @@ void
 }
 
 void
-workspace::perform_rename(set const & srcs,
+workspace::perform_rename(database & db,
+                          set const & srcs,
                           file_path const & dst,
-                          database & db,
                           bool bookkeep_only)
 {
   temp_node_id_source nis;
@@ -1374,7 +1375,7 @@ workspace::perform_rename(set
   I(!srcs.empty());
 
-  get_current_roster_shape(new_roster, db, nis);
+  get_current_roster_shape(db, nis, new_roster);
 
   // validation. it's okay if the target exists as a file; we just won't
   // clobber it (in !--bookkeep-only mode). similarly, it's okay if the
@@ -1408,7 +1409,7 @@ workspace::perform_rename(set
         }
       renames.insert(make_pair(src, dpath));
-      add_parent_dirs(dpath, new_roster, nis, db, *this);
+      add_parent_dirs(db, nis, *this, dpath, new_roster);
     }
   else
     {
@@ -1431,7 +1432,7 @@ workspace::perform_rename(set
           renames.insert(make_pair(*i, d));
-          add_parent_dirs(d, new_roster, nis, db, *this);
+          add_parent_dirs(db, nis, *this, d, new_roster);
         }
     }
 
@@ -1445,7 +1446,7 @@ workspace::perform_rename(set
     }
 
   parent_map parents;
-  get_parent_rosters(parents, db);
+  get_parent_rosters(db, parents);
 
   revision_t new_work;
   make_revision_for_workspace(parents, new_roster, new_work);
@@ -1484,15 +1485,15 @@ void
 }
 
 void
-workspace::perform_pivot_root(file_path const & new_root,
+workspace::perform_pivot_root(database & db,
+                              file_path const & new_root,
                               file_path const & put_old,
-                              database & db,
                               bool bookkeep_only)
 {
   temp_node_id_source nis;
   roster_t new_roster;
   MM(new_roster);
-  get_current_roster_shape(new_roster, db, nis);
+  get_current_roster_shape(db, nis, new_roster);
 
   I(new_roster.has_root());
   N(new_roster.has_node(new_root),
@@ -1532,7 +1533,7 @@ workspace::perform_pivot_root(file_path
   {
     parent_map parents;
-    get_parent_rosters(parents, db);
+    get_parent_rosters(db, parents);
 
     revision_t new_work;
     make_revision_for_workspace(parents, new_roster, new_work);
@@ -1541,15 +1542,15 @@ workspace::perform_pivot_root(file_path
   if (!bookkeep_only)
     {
       content_merge_empty_adaptor cmea;
-      perform_content_update(cs, cmea, db);
+      perform_content_update(db, cs, cmea);
     }
   update_any_attrs(db);
 }
 
 void
-workspace::perform_content_update(cset const & update,
+workspace::perform_content_update(database & db,
+                                  cset const & update,
                                   content_merge_adaptor const & ca,
-                                  database & db,
                                   bool const messages)
 {
   roster_t roster;
@@ -1563,7 +1564,7 @@ workspace::perform_content_update(cset c
       "you must clean up and remove the %s directory") % detached);
 
-  get_current_roster_shape(new_roster, db, nis);
+  get_current_roster_shape(db, nis, new_roster);
   new_roster.extract_path_set(known);
 
   workspace_itemizer itemizer(roster, known, nis);
@@ -1585,7 +1586,7 @@ workspace::update_any_attrs(database & d
 {
   temp_node_id_source nis;
   roster_t new_roster;
-  get_current_roster_shape(new_roster, db, nis);
+  get_current_roster_shape(db, nis, new_roster);
 
   node_map const & nodes = new_roster.all_nodes();
   for (node_map::const_iterator i = nodes.begin(); i != nodes.end(); ++i)
============================================================
--- work.hh 17520ad35c3888799b82336286f832c535039261
+++ work.hh f86a3eb582bbf5f33572dac57d4281cb81076dbd
@@ -70,35 +70,35 @@ struct workspace
                     node_restriction const & mask,
                     std::set & missing);
 
-  void find_unknown_and_ignored(path_restriction const & mask,
+  void find_unknown_and_ignored(database & db,
+                                path_restriction const & mask,
                                 std::vector const & roots,
                                 std::set & unknown,
-                                std::set & ignored,
-                                database & db);
+                                std::set & ignored);
 
-  void perform_additions(std::set const & targets,
-                         database & db,
+  void perform_additions(database & db,
+                         std::set const & targets,
                          bool recursive = false, bool respect_ignore = true);
 
-  void perform_deletions(std::set const & targets,
-                         database & db,
+  void perform_deletions(database & db,
+                         std::set const & targets,
                          bool recursive, bool bookkeep_only);
 
-  void perform_rename(std::set const & src_paths,
+  void perform_rename(database & db,
+                      std::set const & src_paths,
                       file_path const & dst_dir,
-                      database & db,
                       bool bookkeep_only);
 
-  void perform_pivot_root(file_path const & new_root,
+  void perform_pivot_root(database & db,
+                          file_path const & new_root,
                           file_path const & put_old,
-                          database & db,
                           bool bookkeep_only);
 
-  void perform_content_update(cset const & cs,
+  void perform_content_update(database & db,
+                              cset const & cs,
                               content_merge_adaptor const & ca,
-                              database & db,
                               bool messages = true);
 
   void update_any_attrs(database & db);
@@ -123,13 +123,13 @@ struct workspace
   // hashes, call update_current_roster_from_filesystem on the result of
   // this function. Under almost all conditions, NIS should be a
   // temp_node_id_source.
-  void get_current_roster_shape(roster_t & ros,
-                                database & db, node_id_source & nis);
+  void get_current_roster_shape(database & db, node_id_source & nis,
+                                roster_t & ros);
 
   // This returns a map whose keys are revision_ids and whose values are
   // rosters, there being one such pair for each parent of the current
   // revision.
-  void get_parent_rosters(parent_map & parents, database & db);
+  void get_parent_rosters(database & db, parent_map & parents);
 
   // This updates the file-content hashes in ROSTER, which is assumed to be
   // the "current" roster returned by one of the above get_*_roster_shape