2d.h   2d.h 
skipping to change at line 66 skipping to change at line 66
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
scoped_ptr<mongo::twod_exec::GeoBrowse> _browse; scoped_ptr<mongo::twod_exec::GeoBrowse> _browse;
TwoDParams _params; TwoDParams _params;
WorkingSet* _workingSet; WorkingSet* _workingSet;
bool _initted; bool _initted;
IndexDescriptor* _descriptor; IndexDescriptor* _descriptor;
TwoDAccessMethod* _am; TwoDAccessMethod* _am;
CommonStats _commonStats; CommonStats _commonStats;
TwoDStats _specificStats;
}; };
} }
namespace mongo { namespace mongo {
namespace twod_exec { namespace twod_exec {
// //
// Impls of browse below // Impls of browse below
// //
 End of changes. 1 change blocks. 
0 lines changed or deleted 1 lines changed or added


 2d_access_method.h   2d_access_method.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/index/2d_common.h" #include "mongo/db/index/2d_common.h"
#include "mongo/db/index/2d_key_generator.h"
#include "mongo/db/index/btree_based_access_method.h" #include "mongo/db/index/btree_based_access_method.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class IndexCatalogEntry; class IndexCatalogEntry;
class IndexCursor; class IndexCursor;
class IndexDescriptor; class IndexDescriptor;
struct TwoDIndexingParams; struct TwoDIndexingParams;
skipping to change at line 75 skipping to change at line 76
} }
class TwoDAccessMethod : public BtreeBasedAccessMethod { class TwoDAccessMethod : public BtreeBasedAccessMethod {
public: public:
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
using BtreeBasedAccessMethod::_interface; using BtreeBasedAccessMethod::_interface;
TwoDAccessMethod(IndexCatalogEntry* btreeState); TwoDAccessMethod(IndexCatalogEntry* btreeState);
virtual ~TwoDAccessMethod() { } virtual ~TwoDAccessMethod() { }
virtual shared_ptr<KeyGenerator> getKeyGenerator() const { return _ keyGenerator; }
private: private:
friend class TwoDIndexCursor; friend class TwoDIndexCursor;
friend class twod_internal::GeoPoint; friend class twod_internal::GeoPoint;
friend class twod_internal::GeoAccumulator; friend class twod_internal::GeoAccumulator;
friend class twod_internal::GeoBrowse; friend class twod_internal::GeoBrowse;
friend class twod_internal::GeoHopper; friend class twod_internal::GeoHopper;
friend class twod_internal::GeoSearch; friend class twod_internal::GeoSearch;
friend class twod_internal::GeoCircleBrowse; friend class twod_internal::GeoCircleBrowse;
friend class twod_internal::GeoBoxBrowse; friend class twod_internal::GeoBoxBrowse;
friend class twod_internal::GeoPolygonBrowse; friend class twod_internal::GeoPolygonBrowse;
skipping to change at line 106 skipping to change at line 108
BtreeInterface* getInterface() { return _interface; } BtreeInterface* getInterface() { return _interface; }
const IndexDescriptor* getDescriptor() { return _descriptor; } const IndexDescriptor* getDescriptor() { return _descriptor; }
TwoDIndexingParams& getParams() { return _params; } TwoDIndexingParams& getParams() { return _params; }
// This really gets the 'locs' from the provided obj. // This really gets the 'locs' from the provided obj.
void getKeys(const BSONObj& obj, vector<BSONObj>& locs) const; void getKeys(const BSONObj& obj, vector<BSONObj>& locs) const;
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// This is called by the two getKeys above.
void getKeys(const BSONObj &obj, BSONObjSet* keys, vector<BSONObj>*
locs) const;
BSONObj _nullObj;
BSONElement _nullElt;
TwoDIndexingParams _params; TwoDIndexingParams _params;
shared_ptr<TwoDKeyGenerator> _keyGenerator;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
6 lines changed or deleted 4 lines changed or added


 2dcommon.h   2dcommon.h 
skipping to change at line 205 skipping to change at line 205
virtual void checkLocation(); virtual void checkLocation();
virtual Record* _current(); virtual Record* _current();
virtual BSONObj current(); virtual BSONObj current();
virtual DiskLoc currLoc(); virtual DiskLoc currLoc();
virtual BSONObj currKey() const; virtual BSONObj currKey() const;
// Are we finished getting points? // Are we finished getting points?
virtual bool moreToDo(); virtual bool moreToDo();
Box makeBox(const GeoHash &hash) const;
// Fills the stack, but only checks a maximum number of maxToCheck points at a time. // Fills the stack, but only checks a maximum number of maxToCheck points at a time.
// Further calls to this function will continue the expand/check ne ighbors algorithm. // Further calls to this function will continue the expand/check ne ighbors algorithm.
virtual void fillStack(int maxToCheck, int maxToAdd = -1, bool only Expand = false); virtual void fillStack(int maxToCheck, int maxToAdd = -1, bool only Expand = false);
bool checkAndAdvance(BtreeLocation* bl, const GeoHash& hash, int& t otalFound); bool checkAndAdvance(BtreeLocation* bl, const GeoHash& hash, int& t otalFound);
// The initial geo hash box for our first expansion // The initial geo hash box for our first expansion
virtual GeoHash expandStartHash() = 0; virtual GeoHash expandStartHash() = 0;
// Whether the current box width is big enough for our search area // Whether the current box width is big enough for our search area
 End of changes. 1 change blocks. 
2 lines changed or deleted 0 lines changed or added


 algorithm.h   algorithm.h 
skipping to change at line 24 skipping to change at line 24
*/ */
#pragma once #pragma once
#include <cstddef> #include <cstddef>
#include <algorithm> #include <algorithm>
#include <vector> #include <vector>
#include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/const_element.h"
#include "mongo/bson/mutable/element.h" #include "mongo/bson/mutable/element.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo { namespace mongo {
namespace mutablebson { namespace mutablebson {
/** For an overview of mutable BSON, please see the file document.h in this directory. /** For an overview of mutable BSON, please see the file document.h in this directory.
* *
* This file defines, in analogy with <algorithm>, a collection of use ful algorithms for * This file defines, in analogy with <algorithm>, a collection of use ful algorithms for
* use with mutable BSON classes. In particular, algorithms for search ing, sorting, * use with mutable BSON classes. In particular, algorithms for search ing, sorting,
* indexed access, and counting are included. * indexed access, and counting are included.
*/ */
skipping to change at line 265 skipping to change at line 266
std::size_t countSiblingsRight(ElementType element) { std::size_t countSiblingsRight(ElementType element) {
return element.countSiblingsRight(); return element.countSiblingsRight();
} }
/** Return the number of children of 'element'. */ /** Return the number of children of 'element'. */
template<typename ElementType> template<typename ElementType>
std::size_t countChildren(ElementType element) { std::size_t countChildren(ElementType element) {
return element.countChildren(); return element.countChildren();
} }
/** Return the full (path) name of this element separating each name wi
th the delim string. */
template<typename ElementType>
std::string getFullName(ElementType element, char delim = '.') {
std::vector<StringData> names;
ElementType curr = element;
while(curr.ok() && curr.parent().ok()) {
names.push_back(curr.getFieldName());
curr = curr.parent();
}
mongoutils::str::stream name;
bool first = true;
for(std::vector<StringData>::reverse_iterator it = names.rbegin();
it != names.rend();
++it) {
if (!first)
name << delim;
name << *it;
first = false;
}
return name;
}
} // namespace mutablebson } // namespace mutablebson
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
0 lines changed or deleted 24 lines changed or added


 allocator.h   allocator.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/util/signal_handlers.h" #include <stdlib.h>
// we need the "real" malloc here // we need the "real" malloc here
#include "mongo/client/undef_macros.h" #include "mongo/client/undef_macros.h"
namespace mongo { namespace mongo {
inline void * ourmalloc(size_t size) { inline void * ourmalloc(size_t size) {
void *x = malloc(size); void *x = malloc(size);
if ( x == 0 ) printStackAndExit(0); if ( x == 0 ) abort();
return x; return x;
} }
inline void * ourrealloc(void *ptr, size_t size) { inline void * ourrealloc(void *ptr, size_t size) {
void *x = realloc(ptr, size); void *x = realloc(ptr, size);
if ( x == 0 ) printStackAndExit(0); if ( x == 0 ) abort();
return x; return x;
} }
#define MONGO_malloc ::mongo::ourmalloc #define MONGO_malloc ::mongo::ourmalloc
#define MONGO_realloc ::mongo::ourrealloc #define MONGO_realloc ::mongo::ourrealloc
// this redefines 'malloc' to 'MONGO_malloc', etc // this redefines 'malloc' to 'MONGO_malloc', etc
#include "mongo/client/redef_macros.h" #include "mongo/client/redef_macros.h"
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
3 lines changed or deleted 3 lines changed or added


 and_hash.h   and_hash.h 
skipping to change at line 56 skipping to change at line 56
* Preconditions: Valid DiskLoc. More than one child. * Preconditions: Valid DiskLoc. More than one child.
* *
* Any DiskLoc that we keep a reference to that is invalidated before w e are able to return it * Any DiskLoc that we keep a reference to that is invalidated before w e are able to return it
* is fetched and added to the WorkingSet as "flagged for further revie w." Because this stage * is fetched and added to the WorkingSet as "flagged for further revie w." Because this stage
* operates with DiskLocs, we are unable to evaluate the AND for the in validated DiskLoc, and it * operates with DiskLocs, we are unable to evaluate the AND for the in validated DiskLoc, and it
* must be fully matched later. * must be fully matched later.
*/ */
class AndHashStage : public PlanStage { class AndHashStage : public PlanStage {
public: public:
AndHashStage(WorkingSet* ws, const MatchExpression* filter); AndHashStage(WorkingSet* ws, const MatchExpression* filter);
/**
* For testing only. Allows tests to set memory usage threshold.
*/
AndHashStage(WorkingSet* ws, const MatchExpression* filter, size_t
maxMemUsage);
virtual ~AndHashStage(); virtual ~AndHashStage();
void addChild(PlanStage* child); void addChild(PlanStage* child);
/**
* Returns memory usage.
* For testing only.
*/
size_t getMemUsage() const;
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl, InvalidationType type); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
static const size_t kLookAheadWorks;
StageState readFirstChild(WorkingSetID* out); StageState readFirstChild(WorkingSetID* out);
StageState hashOtherChildren(WorkingSetID* out); StageState hashOtherChildren(WorkingSetID* out);
StageState workChild(size_t childNo, WorkingSetID* out);
// Not owned by us. // Not owned by us.
WorkingSet* _ws; WorkingSet* _ws;
// Not owned by us. // Not owned by us.
const MatchExpression* _filter; const MatchExpression* _filter;
// The stages we read from. Owned by us. // The stages we read from. Owned by us.
vector<PlanStage*> _children; std::vector<PlanStage*> _children;
// We want to see if any of our children are EOF immediately. This
requires working them a
// few times to see if they hit EOF or if they produce a result. I
f they produce a result,
// we place that result here.
std::vector<WorkingSetID> _lookAheadResults;
// _dataMap is filled out by the first child and probed by subseque nt children. This is the // _dataMap is filled out by the first child and probed by subseque nt children. This is the
// hash table that we create by intersecting _children and probe wi th the last child. // hash table that we create by intersecting _children and probe wi th the last child.
typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap; typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap;
DataMap _dataMap; DataMap _dataMap;
// Keeps track of what elements from _dataMap subsequent children h ave seen. // Keeps track of what elements from _dataMap subsequent children h ave seen.
// Only used while _hashingChildren. // Only used while _hashingChildren.
typedef unordered_set<DiskLoc, DiskLoc::Hasher> SeenMap; typedef unordered_set<DiskLoc, DiskLoc::Hasher> SeenMap;
SeenMap _seenMap; SeenMap _seenMap;
// True if we're still intersecting _children[0..._children.size()- 1]. // True if we're still intersecting _children[0..._children.size()- 1].
bool _hashingChildren; bool _hashingChildren;
// Which child are we currently working on? // Which child are we currently working on?
size_t _currentChild; size_t _currentChild;
// Stats // Stats
CommonStats _commonStats; CommonStats _commonStats;
AndHashStats _specificStats; AndHashStats _specificStats;
// The usage in bytes of all buffered data that we're holding.
// Memory usage is calculated from keys held in _dataMap only.
// For simplicity, results in _lookAheadResults do not count toward
s the limit.
size_t _memUsage;
// Upper limit for buffered data memory usage.
// Defaults to 32 MB (See kMaxBytes in and_hash.cpp).
size_t _maxMemUsage;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
1 lines changed or deleted 34 lines changed or added


 assert_util.h   assert_util.h 
skipping to change at line 34 skipping to change at line 34
#include "mongo/base/status.h" // NOTE: This is safe as utils depend on bas e #include "mongo/base/status.h" // NOTE: This is safe as utils depend on bas e
#include "mongo/bson/inline_decls.h" #include "mongo/bson/inline_decls.h"
#include "mongo/client/export_macros.h" #include "mongo/client/export_macros.h"
#include "mongo/platform/compiler.h" #include "mongo/platform/compiler.h"
namespace mongo { namespace mongo {
enum CommonErrorCodes { enum CommonErrorCodes {
OkCode = 0, OkCode = 0,
DatabaseDifferCaseCode = 13297 , // uassert( 13297 ) DatabaseDifferCaseCode = 13297 , // uassert( 13297 )
InterruptedAtShutdown = 11600 , // uassert( 11600 )
SendStaleConfigCode = 13388 , // uassert( 13388 ) SendStaleConfigCode = 13388 , // uassert( 13388 )
RecvStaleConfigCode = 9996, // uassert( 9996 ) RecvStaleConfigCode = 9996, // uassert( 9996 )
PrepareConfigsFailedCode = 13104, // uassert( 13104 ) PrepareConfigsFailedCode = 13104, // uassert( 13104 )
NotMasterOrSecondaryCode = 13436, // uassert( 13436 ) NotMasterOrSecondaryCode = 13436, // uassert( 13436 )
NotMasterNoSlaveOkCode = 13435, // uassert( 13435 ) NotMasterNoSlaveOkCode = 13435, // uassert( 13435 )
NotMaster = 10107 // uassert( 10107 ) NotMaster = 10107, // uassert( 10107 )
IndexOptionsDiffer = 17427 // uassert( 17427 )
}; };
class MONGO_CLIENT_API AssertionCount { class MONGO_CLIENT_API AssertionCount {
public: public:
AssertionCount(); AssertionCount();
void rollover(); void rollover();
void condrollover( int newValue ); void condrollover( int newValue );
int regular; int regular;
int warning; int warning;
skipping to change at line 147 skipping to change at line 147
public: public:
AssertionException( const ExceptionInfo& ei ) : DBException(ei) {} AssertionException( const ExceptionInfo& ei ) : DBException(ei) {}
AssertionException( const char * msg , int code ) : DBException(msg ,code) {} AssertionException( const char * msg , int code ) : DBException(msg ,code) {}
AssertionException( const std::string& msg , int code ) : DBExcepti on(msg,code) {} AssertionException( const std::string& msg , int code ) : DBExcepti on(msg,code) {}
virtual ~AssertionException() throw() { } virtual ~AssertionException() throw() { }
virtual bool severe() const { return true; } virtual bool severe() const { return true; }
virtual bool isUserAssertion() const { return false; } virtual bool isUserAssertion() const { return false; }
/* true if an interrupted exception - see KillCurrentOp */
bool interrupted() {
return _ei.code == InterruptedAtShutdown || _ei.code == 11601 |
|
_ei.code == ErrorCodes::ExceededTimeLimit;
}
}; };
/* UserExceptions are valid errors that a user can cause, like out of d isk space or duplicate key */ /* UserExceptions are valid errors that a user can cause, like out of d isk space or duplicate key */
class MONGO_CLIENT_API UserException : public AssertionException { class MONGO_CLIENT_API UserException : public AssertionException {
public: public:
UserException(int c , const std::string& m) : AssertionException( m , c ) {} UserException(int c , const std::string& m) : AssertionException( m , c ) {}
virtual bool severe() const { return false; } virtual bool severe() const { return false; }
virtual bool isUserAssertion() const { return true; } virtual bool isUserAssertion() const { return true; }
virtual void appendPrefix( std::stringstream& ss ) const; virtual void appendPrefix( std::stringstream& ss ) const;
}; };
 End of changes. 3 change blocks. 
9 lines changed or deleted 2 lines changed or added


 auth_helpers.h   auth_helpers.h 
skipping to change at line 19 skipping to change at line 19
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/base/string_data.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
namespace auth { namespace auth {
/** /**
* Hashes the password so that it can be stored in a user object or use
d for MONGODB-CR
* authentication.
*/
std::string MONGO_CLIENT_API createPasswordDigest(const StringData& use
rname,
const StringData& clearTextPassword);
/**
* Retrieves the schema version of the persistent data describing users and roles from the * Retrieves the schema version of the persistent data describing users and roles from the
* remote server connected to with conn. * remote server connected to with conn.
*/ */
Status getRemoteStoredAuthorizationVersion(DBClientBase* conn, int* out Version); Status getRemoteStoredAuthorizationVersion(DBClientBase* conn, int* out Version);
/** /**
* Given a schemaVersion24 user document and its source database, retur
n the query and update
* specifier needed to upsert a schemaVersion26 version of the user.
*/
void getUpdateToUpgradeUser(const StringData& sourceDB,
const BSONObj& oldUserDoc,
BSONObj* query,
BSONObj* update);
/**
* Name of the server parameter used to report the auth schema version (via getParameter). * Name of the server parameter used to report the auth schema version (via getParameter).
*/ */
extern const std::string schemaVersionServerParameter; extern const std::string schemaVersionServerParameter;
} // namespace auth } // namespace auth
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
21 lines changed or deleted 0 lines changed or added


 auth_index_d.h   auth_index_d.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
namespace mongo { namespace mongo {
class Collection;
namespace authindex { namespace authindex {
/** /**
* Creates the appropriate indexes on _new_ system collections supporti ng authentication and * Creates the appropriate indexes on _new_ system collections supporti ng authentication and
* authorization. * authorization.
*/ */
void createSystemIndexes(const NamespaceString& ns); void createSystemIndexes(Collection* collection);
/**
* Ensures that exactly the appropriate indexes to support authenticati
on and authorization
* are present for the given database.
*
* It is appropriate to call this function on new or existing databases
, though it is
* primarily intended for use on existing databases.
*/
void configureSystemIndexes(const StringData& dbname);
} // namespace authindex } // namespace authindex
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 15 lines changed or added


 authorization_manager.h   authorization_manager.h 
skipping to change at line 61 skipping to change at line 61
namespace mongo { namespace mongo {
class AuthzManagerExternalState; class AuthzManagerExternalState;
class UserDocumentParser; class UserDocumentParser;
/** /**
* Internal secret key info. * Internal secret key info.
*/ */
struct AuthInfo { struct AuthInfo {
User* user; User* user;
BSONObj authParams;
}; };
extern AuthInfo internalSecurity; // set at startup and not changed aft er initialization. extern AuthInfo internalSecurity; // set at startup and not changed aft er initialization.
/** /**
* Contains server/cluster-wide information about Authorization. * Contains server/cluster-wide information about Authorization.
*/ */
class AuthorizationManager { class AuthorizationManager {
MONGO_DISALLOW_COPYING(AuthorizationManager); MONGO_DISALLOW_COPYING(AuthorizationManager);
public: public:
// The newly constructed AuthorizationManager takes ownership of "e xternalState" // The newly constructed AuthorizationManager takes ownership of "e xternalState"
explicit AuthorizationManager(AuthzManagerExternalState* externalSt ate); explicit AuthorizationManager(AuthzManagerExternalState* externalSt ate);
~AuthorizationManager(); ~AuthorizationManager();
static const std::string USER_NAME_FIELD_NAME; static const std::string USER_NAME_FIELD_NAME;
static const std::string USER_DB_FIELD_NAME; static const std::string USER_DB_FIELD_NAME;
static const std::string ROLE_NAME_FIELD_NAME; static const std::string ROLE_NAME_FIELD_NAME;
static const std::string ROLE_SOURCE_FIELD_NAME; static const std::string ROLE_SOURCE_FIELD_NAME; // TODO: rename to ROLE_DB_FIELD_NAME
static const std::string PASSWORD_FIELD_NAME; static const std::string PASSWORD_FIELD_NAME;
static const std::string V1_USER_NAME_FIELD_NAME; static const std::string V1_USER_NAME_FIELD_NAME;
static const std::string V1_USER_SOURCE_FIELD_NAME; static const std::string V1_USER_SOURCE_FIELD_NAME;
static const NamespaceString adminCommandNamespace; static const NamespaceString adminCommandNamespace;
static const NamespaceString rolesCollectionNamespace; static const NamespaceString rolesCollectionNamespace;
static const NamespaceString usersAltCollectionNamespace; static const NamespaceString usersAltCollectionNamespace;
static const NamespaceString usersBackupCollectionNamespace; static const NamespaceString usersBackupCollectionNamespace;
static const NamespaceString usersCollectionNamespace; static const NamespaceString usersCollectionNamespace;
static const NamespaceString versionCollectionNamespace; static const NamespaceString versionCollectionNamespace;
static const NamespaceString defaultTempUsersCollectionNamespace; /
/ for mongorestore
static const NamespaceString defaultTempRolesCollectionNamespace; /
/ for mongorestore
/** /**
* Query to match the auth schema version document in the versionCo llectionNamespace. * Query to match the auth schema version document in the versionCo llectionNamespace.
*/ */
static const BSONObj versionDocumentQuery; static const BSONObj versionDocumentQuery;
/** /**
* Name of the field in the auth schema version document containing the current schema * Name of the field in the auth schema version document containing the current schema
* version. * version.
*/ */
skipping to change at line 169 skipping to change at line 170
* Sets whether or not access control enforcement is enabled for th is manager. * Sets whether or not access control enforcement is enabled for th is manager.
*/ */
void setAuthEnabled(bool enabled); void setAuthEnabled(bool enabled);
/** /**
* Returns true if access control is enabled for this manager . * Returns true if access control is enabled for this manager .
*/ */
bool isAuthEnabled() const; bool isAuthEnabled() const;
/** /**
* Returns the version number of the authorization system. * Returns via the output parameter "version" the version number of
the authorization
* system. Returns Status::OK() if it was able to successfully fet
ch the current
* authorization version. If it has problems fetching the most up
to date version it
* returns a non-OK status. When returning a non-OK status, *versi
on will be set to
* schemaVersionInvalid (0).
*/ */
int getAuthorizationVersion(); Status getAuthorizationVersion(int* version);
// Returns true if there exists at least one privilege document in the system. // Returns true if there exists at least one privilege document in the system.
bool hasAnyPrivilegeDocuments() const; bool hasAnyPrivilegeDocuments() const;
/** /**
* Updates the auth schema version document to reflect that the sys tem is upgraded to * Updates the auth schema version document to reflect that the sys tem is upgraded to
* schemaVersion26Final. * schemaVersion26Final.
* *
* Do not call if getAuthorizationVersion() reports a value other t han schemaVersion26Final. * Do not call if getAuthorizationVersion() reports a value other t han schemaVersion26Final.
*/ */
skipping to change at line 239 skipping to change at line 244
* Updates documents matching "query" according to "updatePattern" in "collectionName". * Updates documents matching "query" according to "updatePattern" in "collectionName".
* Should only be called on collections with authorization document s in them * Should only be called on collections with authorization document s in them
* (ie admin.system.users and admin.system.roles). * (ie admin.system.users and admin.system.roles).
*/ */
Status updateAuthzDocuments(const NamespaceString& collectionName, Status updateAuthzDocuments(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& updatePattern, const BSONObj& updatePattern,
bool upsert, bool upsert,
bool multi, bool multi,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numUpdated) const; int* nMatched) const;
/* /*
* Removes roles matching the given query. * Removes roles matching the given query.
* Writes into *numRemoved the number of role documents that were m odified. * Writes into *numRemoved the number of role documents that were m odified.
* 'writeConcern' contains the arguments to be passed to getLastErr or to block for * 'writeConcern' contains the arguments to be passed to getLastErr or to block for
* successful completion of the write. * successful completion of the write.
*/ */
Status removeRoleDocuments(const BSONObj& query, Status removeRoleDocuments(const BSONObj& query,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numRemoved) const; int* numRemoved) const;
 End of changes. 6 change blocks. 
5 lines changed or deleted 16 lines changed or added


 authorization_session.h   authorization_session.h 
skipping to change at line 88 skipping to change at line 88
* for it in the process. * for it in the process.
*/ */
Status addAndAuthorizeUser(const UserName& userName); Status addAndAuthorizeUser(const UserName& userName);
// Returns the authenticated user with the given name. Returns NUL L // Returns the authenticated user with the given name. Returns NUL L
// if no such user is found. // if no such user is found.
// The user remains in the _authenticatedUsers set for this Authori zationSession, // The user remains in the _authenticatedUsers set for this Authori zationSession,
// and ownership of the user stays with the AuthorizationManager // and ownership of the user stays with the AuthorizationManager
User* lookupUser(const UserName& name); User* lookupUser(const UserName& name);
// Returns the number of authenticated users in this session.
size_t getNumAuthenticatedUsers();
// Gets an iterator over the names of all authenticated users store d in this manager. // Gets an iterator over the names of all authenticated users store d in this manager.
UserNameIterator getAuthenticatedUserNames(); UserNameIterator getAuthenticatedUserNames();
// Returns a string representing all logged-in users on the current session. // Returns a string representing all logged-in users on the current session.
// WARNING: this string will contain NUL bytes so don't call c_str( )! // WARNING: this string will contain NUL bytes so don't call c_str( )!
std::string getAuthenticatedUserNamesToken(); std::string getAuthenticatedUserNamesToken();
// Removes any authenticated principals whose authorization credent ials came from the given // Removes any authenticated principals whose authorization credent ials came from the given
// database, and revokes any privileges that were granted via that principal. // database, and revokes any privileges that were granted via that principal.
void logoutDatabase(const std::string& dbname); void logoutDatabase(const std::string& dbname);
 End of changes. 1 change blocks. 
3 lines changed or deleted 0 lines changed or added


 authz_manager_external_state.h   authz_manager_external_state.h 
skipping to change at line 65 skipping to change at line 65
/** /**
* Initializes the external state object. Must be called after con struction and before * Initializes the external state object. Must be called after con struction and before
* calling other methods. Object may not be used after this method returns something other * calling other methods. Object may not be used after this method returns something other
* than Status::OK(). * than Status::OK().
*/ */
virtual Status initialize() = 0; virtual Status initialize() = 0;
/** /**
* Retrieves the schema version of the persistent data describing u sers and roles. * Retrieves the schema version of the persistent data describing u sers and roles.
* Will leave *outVersion unmodified on non-OK status return values .
*/ */
virtual Status getStoredAuthorizationVersion(int* outVersion) = 0; virtual Status getStoredAuthorizationVersion(int* outVersion) = 0;
/** /**
* Writes into "result" a document describing the named user and re turns Status::OK(). The * Writes into "result" a document describing the named user and re turns Status::OK(). The
* description includes the user credentials, if present, the user' s role membership and * description includes the user credentials, if present, the user' s role membership and
* delegation information, a full list of the user's privileges, an d a full list of the * delegation information, a full list of the user's privileges, an d a full list of the
* user's roles, including those roles held implicitly through othe r roles (indirect roles). * user's roles, including those roles held implicitly through othe r roles (indirect roles).
* In the event that some of this information is inconsistent, the document will contain a * In the event that some of this information is inconsistent, the document will contain a
* "warnings" array, with string messages describing inconsistencie s. * "warnings" array, with string messages describing inconsistencie s.
skipping to change at line 211 skipping to change at line 212
/** /**
* Updates documents matching "query" according to "updatePattern" in "collectionName". * Updates documents matching "query" according to "updatePattern" in "collectionName".
*/ */
virtual Status update(const NamespaceString& collectionName, virtual Status update(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& updatePattern, const BSONObj& updatePattern,
bool upsert, bool upsert,
bool multi, bool multi,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numUpdated) = 0; int* nMatched) = 0;
/** /**
* Removes all documents matching "query" from "collectionName". * Removes all documents matching "query" from "collectionName".
*/ */
virtual Status remove(const NamespaceString& collectionName, virtual Status remove(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numRemoved) = 0; int* numRemoved) = 0;
/** /**
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 authz_manager_external_state_d.h   authz_manager_external_state_d.h 
skipping to change at line 71 skipping to change at line 71
const boost::function<void(const BSONObj&)>& r esultProcessor); const boost::function<void(const BSONObj&)>& r esultProcessor);
virtual Status insert(const NamespaceString& collectionName, virtual Status insert(const NamespaceString& collectionName,
const BSONObj& document, const BSONObj& document,
const BSONObj& writeConcern); const BSONObj& writeConcern);
virtual Status update(const NamespaceString& collectionName, virtual Status update(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& updatePattern, const BSONObj& updatePattern,
bool upsert, bool upsert,
bool multi, bool multi,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numUpdated); int* nMatched);
virtual Status remove(const NamespaceString& collectionName, virtual Status remove(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numRemoved); int* numRemoved);
virtual Status createIndex(const NamespaceString& collectionName, virtual Status createIndex(const NamespaceString& collectionName,
const BSONObj& pattern, const BSONObj& pattern,
bool unique, bool unique,
const BSONObj& writeConcern); const BSONObj& writeConcern);
virtual Status dropIndexes(const NamespaceString& collectionName, virtual Status dropIndexes(const NamespaceString& collectionName,
const BSONObj& writeConcern); const BSONObj& writeConcern);
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 authz_manager_external_state_mock.h   authz_manager_external_state_mock.h 
skipping to change at line 90 skipping to change at line 90
const BSONObj& query, const BSONObj& query,
const BSONObj& updatePattern, const BSONObj& updatePattern,
bool upsert, bool upsert,
const BSONObj& writeConcern); const BSONObj& writeConcern);
virtual Status update(const NamespaceString& collectionName, virtual Status update(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& updatePattern, const BSONObj& updatePattern,
bool upsert, bool upsert,
bool multi, bool multi,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numUpdated); int* nMatched);
virtual Status remove(const NamespaceString& collectionName, virtual Status remove(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numRemoved); int* numRemoved);
virtual Status createIndex(const NamespaceString& collectionName, virtual Status createIndex(const NamespaceString& collectionName,
const BSONObj& pattern, const BSONObj& pattern,
bool unique, bool unique,
const BSONObj& writeConcern); const BSONObj& writeConcern);
virtual Status dropIndexes(const NamespaceString& collectionName, virtual Status dropIndexes(const NamespaceString& collectionName,
const BSONObj& writeConcern); const BSONObj& writeConcern);
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 authz_manager_external_state_s.h   authz_manager_external_state_s.h 
skipping to change at line 97 skipping to change at line 97
virtual Status insert(const NamespaceString& collectionName, virtual Status insert(const NamespaceString& collectionName,
const BSONObj& document, const BSONObj& document,
const BSONObj& writeConcern); const BSONObj& writeConcern);
virtual Status update(const NamespaceString& collectionName, virtual Status update(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& updatePattern, const BSONObj& updatePattern,
bool upsert, bool upsert,
bool multi, bool multi,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numUpdated); int* nMatched);
virtual Status remove(const NamespaceString& collectionName, virtual Status remove(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numRemoved); int* numRemoved);
virtual Status createIndex(const NamespaceString& collectionName, virtual Status createIndex(const NamespaceString& collectionName,
const BSONObj& pattern, const BSONObj& pattern,
bool unique, bool unique,
const BSONObj& writeConcern); const BSONObj& writeConcern);
virtual Status dropIndexes(const NamespaceString& collectionName, virtual Status dropIndexes(const NamespaceString& collectionName,
const BSONObj& writeConcern); const BSONObj& writeConcern);
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 batch_downconvert.h   batch_downconvert.h 
skipping to change at line 75 skipping to change at line 75
/** /**
* Purely enforces a write concern on a remote host by clearing the previous error. * Purely enforces a write concern on a remote host by clearing the previous error.
* This is more expensive than a normal safe write, but is sometime s needed to support * This is more expensive than a normal safe write, but is sometime s needed to support
* write command emulation. * write command emulation.
*/ */
virtual Status enforceWriteConcern( DBClientBase* conn, virtual Status enforceWriteConcern( DBClientBase* conn,
const StringData& dbName, const StringData& dbName,
const BSONObj& writeConcern, const BSONObj& writeConcern,
BSONObj* gleResponse ) = 0; BSONObj* gleResponse ) = 0;
/**
* Clears the error information on this connection.
*/
virtual Status clearErrors( DBClientBase* conn,
const StringData& dbName ) = 0;
}; };
/** /**
* Executes a batch write using safe writes. * Executes a batch write using safe writes.
* *
* The actual safe write operation is done via an interface to allow te sting the rest of the * The actual safe write operation is done via an interface to allow te sting the rest of the
* aggregation functionality. * aggregation functionality.
*/ */
class BatchSafeWriter { class BatchSafeWriter {
public: public:
 End of changes. 1 change blocks. 
0 lines changed or deleted 6 lines changed or added


 batch_executor.h   batch_executor.h 
skipping to change at line 58 skipping to change at line 58
struct WriteOpStats; struct WriteOpStats;
class WriteBatchStats; class WriteBatchStats;
/** /**
* An instance of WriteBatchExecutor is an object capable of issuing a write batch. * An instance of WriteBatchExecutor is an object capable of issuing a write batch.
*/ */
class WriteBatchExecutor { class WriteBatchExecutor {
MONGO_DISALLOW_COPYING(WriteBatchExecutor); MONGO_DISALLOW_COPYING(WriteBatchExecutor);
public: public:
// State object used by private execInserts. TODO: Do not expose t
his type.
class ExecInsertsState;
WriteBatchExecutor( const BSONObj& defaultWriteConcern, WriteBatchExecutor( const BSONObj& defaultWriteConcern,
Client* client, Client* client,
OpCounters* opCounters, OpCounters* opCounters,
LastError* le ); LastError* le );
/** /**
* Issues writes with requested write concern. Fills response with errors if problems * Issues writes with requested write concern. Fills response with errors if problems
* occur. * occur.
*/ */
void executeBatch( const BatchedCommandRequest& request, BatchedCom mandResponse* response ); void executeBatch( const BatchedCommandRequest& request, BatchedCom mandResponse* response );
const WriteBatchStats& getStats() const; const WriteBatchStats& getStats() const;
private: private:
/** /**
* Executes the writes in the batch and returns upserted _ids and w rite errors. * Executes the writes in the batch and returns upserted _ids and w rite errors.
* Dispatches to one of the three functions below for DBLock, CurOp , and stats management. * Dispatches to one of the three functions below for DBLock, CurOp , and stats management.
*/ */
void bulkExecute( const BatchedCommandRequest& request, void bulkExecute( const BatchedCommandRequest& request,
std::vector<BatchedUpsertDetail*>* upsertedIds, std::vector<BatchedUpsertDetail*>* upsertedIds,
std::vector<WriteErrorDetail*>* errors ); std::vector<WriteErrorDetail*>* errors );
/** /**
* Executes the inserts of an insert batch and returns the write er rors. * Executes the inserts of an insert batch and returns the write er rors.
* *
* Internally uses the DBLock of the request namespace. * Internally uses the DBLock of the request namespace.
* May execute multiple inserts inside the same DBLock, and/or take the DBLock multiple * May execute multiple inserts inside the same DBLock, and/or take the DBLock multiple
* times. * times.
*/ */
void execInserts( const BatchedCommandRequest& request, void execInserts( const BatchedCommandRequest& request,
std::vector<WriteErrorDetail*>* errors ); std::vector<WriteErrorDetail*>* errors );
/** /**
* Executes a single insert from a batch, described in the opaque "
state" object.
*/
void execOneInsert( ExecInsertsState* state, WriteErrorDetail** err
or );
/**
* Executes an update item (which may update many documents or upse rt), and returns the * Executes an update item (which may update many documents or upse rt), and returns the
* upserted _id on upsert or error on failure. * upserted _id on upsert or error on failure.
* *
* Internally uses the DBLock of the update namespace. * Internally uses the DBLock of the update namespace.
* May take the DBLock multiple times. * May take the DBLock multiple times.
*/ */
void execUpdate( const BatchItemRef& updateItem, void execUpdate( const BatchItemRef& updateItem,
BSONObj* upsertedId, BSONObj* upsertedId,
WriteErrorDetail** error ); WriteErrorDetail** error );
skipping to change at line 180 skipping to change at line 187
}; };
/** /**
* Full stats accumulated by a write batch execution. Note that these stats do not directly * Full stats accumulated by a write batch execution. Note that these stats do not directly
* correspond to the stats accumulated in opCounters and LastError. * correspond to the stats accumulated in opCounters and LastError.
*/ */
class WriteBatchStats { class WriteBatchStats {
public: public:
WriteBatchStats() : WriteBatchStats() :
numInserted( 0 ), numUpserted( 0 ), numUpdated( 0 ), numModifie d( 0 ), numDeleted( 0 ) { numInserted( 0 ), numUpserted( 0 ), numMatched( 0 ), numModifie d( 0 ), numDeleted( 0 ) {
} }
int numInserted; int numInserted;
int numUpserted; int numUpserted;
int numUpdated; int numMatched;
int numModified; int numModified;
int numDeleted; int numDeleted;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
3 lines changed or deleted 13 lines changed or added


 batch_write_op.h   batch_write_op.h 
skipping to change at line 123 skipping to change at line 123
* Fills a BatchCommandRequest from a TargetedWriteBatch for this B atchWriteOp. * Fills a BatchCommandRequest from a TargetedWriteBatch for this B atchWriteOp.
*/ */
void buildBatchRequest( const TargetedWriteBatch& targetedBatch, void buildBatchRequest( const TargetedWriteBatch& targetedBatch,
BatchedCommandRequest* request ) const; BatchedCommandRequest* request ) const;
/** /**
* Stores a response from one of the outstanding TargetedWriteBatch es for this BatchWriteOp. * Stores a response from one of the outstanding TargetedWriteBatch es for this BatchWriteOp.
* The response may be in any form, error or not. * The response may be in any form, error or not.
* *
* There is an additional optional 'trackedErrors' parameter, which can be used to return * There is an additional optional 'trackedErrors' parameter, which can be used to return
* copies of any errors in the response that the caller is interest ed in (specified by * copies of any write errors in the response that the caller is in terested in (specified by
* errCode). (This avoids external callers needing to know much ab out the response format.) * errCode). (This avoids external callers needing to know much ab out the response format.)
*/ */
void noteBatchResponse( const TargetedWriteBatch& targetedBatch, void noteBatchResponse( const TargetedWriteBatch& targetedBatch,
const BatchedCommandResponse& response, const BatchedCommandResponse& response,
TrackedErrors* trackedErrors ); TrackedErrors* trackedErrors );
/** /**
* Stores an error that occurred while trying to send/recv a Target * Stores an error that occurred trying to send/recv a TargetedWrit
edWriteBatch for this eBatch for this
* BatchWriteOp, and so a response is not available. * BatchWriteOp.
*/ */
void noteBatchError( const TargetedWriteBatch& targetedBatch, void noteBatchError( const TargetedWriteBatch& targetedBatch,
const WriteErrorDetail& error ); const WriteErrorDetail& error );
/** /**
* Sets a command error for this batch op directly. * Aborts any further writes in the batch with the provided error.
There must be no pending
* ops awaiting results when a batch is aborted.
* *
* Should only be used when there are no outstanding batches to ret urn. * Batch is finished immediately after aborting.
*/ */
void setBatchError( const WriteErrorDetail& error ); void abortBatch( const WriteErrorDetail& error );
/** /**
* Returns false if the batch write op needs more processing. * Returns false if the batch write op needs more processing.
*/ */
bool isFinished(); bool isFinished();
/** /**
* Fills a batch response to send back to the client. * Fills a batch response to send back to the client.
*/ */
void buildClientResponse( BatchedCommandResponse* batchResp ); void buildClientResponse( BatchedCommandResponse* batchResp );
skipping to change at line 180 skipping to change at line 181
// Current outstanding batch op write requests // Current outstanding batch op write requests
// Not owned here but tracked for reporting // Not owned here but tracked for reporting
std::set<const TargetedWriteBatch*> _targeted; std::set<const TargetedWriteBatch*> _targeted;
// Write concern responses from all write batches so far // Write concern responses from all write batches so far
OwnedPointerVector<ShardWCError> _wcErrors; OwnedPointerVector<ShardWCError> _wcErrors;
// Upserted ids for the whole write batch // Upserted ids for the whole write batch
OwnedPointerVector<BatchedUpsertDetail> _upsertedIds; OwnedPointerVector<BatchedUpsertDetail> _upsertedIds;
// Use to store a top-level error indicating that the batch aborted
unexpectedly and we
// can't report on any of the writes sent. May also include a Shar
dEndpoint indicating
// where the root problem was.
scoped_ptr<ShardError> _batchError;
// Stats for the entire batch op // Stats for the entire batch op
scoped_ptr<BatchWriteStats> _stats; scoped_ptr<BatchWriteStats> _stats;
}; };
struct BatchWriteStats { struct BatchWriteStats {
BatchWriteStats(); BatchWriteStats();
int numInserted; int numInserted;
int numUpserted; int numUpserted;
int numUpdated; int numMatched;
int numModified; int numModified;
int numDeleted; int numDeleted;
std::string toString() const {
StringBuilder str;
str << "numInserted: " << numInserted
<< " numUpserted: " << numUpserted
<< " numMatched: " << numMatched
<< " numModified: " << numModified
<< " numDeleted: " << numDeleted;
return str.str();
}
}; };
/** /**
* Data structure representing the information needed to make a batch r equest, along with * Data structure representing the information needed to make a batch r equest, along with
* pointers to where the resulting responses should be placed. * pointers to where the resulting responses should be placed.
* *
* Internal support for storage as a doubly-linked list, to allow the T argetedWriteBatch to * Internal support for storage as a doubly-linked list, to allow the T argetedWriteBatch to
* efficiently be registered for reporting. * efficiently be registered for reporting.
*/ */
class TargetedWriteBatch { class TargetedWriteBatch {
 End of changes. 8 change blocks. 
15 lines changed or deleted 20 lines changed or added


 batched_command_request.h   batched_command_request.h 
skipping to change at line 51 skipping to change at line 51
* command request. * command request.
* *
* Designed to be a very thin wrapper that mimics the underlying reques ts exactly. Owns the * Designed to be a very thin wrapper that mimics the underlying reques ts exactly. Owns the
* wrapped request object once constructed. * wrapped request object once constructed.
*/ */
class BatchedCommandRequest : public BSONSerializable { class BatchedCommandRequest : public BSONSerializable {
MONGO_DISALLOW_COPYING(BatchedCommandRequest); MONGO_DISALLOW_COPYING(BatchedCommandRequest);
public: public:
// Maximum number of write ops supported per batch // Maximum number of write ops supported per batch
static const int kMaxWriteBatchSize = 1000; static const size_t kMaxWriteBatchSize;
enum BatchType { enum BatchType {
BatchType_Insert, BatchType_Update, BatchType_Delete, BatchType _Unknown BatchType_Insert, BatchType_Update, BatchType_Delete, BatchType _Unknown
}; };
// //
// construction / destruction // construction / destruction
// //
BatchedCommandRequest( BatchType batchType ); BatchedCommandRequest( BatchType batchType );
skipping to change at line 110 skipping to change at line 110
// Batch type accessors // Batch type accessors
// //
BatchType getBatchType() const; BatchType getBatchType() const;
BatchedInsertRequest* getInsertRequest() const; BatchedInsertRequest* getInsertRequest() const;
BatchedUpdateRequest* getUpdateRequest() const; BatchedUpdateRequest* getUpdateRequest() const;
BatchedDeleteRequest* getDeleteRequest() const; BatchedDeleteRequest* getDeleteRequest() const;
// Index creation is also an insert, but a weird one. // Index creation is also an insert, but a weird one.
bool isInsertIndexRequest() const; bool isInsertIndexRequest() const;
bool isUniqueIndexRequest() const; bool isUniqueIndexRequest() const;
bool isValidIndexRequest( std::string* errMsg ) const;
std::string getTargetingNS() const; std::string getTargetingNS() const;
BSONObj getIndexKeyPattern() const; BSONObj getIndexKeyPattern() const;
// //
// individual field accessors // individual field accessors
// //
bool isVerboseWC() const; bool isVerboseWC() const;
void setNS( const StringData& collName ); void setNS( const StringData& collName );
skipping to change at line 209 skipping to change at line 210
const BatchedUpdateDocument* getUpdate() const { const BatchedUpdateDocument* getUpdate() const {
dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps( ) ) ); dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps( ) ) );
return _request->getUpdateRequest()->getUpdatesAt( _itemIndex ) ; return _request->getUpdateRequest()->getUpdatesAt( _itemIndex ) ;
} }
const BatchedDeleteDocument* getDelete() const { const BatchedDeleteDocument* getDelete() const {
dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps( ) ) ); dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps( ) ) );
return _request->getDeleteRequest()->getDeletesAt( _itemIndex ) ; return _request->getDeleteRequest()->getDeletesAt( _itemIndex ) ;
} }
BSONObj toBSON() const {
switch ( getOpType() ) {
case BatchedCommandRequest::BatchType_Insert:
return getDocument();
case BatchedCommandRequest::BatchType_Update:
return getUpdate()->toBSON();
default:
return getDelete()->toBSON();
}
}
private: private:
const BatchedCommandRequest* _request; const BatchedCommandRequest* _request;
const int _itemIndex; const int _itemIndex;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
1 lines changed or deleted 13 lines changed or added


 bgsync.h   bgsync.h 
skipping to change at line 99 skipping to change at line 99
boost::condition _condvar; boost::condition _condvar;
const Member* _currentSyncTarget; const Member* _currentSyncTarget;
// Notifier thread // Notifier thread
// used to wait until another op has been replicated // used to wait until another op has been replicated
boost::condition_variable _lastOpCond; boost::condition_variable _lastOpCond;
boost::mutex _lastOpMutex; boost::mutex _lastOpMutex;
const Member* _oplogMarkerTarget;
OpTime _consumedOpTime; // not locked, only used by notifier thread OpTime _consumedOpTime; // not locked, only used by notifier thread
BackgroundSync(); BackgroundSync();
BackgroundSync(const BackgroundSync& s); BackgroundSync(const BackgroundSync& s);
BackgroundSync operator=(const BackgroundSync& s); BackgroundSync operator=(const BackgroundSync& s);
// Production thread // Production thread
void _producerThread(); void _producerThread();
// Adds elements to the list, up to maxSize. // Adds elements to the list, up to maxSize.
void produce(); void produce();
skipping to change at line 127 skipping to change at line 126
// stop syncing when this becomes a primary // stop syncing when this becomes a primary
void stop(); void stop();
// restart syncing // restart syncing
void start(); void start();
// Tracker thread // Tracker thread
// tells the sync target where this member is synced to // tells the sync target where this member is synced to
void markOplog(); void markOplog();
bool hasCursor(); bool hasCursor();
// Sets _oplogMarkerTarget and calls connect();
// used for both the notifier command and the older OplogReader sty
le notifier
bool connectOplogNotifier();
bool isAssumingPrimary(); bool isAssumingPrimary();
public: public:
static BackgroundSync* get(); static BackgroundSync* get();
static void shutdown(); static void shutdown();
static void notify(); static void notify();
virtual ~BackgroundSync() {} virtual ~BackgroundSync() {}
// starts the producer thread // starts the producer thread
 End of changes. 2 change blocks. 
6 lines changed or deleted 0 lines changed or added


 bson.h   bson.h 
skipping to change at line 26 skipping to change at line 26
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
/** /**
Main include file for C++ BSON module when using standalone (sans MongoD Main include file for C++ BSON. This pulls in fewer dependencies than
B client). mongo/client/dbclient.h, but still requires libmongoclient to link.
"BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be "BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be
represented in JSON (plus a few extensions useful for databases & other languages). represented in JSON (plus a few extensions useful for databases & other languages).
http://www.bsonspec.org/ http://www.bsonspec.org/
*/ */
#pragma once #pragma once
#if defined(MONGO_EXPOSE_MACROS) #ifdef MONGO_EXPOSE_MACROS
#error this header is for client programs, not the mongo database itself. i #error bson.h is for C++ driver consumer use only
nclude jsobj.h instead.
/* because we define simplistic assert helpers here that don't pull in a bu
nch of util -- so that
BSON can be used header only.
*/
#endif #endif
#include <cstdlib> #define LIBMONGOCLIENT_CONSUMER
#include <memory>
#include <iostream> #include "mongo/client/redef_macros.h"
#include <sstream>
#include "mongo/pch.h"
#include "mongo/platform/compiler.h"
namespace bson {
using std::string;
using std::stringstream;
class assertion : public std::exception {
public:
assertion( unsigned u , const std::string& s )
: id( u ) , msg( s ) {
std::stringstream ss;
ss << "BsonAssertion id: " << u << " " << s;
full = ss.str();
}
virtual ~assertion() throw() {}
virtual const char* what() const throw() { return full.c_str(); }
unsigned id;
std::string msg;
std::string full;
};
}
namespace mongo {
#if !defined(verify)
inline void verify(bool expr) {
if(!expr) {
throw bson::assertion( 0 , "assertion failure in bson library"
);
}
}
#endif
#if !defined(uassert)
MONGO_COMPILER_NORETURN inline void uasserted(int msgid, const std::str
ing &s) {
throw bson::assertion( msgid , s );
}
inline void uassert(unsigned msgid, const std::string& msg, bool expr)
{
if( !expr )
uasserted( msgid , msg );
}
MONGO_COMPILER_NORETURN inline void msgasserted(int msgid, const char *
msg) {
throw bson::assertion( msgid , msg );
}
MONGO_COMPILER_NORETURN inline void msgasserted(int msgid, const std::s
tring &msg) {
msgasserted(msgid, msg.c_str());
}
inline void massert(int msgid, const std::string& msg, bool expr) {
if(!expr) {
std::cout << "assertion failure in bson library: " << msgid <<
' ' << msg << std::endl;
throw bson::assertion( msgid , msg );
}
}
#endif
}
#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/bsonobjiterator.h" #include "mongo/bson/bsonobjiterator.h"
#include "mongo/bson/bsontypes.h" #include "mongo/bson/bsontypes.h"
#include "mongo/bson/bson-inl.h" #include "mongo/bson/bson-inl.h"
#include "mongo/bson/oid.h" #include "mongo/bson/oid.h"
#include "mongo/bson/util/builder.h" #include "mongo/bson/util/builder.h"
#include "mongo/client/undef_macros.h"
 End of changes. 4 change blocks. 
77 lines changed or deleted 9 lines changed or added


 bsonobjbuilder.h   bsonobjbuilder.h 
skipping to change at line 842 skipping to change at line 842
return *this; return *this;
} }
bool isArray() const { bool isArray() const {
return true; return true;
} }
int len() const { return _b.len(); } int len() const { return _b.len(); }
int arrSize() const { return _i; } int arrSize() const { return _i; }
BufBuilder& bb() { return _b.bb(); }
private: private:
// These two are undefined privates to prevent their accidental // These two are undefined privates to prevent their accidental
// use as we don't support unsigned ints in BSON // use as we don't support unsigned ints in BSON
BSONObjBuilder& append(const StringData& fieldName, unsigned int va l); BSONObjBuilder& append(const StringData& fieldName, unsigned int va l);
BSONObjBuilder& append(const StringData& fieldName, unsigned long l ong val); BSONObjBuilder& append(const StringData& fieldName, unsigned long l ong val);
void fill( const StringData& name ) { void fill( const StringData& name ) {
long int n; long int n;
Status status = parseNumberFromStringWithBase( name, 10, &n ); Status status = parseNumberFromStringWithBase( name, 10, &n );
uassert( 13048, uassert( 13048,
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 btree_access_method.h   btree_access_method.h 
skipping to change at line 60 skipping to change at line 60
class BtreeAccessMethod : public BtreeBasedAccessMethod { class BtreeAccessMethod : public BtreeBasedAccessMethod {
public: public:
// Every Btree-based index needs these. We put them in the BtreeBa sedAccessMethod // Every Btree-based index needs these. We put them in the BtreeBa sedAccessMethod
// superclass and subclasses (like this) can use them. // superclass and subclasses (like this) can use them.
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
using BtreeBasedAccessMethod::_interface; using BtreeBasedAccessMethod::_interface;
BtreeAccessMethod(IndexCatalogEntry* btreeState ); BtreeAccessMethod(IndexCatalogEntry* btreeState );
virtual ~BtreeAccessMethod() { } virtual ~BtreeAccessMethod() { }
virtual shared_ptr<KeyGenerator> getKeyGenerator() const { return _ keyGenerator; }
private: private:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// Our keys differ for V0 and V1. // Our keys differ for V0 and V1.
scoped_ptr<BtreeKeyGenerator> _keyGenerator; // this is shared so that we can pass it off
shared_ptr<BtreeKeyGenerator> _keyGenerator;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 btree_based_access_method.h   btree_based_access_method.h 
skipping to change at line 67 skipping to change at line 67
class BtreeBasedAccessMethod : public IndexAccessMethod { class BtreeBasedAccessMethod : public IndexAccessMethod {
MONGO_DISALLOW_COPYING( BtreeBasedAccessMethod ); MONGO_DISALLOW_COPYING( BtreeBasedAccessMethod );
public: public:
BtreeBasedAccessMethod( IndexCatalogEntry* btreeState ); BtreeBasedAccessMethod( IndexCatalogEntry* btreeState );
virtual ~BtreeBasedAccessMethod() { } virtual ~BtreeBasedAccessMethod() { }
virtual Status insert(const BSONObj& obj, virtual Status insert(const BSONObj& obj,
const DiskLoc& loc, const DiskLoc& loc,
const InsertDeleteOptions& options, const InsertDeleteOptions& options,
int64_t* numInserted); int64_t* numInserted,
const PregeneratedKeysOnIndex* prepared = NUL
L ) ;
virtual Status remove(const BSONObj& obj, virtual Status remove(const BSONObj& obj,
const DiskLoc& loc, const DiskLoc& loc,
const InsertDeleteOptions& options, const InsertDeleteOptions& options,
int64_t* numDeleted); int64_t* numDeleted);
virtual Status validateUpdate(const BSONObj& from, virtual Status validateUpdate(const BSONObj& from,
const BSONObj& to, const BSONObj& to,
const DiskLoc& loc, const DiskLoc& loc,
const InsertDeleteOptions& options, const InsertDeleteOptions& options,
skipping to change at line 94 skipping to change at line 95
virtual Status initializeAsEmpty(); virtual Status initializeAsEmpty();
virtual IndexAccessMethod* initiateBulk() ; virtual IndexAccessMethod* initiateBulk() ;
virtual Status commitBulk( IndexAccessMethod* bulk, virtual Status commitBulk( IndexAccessMethod* bulk,
bool mayInterrupt, bool mayInterrupt,
std::set<DiskLoc>* dups ); std::set<DiskLoc>* dups );
virtual Status touch(const BSONObj& obj); virtual Status touch(const BSONObj& obj);
virtual Status touch( const BSONObjSet& keys );
virtual Status validate(int64_t* numKeys); virtual Status validate(int64_t* numKeys);
// XXX: consider migrating callers to use IndexCursor instead // XXX: consider migrating callers to use IndexCursor instead
virtual DiskLoc findSingle( const BSONObj& key ) const; virtual DiskLoc findSingle( const BSONObj& key ) const;
// exposed for testing, used for bulk commit // exposed for testing, used for bulk commit
static ExternalSortComparison* getComparison(int version, static ExternalSortComparison* getComparison(int version,
const BSONObj& keyPattern); const BSONObj& keyPattern);
protected: protected:
 End of changes. 2 change blocks. 
1 lines changed or deleted 5 lines changed or added


 btree_key_generator.h   btree_key_generator.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <vector>
#include <set> #include <set>
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/index/key_generator.h"
namespace mongo { namespace mongo {
/** /**
* Internal class used by BtreeAccessMethod to generate keys for indexe d documents. * Internal class used by BtreeAccessMethod to generate keys for indexe d documents.
* This class is meant to be kept under the index access layer. * This class is meant to be kept under the index access layer.
*/ */
class BtreeKeyGenerator { class BtreeKeyGenerator : public KeyGenerator {
public: public:
BtreeKeyGenerator(vector<const char*> fieldNames, vector<BSONElemen t> fixed, bool isSparse); BtreeKeyGenerator(vector<const char*> fieldNames, vector<BSONElemen t> fixed, bool isSparse);
virtual ~BtreeKeyGenerator() { } virtual ~BtreeKeyGenerator() { }
void getKeys(const BSONObj &obj, BSONObjSet *keys) const; void getKeys(const BSONObj &obj, BSONObjSet *keys) const;
static const int ParallelArraysCode; static const int ParallelArraysCode;
protected: protected:
// These are used by the getKeysImpl(s) below. // These are used by the getKeysImpl(s) below.
vector<const char*> _fieldNames; vector<string> _fieldNames;
bool _isSparse; bool _isSparse;
BSONObj _nullKey; // a full key with all fields null BSONObj _nullKey; // a full key with all fields null
BSONObj _nullObj; // only used for _nullElt BSONObj _nullObj; // only used for _nullElt
BSONElement _nullElt; // jstNull BSONElement _nullElt; // jstNull
BSONSizeTracker _sizeTracker; BSONSizeTracker _sizeTracker;
private: private:
// We have V0 and V1. Sigh. // We have V0 and V1. Sigh.
virtual void getKeysImpl(vector<const char*> fieldNames, vector<BSO NElement> fixed, virtual void getKeysImpl(vector<const char*> fieldNames, vector<BSO NElement> fixed,
const BSONObj &obj, BSONObjSet *keys) cons t = 0; const BSONObj &obj, BSONObjSet *keys) cons t = 0;
vector<BSONElement> _fixed; vector<BSONElement> _fixed;
 End of changes. 3 change blocks. 
2 lines changed or deleted 3 lines changed or added


 cached_plan_runner.h   cached_plan_runner.h 
skipping to change at line 60 skipping to change at line 60
/** /**
* CachedPlanRunner runs a plan retrieved from the cache. * CachedPlanRunner runs a plan retrieved from the cache.
* *
* If we run a plan from the cache and behavior wildly deviates from expected behavior, we may * If we run a plan from the cache and behavior wildly deviates from expected behavior, we may
* remove the plan from the cache. See plan_cache.h. * remove the plan from the cache. See plan_cache.h.
*/ */
class CachedPlanRunner : public Runner { class CachedPlanRunner : public Runner {
public: public:
/** /**
* Takes ownership of all arguments. * Takes ownership of all arguments.
* XXX: what args should this really take? probably a cachekey as well?
*/ */
CachedPlanRunner(const Collection* collection, CachedPlanRunner(const Collection* collection,
CanonicalQuery* canonicalQuery, QuerySolution* sol CanonicalQuery* canonicalQuery,
ution, QuerySolution* solution,
PlanStage* root, WorkingSet* ws); PlanStage* root,
WorkingSet* ws);
virtual ~CachedPlanRunner(); virtual ~CachedPlanRunner();
Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
virtual bool isEOF(); virtual bool isEOF();
virtual void saveState(); virtual void saveState();
virtual bool restoreState(); virtual bool restoreState();
 End of changes. 2 change blocks. 
4 lines changed or deleted 4 lines changed or added


 canonical_query.h   canonical_query.h 
skipping to change at line 45 skipping to change at line 45
#include "mongo/db/query/lite_parsed_query.h" #include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/query/parsed_projection.h" #include "mongo/db/query/parsed_projection.h"
namespace mongo { namespace mongo {
// TODO: Is this binary data really? // TODO: Is this binary data really?
typedef std::string PlanCacheKey; typedef std::string PlanCacheKey;
class CanonicalQuery { class CanonicalQuery {
public: public:
/**
* Caller owns the pointer in 'out' if any call to canonicalize ret
urns Status::OK().
*/
static Status canonicalize(const QueryMessage& qm, CanonicalQuery** out); static Status canonicalize(const QueryMessage& qm, CanonicalQuery** out);
/** /**
* For testing or for internal clients to use. * For testing or for internal clients to use.
*/ */
/**
* Used for creating sub-queries from an existing CanonicalQuery.
*
* 'root' must be an expression in baseQuery.root().
*
* Does not take ownership of 'root'.
*/
static Status canonicalize(const CanonicalQuery& baseQuery,
MatchExpression* root,
CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query, CanonicalQuery** out); static Status canonicalize(const string& ns, const BSONObj& query, CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query, long long skip, static Status canonicalize(const string& ns, const BSONObj& query, long long skip,
long long limit, CanonicalQuery** out); long long limit, CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort, static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort,
const BSONObj& proj, CanonicalQuery** ou t); const BSONObj& proj, CanonicalQuery** ou t);
static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort, static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort,
const BSONObj& proj, const BSONObj& proj,
skipping to change at line 74 skipping to change at line 89
const BSONObj& proj, const BSONObj& proj,
long long skip, long long limit, long long skip, long long limit,
const BSONObj& hint, const BSONObj& hint,
CanonicalQuery** out); CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort, static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort,
const BSONObj& proj, const BSONObj& proj,
long long skip, long long limit, long long skip, long long limit,
const BSONObj& hint, const BSONObj& hint,
const BSONObj& minObj, const BSONObj& ma xObj, const BSONObj& minObj, const BSONObj& ma xObj,
bool snapshot, CanonicalQuery** out); bool snapshot,
bool explain,
CanonicalQuery** out);
/** /**
* Returns true if "query" describes an exact-match query on _id, p ossibly with * Returns true if "query" describes an exact-match query on _id, p ossibly with
* the $isolated/$atomic modifier. * the $isolated/$atomic modifier.
*/ */
static bool isSimpleIdQuery(const BSONObj& query); static bool isSimpleIdQuery(const BSONObj& query);
// What namespace is this query over? // What namespace is this query over?
const string& ns() const { return _pq->ns(); } const string& ns() const { return _pq->ns(); }
skipping to change at line 99 skipping to change at line 116
BSONObj getQueryObj() const { return _pq->getFilter(); } BSONObj getQueryObj() const { return _pq->getFilter(); }
const LiteParsedQuery& getParsed() const { return *_pq; } const LiteParsedQuery& getParsed() const { return *_pq; }
const ParsedProjection* getProj() const { return _proj.get(); } const ParsedProjection* getProj() const { return _proj.get(); }
/** /**
* Get the cache key for this canonical query. * Get the cache key for this canonical query.
*/ */
const PlanCacheKey& getPlanCacheKey() const; const PlanCacheKey& getPlanCacheKey() const;
// Debugging // Debugging
string toString() const; std::string toString() const;
std::string toStringShort() const;
/** /**
* Validates match expression, checking for certain * Validates match expression, checking for certain
* combinations of operators in match expression and * combinations of operators in match expression and
* query options in LiteParsedQuery. * query options in LiteParsedQuery.
* Since 'root' is derived from 'filter' in LiteParsedQuery, * Since 'root' is derived from 'filter' in LiteParsedQuery,
* 'filter' is not validated. * 'filter' is not validated.
* *
* TODO: Move this to query_validator.cpp * TODO: Move this to query_validator.cpp
*/ */
skipping to change at line 125 skipping to change at line 143
* Takes ownership of 'root'. * Takes ownership of 'root'.
*/ */
static MatchExpression* normalizeTree(MatchExpression* root); static MatchExpression* normalizeTree(MatchExpression* root);
/** /**
* Traverses expression tree post-order. * Traverses expression tree post-order.
* Sorts children at each non-leaf node by (MatchType, path(), cach eKey) * Sorts children at each non-leaf node by (MatchType, path(), cach eKey)
*/ */
static void sortTree(MatchExpression* tree); static void sortTree(MatchExpression* tree);
/**
* Returns a count of 'type' nodes in expression tree.
*/
static size_t countNodes(const MatchExpression* root, MatchExpressi
on::MatchType type);
/**
* Takes ownership of 'tree'. Performs some rewriting of the query
to a logically
* equivalent but more digestible form.
*
* TODO: This doesn't entirely belong here. Really we'd do this wh
ile exploring
* solutions in an enumeration setting but given the current lack o
f pruning
* while exploring the enumeration space we do it here.
*/
static MatchExpression* logicalRewrite(MatchExpression* tree);
private: private:
// You must go through canonicalize to create a CanonicalQuery. // You must go through canonicalize to create a CanonicalQuery.
CanonicalQuery() { } CanonicalQuery() { }
/** /**
* Computes and stores the cache key / query shape * Computes and stores the cache key / query shape
* for this query. * for this query.
*/ */
void generateCacheKey(void); void generateCacheKey(void);
// Takes ownership of lpq /**
Status init(LiteParsedQuery* lpq); * Takes ownership of 'root' and 'lpq'.
*/
Status init(LiteParsedQuery* lpq, MatchExpression* root);
scoped_ptr<LiteParsedQuery> _pq; scoped_ptr<LiteParsedQuery> _pq;
// _root points into _pq->getFilter() // _root points into _pq->getFilter()
scoped_ptr<MatchExpression> _root; scoped_ptr<MatchExpression> _root;
scoped_ptr<ParsedProjection> _proj; scoped_ptr<ParsedProjection> _proj;
/** /**
* Cache key is a string-ified combination of the query and sort ob fuscated * Cache key is a string-ified combination of the query and sort ob fuscated
 End of changes. 6 change blocks. 
4 lines changed or deleted 44 lines changed or added


 chunk_manager_targeter.h   chunk_manager_targeter.h 
skipping to change at line 76 skipping to change at line 76
Status targetInsert( const BSONObj& doc, ShardEndpoint** endpoint ) const; Status targetInsert( const BSONObj& doc, ShardEndpoint** endpoint ) const;
// Returns ShardKeyNotFound if the update can't be targeted without a shard key. // Returns ShardKeyNotFound if the update can't be targeted without a shard key.
Status targetUpdate( const BatchedUpdateDocument& updateDoc, Status targetUpdate( const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints ) const ; std::vector<ShardEndpoint*>* endpoints ) const ;
// Returns ShardKeyNotFound if the delete can't be targeted without a shard key. // Returns ShardKeyNotFound if the delete can't be targeted without a shard key.
Status targetDelete( const BatchedDeleteDocument& deleteDoc, Status targetDelete( const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints ) const ; std::vector<ShardEndpoint*>* endpoints ) const ;
Status targetAll( std::vector<ShardEndpoint*>* endpoints ) const; Status targetCollection( std::vector<ShardEndpoint*>* endpoints ) c
onst;
Status targetAllShards( std::vector<ShardEndpoint*>* endpoints ) co
nst;
void noteStaleResponse( const ShardEndpoint& endpoint, const BSONObj& staleInfo ); void noteStaleResponse( const ShardEndpoint& endpoint, const BSONObj& staleInfo );
void noteCouldNotTarget(); void noteCouldNotTarget();
/** /**
* Replaces the targeting information with the latest information f rom the cache. If this * Replaces the targeting information with the latest information f rom the cache. If this
* information is stale WRT the noted stale responses or a remote r efresh is needed due * information is stale WRT the noted stale responses or a remote r efresh is needed due
* to a targeting failure, will contact the config servers to reloa d the metadata. * to a targeting failure, will contact the config servers to reloa d the metadata.
* *
 End of changes. 1 change blocks. 
1 lines changed or deleted 5 lines changed or added


 client.h   client.h 
skipping to change at line 89 skipping to change at line 89
* call this when your thread starts. * call this when your thread starts.
*/ */
static Client& initThread(const char *desc, AbstractMessagingPort * mp = 0); static Client& initThread(const char *desc, AbstractMessagingPort * mp = 0);
static void initThreadIfNotAlready(const char *desc) { static void initThreadIfNotAlready(const char *desc) {
if( currentClient.get() ) if( currentClient.get() )
return; return;
initThread(desc); initThread(desc);
} }
/**
* Allows detaching a thread from a Client object. Use for testing
and for the creation
* of non-connection clients.
*/
static void resetThread( const StringData& origThreadName );
/** this has to be called as the client goes away, but before threa d termination /** this has to be called as the client goes away, but before threa d termination
* @return true if anything was done * @return true if anything was done
*/ */
bool shutdown(); bool shutdown();
string clientAddress(bool includePort=false) const; string clientAddress(bool includePort=false) const;
CurOp* curop() const { return _curOp; } CurOp* curop() const { return _curOp; }
Context* getContext() const { return _context; } Context* getContext() const { return _context; }
Database* database() const { return _context ? _context->db() : 0; } Database* database() const { return _context ? _context->db() : 0; }
const char *ns() const { return _context->ns(); } const char *ns() const { return _context->ns(); }
skipping to change at line 126 skipping to change at line 120
bool setGod(bool newVal) { const bool prev = _god; _god = newVal; r eturn prev; } bool setGod(bool newVal) { const bool prev = _god; _god = newVal; r eturn prev; }
string toString() const; string toString() const;
void gotHandshake( const BSONObj& o ); void gotHandshake( const BSONObj& o );
BSONObj getRemoteID() const { return _remoteId; } BSONObj getRemoteID() const { return _remoteId; }
BSONObj getHandshake() const { return _handshake; } BSONObj getHandshake() const { return _handshake; }
ConnectionId getConnectionId() const { return _connectionId; } ConnectionId getConnectionId() const { return _connectionId; }
bool inPageFaultRetryableSection() const { return _pageFaultRetryab leSection != 0; } bool inPageFaultRetryableSection() const { return _pageFaultRetryab leSection != 0; }
PageFaultRetryableSection* getPageFaultRetryableSection() const { r eturn _pageFaultRetryableSection; } PageFaultRetryableSection* getPageFaultRetryableSection() const { r eturn _pageFaultRetryableSection; }
bool hasWrittenThisPass() const { return _hasWrittenThisPass; } void writeHappened() { _hasWrittenSinceCheckpoint = true; _hasWritt
void writeHappened() { _hasWrittenThisPass = true; } enThisOperation = true; }
void newTopLevelRequest() { _hasWrittenThisPass = false; } bool hasWrittenSinceCheckpoint() const { return _hasWrittenSinceChe
ckpoint; }
void checkpointHappened() { _hasWrittenSinceCheckpoint = false; }
bool hasWrittenThisOperation() const { return _hasWrittenThisOperat
ion; }
void newTopLevelRequest() {
_hasWrittenThisOperation = false;
_hasWrittenSinceCheckpoint = false;
}
/**
* Call this to allow PageFaultExceptions even if writes happened b
efore this was called.
* Writes after this is called still prevent PFEs from being thrown
.
*/
void clearHasWrittenThisOperation() { _hasWrittenThisOperation = fa
lse; }
bool allowedToThrowPageFaultException() const; bool allowedToThrowPageFaultException() const;
LockState& lockState() { return _ls; } LockState& lockState() { return _ls; }
private: private:
Client(const std::string& desc, AbstractMessagingPort *p = 0); Client(const std::string& desc, AbstractMessagingPort *p = 0);
friend class CurOp; friend class CurOp;
ConnectionId _connectionId; // > 0 for things "conn", 0 otherwise ConnectionId _connectionId; // > 0 for things "conn", 0 otherwise
string _threadId; // "" on non support systems string _threadId; // "" on non support systems
CurOp * _curOp; CurOp * _curOp;
Context * _context; Context * _context;
bool _shutdown; // to track if Client::shutdown() gets called bool _shutdown; // to track if Client::shutdown() gets called
std::string _desc; std::string _desc;
bool _god; bool _god;
OpTime _lastOp; OpTime _lastOp;
BSONObj _handshake; BSONObj _handshake;
BSONObj _remoteId; BSONObj _remoteId;
bool _hasWrittenThisPass; bool _hasWrittenThisOperation;
bool _hasWrittenSinceCheckpoint;
PageFaultRetryableSection *_pageFaultRetryableSection; PageFaultRetryableSection *_pageFaultRetryableSection;
LockState _ls; LockState _ls;
friend class PageFaultRetryableSection; // TEMP friend class PageFaultRetryableSection; // TEMP
friend class NoPageFaultsAllowed; // TEMP friend class NoPageFaultsAllowed; // TEMP
public: public:
/** "read lock, and set my context, all in one operation" /** "read lock, and set my context, all in one operation"
* This handles (if not recursively locked) opening an unopened da tabase. * This handles (if not recursively locked) opening an unopened da tabase.
 End of changes. 3 change blocks. 
11 lines changed or deleted 22 lines changed or added


 cloner.h   cloner.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
struct CloneOptions; struct CloneOptions;
class DBClientBase; class DBClientBase;
class DBClientCursor; class DBClientCursor;
class Query; class Query;
 End of changes. 1 change blocks. 
0 lines changed or deleted 1 lines changed or added


 collection.h   collection.h 
skipping to change at line 51 skipping to change at line 51
#include "mongo/db/structure/record_store.h" #include "mongo/db/structure/record_store.h"
#include "mongo/db/catalog/collection_info_cache.h" #include "mongo/db/catalog/collection_info_cache.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
namespace mongo { namespace mongo {
class Database; class Database;
class ExtentManager; class ExtentManager;
class NamespaceDetails; class NamespaceDetails;
class IndexCatalog; class IndexCatalog;
class MultiIndexBlock;
class CollectionIterator; class CollectionIterator;
class FlatIterator; class FlatIterator;
class CappedIterator; class CappedIterator;
class OpDebug; class OpDebug;
class DocWriter { class DocWriter {
public: public:
virtual ~DocWriter() {} virtual ~DocWriter() {}
skipping to change at line 158 skipping to change at line 159
void deleteDocument( const DiskLoc& loc, void deleteDocument( const DiskLoc& loc,
bool cappedOK = false, bool cappedOK = false,
bool noWarn = false, bool noWarn = false,
BSONObj* deletedId = 0 ); BSONObj* deletedId = 0 );
/** /**
* this does NOT modify the doc before inserting * this does NOT modify the doc before inserting
* i.e. will not add an _id field for documents that are missing it * i.e. will not add an _id field for documents that are missing it
*/ */
StatusWith<DiskLoc> insertDocument( const BSONObj& doc, bool enforc StatusWith<DiskLoc> insertDocument( const BSONObj& doc, bool enforc
eQuota ); eQuota,
const PregeneratedKeys* preGen
= NULL );
StatusWith<DiskLoc> insertDocument( const DocWriter* doc, bool enfo rceQuota ); StatusWith<DiskLoc> insertDocument( const DocWriter* doc, bool enfo rceQuota );
StatusWith<DiskLoc> insertDocument( const BSONObj& doc, MultiIndexB
lock& indexBlock );
/** /**
* updates the document @ oldLocation with newDoc * updates the document @ oldLocation with newDoc
* if the document fits in the old space, it is put there * if the document fits in the old space, it is put there
* if not, it is moved * if not, it is moved
* @return the post update location of the doc (may or may not be t he same as oldLocation) * @return the post update location of the doc (may or may not be t he same as oldLocation)
*/ */
StatusWith<DiskLoc> updateDocument( const DiskLoc& oldLocation, StatusWith<DiskLoc> updateDocument( const DiskLoc& oldLocation,
const BSONObj& newDoc, const BSONObj& newDoc,
bool enforceQuota, bool enforceQuota,
OpDebug* debug ); OpDebug* debug );
skipping to change at line 210 skipping to change at line 214
return 5; return 5;
return static_cast<int>( dataSize() / n ); return static_cast<int>( dataSize() / n );
} }
private: private:
/** /**
* same semantics as insertDocument, but doesn't do: * same semantics as insertDocument, but doesn't do:
* - some user error checks * - some user error checks
* - adjust padding * - adjust padding
*/ */
StatusWith<DiskLoc> _insertDocument( const BSONObj& doc, bool enfor StatusWith<DiskLoc> _insertDocument( const BSONObj& doc,
ceQuota ); bool enforceQuota,
const PregeneratedKeys* preGen
);
void _compactExtent(const DiskLoc diskloc, int extentNumber, void _compactExtent(const DiskLoc diskloc, int extentNumber,
vector<IndexAccessMethod*>& indexesToInsertTo, MultiIndexBlock& indexesToInsertTo,
const CompactOptions* compactOptions, CompactSt ats* stats ); const CompactOptions* compactOptions, CompactSt ats* stats );
// @return 0 for inf., otherwise a number of files // @return 0 for inf., otherwise a number of files
int largestFileNumberInQuota() const; int largestFileNumberInQuota() const;
ExtentManager* getExtentManager(); ExtentManager* getExtentManager();
const ExtentManager* getExtentManager() const; const ExtentManager* getExtentManager() const;
int _magic; int _magic;
 End of changes. 5 change blocks. 
5 lines changed or deleted 13 lines changed or added


 collection_iterator.h   collection_iterator.h 
skipping to change at line 51 skipping to change at line 51
* A CollectionIterator provides an interface for walking over a collec tion. * A CollectionIterator provides an interface for walking over a collec tion.
* The details of navigating the collection's structure are below this interface. * The details of navigating the collection's structure are below this interface.
*/ */
class CollectionIterator { class CollectionIterator {
public: public:
virtual ~CollectionIterator() { } virtual ~CollectionIterator() { }
// True if getNext will produce no more data, false otherwise. // True if getNext will produce no more data, false otherwise.
virtual bool isEOF() = 0; virtual bool isEOF() = 0;
// Return the next item from the collection. Returns DiskLoc() if // Return the DiskLoc that the iterator points at. Returns DiskLoc
isEOF. () if isEOF.
virtual DiskLoc curr() = 0;
// Return the DiskLoc that the iterator points at and move the iter
ator to the next item
// from the collection. Returns DiskLoc() if isEOF.
virtual DiskLoc getNext() = 0; virtual DiskLoc getNext() = 0;
// Can only be called after prepareToYield and before recoverFromYi eld. // Can only be called after prepareToYield and before recoverFromYi eld.
virtual void invalidate(const DiskLoc& dl) = 0; virtual void invalidate(const DiskLoc& dl) = 0;
// Save any state required to resume operation (without crashing) a fter DiskLoc deletion or // Save any state required to resume operation (without crashing) a fter DiskLoc deletion or
// a collection drop. // a collection drop.
virtual void prepareToYield() = 0; virtual void prepareToYield() = 0;
// Returns true if collection still exists, false otherwise. // Returns true if collection still exists, false otherwise.
skipping to change at line 79 skipping to change at line 83
* If start is not DiskLoc(), the iteration begins at that DiskLoc. * If start is not DiskLoc(), the iteration begins at that DiskLoc.
*/ */
class FlatIterator : public CollectionIterator { class FlatIterator : public CollectionIterator {
public: public:
FlatIterator(const Collection* collection, const DiskLoc& start, FlatIterator(const Collection* collection, const DiskLoc& start,
const CollectionScanParams::Direction& dir); const CollectionScanParams::Direction& dir);
virtual ~FlatIterator() { } virtual ~FlatIterator() { }
virtual bool isEOF(); virtual bool isEOF();
virtual DiskLoc getNext(); virtual DiskLoc getNext();
virtual DiskLoc curr();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl);
virtual void prepareToYield(); virtual void prepareToYield();
virtual bool recoverFromYield(); virtual bool recoverFromYield();
private: private:
// The result returned on the next call to getNext(). // The result returned on the next call to getNext().
DiskLoc _curr; DiskLoc _curr;
const Collection* _collection; const Collection* _collection;
skipping to change at line 111 skipping to change at line 116
*/ */
class CappedIterator : public CollectionIterator { class CappedIterator : public CollectionIterator {
public: public:
CappedIterator(const Collection* collection, const DiskLoc& start, bool tailable, CappedIterator(const Collection* collection, const DiskLoc& start, bool tailable,
const CollectionScanParams::Direction& dir); const CollectionScanParams::Direction& dir);
virtual ~CappedIterator() { } virtual ~CappedIterator() { }
// If this is a tailable cursor, isEOF could change its mind after a call to getNext(). // If this is a tailable cursor, isEOF could change its mind after a call to getNext().
virtual bool isEOF(); virtual bool isEOF();
virtual DiskLoc getNext(); virtual DiskLoc getNext();
virtual DiskLoc curr();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl);
virtual void prepareToYield(); virtual void prepareToYield();
virtual bool recoverFromYield(); virtual bool recoverFromYield();
private: private:
/** /**
* Internal collection navigation helper methods. * Internal collection navigation helper methods.
*/ */
static DiskLoc getNextCapped(const NamespaceDetails* nsd, const Ext entManager* em, static DiskLoc getNextCapped(const NamespaceDetails* nsd, const Ext entManager* em,
 End of changes. 3 change blocks. 
2 lines changed or deleted 9 lines changed or added


 collection_metadata.h   collection_metadata.h 
skipping to change at line 105 skipping to change at line 105
* *
* If a new metadata can't be created, returns NULL and fills in 'e rrMsg', if it was * If a new metadata can't be created, returns NULL and fills in 'e rrMsg', if it was
* provided. * provided.
*/ */
CollectionMetadata* cloneMigrate( const ChunkType& chunk, CollectionMetadata* cloneMigrate( const ChunkType& chunk,
const ChunkVersion& newShardVersi on, const ChunkVersion& newShardVersi on,
string* errMsg ) const; string* errMsg ) const;
/** /**
* Returns a new metadata's instance by splitting an existing 'chunk' at the points * Returns a new metadata's instance by splitting an existing 'chunk' at the points
* describe by 'splitKeys'. The first resulting chunk will have 'newShardVersion' and * described by 'splitKeys'. The first resulting chunk will have 'newShardVersion' and
* subsequent one would have that with the minor version incremente d at each chunk. The * subsequent one would have that with the minor version incremente d at each chunk. The
* caller owns the metadata. * caller owns the metadata.
* *
* If a new metadata can't be created, returns NULL and fills in 'e rrMsg', if it was * If a new metadata can't be created, returns NULL and fills in 'e rrMsg', if it was
* provided. * provided.
*
* Note: 'splitKeys' must be sorted in ascending order.
*/ */
CollectionMetadata* cloneSplit( const ChunkType& chunk, CollectionMetadata* cloneSplit( const ChunkType& chunk,
const vector<BSONObj>& splitKeys, const vector<BSONObj>& splitKeys,
const ChunkVersion& newShardVersion , const ChunkVersion& newShardVersion ,
string* errMsg ) const; string* errMsg ) const;
/** /**
* Returns a new metadata instance by merging a key range which sta rts and ends at existing * Returns a new metadata instance by merging a key range which sta rts and ends at existing
* chunks into a single chunk. The range may not have holes. The resulting metadata will * chunks into a single chunk. The range may not have holes. The resulting metadata will
* have the 'newShardVersion'. The caller owns the new metadata. * have the 'newShardVersion'. The caller owns the new metadata.
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 collection_scan.h   collection_scan.h 
skipping to change at line 63 skipping to change at line 63
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
virtual void invalidate(const DiskLoc& dl, InvalidationType type); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
/**
* Returns true if the record 'loc' references is in memory, false
otherwise.
*/
static bool diskLocInMemory(DiskLoc loc);
// WorkingSet is not owned by us. // WorkingSet is not owned by us.
WorkingSet* _workingSet; WorkingSet* _workingSet;
// The filter is not owned by us. // The filter is not owned by us.
const MatchExpression* _filter; const MatchExpression* _filter;
scoped_ptr<CollectionIterator> _iter; scoped_ptr<CollectionIterator> _iter;
CollectionScanParams _params; CollectionScanParams _params;
// True if nsdetails(_ns) == NULL on our first call to work. // True if nsdetails(_ns) == NULL on our first call to work.
bool _nsDropped; bool _nsDropped;
// If we want to return a DiskLoc and it points at something that's
not in memory, we return
// a a "please page this in" result. We allocate one WSM for this
purpose at construction
// and reuse it for any future fetch requests, changing the DiskLoc
as appropriate.
WorkingSetID _wsidForFetch;
// Stats // Stats
CommonStats _commonStats; CommonStats _commonStats;
CollectionScanStats _specificStats; CollectionScanStats _specificStats;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
0 lines changed or deleted 14 lines changed or added


 commands.h   commands.h 
skipping to change at line 34 skipping to change at line 34
#include "mongo/db/auth/privilege.h" #include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/client_basic.h" #include "mongo/db/client_basic.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class BSONObj; class BSONObj;
class BSONObjBuilder; class BSONObjBuilder;
class Client; class Client;
class Database;
class Timer; class Timer;
namespace mutablebson { namespace mutablebson {
class Document; class Document;
} // namespace mutablebson } // namespace mutablebson
/** mongodb "commands" (sent via db.$cmd.findOne(...)) /** mongodb "commands" (sent via db.$cmd.findOne(...))
subclass to make a command. define a singleton object for it. subclass to make a command. define a singleton object for it.
*/ */
class Command { class Command {
skipping to change at line 194 skipping to change at line 195
return BSONObj(); return BSONObj();
} }
static void logIfSlow( const Timer& cmdTimer, const string& msg); static void logIfSlow( const Timer& cmdTimer, const string& msg);
static map<string,Command*> * _commands; static map<string,Command*> * _commands;
static map<string,Command*> * _commandsByBestName; static map<string,Command*> * _commandsByBestName;
static map<string,Command*> * _webCommands; static map<string,Command*> * _webCommands;
public: public:
// Stop all index builds required to run this command and return in // Stops all index builds required to run this command and returns
dex builds killed. index builds killed.
virtual std::vector<BSONObj> stopIndexBuilds(const std::string& dbn virtual std::vector<BSONObj> stopIndexBuilds(Database* db,
ame,
const BSONObj& cmdObj) ; const BSONObj& cmdObj) ;
static const map<string,Command*>* commandsByBestName() { return _c ommandsByBestName; } static const map<string,Command*>* commandsByBestName() { return _c ommandsByBestName; }
static const map<string,Command*>* webCommands() { return _webComma nds; } static const map<string,Command*>* webCommands() { return _webComma nds; }
/** @return if command was found */ /** @return if command was found */
static void runAgainstRegistered(const char *ns, static void runAgainstRegistered(const char *ns,
BSONObj& jsobj, BSONObj& jsobj,
BSONObjBuilder& anObjBuilder, BSONObjBuilder& anObjBuilder,
int queryOptions = 0); int queryOptions = 0);
static LockType locktype( const string& name ); static LockType locktype( const string& name );
 End of changes. 2 change blocks. 
4 lines changed or deleted 4 lines changed or added


 connpool.h   connpool.h 
skipping to change at line 38 skipping to change at line 38
class Shard; class Shard;
class DBConnectionPool; class DBConnectionPool;
/** /**
* not thread safe * not thread safe
* thread safety is handled by DBConnectionPool * thread safety is handled by DBConnectionPool
*/ */
class MONGO_CLIENT_API PoolForHost { class MONGO_CLIENT_API PoolForHost {
public: public:
PoolForHost()
: _created(0), _minValidCreationTimeMicroSec(0) {}
PoolForHost( const PoolForHost& other ) { // Sentinel value indicating pool has no cleanup limit
static const int kPoolSizeUnlimited;
PoolForHost() :
_created(0),
_minValidCreationTimeMicroSec(0),
_type(ConnectionString::INVALID),
_maxPoolSize(kPoolSizeUnlimited) {
}
PoolForHost(const PoolForHost& other) :
_created(other._created),
_minValidCreationTimeMicroSec(other._minValidCreationTimeMicroS
ec),
_type(other._type),
_maxPoolSize(other._maxPoolSize) {
verify(_created == 0);
verify(other._pool.size() == 0); verify(other._pool.size() == 0);
_created = other._created;
_minValidCreationTimeMicroSec = other._minValidCreationTimeMicr
oSec;
verify( _created == 0 );
} }
~PoolForHost(); ~PoolForHost();
/**
* Returns the maximum number of connections stored in the pool
*/
int getMaxPoolSize() { return _maxPoolSize; }
/**
* Sets the maximum number of connections stored in the pool
*/
void setMaxPoolSize( int maxPoolSize ) { _maxPoolSize = maxPoolSize
; }
int numAvailable() const { return (int)_pool.size(); } int numAvailable() const { return (int)_pool.size(); }
void createdOne( DBClientBase * base ); void createdOne( DBClientBase * base );
long long numCreated() const { return _created; } long long numCreated() const { return _created; }
ConnectionString::ConnectionType type() const { verify(_created); r eturn _type; } ConnectionString::ConnectionType type() const { verify(_created); r eturn _type; }
/** /**
* gets a connection or return NULL * gets a connection or return NULL
*/ */
skipping to change at line 88 skipping to change at line 108
* @return true if the given creation time is considered to be not * @return true if the given creation time is considered to be not
* good for use. * good for use.
*/ */
bool isBadSocketCreationTime(uint64_t microSec); bool isBadSocketCreationTime(uint64_t microSec);
/** /**
* Sets the host name to a new one, only if it is currently empty. * Sets the host name to a new one, only if it is currently empty.
*/ */
void initializeHostName(const std::string& hostName); void initializeHostName(const std::string& hostName);
static void setMaxPerHost( unsigned max ) { _maxPerHost = max; }
static unsigned getMaxPerHost() { return _maxPerHost; }
private: private:
struct StoredConnection { struct StoredConnection {
StoredConnection( DBClientBase * c ); StoredConnection( DBClientBase * c );
bool ok( time_t now ); bool ok( time_t now );
DBClientBase* conn; DBClientBase* conn;
time_t when; time_t when;
}; };
std::string _hostName; std::string _hostName;
std::stack<StoredConnection> _pool; std::stack<StoredConnection> _pool;
int64_t _created; int64_t _created;
uint64_t _minValidCreationTimeMicroSec; uint64_t _minValidCreationTimeMicroSec;
ConnectionString::ConnectionType _type; ConnectionString::ConnectionType _type;
static unsigned _maxPerHost; // The maximum number of connections we'll save in the pool
int _maxPoolSize;
}; };
class DBConnectionHook { class DBConnectionHook {
public: public:
virtual ~DBConnectionHook() {} virtual ~DBConnectionHook() {}
virtual void onCreate( DBClientBase * conn ) {} virtual void onCreate( DBClientBase * conn ) {}
virtual void onHandedOut( DBClientBase * conn ) {} virtual void onHandedOut( DBClientBase * conn ) {}
virtual void onDestroy( DBClientBase * conn ) {} virtual void onDestroy( DBClientBase * conn ) {}
}; };
skipping to change at line 144 skipping to change at line 163
class MONGO_CLIENT_API DBConnectionPool : public PeriodicTask { class MONGO_CLIENT_API DBConnectionPool : public PeriodicTask {
public: public:
DBConnectionPool(); DBConnectionPool();
~DBConnectionPool(); ~DBConnectionPool();
/** right now just controls some asserts. defaults to "dbconnectio npool" */ /** right now just controls some asserts. defaults to "dbconnectio npool" */
void setName( const string& name ) { _name = name; } void setName( const string& name ) { _name = name; }
/**
* Returns the maximum number of connections pooled per-host
*
* This setting only applies to new host connection pools, previous
ly-pooled host pools are
* unaffected.
*/
int getMaxPoolSize() { return _maxPoolSize; }
/**
* Sets the maximum number of connections pooled per-host.
*
* This setting only applies to new host connection pools, previous
ly-pooled host pools are
* unaffected.
*/
void setMaxPoolSize( int maxPoolSize ) { _maxPoolSize = maxPoolSize
; }
void onCreate( DBClientBase * conn ); void onCreate( DBClientBase * conn );
void onHandedOut( DBClientBase * conn ); void onHandedOut( DBClientBase * conn );
void onDestroy( DBClientBase * conn ); void onDestroy( DBClientBase * conn );
void flush(); void flush();
DBClientBase *get(const string& host, double socketTimeout = 0); DBClientBase *get(const string& host, double socketTimeout = 0);
DBClientBase *get(const ConnectionString& host, double socketTimeou t = 0); DBClientBase *get(const ConnectionString& host, double socketTimeou t = 0);
void release(const string& host, DBClientBase *c); void release(const string& host, DBClientBase *c);
skipping to change at line 207 skipping to change at line 242
struct poolKeyCompare { struct poolKeyCompare {
bool operator()( const PoolKey& a , const PoolKey& b ) const; bool operator()( const PoolKey& a , const PoolKey& b ) const;
}; };
typedef map<PoolKey,PoolForHost,poolKeyCompare> PoolMap; // servern ame -> pool typedef map<PoolKey,PoolForHost,poolKeyCompare> PoolMap; // servern ame -> pool
mongo::mutex _mutex; mongo::mutex _mutex;
string _name; string _name;
// The maximum number of connections we'll save in the pool per-hos
t
// PoolForHost::kPoolSizeUnlimited is a sentinel value meaning "no
limit"
// 0 effectively disables the pool
int _maxPoolSize;
PoolMap _pools; PoolMap _pools;
// pointers owned by me, right now they leak on shutdown // pointers owned by me, right now they leak on shutdown
// _hooks itself also leaks because it creates a shutdown race cond ition // _hooks itself also leaks because it creates a shutdown race cond ition
list<DBConnectionHook*> * _hooks; list<DBConnectionHook*> * _hooks;
}; };
extern MONGO_CLIENT_API DBConnectionPool pool; extern MONGO_CLIENT_API DBConnectionPool pool;
 End of changes. 8 change blocks. 
10 lines changed or deleted 56 lines changed or added


 const_element-inl.h   const_element-inl.h 
skipping to change at line 203 skipping to change at line 203
} }
inline Element::RepIdx ConstElement::getIdx() const { inline Element::RepIdx ConstElement::getIdx() const {
return _basis.getIdx(); return _basis.getIdx();
} }
inline std::string ConstElement::toString() const { inline std::string ConstElement::toString() const {
return _basis.toString(); return _basis.toString();
} }
template<typename Builder>
inline void ConstElement::writeElement(Builder* builder, const StringDa
ta* fieldName) const {
return _basis.writeElement(builder, fieldName);
}
inline bool operator==(const ConstElement& l, const ConstElement& r) { inline bool operator==(const ConstElement& l, const ConstElement& r) {
return l._basis == r._basis; return l._basis == r._basis;
} }
inline bool operator!=(const ConstElement& l, const ConstElement& r) { inline bool operator!=(const ConstElement& l, const ConstElement& r) {
return !(l == r); return !(l == r);
} }
inline bool operator==(const Element& l, const ConstElement& r) { inline bool operator==(const Element& l, const ConstElement& r) {
return ConstElement(l) == r; return ConstElement(l) == r;
 End of changes. 1 change blocks. 
6 lines changed or deleted 0 lines changed or added


 counters.h   counters.h 
// counters.h // Copyright 2012 the V8 project authors. All rights reserved.
/* // Redistribution and use in source and binary forms, with or without
* Copyright (C) 2010 10gen Inc. // modification, are permitted provided that the following conditions are
* // met:
* This program is free software: you can redistribute it and/or modify //
* it under the terms of the GNU Affero General Public License, version // * Redistributions of source code must retain the above copyright
3, // notice, this list of conditions and the following disclaimer.
* as published by the Free Software Foundation. // * Redistributions in binary form must reproduce the above
* // copyright notice, this list of conditions and the following
* This program is distributed in the hope that it will be useful, // disclaimer in the documentation and/or other materials provided
* but WITHOUT ANY WARRANTY; without even the implied warranty of // with the distribution.
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // * Neither the name of Google Inc. nor the names of its
* GNU Affero General Public License for more details. // contributors may be used to endorse or promote products derived
* // from this software without specific prior written permission.
* You should have received a copy of the GNU Affero General Public Lice //
nse // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* along with this program. If not, see <http://www.gnu.org/licenses/>. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* As a special exception, the copyright holders give permission to link // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
the // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* code of portions of this program with the OpenSSL library under certa // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
in // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* conditions as described in each individual source file and distribute // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* linked combinations including the program with the OpenSSL library. Y // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
ou // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* must comply with the GNU Affero General Public License in all respect // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
s for
* all of the code used other than as permitted herein. If you modify fi #ifndef V8_COUNTERS_H_
le(s) #define V8_COUNTERS_H_
* with this exception, you may extend this exception to your version of
the #include "../include/v8.h"
* file(s), but you are not obligated to do so. If you do not wish to do #include "allocation.h"
so,
* delete this exception statement from your version. If you delete this namespace v8 {
* exception statement from all source files in the program, then also d namespace internal {
elete
* it in the license file. // StatsCounters is an interface for plugging into external
*/ // counters for monitoring. Counters can be looked up and
// manipulated by name.
#pragma once
class StatsTable {
#include "mongo/pch.h" public:
#include "mongo/db/jsobj.h" // Register an application-defined function where
#include "mongo/util/net/message.h" // counters can be looked up.
#include "mongo/util/processinfo.h" void SetCounterFunction(CounterLookupCallback f) {
#include "mongo/util/concurrency/spin_lock.h" lookup_function_ = f;
#include "mongo/db/pdfile.h" }
namespace mongo { // Register an application-defined function to create
// a histogram for passing to the AddHistogramSample function
/** void SetCreateHistogramFunction(CreateHistogramCallback f) {
* for storing operation counters create_histogram_function_ = f;
* note: not thread safe. ok with that for speed }
*/
class OpCounters { // Register an application-defined function to add a sample
public: // to a histogram created with CreateHistogram function
void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
OpCounters(); add_histogram_sample_function_ = f;
void incInsertInWriteLock(int n) { _insert.x += n; } }
void gotInsert() { _insert++; }
void gotQuery() { _query++; } bool HasCounterFunction() const {
void gotUpdate() { _update++; } return lookup_function_ != NULL;
void gotDelete() { _delete++; } }
void gotGetMore() { _getmore++; }
void gotCommand() { _command++; } // Lookup the location of a counter by name. If the lookup
// is successful, returns a non-NULL pointer for writing the
void gotOp( int op , bool isCommand ); // value of the counter. Each thread calling this function
// may receive a different location to store it's counter.
BSONObj getObj() const; // The return value must not be cached and re-used across
// threads, although a single thread is free to cache it.
// thse are used by snmp, and other things, do not remove int* FindLocation(const char* name) {
const AtomicUInt * getInsert() const { return &_insert; } if (!lookup_function_) return NULL;
const AtomicUInt * getQuery() const { return &_query; } return lookup_function_(name);
const AtomicUInt * getUpdate() const { return &_update; } }
const AtomicUInt * getDelete() const { return &_delete; }
const AtomicUInt * getGetMore() const { return &_getmore; } // Create a histogram by name. If the create is successful,
const AtomicUInt * getCommand() const { return &_command; } // returns a non-NULL pointer for use with AddHistogramSample
// function. min and max define the expected minimum and maximum
private: // sample values. buckets is the maximum number of buckets
void _checkWrap(); // that the samples will be grouped into.
void* CreateHistogram(const char* name,
// todo: there will be a lot of cache line contention on these. ne int min,
ed to do something int max,
// else eventually. size_t buckets) {
AtomicUInt _insert; if (!create_histogram_function_) return NULL;
AtomicUInt _query; return create_histogram_function_(name, min, max, buckets);
AtomicUInt _update; }
AtomicUInt _delete;
AtomicUInt _getmore; // Add a sample to a histogram created with the CreateHistogram
AtomicUInt _command; // function.
}; void AddHistogramSample(void* histogram, int sample) {
if (!add_histogram_sample_function_) return;
extern OpCounters globalOpCounters; return add_histogram_sample_function_(histogram, sample);
extern OpCounters replOpCounters; }
class NetworkCounter { private:
public: StatsTable();
NetworkCounter() : _bytesIn(0), _bytesOut(0), _requests(0), _overfl
ows(0) {} CounterLookupCallback lookup_function_;
void hit( long long bytesIn , long long bytesOut ); CreateHistogramCallback create_histogram_function_;
void append( BSONObjBuilder& b ); AddHistogramSampleCallback add_histogram_sample_function_;
private:
long long _bytesIn; friend class Isolate;
long long _bytesOut;
long long _requests; DISALLOW_COPY_AND_ASSIGN(StatsTable);
};
// StatsCounters are dynamically created values which can be tracked in
// the StatsTable. They are designed to be lightweight to create and
// easy to use.
//
// Internally, a counter represents a value in a row of a StatsTable.
// The row has a 32bit value for each process/thread in the table and also
// a name (stored in the table metadata). Since the storage location can b
e
// thread-specific, this class cannot be shared across threads.
//
// This class is designed to be POD initialized. It will be registered wit
h
// the counter system on first use. For example:
// StatsCounter c = { "c:myctr", NULL, false };
struct StatsCounter {
const char* name_;
int* ptr_;
bool lookup_done_;
// Sets the counter to a specific value.
void Set(int value) {
int* loc = GetPtr();
if (loc) *loc = value;
}
// Increments the counter.
void Increment() {
int* loc = GetPtr();
if (loc) (*loc)++;
}
void Increment(int value) {
int* loc = GetPtr();
if (loc)
(*loc) += value;
}
// Decrements the counter.
void Decrement() {
int* loc = GetPtr();
if (loc) (*loc)--;
}
void Decrement(int value) {
int* loc = GetPtr();
if (loc) (*loc) -= value;
}
// Is this counter enabled?
// Returns false if table is full.
bool Enabled() {
return GetPtr() != NULL;
}
// Get the internal pointer to the counter. This is used
// by the code generator to emit code that manipulates a
// given counter without calling the runtime system.
int* GetInternalPointer() {
int* loc = GetPtr();
ASSERT(loc != NULL);
return loc;
}
protected:
// Returns the cached address of this counter location.
int* GetPtr() {
if (lookup_done_) return ptr_;
lookup_done_ = true;
ptr_ = FindLocationInStatsTable();
return ptr_;
}
private:
int* FindLocationInStatsTable() const;
};
// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
struct StatsCounterTimer {
StatsCounter counter_;
int64_t start_time_;
int64_t stop_time_;
// Start the timer.
void Start();
// Stop the timer and record the results.
void Stop();
// Returns true if the timer is running.
bool Running() {
return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0;
}
};
// A Histogram represents a dynamically created histogram in the StatsTable
.
//
// This class is designed to be POD initialized. It will be registered wit
h
// the histogram system on first use. For example:
// Histogram h = { "myhist", 0, 10000, 50, NULL, false };
struct Histogram {
const char* name_;
int min_;
int max_;
int num_buckets_;
void* histogram_;
bool lookup_done_;
// Add a single sample to this histogram.
void AddSample(int sample);
// Returns true if this histogram is enabled.
bool Enabled() {
return GetHistogram() != NULL;
}
protected:
// Returns the handle to the histogram.
void* GetHistogram() {
if (!lookup_done_) {
lookup_done_ = true;
histogram_ = CreateHistogram();
}
return histogram_;
}
private:
void* CreateHistogram() const;
};
// A HistogramTimer allows distributions of results to be created
// HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 };
struct HistogramTimer {
Histogram histogram_;
int64_t start_time_;
int64_t stop_time_;
// Start the timer.
void Start();
// Stop the timer and record the results.
void Stop();
// Returns true if the timer is running.
bool Running() {
return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0);
}
};
// Helper class for scoping a HistogramTimer.
class HistogramTimerScope BASE_EMBEDDED {
public:
explicit HistogramTimerScope(HistogramTimer* timer) :
timer_(timer) {
timer_->Start();
}
~HistogramTimerScope() {
timer_->Stop();
}
private:
HistogramTimer* timer_;
};
long long _overflows; } } // namespace v8::internal
SpinLock _lock; #endif // V8_COUNTERS_H_
};
extern NetworkCounter networkCounter;
}
 End of changes. 3 change blocks. 
106 lines changed or deleted 273 lines changed or added


 curop.h   curop.h 
skipping to change at line 47 skipping to change at line 47
#include "mongo/db/structure/catalog/namespace.h" #include "mongo/db/structure/catalog/namespace.h"
#include "mongo/util/concurrency/spin_lock.h" #include "mongo/util/concurrency/spin_lock.h"
#include "mongo/util/net/hostandport.h" #include "mongo/util/net/hostandport.h"
#include "mongo/util/progress_meter.h" #include "mongo/util/progress_meter.h"
#include "mongo/util/time_support.h" #include "mongo/util/time_support.h"
namespace mongo { namespace mongo {
class CurOp; class CurOp;
/**
* stores a copy of a bson obj in a fixed size buffer
* if its too big for the buffer, says "too big"
* useful for keeping a copy around indefinitely without wasting a lot
of space or doing malloc
*/
class CachedBSONObjBase {
public:
static BSONObj _tooBig; // { $msg : "query not recording (too large
)" }
};
template <size_t BUFFER_SIZE>
class CachedBSONObj : public CachedBSONObjBase {
public:
enum { TOO_BIG_SENTINEL = 1 } ;
CachedBSONObj() {
_size = (int*)_buf;
reset();
}
void reset( int sz = 0 ) {
_lock.lock();
_reset( sz );
_lock.unlock();
}
void set( const BSONObj& o ) {
scoped_spinlock lk(_lock);
size_t sz = o.objsize();
if ( sz > sizeof(_buf) ) {
_reset(TOO_BIG_SENTINEL);
}
else {
memcpy(_buf, o.objdata(), sz );
}
}
int size() const { return *_size; }
bool have() const { return size() > 0; }
bool tooBig() const { return size() == TOO_BIG_SENTINEL; }
BSONObj get() const {
scoped_spinlock lk(_lock);
return _get();
}
void append( BSONObjBuilder& b , const StringData& name ) const {
scoped_spinlock lk(_lock);
BSONObj temp = _get();
b.append( name , temp );
}
private:
/** you have to be locked when you call this */
BSONObj _get() const {
int sz = size();
if ( sz == 0 )
return BSONObj();
if ( sz == TOO_BIG_SENTINEL )
return _tooBig;
return BSONObj( _buf ).copy();
}
/** you have to be locked when you call this */
void _reset( int sz ) { _size[0] = sz; }
mutable SpinLock _lock;
int * _size;
char _buf[BUFFER_SIZE];
};
/* lifespan is different than CurOp because of recursives with DBDirect Client */ /* lifespan is different than CurOp because of recursives with DBDirect Client */
class OpDebug { class OpDebug {
public: public:
OpDebug() : ns(""){ reset(); } OpDebug() : ns(""){ reset(); }
void reset(); void reset();
void recordStats(); void recordStats();
string report( const CurOp& curop ) const; string report( const CurOp& curop ) const;
skipping to change at line 92 skipping to change at line 163
BSONObj updateobj; BSONObj updateobj;
// detailed options // detailed options
long long cursorid; long long cursorid;
int ntoreturn; int ntoreturn;
int ntoskip; int ntoskip;
bool exhaust; bool exhaust;
// debugging/profile info // debugging/profile info
long long nscanned; long long nscanned;
long long nscannedObjects;
bool idhack; // indicates short circuited code path on an u pdate to make the update faster bool idhack; // indicates short circuited code path on an u pdate to make the update faster
bool scanAndOrder; // scanandorder query plan aspect was used bool scanAndOrder; // scanandorder query plan aspect was used
long long nupdated; // number of records updated (including no-ops ) long long nMatched; // number of records that match the query
long long nModified; // number of records written (no no-ops) long long nModified; // number of records written (no no-ops)
long long nmoved; // updates resulted in a move (moves are expen sive) long long nmoved; // updates resulted in a move (moves are expen sive)
long long ninserted; long long ninserted;
long long ndeleted; long long ndeleted;
bool fastmod; bool fastmod;
bool fastmodinsert; // upsert of an $operation. builds a default o bject bool fastmodinsert; // upsert of an $operation. builds a default o bject
bool upsert; // true if the update actually did an insert bool upsert; // true if the update actually did an insert
int keyUpdates; int keyUpdates;
std::string planSummary; // a brief string describing the query sol ution ThreadSafeString planSummary; // a brief string describing the quer y solution
// New Query Framework debugging/profiling info // New Query Framework debugging/profiling info
// XXX: should this really be an opaque BSONObj? Not sure. // TODO: should this really be an opaque BSONObj? Not sure.
BSONObj execStats; CachedBSONObj<4096> execStats;
// error handling // error handling
ExceptionInfo exceptionInfo; ExceptionInfo exceptionInfo;
// response info // response info
int executionTime; int executionTime;
int nreturned; int nreturned;
int responseLength; int responseLength;
}; };
/**
* stores a copy of a bson obj in a fixed size buffer
* if its too big for the buffer, says "too big"
* useful for keeping a copy around indefinitely without wasting a lot
of space or doing malloc
*/
class CachedBSONObj {
public:
enum { TOO_BIG_SENTINEL = 1 } ;
static BSONObj _tooBig; // { $msg : "query not recording (too large
)" }
CachedBSONObj() {
_size = (int*)_buf;
reset();
}
void reset( int sz = 0 ) {
_lock.lock();
_reset( sz );
_lock.unlock();
}
void set( const BSONObj& o ) {
scoped_spinlock lk(_lock);
size_t sz = o.objsize();
if ( sz > sizeof(_buf) ) {
_reset(TOO_BIG_SENTINEL);
}
else {
memcpy(_buf, o.objdata(), sz );
}
}
int size() const { return *_size; }
bool have() const { return size() > 0; }
BSONObj get() const {
scoped_spinlock lk(_lock);
return _get();
}
void append( BSONObjBuilder& b , const StringData& name ) const {
scoped_spinlock lk(_lock);
BSONObj temp = _get();
b.append( name , temp );
}
private:
/** you have to be locked when you call this */
BSONObj _get() const {
int sz = size();
if ( sz == 0 )
return BSONObj();
if ( sz == TOO_BIG_SENTINEL )
return _tooBig;
return BSONObj( _buf ).copy();
}
/** you have to be locked when you call this */
void _reset( int sz ) { _size[0] = sz; }
mutable SpinLock _lock;
int * _size;
char _buf[512];
};
/* Current operation (for the current Client). /* Current operation (for the current Client).
an embedded member of Client class, and typically used from within t he mutex there. an embedded member of Client class, and typically used from within t he mutex there.
*/ */
class CurOp : boost::noncopyable { class CurOp : boost::noncopyable {
public: public:
CurOp( Client * client , CurOp * wrapped = 0 ); CurOp( Client * client , CurOp * wrapped = 0 );
~CurOp(); ~CurOp();
bool haveQuery() const { return _query.have(); } bool haveQuery() const { return _query.have(); }
BSONObj query() const { return _query.get(); } BSONObj query() const { return _query.get(); }
skipping to change at line 344 skipping to change at line 351
unsigned long long _start; unsigned long long _start;
unsigned long long _end; unsigned long long _end;
bool _active; bool _active;
bool _suppressFromCurop; // unless $all is set bool _suppressFromCurop; // unless $all is set
int _op; int _op;
bool _isCommand; bool _isCommand;
int _dbprofile; // 0=off, 1=slow, 2=all int _dbprofile; // 0=off, 1=slow, 2=all
AtomicUInt _opNum; // todo: simple being "unsigned" m ay make more sense here AtomicUInt _opNum; // todo: simple being "unsigned" m ay make more sense here
char _ns[Namespace::MaxNsLen+2]; char _ns[Namespace::MaxNsLen+2];
HostAndPort _remote; // CAREFUL here with thread safety HostAndPort _remote; // CAREFUL here with thread safety
CachedBSONObj _query; // CachedBSONObj is thread safe CachedBSONObj<512> _query; // CachedBSONObj is thread safe
OpDebug _debug; OpDebug _debug;
ThreadSafeString _message; ThreadSafeString _message;
ProgressMeter _progressMeter; ProgressMeter _progressMeter;
AtomicInt32 _killPending; AtomicInt32 _killPending;
int _numYields; int _numYields;
LockStat _lockStat; LockStat _lockStat;
// _notifyList is protected by the global killCurrentOp's mtx. // _notifyList is protected by the global killCurrentOp's mtx.
std::vector<bool*> _notifyList; std::vector<bool*> _notifyList;
// this is how much "extra" time a query might take // this is how much "extra" time a query might take
 End of changes. 7 change blocks. 
72 lines changed or deleted 79 lines changed or added


 d_logic.h   d_logic.h 
skipping to change at line 64 skipping to change at line 64
const string& getConfigServer() const { return _configServer; } const string& getConfigServer() const { return _configServer; }
void enable( const string& server ); void enable( const string& server );
// Initialize sharding state and begin authenticating outgoing conn ections and handling // Initialize sharding state and begin authenticating outgoing conn ections and handling
// shard versions. If this is not run before sharded operations oc cur auth will not work // shard versions. If this is not run before sharded operations oc cur auth will not work
// and versions will not be tracked. // and versions will not be tracked.
static void initialize(const string& server); static void initialize(const string& server);
void gotShardName( const string& name ); void gotShardName( const string& name );
bool setShardName( const string& name ); // Same as above, does not throw bool setShardName( const string& name ); // Same as above, does not throw
string getShardName() { scoped_lock lk(_mutex); return _shardName; } string getShardName() { scoped_lock lk(_mutex); return _shardName; }
// Helpers for SetShardVersion which report the host name sent to t
his shard when the shard
// name does not match. Do not use in other places.
// TODO: Remove once SSV is deprecated
void gotShardNameAndHost( const string& name, const string& host );
bool setShardNameAndHost( const string& name, const string& host );
/** Reverts back to a state where this mongod is not sharded. */ /** Reverts back to a state where this mongod is not sharded. */
void resetShardingState(); void resetShardingState();
// versioning support // versioning support
bool hasVersion( const string& ns ); bool hasVersion( const string& ns );
bool hasVersion( const string& ns , ChunkVersion& version ); bool hasVersion( const string& ns , ChunkVersion& version );
const ChunkVersion getVersion( const string& ns ) const; const ChunkVersion getVersion( const string& ns ) const;
/** /**
 End of changes. 2 change blocks. 
1 lines changed or deleted 7 lines changed or added


 data_file.h   data_file.h 
skipping to change at line 62 skipping to change at line 62
---------------------- ----------------------
*/ */
#pragma pack(1) #pragma pack(1)
class DataFileHeader { class DataFileHeader {
public: public:
int version; int version;
int versionMinor; int versionMinor;
int fileLength; int fileLength;
DiskLoc unused; /* unused is the portion of the file that doesn't b elong to any allocated extents. -1 = no more */ DiskLoc unused; /* unused is the portion of the file that doesn't b elong to any allocated extents. -1 = no more */
int unusedLength; int unusedLength;
char reserved[8192 - 4*4 - 8]; DiskLoc freeListStart;
DiskLoc freeListEnd;
char reserved[8192 - 4*4 - 8*3];
char data[4]; // first extent starts here char data[4]; // first extent starts here
enum { HeaderSize = 8192 }; enum { HeaderSize = 8192 };
// all of this should move up to the database level
bool isCurrentVersion() const { bool isCurrentVersion() const {
return version == PDFILE_VERSION && ( versionMinor == PDFILE_VE RSION_MINOR_22_AND_OLDER return version == PDFILE_VERSION && ( versionMinor == PDFILE_VE RSION_MINOR_22_AND_OLDER
|| versionMinor == PDFILE_VE RSION_MINOR_24_AND_NEWER || versionMinor == PDFILE_VE RSION_MINOR_24_AND_NEWER
); );
} }
bool uninitialized() const { return version == 0; } bool uninitialized() const { return version == 0; }
void init(int fileno, int filelength, const char* filename); void init(int fileno, int filelength, const char* filename);
void checkUpgrade();
bool isEmpty() const { bool isEmpty() const {
return uninitialized() || ( unusedLength == fileLength - Header Size - 16 ); return uninitialized() || ( unusedLength == fileLength - Header Size - 16 );
} }
}; };
#pragma pack() #pragma pack()
class DataFile { class DataFile {
friend class BasicCursor; friend class BasicCursor;
friend class ExtentManager; friend class ExtentManager;
public: public:
DataFile(int fn) : _mb(0), fileNo(fn) { } DataFile(int fn) : _mb(0), fileNo(fn) { }
/** @return true if found and opened. if uninitialized (prealloc on ly) does not open. */ /** @return true if found and opened. if uninitialized (prealloc on ly) does not open. */
Status openExisting( const char *filename ); Status openExisting( const char *filename );
/** creates if DNE */ /** creates if DNE */
void open(const char *filename, int requestedDataSize = 0, bool pre allocateOnly = false); void open(const char *filename, int requestedDataSize = 0, bool pre allocateOnly = false);
DiskLoc allocExtentArea( int size ); DiskLoc allocExtentArea( int size );
DataFileHeader *getHeader() { return header(); } DataFileHeader* getHeader() { return header(); }
const DataFileHeader* getHeader() const { return header(); }
HANDLE getFd() { return mmf.getFd(); } HANDLE getFd() { return mmf.getFd(); }
unsigned long long length() const { return mmf.length(); } unsigned long long length() const { return mmf.length(); }
/* return max size an extent may be */ /* return max size an extent may be */
static int maxSize(); static int maxSize();
/** fsync */ /** fsync */
void flush( bool sync ); void flush( bool sync );
private: private:
void badOfs(int) const; void badOfs(int) const;
void badOfs2(int) const; void badOfs2(int) const;
int defaultSize( const char *filename ) const; int defaultSize( const char *filename ) const;
void grow(DiskLoc dl, int size); void grow(DiskLoc dl, int size);
char* p() const { return (char *) _mb; } char* p() const { return (char *) _mb; }
DataFileHeader* header() { return (DataFileHeader*) _mb; } DataFileHeader* header() { return static_cast<DataFileHeader*>( _mb
); }
const DataFileHeader* header() const { return static_cast<DataFileH
eader*>( _mb ); }
DurableMappedFile mmf; DurableMappedFile mmf;
void *_mb; // the memory mapped view void *_mb; // the memory mapped view
int fileNo; int fileNo;
}; };
} }
 End of changes. 5 change blocks. 
3 lines changed or deleted 13 lines changed or added


 database.h   database.h 
skipping to change at line 47 skipping to change at line 47
#include "mongo/util/string_map.h" #include "mongo/util/string_map.h"
namespace mongo { namespace mongo {
class Collection; class Collection;
class Extent; class Extent;
class DataFile; class DataFile;
class IndexCatalog; class IndexCatalog;
class IndexDetails; class IndexDetails;
struct CollectionOptions {
CollectionOptions() {
reset();
}
void reset() {
capped = false;
cappedSize = 0;
cappedMaxDocs = 0;
initialNumExtents = 0;
initialExtentSizes.clear();
autoIndexId = DEFAULT;
flags = 0;
flagsSet = false;
temp = false;
}
Status parse( const BSONObj& obj );
BSONObj toBSON() const;
// ----
bool capped;
long long cappedSize;
long long cappedMaxDocs;
// following 2 are mutually exclusive, can only have one set
long long initialNumExtents;
vector<long long> initialExtentSizes;
// behavior of _id index creation when collection created
void setNoIdIndex() { autoIndexId = NO; }
enum {
DEFAULT, // currently yes for most collections, NO for some sys
tem ones
YES, // create _id index
NO // do not create _id index
} autoIndexId;
// user flags
int flags;
bool flagsSet;
bool temp;
};
/** /**
* Database represents a database database * Database represents a database database
* Each database database has its own set of files -- dbname.ns, dbname .0, dbname.1, ... * Each database database has its own set of files -- dbname.ns, dbname .0, dbname.1, ...
* NOT memory mapped * NOT memory mapped
*/ */
class Database { class Database {
public: public:
// you probably need to be in dbHolderMutex when constructing this // you probably need to be in dbHolderMutex when constructing this
Database(const char *nm, /*out*/ bool& newDb, Database(const char *nm, /*out*/ bool& newDb,
const string& path = storageGlobalParams.dbpath); const string& path = storageGlobalParams.dbpath);
skipping to change at line 82 skipping to change at line 127
bool isEmpty() { return ! _namespaceIndex.allocated(); } bool isEmpty() { return ! _namespaceIndex.allocated(); }
/** /**
* total file size of Database in bytes * total file size of Database in bytes
*/ */
long long fileSize() const { return _extentManager.fileSize(); } long long fileSize() const { return _extentManager.fileSize(); }
int numFiles() const { return _extentManager.numFiles(); } int numFiles() const { return _extentManager.numFiles(); }
/** void getFileFormat( int* major, int* minor );
* return file n. if it doesn't exist, create it
*/
DataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly
= false ) {
_initForWrites();
return _extentManager.getFile( n, sizeNeeded, preallocateOnly )
;
}
DataFile* addAFile( int sizeNeeded, bool preallocateNextFile ) {
_initForWrites();
return _extentManager.addAFile( sizeNeeded, preallocateNextFile
);
}
/** /**
* makes sure we have an extra file at the end that is empty * makes sure we have an extra file at the end that is empty
* safe to call this multiple times - the implementation will only preallocate one file * safe to call this multiple times - the implementation will only preallocate one file
*/ */
void preallocateAFile() { _extentManager.preallocateAFile(); } void preallocateAFile() { _extentManager.preallocateAFile(); }
/** /**
* @return true if success. false if bad level or error creating p rofile ns * @return true if success. false if bad level or error creating p rofile ns
*/ */
skipping to change at line 134 skipping to change at line 168
const NamespaceIndex& namespaceIndex() const { return _namespaceInd ex; } const NamespaceIndex& namespaceIndex() const { return _namespaceInd ex; }
NamespaceIndex& namespaceIndex() { return _namespaceIndex; } NamespaceIndex& namespaceIndex() { return _namespaceIndex; }
// TODO: do not think this method should exist, so should try and e ncapsulate better // TODO: do not think this method should exist, so should try and e ncapsulate better
ExtentManager& getExtentManager() { return _extentManager; } ExtentManager& getExtentManager() { return _extentManager; }
const ExtentManager& getExtentManager() const { return _extentManag er; } const ExtentManager& getExtentManager() const { return _extentManag er; }
Status dropCollection( const StringData& fullns ); Status dropCollection( const StringData& fullns );
Collection* createCollection( const StringData& ns, Collection* createCollection( const StringData& ns,
bool capped = false, const CollectionOptions& options = Co
const BSONObj* options = NULL, llectionOptions(),
bool allocateDefaultSpace = true ); bool allocateSpace = true,
bool createDefaultIndexes = true );
/** /**
* @param ns - this is fully qualified, which is maybe not ideal ?? ? * @param ns - this is fully qualified, which is maybe not ideal ?? ?
*/ */
Collection* getCollection( const StringData& ns ); Collection* getCollection( const StringData& ns );
Collection* getCollection( const NamespaceString& ns ) { return get Collection( ns.ns() ); } Collection* getCollection( const NamespaceString& ns ) { return get Collection( ns.ns() ); }
Collection* getOrCreateCollection( const StringData& ns ); Collection* getOrCreateCollection( const StringData& ns );
skipping to change at line 178 skipping to change at line 212
/** /**
* removes from *.system.namespaces * removes from *.system.namespaces
* frees extents * frees extents
* removes from NamespaceIndex * removes from NamespaceIndex
* NOT RIGHT NOW, removes cache entry in Database TODO? * NOT RIGHT NOW, removes cache entry in Database TODO?
*/ */
Status _dropNS( const StringData& ns ); Status _dropNS( const StringData& ns );
/** /**
* make sure namespace is initialized and $freelist is allocated be
fore
* doing anything that will write
*/
void _initForWrites() {
_namespaceIndex.init();
if ( !_extentManager.hasFreeList() ) {
_initExtentFreeList();
}
}
void _initExtentFreeList();
/**
* @throws DatabaseDifferCaseCode if the name is a duplicate based on * @throws DatabaseDifferCaseCode if the name is a duplicate based on
* case insensitive matching. * case insensitive matching.
*/ */
void checkDuplicateUncasedNames(bool inholderlockalready) const; void checkDuplicateUncasedNames(bool inholderlockalready) const;
void openAllFiles(); void openAllFiles();
Status _renameSingleNamespace( const StringData& fromNS, const Stri ngData& toNS, Status _renameSingleNamespace( const StringData& fromNS, const Stri ngData& toNS,
bool stayTemp ); bool stayTemp );
const string _name; // "alleyinsider" const string _name; // "alleyinsider"
const string _path; // "/data/db" const string _path; // "/data/db"
NamespaceIndex _namespaceIndex; NamespaceIndex _namespaceIndex;
ExtentManager _extentManager; ExtentManager _extentManager;
const string _profileName; // "alleyinsider.system.profile" const string _profileName; // "alleyinsider.system.profile"
const string _namespacesName; // "alleyinsider.system.namespaces" const string _namespacesName; // "alleyinsider.system.namespaces"
const string _indexesName; // "alleyinsider.system.indexes" const string _indexesName; // "alleyinsider.system.indexes"
const string _extentFreelistName;
RecordStats _recordStats; RecordStats _recordStats;
int _profile; // 0=off. int _profile; // 0=off.
int _magic; // used for making sure the object is still loaded in m emory int _magic; // used for making sure the object is still loaded in m emory
// TODO: make sure deletes go through // TODO: make sure deletes go through
// this in some ways is a dupe of _namespaceIndex // this in some ways is a dupe of _namespaceIndex
// but it points to a much more useful data structure // but it points to a much more useful data structure
typedef StringMap< Collection* > CollectionMap; typedef StringMap< Collection* > CollectionMap;
 End of changes. 5 change blocks. 
33 lines changed or deleted 51 lines changed or added


 dbclient_safe_writer.h   dbclient_safe_writer.h 
skipping to change at line 60 skipping to change at line 60
Status safeWrite( DBClientBase* conn, Status safeWrite( DBClientBase* conn,
const BatchItemRef& batchItem, const BatchItemRef& batchItem,
const BSONObj& writeConcern, const BSONObj& writeConcern,
BSONObj* gleResponse ); BSONObj* gleResponse );
Status enforceWriteConcern( DBClientBase* conn, Status enforceWriteConcern( DBClientBase* conn,
const StringData& dbName, const StringData& dbName,
const BSONObj& writeConcern, const BSONObj& writeConcern,
BSONObj* gleResponse ); BSONObj* gleResponse );
Status clearErrors( DBClientBase* conn,
const StringData& dbName );
}; };
} }
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 dbclientinterface.h   dbclientinterface.h 
skipping to change at line 27 skipping to change at line 27
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include <boost/function.hpp> #include <boost/function.hpp>
#include "mongo/base/string_data.h"
#include "mongo/client/export_macros.h" #include "mongo/client/export_macros.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/logger/log_severity.h" #include "mongo/logger/log_severity.h"
#include "mongo/platform/atomic_word.h" #include "mongo/platform/atomic_word.h"
#include "mongo/util/net/message.h" #include "mongo/util/net/message.h"
#include "mongo/util/net/message_port.h" #include "mongo/util/net/message_port.h"
namespace mongo { namespace mongo {
/** the query field 'options' can have these bits set: */ /** the query field 'options' can have these bits set: */
 End of changes. 1 change blocks. 
0 lines changed or deleted 1 lines changed or added


 dbhelpers.h   dbhelpers.h 
skipping to change at line 68 skipping to change at line 68
@param keyPattern key pattern, e.g., { ts : 1 } @param keyPattern key pattern, e.g., { ts : 1 }
@param name index name, e.g., "name_1" @param name index name, e.g., "name_1"
This method can be a little (not much) cpu-slow, so you may wish to use This method can be a little (not much) cpu-slow, so you may wish to use
OCCASIONALLY ensureIndex(...); OCCASIONALLY ensureIndex(...);
Note: does nothing if collection does not yet exist. Note: does nothing if collection does not yet exist.
*/ */
static void ensureIndex(const char *ns, BSONObj keyPattern, bool un ique, const char *name); static void ensureIndex(const char *ns, BSONObj keyPattern, bool un ique, const char *name);
// same as other ensureIndex
static void ensureIndex(Collection* collection,
BSONObj keyPattern, bool unique, const char
*name);
/* fetch a single object from collection ns that matches query. /* fetch a single object from collection ns that matches query.
set your db SavedContext first. set your db SavedContext first.
@param query - the query to perform. note this is the low level portion of query so "orderby : ..." @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
won't work. won't work.
@param requireIndex if true, assert if no index for the query. a way to guard against @param requireIndex if true, assert if no index for the query. a way to guard against
writing a slow query. writing a slow query.
@return true if object found @return true if object found
 End of changes. 1 change blocks. 
0 lines changed or deleted 5 lines changed or added


 dbmessage.h   dbmessage.h 
skipping to change at line 264 skipping to change at line 264
/* a request to run a query, received from the database */ /* a request to run a query, received from the database */
class QueryMessage { class QueryMessage {
public: public:
const char *ns; const char *ns;
int ntoskip; int ntoskip;
int ntoreturn; int ntoreturn;
int queryOptions; int queryOptions;
BSONObj query; BSONObj query;
BSONObj fields; BSONObj fields;
/* parses the message into the above fields */ /**
* parses the message into the above fields
* Warning: constructor mutates DbMessage.
*/
QueryMessage(DbMessage& d) { QueryMessage(DbMessage& d) {
ns = d.getns(); ns = d.getns();
ntoskip = d.pullInt(); ntoskip = d.pullInt();
ntoreturn = d.pullInt(); ntoreturn = d.pullInt();
query = d.nextJsObj(); query = d.nextJsObj();
if ( d.moreJSObjs() ) { if ( d.moreJSObjs() ) {
fields = d.nextJsObj(); fields = d.nextJsObj();
} }
queryOptions = d.msg().header()->dataAsInt(); queryOptions = d.msg().header()->dataAsInt();
} }
 End of changes. 1 change blocks. 
1 lines changed or deleted 4 lines changed or added


 distinct_scan.h   distinct_scan.h 
skipping to change at line 115 skipping to change at line 115
// Index access. // Index access.
const IndexDescriptor* _descriptor; // owned by Collection -> Index Catalog const IndexDescriptor* _descriptor; // owned by Collection -> Index Catalog
const IndexAccessMethod* _iam; // owned by Collection -> IndexCatal og const IndexAccessMethod* _iam; // owned by Collection -> IndexCatal og
// The cursor we use to navigate the tree. // The cursor we use to navigate the tree.
boost::scoped_ptr<BtreeIndexCursor> _btreeCursor; boost::scoped_ptr<BtreeIndexCursor> _btreeCursor;
// Have we hit the end of the index scan? // Have we hit the end of the index scan?
bool _hitEnd; bool _hitEnd;
// Could our index have duplicates? If so, we use _returned to ded
up.
unordered_set<DiskLoc, DiskLoc::Hasher> _returned;
// For yielding. // For yielding.
BSONObj _savedKey; BSONObj _savedKey;
DiskLoc _savedLoc; DiskLoc _savedLoc;
DistinctParams _params; DistinctParams _params;
// _checker gives us our start key and ensures we stay in bounds. // _checker gives us our start key and ensures we stay in bounds.
boost::scoped_ptr<IndexBoundsChecker> _checker; boost::scoped_ptr<IndexBoundsChecker> _checker;
int _keyEltsToUse; int _keyEltsToUse;
bool _movePastKeyElts; bool _movePastKeyElts;
 End of changes. 1 change blocks. 
4 lines changed or deleted 0 lines changed or added


 distlock.h   distlock.h 
skipping to change at line 61 skipping to change at line 61
* Indicates an error in retrieving time values from remote servers. * Indicates an error in retrieving time values from remote servers.
*/ */
class MONGO_CLIENT_API TimeNotFoundException : public LockException { class MONGO_CLIENT_API TimeNotFoundException : public LockException {
public: public:
TimeNotFoundException( const char * msg , int code ) : LockExceptio n( msg, code ) {} TimeNotFoundException( const char * msg , int code ) : LockExceptio n( msg, code ) {}
TimeNotFoundException( const string& msg, int code ) : LockExceptio n( msg, code ) {} TimeNotFoundException( const string& msg, int code ) : LockExceptio n( msg, code ) {}
virtual ~TimeNotFoundException() throw() { } virtual ~TimeNotFoundException() throw() { }
}; };
/** /**
* The distributed lock is a configdb backed way of synchronizing syste * The distributed lock is a configdb backed way of synchronizing syste
m-wide tasks. A task must be identified by a m-wide tasks. A task
* unique name across the system (e.g., "balancer"). A lock is taken by * must be identified by a unique name across the system (e.g., "balanc
writing a document in the configdb's locks er"). A lock is taken
* collection with that name. * by writing a document in the configdb's locks collection with that n
ame.
* *
* To be maintained, each taken lock needs to be revalidated ("pinged") * To be maintained, each taken lock needs to be revalidated ("pinged")
within a pre-established amount of time. This within a
* class does this maintenance automatically once a DistributedLock obj * pre-established amount of time. This class does this maintenance aut
ect was constructed. omatically once a
* DistributedLock object was constructed. The ping procedure records t
he local time to
* the ping document, but that time is untrusted and is only used as a
point of reference
* of whether the ping was refreshed or not. Ultimately, the clock a co
nfigdb is the source
* of truth when determining whether a ping is still fresh or not. This
is achieved by
* (1) remembering the ping document time along with config server time
when unable to
* take a lock, and (2) ensuring all config servers report similar time
s and have similar
* time rates (the difference in times must start and stay small).
*/ */
class MONGO_CLIENT_API DistributedLock { class MONGO_CLIENT_API DistributedLock {
public: public:
static LabeledLevel logLvl; static LabeledLevel logLvl;
struct PingData { struct PingData {
PingData( const string& _id , Date_t _lastPing , Date_t _remote , OID _ts ) PingData( const string& _id , Date_t _lastPing , Date_t _remote , OID _ts )
: id(_id), lastPing(_lastPing), remote(_remote), ts(_ts){ : id(_id), lastPing(_lastPing), remote(_remote), ts(_ts){
skipping to change at line 151 skipping to change at line 158
Date_t getRemoteTime(); Date_t getRemoteTime();
bool isRemoteTimeSkewed(); bool isRemoteTimeSkewed();
const string& getProcessId(); const string& getProcessId();
const ConnectionString& getRemoteConnection(); const ConnectionString& getRemoteConnection();
/** /**
* Check the skew between a cluster of servers * Checks the skew among a cluster of servers and returns true if t
he min and max clock
* times among the servers are within maxClockSkew.
*/ */
static bool checkSkew( const ConnectionString& cluster, unsigned sk static bool checkSkew( const ConnectionString& cluster,
ewChecks = NUM_LOCK_SKEW_CHECKS, unsigned long long maxClockSkew = MAX_LOCK unsigned skewChecks = NUM_LOCK_SKEW_CHECKS,
_CLOCK_SKEW, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW ); unsigned long long maxClockSkew = MAX_LOCK_C
LOCK_SKEW,
unsigned long long maxNetSkew = MAX_LOCK_NET
_SKEW );
/** /**
* Get the remote time from a server or cluster * Get the remote time from a server or cluster
*/ */
static Date_t remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW ); static Date_t remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );
static bool killPinger( DistributedLock& lock ); static bool killPinger( DistributedLock& lock );
/** /**
* Namespace for lock pings * Namespace for lock pings
 End of changes. 4 change blocks. 
13 lines changed or deleted 32 lines changed or added


 document_source.h   document_source.h 
skipping to change at line 53 skipping to change at line 53
#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression.h"
#include "mongo/db/pipeline/value.h" #include "mongo/db/pipeline/value.h"
#include "mongo/db/projection.h" #include "mongo/db/projection.h"
#include "mongo/db/sorter/sorter.h" #include "mongo/db/sorter/sorter.h"
#include "mongo/s/shard.h" #include "mongo/s/shard.h"
#include "mongo/s/strategy.h" #include "mongo/s/strategy.h"
#include "mongo/util/intrusive_counter.h" #include "mongo/util/intrusive_counter.h"
namespace mongo { namespace mongo {
class Accumulator; class Accumulator;
class Cursor;
class Document; class Document;
class Expression; class Expression;
class ExpressionFieldPath; class ExpressionFieldPath;
class ExpressionObject; class ExpressionObject;
class DocumentSourceLimit; class DocumentSourceLimit;
class Runner;
class DocumentSource : public IntrusiveCounterUnsigned { class DocumentSource : public IntrusiveCounterUnsigned {
public: public:
virtual ~DocumentSource() {} virtual ~DocumentSource() {}
/** Returns the next Document if there is one or boost::none if at EOF. /** Returns the next Document if there is one or boost::none if at EOF.
* Subclasses must call pExpCtx->checkForInterupt(). * Subclasses must call pExpCtx->checkForInterupt().
*/ */
virtual boost::optional<Document> getNext() = 0; virtual boost::optional<Document> getNext() = 0;
/** /**
* Inform the source that it is no longer needed and may release it s resources. After * Inform the source that it is no longer needed and may release it s resources. After
* dispose() is called the source must still be able to handle iter ation requests, but may * dispose() is called the source must still be able to handle iter ation requests, but may
* become eof(). * become eof().
* NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will * NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will
* not be advanced until eof(), see SERVER-6123. * not be advanced until eof(), see SERVER-6123.
*/ */
virtual void dispose(); virtual void dispose();
/** /**
* See ClientCursor::kill()
*/
virtual void kill();
/**
Get the source's name. Get the source's name.
@returns the string name of the source as a constant string; @returns the string name of the source as a constant string;
this is static, and there's no need to worry about adopting it this is static, and there's no need to worry about adopting it
*/ */
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
/** /**
Set the underlying source this source should use to get Documents Set the underlying source this source should use to get Documents
from. from.
skipping to change at line 334 skipping to change at line 329
bool unstarted; bool unstarted;
bool hasCurrent; bool hasCurrent;
bool newSource; // set to true for the first item of a new source bool newSource; // set to true for the first item of a new source
intrusive_ptr<DocumentSourceBsonArray> pBsonSource; intrusive_ptr<DocumentSourceBsonArray> pBsonSource;
Document pCurrent; Document pCurrent;
ShardOutput::const_iterator iterator; ShardOutput::const_iterator iterator;
ShardOutput::const_iterator listEnd; ShardOutput::const_iterator listEnd;
}; };
/** /**
* Constructs and returns Documents from the BSONObj objects produced b y a supplied Cursor. * Constructs and returns Documents from the BSONObj objects produced b y a supplied Runner.
* An object of this type may only be used by one thread, see SERVER-61 23. * An object of this type may only be used by one thread, see SERVER-61 23.
*/ */
class DocumentSourceCursor : class DocumentSourceCursor :
public DocumentSource { public DocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual ~DocumentSourceCursor(); virtual ~DocumentSourceCursor();
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual void setSource(DocumentSource *pSource); virtual void setSource(DocumentSource *pSource);
virtual bool coalesce(const intrusive_ptr<DocumentSource>& nextSour ce); virtual bool coalesce(const intrusive_ptr<DocumentSource>& nextSour ce);
virtual bool isValidInitialSource() const { return true; } virtual bool isValidInitialSource() const { return true; }
virtual void dispose(); virtual void dispose();
virtual void kill();
/** /**
* Create a document source based on a passed-in cursor. * Create a document source based on a passed-in Runner.
* *
* This is usually put at the beginning of a chain of document sour ces * This is usually put at the beginning of a chain of document sour ces
* in order to fetch data from the database. * in order to fetch data from the database.
*
* The DocumentSource takes ownership of the cursor and will destro
y it
* when the DocumentSource is finished with the cursor, if it hasn'
t
* already been destroyed.
*
* @param ns the namespace the cursor is over
* @param cursorId the id of the cursor to use
* @param pExpCtx the expression context for the pipeline
*/ */
static intrusive_ptr<DocumentSourceCursor> create( static intrusive_ptr<DocumentSourceCursor> create(
const string& ns, const string& ns,
CursorId cursorId, const boost::shared_ptr<Runner>& runner,
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
/* /*
Record the query that was specified for the cursor this wraps, if Record the query that was specified for the cursor this wraps, if
any. any.
This should be captured after any optimizations are applied to This should be captured after any optimizations are applied to
the pipeline so that it reflects what is really used. the pipeline so that it reflects what is really used.
This gets used for explain output. This gets used for explain output.
skipping to change at line 411 skipping to change at line 397
* @param deps The output of DepsTracker::toParsedDeps * @param deps The output of DepsTracker::toParsedDeps
*/ */
void setProjection(const BSONObj& projection, const boost::optional <ParsedDeps>& deps); void setProjection(const BSONObj& projection, const boost::optional <ParsedDeps>& deps);
/// returns -1 for no limit /// returns -1 for no limit
long long getLimit() const; long long getLimit() const;
private: private:
DocumentSourceCursor( DocumentSourceCursor(
const string& ns, const string& ns,
CursorId cursorId, const boost::shared_ptr<Runner>& runner,
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
void loadBatch(); void loadBatch();
std::deque<Document> _currentBatch; std::deque<Document> _currentBatch;
// BSONObj members must outlive _projection and cursor. // BSONObj members must outlive _projection and cursor.
BSONObj _query; BSONObj _query;
BSONObj _sort; BSONObj _sort;
BSONObj _projection; BSONObj _projection;
boost::optional<ParsedDeps> _dependencies; boost::optional<ParsedDeps> _dependencies;
intrusive_ptr<DocumentSourceLimit> _limit; intrusive_ptr<DocumentSourceLimit> _limit;
long long _docsAddedToBatches; // for _limit enforcement long long _docsAddedToBatches; // for _limit enforcement
string _ns; // namespace const string _ns;
CursorId _cursorId; boost::shared_ptr<Runner> _runner; // PipelineRunner holds a weak_p
bool _killed; tr to this.
}; };
class DocumentSourceGroup : public DocumentSource class DocumentSourceGroup : public DocumentSource
, public SplittableDocumentSource { , public SplittableDocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual void optimize(); virtual void optimize();
virtual GetDepsReturn getDependencies(DepsTracker* deps) const; virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
skipping to change at line 452 skipping to change at line 437
/** /**
Create a new grouping DocumentSource. Create a new grouping DocumentSource.
@param pExpCtx the expression context for the pipeline @param pExpCtx the expression context for the pipeline
@returns the DocumentSource @returns the DocumentSource
*/ */
static intrusive_ptr<DocumentSourceGroup> create( static intrusive_ptr<DocumentSourceGroup> create(
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
/** /**
Set the Id Expression.
Documents that pass through the grouping Document are grouped
according to this key. This will generate the id_ field in the
result documents.
@param pExpression the group key
*/
void setIdExpression(const intrusive_ptr<Expression> &pExpression);
/**
Add an accumulator. Add an accumulator.
Accumulators become fields in the Documents that result from Accumulators become fields in the Documents that result from
grouping. Each unique group document must have it's own grouping. Each unique group document must have it's own
accumulator; the accumulator factory is used to create that. accumulator; the accumulator factory is used to create that.
@param fieldName the name the accumulator result will have in the @param fieldName the name the accumulator result will have in the
result documents result documents
@param pAccumulatorFactory used to create the accumulator for the @param pAccumulatorFactory used to create the accumulator for the
group field group field
skipping to change at line 520 skipping to change at line 494
/* /*
Before returning anything, this source must fetch everything from Before returning anything, this source must fetch everything from
the underlying source and group it. populate() is used to do tha t the underlying source and group it. populate() is used to do tha t
on the first call to any method on this source. The populated on the first call to any method on this source. The populated
boolean indicates that this has been done. boolean indicates that this has been done.
*/ */
void populate(); void populate();
bool populated; bool populated;
intrusive_ptr<Expression> pIdExpression; /**
* Parses the raw id expression into _idExpressions and possibly _i
dFieldNames.
*/
void parseIdExpression(BSONElement groupField, const VariablesParse
State& vps);
/**
* Computes the internal representation of the group key.
*/
Value computeId(Variables* vars);
/**
* Converts the internal representation of the group key to the _id
shape specified by the
* user.
*/
Value expandId(const Value& val);
typedef vector<intrusive_ptr<Accumulator> > Accumulators; typedef vector<intrusive_ptr<Accumulator> > Accumulators;
typedef boost::unordered_map<Value, Accumulators, Value::Hash> Grou psMap; typedef boost::unordered_map<Value, Accumulators, Value::Hash> Grou psMap;
GroupsMap groups; GroupsMap groups;
/* /*
The field names for the result documents and the accumulator The field names for the result documents and the accumulator
factories for the result documents. The Expressions are the factories for the result documents. The Expressions are the
common expressions used by each instance of each accumulator common expressions used by each instance of each accumulator
in order to find the right-hand side of what gets added to the in order to find the right-hand side of what gets added to the
skipping to change at line 549 skipping to change at line 537
vector<intrusive_ptr<Accumulator> (*)()> vpAccumulatorFactory; vector<intrusive_ptr<Accumulator> (*)()> vpAccumulatorFactory;
vector<intrusive_ptr<Expression> > vpExpression; vector<intrusive_ptr<Expression> > vpExpression;
Document makeDocument(const Value& id, const Accumulators& accums, bool mergeableOutput); Document makeDocument(const Value& id, const Accumulators& accums, bool mergeableOutput);
bool _doingMerge; bool _doingMerge;
bool _spilled; bool _spilled;
const bool _extSortAllowed; const bool _extSortAllowed;
const int _maxMemoryUsageBytes; const int _maxMemoryUsageBytes;
boost::scoped_ptr<Variables> _variables; boost::scoped_ptr<Variables> _variables;
std::vector<std::string> _idFieldNames; // used when id is a docume
nt
std::vector<intrusive_ptr<Expression> > _idExpressions;
// only used when !_spilled // only used when !_spilled
GroupsMap::iterator groupsIterator; GroupsMap::iterator groupsIterator;
// only used when _spilled // only used when _spilled
scoped_ptr<Sorter<Value, Value>::Iterator> _sorterIterator; scoped_ptr<Sorter<Value, Value>::Iterator> _sorterIterator;
pair<Value, Value> _firstPartOfNextGroup; pair<Value, Value> _firstPartOfNextGroup;
Value _currentId; Value _currentId;
Accumulators _currentAccumulators; Accumulators _currentAccumulators;
}; };
skipping to change at line 636 skipping to change at line 626
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
static const char name[]; static const char name[];
/** Returns non-owning pointers to cursors managed by this stage. /** Returns non-owning pointers to cursors managed by this stage.
* Call this instead of getNext() if you want access to the raw st reams. * Call this instead of getNext() if you want access to the raw st reams.
* This method should only be called at most once. * This method should only be called at most once.
*/ */
vector<DBClientCursor*> getCursors(); vector<DBClientCursor*> getCursors();
/**
* Returns the next object from the cursor, throwing an appropriate
exception if the cursor
* reported an error. This is a better form of DBClientCursor::next
Safe.
*/
static Document nextSafeFrom(DBClientCursor* cursor);
private: private:
struct CursorAndConnection { struct CursorAndConnection {
CursorAndConnection(ConnectionString host, NamespaceString ns, CursorId id); CursorAndConnection(ConnectionString host, NamespaceString ns, CursorId id);
ScopedDbConnection connection; ScopedDbConnection connection;
DBClientCursor cursor; DBClientCursor cursor;
}; };
// using list to enable removing arbitrary elements // using list to enable removing arbitrary elements
typedef list<boost::shared_ptr<CursorAndConnection> > Cursors; typedef list<boost::shared_ptr<CursorAndConnection> > Cursors;
skipping to change at line 1103 skipping to change at line 1099
bool spherical; bool spherical;
double distanceMultiplier; double distanceMultiplier;
scoped_ptr<FieldPath> includeLocs; scoped_ptr<FieldPath> includeLocs;
bool uniqueDocs; bool uniqueDocs;
// these fields are used while processing the results // these fields are used while processing the results
BSONObj cmdOutput; BSONObj cmdOutput;
boost::scoped_ptr<BSONObjIterator> resultsIterator; // iterator ove r cmdOutput["results"] boost::scoped_ptr<BSONObjIterator> resultsIterator; // iterator ove r cmdOutput["results"]
}; };
} }
/* ======================= INLINED IMPLEMENTATIONS ========================
== */
namespace mongo {
inline void DocumentSourceGroup::setIdExpression(
const intrusive_ptr<Expression> &pExpression) {
pIdExpression = pExpression;
}
}
 End of changes. 15 change blocks. 
36 lines changed or deleted 37 lines changed or added


 dur.h   dur.h 
skipping to change at line 135 skipping to change at line 135
The idea is that long running write operations that don't y ield The idea is that long running write operations that don't y ield
(like creating an index or update with $atomic) can call th is (like creating an index or update with $atomic) can call th is
whenever the db is in a sane state and it will prevent comm its whenever the db is in a sane state and it will prevent comm its
from growing too large. from growing too large.
@return true if commited @return true if commited
*/ */
virtual bool commitIfNeeded(bool force=false) = 0; virtual bool commitIfNeeded(bool force=false) = 0;
/** @return true if time to commit but does NOT do a commit */ /** @return true if time to commit but does NOT do a commit */
virtual bool aCommitIsNeeded() const = 0; virtual bool isCommitNeeded() const = 0;
/** Declare write intent for a DiskLoc. @see DiskLoc::writing( ) */ /** Declare write intent for a DiskLoc. @see DiskLoc::writing( ) */
inline DiskLoc& writingDiskLoc(DiskLoc& d) { return *((DiskLoc* ) writingPtr(&d, sizeof(d))); } inline DiskLoc& writingDiskLoc(DiskLoc& d) { return *((DiskLoc* ) writingPtr(&d, sizeof(d))); }
/** Declare write intent for an int */ /** Declare write intent for an int */
inline int& writingInt(int& d) { return *static_cast<int*>(writ ingPtr( &d, sizeof(d))); } inline int& writingInt(int& d) { return *static_cast<int*>(writ ingPtr( &d, sizeof(d))); }
/** "assume i've already indicated write intent, let me write" /** "assume i've already indicated write intent, let me write"
redeclaration is fine too, but this is faster. redeclaration is fine too, but this is faster.
*/ */
skipping to change at line 203 skipping to change at line 203
friend class TempDisableDurability; friend class TempDisableDurability;
}; // class DurableInterface }; // class DurableInterface
class NonDurableImpl : public DurableInterface { class NonDurableImpl : public DurableInterface {
void* writingPtr(void *x, unsigned len); void* writingPtr(void *x, unsigned len);
void* writingAtOffset(void *buf, unsigned ofs, unsigned len) { return buf; } void* writingAtOffset(void *buf, unsigned ofs, unsigned len) { return buf; }
void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges) { return buf; } void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges) { return buf; }
void declareWriteIntent(void *, unsigned); void declareWriteIntent(void *, unsigned);
void createdFile(const std::string& filename, unsigned long lon g len) { } void createdFile(const std::string& filename, unsigned long lon g len) { }
bool awaitCommit() { return false; } bool awaitCommit() { return false; }
bool commitNow() { return false; } bool commitNow();
bool commitIfNeeded(bool) { return false; } bool commitIfNeeded(bool);
bool aCommitIsNeeded() const { return false; } bool isCommitNeeded() const { return false; }
void syncDataAndTruncateJournal() {} void syncDataAndTruncateJournal() {}
bool isDurable() const { return false; } bool isDurable() const { return false; }
}; };
class DurableImpl : public DurableInterface { class DurableImpl : public DurableInterface {
bool _aCommitIsNeeded(); bool _aCommitIsNeeded();
void* writingPtr(void *x, unsigned len); void* writingPtr(void *x, unsigned len);
void* writingAtOffset(void *buf, unsigned ofs, unsigned len); void* writingAtOffset(void *buf, unsigned ofs, unsigned len);
void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges); void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges);
void declareWriteIntent(void *, unsigned); void declareWriteIntent(void *, unsigned);
void createdFile(const std::string& filename, unsigned long lon g len); void createdFile(const std::string& filename, unsigned long lon g len);
bool awaitCommit(); bool awaitCommit();
bool commitNow(); bool commitNow();
bool aCommitIsNeeded() const; bool isCommitNeeded() const;
bool commitIfNeeded(bool); bool commitIfNeeded(bool);
void syncDataAndTruncateJournal(); void syncDataAndTruncateJournal();
bool isDurable() const { return true; } bool isDurable() const { return true; }
}; };
} // namespace dur } // namespace dur
inline dur::DurableInterface& getDur() { return dur::DurableInterface:: getDur(); } inline dur::DurableInterface& getDur() { return dur::DurableInterface:: getDur(); }
/** declare that we are modifying a diskloc and this is a datafile writ e. */ /** declare that we are modifying a diskloc and this is a datafile writ e. */
 End of changes. 3 change blocks. 
5 lines changed or deleted 5 lines changed or added


 element.h   element.h 
skipping to change at line 583 skipping to change at line 583
friend bool operator==(const Element&, const Element&); friend bool operator==(const Element&, const Element&);
inline Element(Document* doc, RepIdx repIdx); inline Element(Document* doc, RepIdx repIdx);
Status addChild(Element e, bool front); Status addChild(Element e, bool front);
StringData getValueStringOrSymbol() const; StringData getValueStringOrSymbol() const;
Status setValue(Element::RepIdx newValueIdx); Status setValue(Element::RepIdx newValueIdx);
template<typename Builder>
inline void writeElement(Builder* builder, const StringData* fieldN
ame = NULL) const;
template<typename Builder>
inline void writeChildren(Builder* builder) const;
Document* _doc; Document* _doc;
RepIdx _repIdx; RepIdx _repIdx;
}; };
/** Element comparison support. Comparison is like STL iterator compari sion: equal Elements /** Element comparison support. Comparison is like STL iterator compari sion: equal Elements
* refer to the same underlying data. The equality does *not* mean tha t the underlying * refer to the same underlying data. The equality does *not* mean tha t the underlying
* values are equivalent. Use the Element::compareWith methods to comp are the represented * values are equivalent. Use the Element::compareWith methods to comp are the represented
* data. * data.
*/ */
 End of changes. 1 change blocks. 
7 lines changed or deleted 0 lines changed or added


 engine.h   engine.h 
skipping to change at line 28 skipping to change at line 28
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
typedef unsigned long long ScriptingFunction; typedef unsigned long long ScriptingFunction;
typedef BSONObj (*NativeFunction)(const BSONObj& args, void* data); typedef BSONObj (*NativeFunction)(const BSONObj& args, void* data);
typedef map<string, ScriptingFunction> FunctionCacheMap; typedef map<string, ScriptingFunction> FunctionCacheMap;
class DBClientWithCommands; class DBClientWithCommands;
class DBClientBase;
struct JSFile { struct JSFile {
const char* name; const char* name;
const StringData& source; const StringData& source;
}; };
class Scope : boost::noncopyable { class Scope : boost::noncopyable {
public: public:
Scope(); Scope();
virtual ~Scope(); virtual ~Scope();
skipping to change at line 248 skipping to change at line 249
static void (*_connectCallback)(DBClientWithCommands&); static void (*_connectCallback)(DBClientWithCommands&);
static const char* (*_checkInterruptCallback)(); static const char* (*_checkInterruptCallback)();
static unsigned (*_getCurrentOpIdCallback)(); static unsigned (*_getCurrentOpIdCallback)();
}; };
void installGlobalUtils(Scope& scope); void installGlobalUtils(Scope& scope);
bool hasJSReturn(const string& s); bool hasJSReturn(const string& s);
const char* jsSkipWhiteSpace(const char* raw); const char* jsSkipWhiteSpace(const char* raw);
extern ScriptEngine* globalScriptEngine; extern ScriptEngine* globalScriptEngine;
extern DBClientBase* directDBClient;
} }
 End of changes. 2 change blocks. 
0 lines changed or deleted 2 lines changed or added


 engine_v8.h   engine_v8.h 
skipping to change at line 344 skipping to change at line 344
// We never need to Dispose since this should last as long as V 8Scope exists // We never need to Dispose since this should last as long as V 8Scope exists
_strLitMap[str] = v8::Persistent<v8::String>::New(v8Str); _strLitMap[str] = v8::Persistent<v8::String>::New(v8Str);
return v8Str; return v8Str;
} }
private: private:
/** /**
* Recursion limit when converting from JS objects to BSON. * Recursion limit when converting from JS objects to BSON.
*/ */
static const int objectDepthLimit = 500; static const int objectDepthLimit = 150;
/** /**
* Attach data to obj such that the data has the same lifetime as t he Object obj points to. * Attach data to obj such that the data has the same lifetime as t he Object obj points to.
* obj must have been created by either LazyBsonFT or ROBsonFT. * obj must have been created by either LazyBsonFT or ROBsonFT.
*/ */
void wrapBSONObject(v8::Handle<v8::Object> obj, BSONObj data, bool readOnly); void wrapBSONObject(v8::Handle<v8::Object> obj, BSONObj data, bool readOnly);
/** /**
* Trampoline to call a c++ function with a specific signature (V8S cope*, v8::Arguments&). * Trampoline to call a c++ function with a specific signature (V8S cope*, v8::Arguments&).
* Handles interruption, exceptions, etc. * Handles interruption, exceptions, etc.
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 environment.h   environment.h 
skipping to change at line 121 skipping to change at line 121
*/ */
Status addKeyConstraint(KeyConstraint* keyConstraint); Status addKeyConstraint(KeyConstraint* keyConstraint);
Status addConstraint(Constraint* constraint); Status addConstraint(Constraint* constraint);
/** Add the Value to this Environment with the given Key. If " validate" has already /** Add the Value to this Environment with the given Key. If " validate" has already
* been called on this Environment, runs all Constraints on th e new Environment. If * been called on this Environment, runs all Constraints on th e new Environment. If
* any of the Constraints fail, reverts to the old Environment and returns an error * any of the Constraints fail, reverts to the old Environment and returns an error
*/ */
Status set(const Key& key, const Value& value); Status set(const Key& key, const Value& value);
/** Remove the Value from this Environment with the given Key.
If "validate" has
* already been called on this Environment, runs all Constraint
s on the new Environment.
* If any of the Constraints fail, reverts to the old Environme
nt and returns an error
*/
Status remove(const Key& key);
/** Add a default Value to this Environment with the given Key. Fails if validate has /** Add a default Value to this Environment with the given Key. Fails if validate has
* already been called on our environment. The get functions will return the default * already been called on our environment. The get functions will return the default
* if one exists and the value has not been explicitly set. * if one exists and the value has not been explicitly set.
*/ */
Status setDefault(const Key& key, const Value& value); Status setDefault(const Key& key, const Value& value);
/** Populate the given Value with the Value stored for the give n Key. Return a success /** Populate the given Value with the Value stored for the give n Key. Return a success
* status if the value was found, or an error status if the va lue was not found. * status if the value was found, or an error status if the va lue was not found.
* Leaves the Value unchanged on error. * Leaves the Value unchanged on error.
*/ */
Status get(const Key& key, Value* value) const; Status get(const Key& key, Value* value) const;
/** Same as the above get interface, but supports directly gett ing C++ types without the /** Same as the above get interface, but supports directly gett ing C++ types without the
* intermediate Value and has the added failure case of the va lue being the wrong type * intermediate Value and has the added failure case of the va lue being the wrong type
*/ */
template <typename T> template <typename T>
Status get(const Key& key, T* value_contents) const; Status get(const Key& key, T* value_contents) const;
/** Runs all registered Constraints and returns the result. On /** Runs all registered Constraints and returns the result. If
success, marks this as a "setValid" is true and
* valid Environment so that any modifications will re run all * validation succeeds, marks this as a valid Environment so th
Constraints at any modifications will
* re run all Constraints
*/ */
Status validate(); Status validate(bool setValid=true);
/** Sets all variables in the given Environment in this Environ ment. Does not add /** Sets all variables in the given Environment in this Environ ment. Does not add
* Constraints * Constraints
*/ */
Status setAll(const Environment& other); Status setAll(const Environment& other);
/** The functions below are the legacy interface to be consiste nt with /** The functions below are the legacy interface to be consiste nt with
* boost::program_options::variables_map during the transition period * boost::program_options::variables_map during the transition period
*/ */
skipping to change at line 197 skipping to change at line 204
* "h" : "foo" * "h" : "foo"
* } * }
* } * }
* *
* Note that the BSON representation only includes fields that were explicitly set using * Note that the BSON representation only includes fields that were explicitly set using
* setAll or set, and not defaults that were specified using se tDefault. * setAll or set, and not defaults that were specified using se tDefault.
*/ */
BSONObj toBSON() const; BSONObj toBSON() const;
/* Debugging */ /* Debugging */
void dump(); void dump() const;
protected: protected:
std::vector<Constraint*> constraints; std::vector<Constraint*> constraints;
std::vector<KeyConstraint*> keyConstraints; std::vector<KeyConstraint*> keyConstraints;
std::map <Key, Value> values; std::map <Key, Value> values;
std::map <Key, Value> default_values; std::map <Key, Value> default_values;
bool valid; bool valid;
}; };
template <typename T> template <typename T>
 End of changes. 4 change blocks. 
6 lines changed or deleted 16 lines changed or added


 error_codes.h   error_codes.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <string>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/client/export_macros.h" #include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* This is a generated class containing a table of error codes and thei r corresponding error * This is a generated class containing a table of error codes and thei r corresponding error
* strings. The class is derived from the definitions in src/mongo/base /error_codes.err file. * strings. The class is derived from the definitions in src/mongo/base /error_codes.err file.
* *
* Do not update this file directly. Update src/mongo/base/error_codes. err instead. * Do not update this file directly. Update src/mongo/base/error_codes. err instead.
skipping to change at line 117 skipping to change at line 119
InvalidNamespace = 73, InvalidNamespace = 73,
NodeNotFound = 74, NodeNotFound = 74,
WriteConcernLegacyOK = 75, WriteConcernLegacyOK = 75,
NoReplicationEnabled = 76, NoReplicationEnabled = 76,
OperationIncomplete = 77, OperationIncomplete = 77,
CommandResultSchemaViolation = 78, CommandResultSchemaViolation = 78,
UnknownReplWriteConcern = 79, UnknownReplWriteConcern = 79,
RoleDataInconsistent = 80, RoleDataInconsistent = 80,
NoClientContext = 81, NoClientContext = 81,
NoProgressMade = 82, NoProgressMade = 82,
RemoteResultsUnavailable = 83,
NotMaster = 10107, NotMaster = 10107,
DuplicateKey = 11000, DuplicateKey = 11000,
InterruptedAtShutdown = 11600,
Interrupted = 11601, Interrupted = 11601,
OutOfDiskSpace = 14031,
MaxError MaxError
}; };
static const char* errorString(Error err); static std::string errorString(Error err);
/** /**
* Parse an Error from its "name". Returns UnknownError if "name" is unrecognized. * Parses an Error from its "name". Returns UnknownError if "name" is unrecognized.
* *
* NOTE: Also returns UnknownError for the string "UnknownError". * NOTE: Also returns UnknownError for the string "UnknownError".
*/ */
static Error fromString(const StringData& name); static Error fromString(const StringData& name);
/** /**
* Parse an Error from its "code". Returns UnknownError if "code" * Casts an integer "code" to an Error. Unrecognized codes are pre
is unrecognized. served, meaning
* * that the result of a call to fromInt() may not be one of the val
* NOTE: Also returns UnknownError for the integer code for Unknown ues in the
Error. * Error enumeration.
*/ */
static Error fromInt(int code); static Error fromInt(int code);
static bool isNetworkError(Error err); static bool isNetworkError(Error err);
static bool isInterruption(Error err);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 8 change blocks. 
7 lines changed or deleted 13 lines changed or added


 explain_plan.h   explain_plan.h 
skipping to change at line 58 skipping to change at line 58
* 'nscannedObjectsAllPlans', 'nscannedAllPlans', 'scanAndOrder', 'inde xOnly', 'nYields', * 'nscannedObjectsAllPlans', 'nscannedAllPlans', 'scanAndOrder', 'inde xOnly', 'nYields',
* 'nChunkSkips', 'millis', 'allPlans', and 'oldPlan'. * 'nChunkSkips', 'millis', 'allPlans', and 'oldPlan'.
* *
* All these fields are documented in type_explain.h * All these fields are documented in type_explain.h
* *
* TODO: Currently, only working for single-leaf plans. * TODO: Currently, only working for single-leaf plans.
*/ */
Status explainPlan(const PlanStageStats& stats, TypeExplain** explainOu t, bool fullDetails); Status explainPlan(const PlanStageStats& stats, TypeExplain** explainOu t, bool fullDetails);
/** /**
* Returns OK, allocating and filling in '*explain' with details of
* the "winner" plan. Caller takes ownership of '*explain'. Otherwise,
* return a status describing the error.
*
* 'bestStats', 'candidateStats' and 'solution' are used to fill in '*e
xplain'.
* Used by both MultiPlanRunner and CachedPlanRunner.
*/
Status explainMultiPlan(const PlanStageStats& stats,
const std::vector<PlanStageStats*>& candidateSt
ats,
QuerySolution* solution,
TypeExplain** explain);
/**
* Returns a short plan summary string describing the leaves of the que
ry solution.
*
* Used for logging.
*/
std::string getPlanSummary(const QuerySolution& soln);
/**
* If the out-parameter 'info' is non-null, fills in '*infoOut' with in formation * If the out-parameter 'info' is non-null, fills in '*infoOut' with in formation
* from the query solution tree 'soln' that can be determined before th e query is done * from the query solution tree 'soln' that can be determined before th e query is done
* running. Whereas 'explainPlan(...)' above is for collecting runtime debug information, * running. Whereas 'explainPlan(...)' above is for collecting runtime debug information,
* this function is for collecting static debug information that is kno wn prior * this function is for collecting static debug information that is kno wn prior
* to query runtime. * to query runtime.
* *
* The caller is responsible for deleting '*infoOut'. * The caller is responsible for deleting '*infoOut'.
*/ */
void getPlanInfo(const QuerySolution& soln, PlanInfo** infoOut); void getPlanInfo(const QuerySolution& soln, PlanInfo** infoOut);
void statsToBSON(const PlanStageStats& stats, BSONObjBuilder* bob); void statsToBSON(const PlanStageStats& stats, BSONObjBuilder* bob);
BSONObj statsToBSON(const PlanStageStats& stats);
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
0 lines changed or deleted 25 lines changed or added


 expression.h   expression.h 
skipping to change at line 66 skipping to change at line 66
LTE, LT, EQ, GT, GTE, REGEX, MOD, EXISTS, MATCH_IN, NIN, LTE, LT, EQ, GT, GTE, REGEX, MOD, EXISTS, MATCH_IN, NIN,
// special types // special types
TYPE_OPERATOR, GEO, WHERE, TYPE_OPERATOR, GEO, WHERE,
// things that maybe shouldn't even be nodes // things that maybe shouldn't even be nodes
ATOMIC, ALWAYS_FALSE, ATOMIC, ALWAYS_FALSE,
// Things that we parse but cannot be answered without an index . // Things that we parse but cannot be answered without an index .
GEO_NEAR, TEXT, GEO_NEAR, TEXT,
// Expressions that are only created internally
INTERNAL_GEO_S2_KEYCHECK
}; };
MatchExpression( MatchType type ); MatchExpression( MatchType type );
virtual ~MatchExpression(){} virtual ~MatchExpression(){}
// //
// Structural/AST information // Structural/AST information
// //
/** /**
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 expression_leaf.h   expression_leaf.h 
skipping to change at line 207 skipping to change at line 207
if ( getTag() ) { if ( getTag() ) {
e->setTag(getTag()->clone()); e->setTag(getTag()->clone());
} }
return e; return e;
} }
virtual bool matchesSingleElement( const BSONElement& e ) const; virtual bool matchesSingleElement( const BSONElement& e ) const;
virtual void debugString( StringBuilder& debug, int level ) const; virtual void debugString( StringBuilder& debug, int level ) const;
void shortDebugString( StringBuilder& debug ) const;
virtual bool equivalent( const MatchExpression* other ) const; virtual bool equivalent( const MatchExpression* other ) const;
const string& getString() const { return _regex; } const string& getString() const { return _regex; }
const string& getFlags() const { return _flags; } const string& getFlags() const { return _flags; }
private: private:
std::string _regex; std::string _regex;
std::string _flags; std::string _flags;
boost::scoped_ptr<pcrecpp::RE> _re; boost::scoped_ptr<pcrecpp::RE> _re;
}; };
skipping to change at line 299 skipping to change at line 301
bool hasNull() const { return _hasNull; } bool hasNull() const { return _hasNull; }
bool singleNull() const { return size() == 1 && _hasNull; } bool singleNull() const { return size() == 1 && _hasNull; }
bool hasEmptyArray() const { return _hasEmptyArray; } bool hasEmptyArray() const { return _hasEmptyArray; }
int size() const { return _equalities.size() + _regexes.size(); } int size() const { return _equalities.size() + _regexes.size(); }
bool equivalent( const ArrayFilterEntries& other ) const; bool equivalent( const ArrayFilterEntries& other ) const;
void copyTo( ArrayFilterEntries& toFillIn ) const; void copyTo( ArrayFilterEntries& toFillIn ) const;
void debugString( StringBuilder& debug ) const;
private: private:
bool _hasNull; // if _equalities has a jstNULL element in it bool _hasNull; // if _equalities has a jstNULL element in it
bool _hasEmptyArray; bool _hasEmptyArray;
BSONElementSet _equalities; BSONElementSet _equalities;
std::vector<RegexMatchExpression*> _regexes; std::vector<RegexMatchExpression*> _regexes;
}; };
/** /**
* query operator: $in * query operator: $in
*/ */
 End of changes. 2 change blocks. 
0 lines changed or deleted 4 lines changed or added


 expression_parser.h   expression_parser.h 
skipping to change at line 53 skipping to change at line 53
typedef StatusWith<MatchExpression*> StatusWithMatchExpression; typedef StatusWith<MatchExpression*> StatusWithMatchExpression;
class MatchExpressionParser { class MatchExpressionParser {
public: public:
/** /**
* caller has to maintain ownership obj * caller has to maintain ownership obj
* the tree has views (BSONElement) into obj * the tree has views (BSONElement) into obj
*/ */
static StatusWithMatchExpression parse( const BSONObj& obj ) { static StatusWithMatchExpression parse( const BSONObj& obj ) {
return _parse( obj, true ); // The 0 initializes the match expression tree depth.
return _parse( obj, 0 );
} }
private: private:
/** /**
* 5 = false * 5 = false
{ a : 5 } = false * { a : 5 } = false
{ $lt : 5 } = true * { $lt : 5 } = true
{ $ref : "s" } = false * { $ref: "s", $id: "x" } = false
* { $ref: "s", $id: "x", $db: "mydb" } = false
* { $ref : "s" } = false (if incomplete DBRef is allowed)
* { $id : "x" } = false (if incomplete DBRef is allowed)
* { $db : "mydb" } = false (if incomplete DBRef is allowed)
*/ */
static bool _isExpressionDocument( const BSONElement& e ); static bool _isExpressionDocument( const BSONElement& e, bool allow IncompleteDBRef );
static bool _isDBRefDocument( const BSONObj& obj ); /**
* { $ref: "s", $id: "x" } = true
* { $ref : "s" } = true (if incomplete DBRef is allowed)
* { $id : "x" } = true (if incomplete DBRef is allowed)
* { $db : "x" } = true (if incomplete DBRef is allowed)
*/
static bool _isDBRefDocument( const BSONObj& obj, bool allowIncompl
eteDBRef );
static StatusWithMatchExpression _parse( const BSONObj& obj, bool t /**
opLevel ); * Parse 'obj' and return either a MatchExpression or an error.
*
* 'level' tracks the current depth of the tree across recursive ca
lls to this
* function. Used in order to apply special logic at the top-level
and to return an
* error if the tree exceeds the maximum allowed depth.
*/
static StatusWithMatchExpression _parse( const BSONObj& obj, int le
vel );
/** /**
* parses a field in a sub expression * parses a field in a sub expression
* if the query is { x : { $gt : 5, $lt : 8 } } * if the query is { x : { $gt : 5, $lt : 8 } }
* e is { $gt : 5, $lt : 8 } * e is { $gt : 5, $lt : 8 }
*/ */
static Status _parseSub( const char* name, static Status _parseSub( const char* name,
const BSONObj& obj, const BSONObj& obj,
AndMatchExpression* root ); AndMatchExpression* root );
skipping to change at line 115 skipping to change at line 133
// arrays // arrays
static StatusWithMatchExpression _parseElemMatch( const char* name, static StatusWithMatchExpression _parseElemMatch( const char* name,
const BSONElement& e ) ; const BSONElement& e ) ;
static StatusWithMatchExpression _parseAll( const char* name, static StatusWithMatchExpression _parseAll( const char* name,
const BSONElement& e ); const BSONElement& e );
// tree // tree
static Status _parseTreeList( const BSONObj& arr, ListOfMatchExpres sion* out ); static Status _parseTreeList( const BSONObj& arr, ListOfMatchExpres sion* out, int level );
static StatusWithMatchExpression _parseNot( const char* name, const BSONElement& e ); static StatusWithMatchExpression _parseNot( const char* name, const BSONElement& e );
// The maximum allowed depth of a query tree. Just to guard against
stack overflow.
static const int kMaximumTreeDepth;
}; };
typedef boost::function<StatusWithMatchExpression(const char* name, int type, const BSONObj& section)> MatchExpressionParserGeoCallback; typedef boost::function<StatusWithMatchExpression(const char* name, int type, const BSONObj& section)> MatchExpressionParserGeoCallback;
extern MatchExpressionParserGeoCallback expressionParserGeoCallback; extern MatchExpressionParserGeoCallback expressionParserGeoCallback;
typedef boost::function<StatusWithMatchExpression(const BSONElement& wh ere)> MatchExpressionParserWhereCallback; typedef boost::function<StatusWithMatchExpression(const BSONElement& wh ere)> MatchExpressionParserWhereCallback;
extern MatchExpressionParserWhereCallback expressionParserWhereCallback ; extern MatchExpressionParserWhereCallback expressionParserWhereCallback ;
typedef boost::function<StatusWithMatchExpression(const BSONObj& queryO bj)> MatchExpressionParserTextCallback; typedef boost::function<StatusWithMatchExpression(const BSONObj& queryO bj)> MatchExpressionParserTextCallback;
extern MatchExpressionParserTextCallback expressionParserTextCallback; extern MatchExpressionParserTextCallback expressionParserTextCallback;
 End of changes. 7 change blocks. 
9 lines changed or deleted 33 lines changed or added


 expression_tree.h   expression_tree.h 
skipping to change at line 178 skipping to change at line 178
} }
virtual void debugString( StringBuilder& debug, int level = 0 ) con st; virtual void debugString( StringBuilder& debug, int level = 0 ) con st;
bool equivalent( const MatchExpression* other ) const; bool equivalent( const MatchExpression* other ) const;
virtual size_t numChildren() const { return 1; } virtual size_t numChildren() const { return 1; }
virtual MatchExpression* getChild( size_t i ) const { return _exp.g et(); } virtual MatchExpression* getChild( size_t i ) const { return _exp.g et(); }
MatchExpression* releaseChild(void) { return _exp.release(); }
void resetChild( MatchExpression* newChild) { _exp.reset(newChild);
}
private: private:
boost::scoped_ptr<MatchExpression> _exp; std::auto_ptr<MatchExpression> _exp;
}; };
} }
 End of changes. 2 change blocks. 
1 lines changed or deleted 6 lines changed or added


 extent_manager.h   extent_manager.h 
skipping to change at line 71 skipping to change at line 71
class ExtentManager { class ExtentManager {
MONGO_DISALLOW_COPYING( ExtentManager ); MONGO_DISALLOW_COPYING( ExtentManager );
public: public:
/** /**
* @param freeListDetails this is a reference into the .ns file * @param freeListDetails this is a reference into the .ns file
* while a bit odd, this is not a layer violation as extents * while a bit odd, this is not a layer violation as extents
* are a peer to the .ns file, without any layering * are a peer to the .ns file, without any layering
*/ */
ExtentManager( const StringData& dbname, const StringData& path, ExtentManager( const StringData& dbname, const StringData& path,
NamespaceDetails* freeListDetails,
bool directoryPerDB ); bool directoryPerDB );
~ExtentManager(); ~ExtentManager();
/** /**
* deletes all state and puts back to original state * deletes all state and puts back to original state
*/ */
void reset(); void reset();
/** /**
* can only be called once
*/
void init( NamespaceDetails* freeListDetails );
/**
* opens all current files * opens all current files
*/ */
Status init(); Status init();
size_t numFiles() const; size_t numFiles() const;
long long fileSize() const; long long fileSize() const;
DataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly = false ); DataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly = false );
DataFile* addAFile( int sizeNeeded, bool preallocateNextFile ); DataFile* addAFile( int sizeNeeded, bool preallocateNextFile );
skipping to change at line 113 skipping to change at line 107
* @param maxFileNoForQuota - 0 for unlimited * @param maxFileNoForQuota - 0 for unlimited
*/ */
DiskLoc createExtent( int approxSize, int maxFileNoForQuota ); DiskLoc createExtent( int approxSize, int maxFileNoForQuota );
/** /**
* will return NULL if nothing suitable in free list * will return NULL if nothing suitable in free list
*/ */
DiskLoc allocFromFreeList( int approxSize, bool capped ); DiskLoc allocFromFreeList( int approxSize, bool capped );
/** /**
* @param details - this is for the collection we're adding space t o
* @param quotaMax 0 == no limit * @param quotaMax 0 == no limit
* TODO: this isn't quite in the right spot * TODO: this isn't quite in the right spot
* really need the concept of a NamespaceStructure in the current paradigm * really need the concept of a NamespaceStructure in the current paradigm
*/ */
Extent* increaseStorageSize( const string& ns, Extent* increaseStorageSize( const string& ns,
NamespaceDetails* details, NamespaceDetails* details,
int size, int size,
int quotaMax ); int quotaMax );
/** /**
* firstExt has to be == lastExt or a chain * firstExt has to be == lastExt or a chain
*/ */
void freeExtents( DiskLoc firstExt, DiskLoc lastExt ); void freeExtents( DiskLoc firstExt, DiskLoc lastExt );
void printFreeList() const; void printFreeList() const;
bool hasFreeList() const { return _freeListDetails != NULL; } void freeListStats( int* numExtents, int64_t* totalFreeSize ) const ;
/** /**
* @param loc - has to be for a specific Record * @param loc - has to be for a specific Record
*/ */
Record* recordFor( const DiskLoc& loc ) const; Record* recordFor( const DiskLoc& loc ) const;
/** /**
* @param loc - has to be for a specific Record (not an Extent) * @param loc - has to be for a specific Record (not an Extent)
*/ */
Extent* extentFor( const DiskLoc& loc ) const; Extent* extentFor( const DiskLoc& loc ) const;
skipping to change at line 175 skipping to change at line 170
DiskLoc getPrevRecordInExtent( const DiskLoc& loc ) const; DiskLoc getPrevRecordInExtent( const DiskLoc& loc ) const;
/** /**
* quantizes extent size to >= min + page boundary * quantizes extent size to >= min + page boundary
*/ */
static int quantizeExtentSize( int size ); static int quantizeExtentSize( int size );
private: private:
DiskLoc _getFreeListStart() const;
DiskLoc _getFreeListEnd() const;
void _setFreeListStart( DiskLoc loc );
void _setFreeListEnd( DiskLoc loc );
const DataFile* _getOpenFile( int n ) const; const DataFile* _getOpenFile( int n ) const;
DiskLoc _createExtentInFile( int fileNo, DataFile* f, DiskLoc _createExtentInFile( int fileNo, DataFile* f,
int size, int maxFileNoForQuota ); int size, int maxFileNoForQuota );
boost::filesystem::path fileName( int n ) const; boost::filesystem::path fileName( int n ) const;
// ----- // -----
std::string _dbname; // i.e. "test" std::string _dbname; // i.e. "test"
std::string _path; // i.e. "/data/db" std::string _path; // i.e. "/data/db"
NamespaceDetails* _freeListDetails;
bool _directoryPerDB; bool _directoryPerDB;
// must be in the dbLock when touching this (and write locked when writing to of course) // must be in the dbLock when touching this (and write locked when writing to of course)
// however during Database object construction we aren't, which is ok as it isn't yet visible // however during Database object construction we aren't, which is ok as it isn't yet visible
// to others and we are in the dbholder lock then. // to others and we are in the dbholder lock then.
std::vector<DataFile*> _files; std::vector<DataFile*> _files;
}; };
} }
 End of changes. 6 change blocks. 
8 lines changed or deleted 7 lines changed or added


 file.h   file.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <boost/cstdint.hpp>
#include <string> #include <string>
#include "mongo/platform/basic.h" #include "mongo/platform/basic.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
namespace mongo { namespace mongo {
typedef uint64_t fileofs; typedef uint64_t fileofs;
// NOTE: not thread-safe. (at least the windows implementation isn't) // NOTE: not thread-safe. (at least the windows implementation isn't)
skipping to change at line 47 skipping to change at line 46
bool bad() const { return _bad; } bool bad() const { return _bad; }
void fsync() const; void fsync() const;
bool is_open() const; bool is_open() const;
fileofs len(); fileofs len();
void open(const char* filename, bool readOnly = false, bool direct = false); void open(const char* filename, bool readOnly = false, bool direct = false);
void read(fileofs o, char* data, unsigned len); void read(fileofs o, char* data, unsigned len);
void truncate(fileofs size); void truncate(fileofs size);
void write(fileofs o, const char* data, unsigned len); void write(fileofs o, const char* data, unsigned len);
static boost::intmax_t freeSpace(const std::string& path); static intmax_t freeSpace(const std::string& path);
private: private:
bool _bad; bool _bad;
#ifdef _WIN32 #ifdef _WIN32
HANDLE _handle; HANDLE _handle;
#else #else
int _fd; int _fd;
#endif #endif
std::string _name; std::string _name;
 End of changes. 2 change blocks. 
2 lines changed or deleted 1 lines changed or added


 filter.h   filter.h 
skipping to change at line 49 skipping to change at line 49
* the WorkingSetMember's various types can be tested to see if they sa tisfy an expression. * the WorkingSetMember's various types can be tested to see if they sa tisfy an expression.
*/ */
class WorkingSetMatchableDocument : public MatchableDocument { class WorkingSetMatchableDocument : public MatchableDocument {
public: public:
WorkingSetMatchableDocument(WorkingSetMember* wsm) : _wsm(wsm) { } WorkingSetMatchableDocument(WorkingSetMember* wsm) : _wsm(wsm) { }
virtual ~WorkingSetMatchableDocument() { } virtual ~WorkingSetMatchableDocument() { }
// This is only called by a $where query. The query system must be smart enough to realize // This is only called by a $where query. The query system must be smart enough to realize
// that it should do a fetch beforehand. // that it should do a fetch beforehand.
BSONObj toBSON() const { BSONObj toBSON() const {
verify(_wsm->hasObj()); invariant(_wsm->hasObj());
return _wsm->obj; return _wsm->obj;
} }
virtual ElementIterator* allocateIterator(const ElementPath* path) const { virtual ElementIterator* allocateIterator(const ElementPath* path) const {
// BSONElementIterator does some interesting things with arrays that I don't think // BSONElementIterator does some interesting things with arrays that I don't think
// SimpleArrayElementIterator does. // SimpleArrayElementIterator does.
if (_wsm->hasObj()) { if (_wsm->hasObj()) {
return new BSONElementIterator(path, _wsm->obj); return new BSONElementIterator(path, _wsm->obj);
} }
// NOTE: This (kind of) duplicates code in WorkingSetMember::ge tFieldDotted. // NOTE: This (kind of) duplicates code in WorkingSetMember::ge tFieldDotted.
// Keep in sync w/that. // Keep in sync w/that.
// Find the first field in the index key data described by path and return an iterator // Find the first field in the index key data described by path and return an iterator
// over it. // over it.
for (size_t i = 0; i < _wsm->keyData.size(); ++i) { for (size_t i = 0; i < _wsm->keyData.size(); ++i) {
BSONObjIterator keyPatternIt(_wsm->keyData[i].indexKeyPatte rn); BSONObjIterator keyPatternIt(_wsm->keyData[i].indexKeyPatte rn);
BSONObjIterator keyDataIt(_wsm->keyData[i].keyData); BSONObjIterator keyDataIt(_wsm->keyData[i].keyData);
while (keyPatternIt.more()) { while (keyPatternIt.more()) {
BSONElement keyPatternElt = keyPatternIt.next(); BSONElement keyPatternElt = keyPatternIt.next();
verify(keyDataIt.more()); invariant(keyDataIt.more());
BSONElement keyDataElt = keyDataIt.next(); BSONElement keyDataElt = keyDataIt.next();
if (path->fieldRef().equalsDottedField(keyPatternElt.fi eldName())) { if (path->fieldRef().equalsDottedField(keyPatternElt.fi eldName())) {
if (Array == keyDataElt.type()) { if (Array == keyDataElt.type()) {
return new SimpleArrayElementIterator(keyDataEl t, true); return new SimpleArrayElementIterator(keyDataEl t, true);
} }
else { else {
return new SingleElementElementIterator(keyData Elt); return new SingleElementElementIterator(keyData Elt);
} }
} }
skipping to change at line 99 skipping to change at line 99
} }
virtual void releaseIterator( ElementIterator* iterator ) const { virtual void releaseIterator( ElementIterator* iterator ) const {
delete iterator; delete iterator;
} }
private: private:
WorkingSetMember* _wsm; WorkingSetMember* _wsm;
}; };
class IndexKeyMatchableDocument : public MatchableDocument {
public:
IndexKeyMatchableDocument(const BSONObj& key,
const BSONObj& keyPattern)
: _keyPattern(keyPattern), _key(key) { }
BSONObj toBSON() const {
// Planning shouldn't let this happen.
invariant(0);
}
virtual ElementIterator* allocateIterator(const ElementPath* path)
const {
BSONObjIterator keyPatternIt(_keyPattern);
BSONObjIterator keyDataIt(_key);
while (keyPatternIt.more()) {
BSONElement keyPatternElt = keyPatternIt.next();
invariant(keyDataIt.more());
BSONElement keyDataElt = keyDataIt.next();
if (path->fieldRef().equalsDottedField(keyPatternElt.fieldN
ame())) {
if (Array == keyDataElt.type()) {
return new SimpleArrayElementIterator(keyDataElt, t
rue);
}
else {
return new SingleElementElementIterator(keyDataElt)
;
}
}
}
// Planning should not let this happen.
massert(17409,
"trying to match on unknown field: " + path->fieldRef()
.dottedField().toString(),
0);
return new SingleElementElementIterator(BSONElement());
}
virtual void releaseIterator(ElementIterator* iterator) const {
delete iterator;
}
private:
BSONObj _keyPattern;
BSONObj _key;
};
/** /**
* Used by every stage with a filter. * Used by every stage with a filter.
*/ */
class Filter { class Filter {
public: public:
/** /**
* Returns true if filter is NULL or if 'wsm' satisfies the filter. * Returns true if filter is NULL or if 'wsm' satisfies the filter.
* Returns false if 'wsm' does not satisfy the filter. * Returns false if 'wsm' does not satisfy the filter.
*/ */
static bool passes(WorkingSetMember* wsm, const MatchExpression* fi lter) { static bool passes(WorkingSetMember* wsm, const MatchExpression* fi lter) {
if (NULL == filter) { return true; } if (NULL == filter) { return true; }
WorkingSetMatchableDocument doc(wsm); WorkingSetMatchableDocument doc(wsm);
return filter->matches(&doc, NULL); return filter->matches(&doc, NULL);
} }
static bool passes(const BSONObj& keyData,
const BSONObj& keyPattern,
const MatchExpression* filter) {
if (NULL == filter) { return true; }
IndexKeyMatchableDocument doc(keyData, keyPattern);
return filter->matches(&doc, NULL);
}
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
2 lines changed or deleted 63 lines changed or added


 fts_access_method.h   fts_access_method.h 
skipping to change at line 46 skipping to change at line 46
namespace mongo { namespace mongo {
class FTSAccessMethod : public BtreeBasedAccessMethod { class FTSAccessMethod : public BtreeBasedAccessMethod {
public: public:
FTSAccessMethod(IndexCatalogEntry* btreeState ); FTSAccessMethod(IndexCatalogEntry* btreeState );
virtual ~FTSAccessMethod() { } virtual ~FTSAccessMethod() { }
const fts::FTSSpec& getSpec() const { return _ftsSpec; } const fts::FTSSpec& getSpec() const { return _ftsSpec; }
virtual shared_ptr<KeyGenerator> getKeyGenerator() const { return _ keyGenerator; }
private: private:
// Implemented: // Implemented:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
fts::FTSSpec _ftsSpec; fts::FTSSpec _ftsSpec;
shared_ptr<KeyGenerator> _keyGenerator;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
0 lines changed or deleted 2 lines changed or added


 fts_index_format.h   fts_index_format.h 
skipping to change at line 51 skipping to change at line 51
static void getKeys( const FTSSpec& spec, static void getKeys( const FTSSpec& spec,
const BSONObj& document, const BSONObj& document,
BSONObjSet* keys ); BSONObjSet* keys );
/* /*
* Helper method to get return entry from the FTSIndex as a BSO NObj * Helper method to get return entry from the FTSIndex as a BSO NObj
* @param weight, the weight of the term in the entry * @param weight, the weight of the term in the entry
* @param term, the string term in the entry * @param term, the string term in the entry
* @param indexPrefix, the fields that go in the index first * @param indexPrefix, the fields that go in the index first
* @param textIndexVersion, index version. affects key format.
*/ */
static BSONObj getIndexKey( double weight, static BSONObj getIndexKey( double weight,
const string& term, const string& term,
const BSONObj& indexPrefix ); const BSONObj& indexPrefix,
TextIndexVersion textIndexVersion )
;
private: private:
/* /*
* Helper method to get return entry from the FTSIndex as a BSO NObj * Helper method to get return entry from the FTSIndex as a BSO NObj
* @param b, reference to the BSONOBjBuilder * @param b, reference to the BSONOBjBuilder
* @param weight, the weight of the term in the entry * @param weight, the weight of the term in the entry
* @param term, the string term in the entry * @param term, the string term in the entry
* @param textIndexVersion, index version. affects key format.
*/ */
static void _appendIndexKey( BSONObjBuilder& b, double weight, static void _appendIndexKey( BSONObjBuilder& b, double weight,
const string& term ); const string& term,
TextIndexVersion textIndexVersion
);
}; };
} }
} }
 End of changes. 4 change blocks. 
3 lines changed or deleted 9 lines changed or added


 fts_matcher.h   fts_matcher.h 
skipping to change at line 65 skipping to change at line 65
bool phraseMatch( const string& phrase, const BSONObj& obj ) co nst; bool phraseMatch( const string& phrase, const BSONObj& obj ) co nst;
bool matchesNonTerm( const BSONObj& obj ) const { bool matchesNonTerm( const BSONObj& obj ) const {
return !hasNegativeTerm( obj ) && phrasesMatch( obj ); return !hasNegativeTerm( obj ) && phrasesMatch( obj );
} }
private: private:
/** /**
* @return true if raw has a negated term * @return true if raw has a negated term
*/ */
bool _hasNegativeTerm_string( const string& raw ) const; bool _hasNegativeTerm_string( const FTSLanguage* language, cons t string& raw ) const;
/** /**
* @return true if raw has a phrase * @return true if raw has a phrase
*/ */
bool _phraseMatches( const string& phrase, const string& raw ) const; bool _phraseMatches( const string& phrase, const string& raw ) const;
FTSQuery _query; FTSQuery _query;
FTSSpec _spec; FTSSpec _spec;
Stemmer _stemmer;
}; };
} }
} }
 End of changes. 2 change blocks. 
2 lines changed or deleted 1 lines changed or added


 fts_query.h   fts_query.h 
skipping to change at line 39 skipping to change at line 39
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/fts/stemmer.h" #include "mongo/db/fts/stemmer.h"
#include "mongo/db/fts/stop_words.h" #include "mongo/db/fts/stop_words.h"
#include "mongo/platform/unordered_set.h"
#include "mongo/util/stringutils.h" #include "mongo/util/stringutils.h"
namespace mongo { namespace mongo {
namespace fts { namespace fts {
using std::string; using std::string;
using std::vector; using std::vector;
using std::set; using std::set;
class FTSQuery { class FTSQuery {
public: public:
Status parse(const string& query, const StringData& language); Status parse(const string& query, const StringData& language);
const vector<string>& getTerms() const { return _terms; } const vector<string>& getTerms() const { return _terms; }
const unordered_set<string>& getNegatedTerms() const { return _ negatedTerms; } const set<string>& getNegatedTerms() const { return _negatedTer ms; }
const vector<string>& getPhr() const { return _phrases; } const vector<string>& getPhr() const { return _phrases; }
const vector<string>& getNegatedPhr() const { return _negatedPh rases; } const vector<string>& getNegatedPhr() const { return _negatedPh rases; }
/** /**
* @return true if any negations or phrase + or - * @return true if any negations or phrase + or -
*/ */
bool hasNonTermPieces() const { bool hasNonTermPieces() const {
return return
_negatedTerms.size() > 0 || _negatedTerms.size() > 0 ||
skipping to change at line 78 skipping to change at line 77
_negatedPhrases.size() > 0; _negatedPhrases.size() > 0;
} }
string getSearch() const { return _search; } string getSearch() const { return _search; }
const FTSLanguage& getLanguage() const { return *_language; } const FTSLanguage& getLanguage() const { return *_language; }
string toString() const; string toString() const;
string debugString() const; string debugString() const;
BSONObj toBSON() const;
protected: protected:
string _search; string _search;
const FTSLanguage* _language; const FTSLanguage* _language;
vector<string> _terms; vector<string> _terms;
unordered_set<string> _negatedTerms; set<string> _negatedTerms;
vector<string> _phrases; vector<string> _phrases;
vector<string> _negatedPhrases; vector<string> _negatedPhrases;
private: private:
void _addTerm( const StopWords* sw, Stemmer& stemmer, const str ing& term, bool negated ); void _addTerm( const StopWords* sw, Stemmer& stemmer, const str ing& term, bool negated );
}; };
} }
} }
 End of changes. 4 change blocks. 
3 lines changed or deleted 4 lines changed or added


 fts_spec.h   fts_spec.h 
skipping to change at line 108 skipping to change at line 108
void scoreDocument( const BSONObj& obj, TermFrequencyMap* term_ freqs ) const; void scoreDocument( const BSONObj& obj, TermFrequencyMap* term_ freqs ) const;
/** /**
* given a query, pulls out the pieces (in order) that go in th e index first * given a query, pulls out the pieces (in order) that go in th e index first
*/ */
Status getIndexPrefix( const BSONObj& filter, BSONObj* out ) co nst; Status getIndexPrefix( const BSONObj& filter, BSONObj* out ) co nst;
const Weights& weights() const { return _weights; } const Weights& weights() const { return _weights; }
static BSONObj fixSpec( const BSONObj& spec ); static BSONObj fixSpec( const BSONObj& spec );
/**
* Returns text index version.
*/
TextIndexVersion getTextIndexVersion() const { return _textInde
xVersion; }
private: private:
// //
// Helper methods. Invoked for TEXT_INDEX_VERSION_2 spec objec ts only. // Helper methods. Invoked for TEXT_INDEX_VERSION_2 spec objec ts only.
// //
/** /**
* Calculate the term scores for 'raw' and update 'term_freqs' with the result. Parses * Calculate the term scores for 'raw' and update 'term_freqs' with the result. Parses
* 'raw' using 'tools', and weights term scores based on 'weigh t'. * 'raw' using 'tools', and weights term scores based on 'weigh t'.
*/ */
void _scoreStringV2( const Tools& tools, void _scoreStringV2( const Tools& tools,
 End of changes. 1 change blocks. 
0 lines changed or deleted 6 lines changed or added


 geoquery.h   geoquery.h 
skipping to change at line 43 skipping to change at line 43
#include "mongo/util/mongoutils/str.h" #include "mongo/util/mongoutils/str.h"
#include "third_party/s2/s2regionunion.h" #include "third_party/s2/s2regionunion.h"
namespace mongo { namespace mongo {
class GeometryContainer { class GeometryContainer {
public: public:
bool parseFrom(const BSONObj &obj); bool parseFrom(const BSONObj &obj);
/** /**
* Is the geometry any of {Point, Line, Polygon}?
*/
bool isSimpleContainer() const;
/**
* To check intersection, we iterate over the otherContainer's geom etries, checking each * To check intersection, we iterate over the otherContainer's geom etries, checking each
* geometry to see if we intersect it. If we intersect one geometr y, we intersect the * geometry to see if we intersect it. If we intersect one geometr y, we intersect the
* entire other container. * entire other container.
*/ */
bool intersects(const GeometryContainer& otherContainer) const; bool intersects(const GeometryContainer& otherContainer) const;
/** /**
* To check containment, we iterate over the otherContainer's geome tries. If we don't * To check containment, we iterate over the otherContainer's geome tries. If we don't
* contain any sub-geometry of the otherContainer, the otherContain er is not contained * contain any sub-geometry of the otherContainer, the otherContain er is not contained
* within us. If each sub-geometry of the otherContainer is contai ned within us, we contain * within us. If each sub-geometry of the otherContainer is contai ned within us, we contain
skipping to change at line 116 skipping to change at line 121
: minDistance(0), : minDistance(0),
maxDistance(std::numeric_limits<double>::max()), maxDistance(std::numeric_limits<double>::max()),
isNearSphere(false) { } isNearSphere(false) { }
NearQuery(const string& f) NearQuery(const string& f)
: field(f), : field(f),
minDistance(0), minDistance(0),
maxDistance(std::numeric_limits<double>::max()), maxDistance(std::numeric_limits<double>::max()),
isNearSphere(false) { } isNearSphere(false) { }
bool parseFrom(const BSONObj &obj); Status parseFrom(const BSONObj &obj);
// The name of the field that contains the geometry. // The name of the field that contains the geometry.
string field; string field;
// The starting point of the near search. // The starting point of the near search.
PointWithCRS centroid; PointWithCRS centroid;
// Min and max distance from centroid that we're willing to search. // Min and max distance from centroid that we're willing to search.
// Distance is in whatever units the centroid's CRS implies. // Distance is in whatever units the centroid's CRS implies.
// If centroid.crs == FLAT these are radians. // If centroid.crs == FLAT these are radians.
skipping to change at line 144 skipping to change at line 149
string toString() const { string toString() const {
stringstream ss; stringstream ss;
ss << " field=" << field; ss << " field=" << field;
ss << " maxdist=" << maxDistance; ss << " maxdist=" << maxDistance;
ss << " isNearSphere=" << isNearSphere; ss << " isNearSphere=" << isNearSphere;
return ss.str(); return ss.str();
} }
private: private:
bool parseLegacyQuery(const BSONObj &obj); bool parseLegacyQuery(const BSONObj &obj);
bool parseNewQuery(const BSONObj &obj); Status parseNewQuery(const BSONObj &obj);
}; };
// This represents either a $within or a $geoIntersects. // This represents either a $within or a $geoIntersects.
class GeoQuery { class GeoQuery {
public: public:
GeoQuery() : field(""), predicate(INVALID) {} GeoQuery() : field(""), predicate(INVALID) {}
GeoQuery(const string& f) : field(f), predicate(INVALID) {} GeoQuery(const string& f) : field(f), predicate(INVALID) {}
enum Predicate { enum Predicate {
WITHIN, WITHIN,
 End of changes. 3 change blocks. 
2 lines changed or deleted 7 lines changed or added


 get_runner.h   get_runner.h 
skipping to change at line 30 skipping to change at line 30
* must comply with the GNU Affero General Public License in all respect s for * must comply with the GNU Affero General Public License in all respect s for
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_settings.h" #include "mongo/db/query/query_settings.h"
#include "mongo/db/query/runner.h" #include "mongo/db/query/runner.h"
namespace mongo { namespace mongo {
class Collection; class Collection;
/** /**
* Filter indexes retrieved from index catalog by * Filter indexes retrieved from index catalog by
* allowed indices in query settings. * allowed indices in query settings.
* Used by getRunner(). * Used by getRunner().
* This function is public to facilitate testing. * This function is public to facilitate testing.
*/ */
void filterAllowedIndexEntries(const AllowedIndices& allowedIndices, void filterAllowedIndexEntries(const AllowedIndices& allowedIndices,
std::vector<IndexEntry>* indexEntries); std::vector<IndexEntry>* indexEntries);
/** /**
* Fill out the provided 'plannerParams' for the 'canonicalQuery' opera
ting on the collection
* 'collection'. Exposed for testing.
*/
void fillOutPlannerParams(Collection* collection,
CanonicalQuery* canonicalQuery,
QueryPlannerParams* plannerParams);
/**
* Get a runner for a query. Takes ownership of rawCanonicalQuery. * Get a runner for a query. Takes ownership of rawCanonicalQuery.
* *
* If the query is valid and a runner could be created, returns Status: :OK() * If the query is valid and a runner could be created, returns Status: :OK()
* and populates *out with the Runner. * and populates *out with the Runner.
* *
* If the query cannot be executed, returns a Status indicating why. D eletes * If the query cannot be executed, returns a Status indicating why. D eletes
* rawCanonicalQuery. * rawCanonicalQuery.
*/ */
Status getRunner(CanonicalQuery* rawCanonicalQuery, Status getRunner(CanonicalQuery* rawCanonicalQuery,
Runner** out, Runner** out,
skipping to change at line 82 skipping to change at line 91
* Gets a runner for a query described as an unparsed BSON object over the named and optionally * Gets a runner for a query described as an unparsed BSON object over the named and optionally
* supplied collection. * supplied collection.
* *
* If necessary, parses a CanonicalQuery out of 'unparsedQuery'. * If necessary, parses a CanonicalQuery out of 'unparsedQuery'.
* *
* Returns Status::OK() on success, in which case '*outRunner' points t o a runner now owned by * Returns Status::OK() on success, in which case '*outRunner' points t o a runner now owned by
* the caller, and '*outCanonicalQuery' is either NULL or points to a c anonical query owned by * the caller, and '*outCanonicalQuery' is either NULL or points to a c anonical query owned by
* the returned runner. On failure, returns other status values, and ' *outRunner' and * the returned runner. On failure, returns other status values, and ' *outRunner' and
* '*outCanonicalQuery' have unspecified values. * '*outCanonicalQuery' have unspecified values.
*/ */
Status getRunner(Collection* collection, const std::string& ns, const B Status getRunner(Collection* collection,
SONObj& unparsedQuery, const std::string& ns,
Runner** outRunner, CanonicalQuery** outCanonicalQuery const BSONObj& unparsedQuery,
, Runner** outRunner,
CanonicalQuery** outCanonicalQuery,
size_t plannerOptions = 0); size_t plannerOptions = 0);
/* /*
* Get a runner for a query executing as part of a distinct command. * Get a runner for a query executing as part of a distinct command.
* *
* Distinct is unique in that it doesn't care about getting all the res ults; it just wants all * Distinct is unique in that it doesn't care about getting all the res ults; it just wants all
* possible values of a certain field. As such, we can skip lots of da ta in certain cases (see * possible values of a certain field. As such, we can skip lots of da ta in certain cases (see
* body of method for detail). * body of method for detail).
*/ */
Status getRunnerDistinct(Collection* collection, Status getRunnerDistinct(Collection* collection,
skipping to change at line 110 skipping to change at line 122
* Count doesn't care about actually examining its results; it just wan ts to walk through them. * Count doesn't care about actually examining its results; it just wan ts to walk through them.
* As such, with certain covered queries, we can skip the overhead of f etching etc. when * As such, with certain covered queries, we can skip the overhead of f etching etc. when
* executing a count. * executing a count.
*/ */
Status getRunnerCount(Collection* collection, Status getRunnerCount(Collection* collection,
const BSONObj& query, const BSONObj& query,
const BSONObj& hintObj, const BSONObj& hintObj,
Runner** out); Runner** out);
/** /**
* Get a runner for a query. Ignores the cache and always plans the fu
ll query.
*/
Status getRunnerAlwaysPlan(Collection* collection,
CanonicalQuery* rawCanonicalQuery,
const QueryPlannerParams& plannerParams,
Runner** out);
/**
* RAII approach to ensuring that runners are deregistered in newRunQue ry. * RAII approach to ensuring that runners are deregistered in newRunQue ry.
* *
* While retrieving the first batch of results, newRunQuery manually re gisters the runner with * While retrieving the first batch of results, newRunQuery manually re gisters the runner with
* ClientCursor. Certain query execution paths, namely $where, can thr ow an exception. If we * ClientCursor. Certain query execution paths, namely $where, can thr ow an exception. If we
* fail to deregister the runner, we will call invalidate/kill on the * fail to deregister the runner, we will call invalidate/kill on the
* still-registered-yet-deleted runner. * still-registered-yet-deleted runner.
* *
* For any subsequent calls to getMore, the runner is already registere d with ClientCursor * For any subsequent calls to getMore, the runner is already registere d with ClientCursor
* by virtue of being cached, so this exception-proofing is not require d. * by virtue of being cached, so this exception-proofing is not require d.
*/ */
 End of changes. 4 change blocks. 
4 lines changed or deleted 24 lines changed or added


 goodies.h   goodies.h 
skipping to change at line 139 skipping to change at line 139
* this is a thread safe string * this is a thread safe string
* you will never get a bad pointer, though data may be mungedd * you will never get a bad pointer, though data may be mungedd
*/ */
class ThreadSafeString : boost::noncopyable { class ThreadSafeString : boost::noncopyable {
public: public:
ThreadSafeString( size_t size=256 ) ThreadSafeString( size_t size=256 )
: _size( size ) , _buf( new char[size] ) { : _size( size ) , _buf( new char[size] ) {
memset( _buf , 0 , _size ); memset( _buf , 0 , _size );
} }
ThreadSafeString( const ThreadSafeString& other )
: _size( other._size ) , _buf( new char[_size] ) {
strncpy( _buf , other._buf , _size );
}
~ThreadSafeString() { ~ThreadSafeString() {
delete[] _buf; delete[] _buf;
_buf = 0;
} }
string toString() const { string toString() const {
string s = _buf; string s = _buf;
return s; return s;
} }
ThreadSafeString& operator=( const char * str ) { ThreadSafeString& operator=( const char * str ) {
size_t s = strlen(str); size_t s = strlen(str);
if ( s >= _size - 2 ) if ( s >= _size - 2 )
s = _size - 2; s = _size - 2;
strncpy( _buf , str , s ); strncpy( _buf , str , s );
_buf[s] = 0; _buf[s] = 0;
return *this; return *this;
} }
bool operator==( const ThreadSafeString& other ) const {
return strcmp( _buf , other._buf ) == 0;
}
bool operator==( const char * str ) const {
return strcmp( _buf , str ) == 0;
}
bool operator!=( const char * str ) const {
return strcmp( _buf , str ) != 0;
}
bool empty() const { bool empty() const {
return _buf == 0 || _buf[0] == 0; return _buf == 0 || _buf[0] == 0;
} }
private: private:
size_t _size; const size_t _size;
char * _buf; char *const _buf;
}; };
std::ostream& operator<<(std::ostream &s, const ThreadSafeString &o); std::ostream& operator<<(std::ostream &s, const ThreadSafeString &o);
/** A generic pointer type for function arguments. /** A generic pointer type for function arguments.
* It will convert from any pointer type except auto_ptr. * It will convert from any pointer type except auto_ptr.
* Semantics are the same as passing the pointer returned from get() * Semantics are the same as passing the pointer returned from get()
* const ptr<T> => T * const * const ptr<T> => T * const
* ptr<const T> => T const * or const T* * ptr<const T> => T const * or const T*
*/ */
 End of changes. 4 change blocks. 
20 lines changed or deleted 2 lines changed or added


 hash.h   hash.h 
skipping to change at line 38 skipping to change at line 38
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include <iostream> #include <iostream>
namespace mongo { namespace mongo {
class GeoHash; class GeoHash;
class Box;
struct Point; struct Point;
std::ostream& operator<<(std::ostream &s, const GeoHash &h); std::ostream& operator<<(std::ostream &s, const GeoHash &h);
/* This class maps an unsigned x,y coordinate pair to a hash value. /* This class maps an unsigned x,y coordinate pair to a hash value.
* To hash values more interesting than unsigned, use the GeoHashConver ter, * To hash values more interesting than unsigned, use the GeoHashConver ter,
* which maps doubles to unsigned values. * which maps doubles to unsigned values.
*/ */
class GeoHash { class GeoHash {
public: public:
GeoHash(); GeoHash();
skipping to change at line 156 skipping to change at line 157
int bits; int bits;
// X/Y values must be [min, max] // X/Y values must be [min, max]
double min; double min;
double max; double max;
// Values are scaled by this when converted to/from hash scale. // Values are scaled by this when converted to/from hash scale.
double scaling; double scaling;
}; };
GeoHashConverter(const Parameters &params); GeoHashConverter(const Parameters &params);
/**
* Return converter parameterss which can be used to
* construct an copy of this converter.
*/
const Parameters& getParams() const { return _params; }
int getBits() const { return _params.bits; } int getBits() const { return _params.bits; }
double getError() const { return _error; } double getError() const { return _error; }
double getErrorSphere() const { return _errorSphere ;} double getErrorSphere() const { return _errorSphere ;}
double getMin() const { return _params.min; } double getMin() const { return _params.min; }
double getMax() const { return _params.max; } double getMax() const { return _params.max; }
double distanceBetweenHashes(const GeoHash& a, const GeoHash& b) co nst; double distanceBetweenHashes(const GeoHash& a, const GeoHash& b) co nst;
/** /**
* Hashing functions. Convert the following types to a GeoHash: * Hashing functions. Convert the following types to a GeoHash:
skipping to change at line 183 skipping to change at line 190
GeoHash hash(const BSONObj& o) const; GeoHash hash(const BSONObj& o) const;
// src is printed out as debugging information. I'm not sure if it 's actually // src is printed out as debugging information. I'm not sure if it 's actually
// somehow the 'source' of o? Anyway, this is nasty, very nasty. XXX // somehow the 'source' of o? Anyway, this is nasty, very nasty. XXX
GeoHash hash(const BSONObj& o, const BSONObj* src) const; GeoHash hash(const BSONObj& o, const BSONObj* src) const;
GeoHash hash(double x, double y) const; GeoHash hash(double x, double y) const;
/** Unhashing functions. /** Unhashing functions.
* Convert from a hash to the following types: * Convert from a hash to the following types:
* double, double * double, double
* Point * Point
* Box
* BSONObj * BSONObj
*/ */
// XXX: these should have consistent naming // XXX: these should have consistent naming
Point unhashToPoint(const GeoHash &h) const; Point unhashToPoint(const GeoHash &h) const;
Point unhashToPoint(const BSONElement &e) const; Point unhashToPoint(const BSONElement &e) const;
BSONObj unhashToBSONObj(const GeoHash& h) const; BSONObj unhashToBSONObj(const GeoHash& h) const;
void unhash(const GeoHash &h, double *x, double *y) const; void unhash(const GeoHash &h, double *x, double *y) const;
/**
* Generates bounding box from geo hash using converter.
* Used in GeoBrowse::fillStack and db/query/explain_plan.cpp
* to generate index bounds from
* geo hashes in plan stats.
*/
Box unhashToBox(const GeoHash &h) const;
double sizeOfDiag(const GeoHash& a) const; double sizeOfDiag(const GeoHash& a) const;
// XXX: understand/clean this. // XXX: understand/clean this.
double sizeEdge(const GeoHash& a) const; double sizeEdge(const GeoHash& a) const;
private: private:
// Convert from an unsigned in [0, (max-min)*scaling] to [min, max] // Convert from an unsigned in [0, (max-min)*scaling] to [min, max]
double convertFromHashScale(unsigned in) const; double convertFromHashScale(unsigned in) const;
// Convert from a double that is [min, max] to an unsigned in [0, ( max-min)*scaling] // Convert from a double that is [min, max] to an unsigned in [0, ( max-min)*scaling]
unsigned convertToHashScale(double in) const; unsigned convertToHashScale(double in) const;
 End of changes. 4 change blocks. 
0 lines changed or deleted 16 lines changed or added


 hash_access_method.h   hash_access_method.h 
skipping to change at line 56 skipping to change at line 56
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
HashAccessMethod(IndexCatalogEntry* btreeState); HashAccessMethod(IndexCatalogEntry* btreeState);
virtual ~HashAccessMethod() { } virtual ~HashAccessMethod() { }
// This is a NO-OP. // This is a NO-OP.
virtual Status setOptions(const CursorOptions& options) { virtual Status setOptions(const CursorOptions& options) {
return Status::OK(); return Status::OK();
} }
/** virtual shared_ptr<KeyGenerator> getKeyGenerator() const { return _
* Hashing function used by both this class and the cursors we crea keyGenerator; }
te.
* Exposed for testing and so mongo/db/index_legacy.cpp can use it.
*/
static long long int makeSingleKey(const BSONElement& e, HashSeed s
eed, int v);
/**
* Exposed externally for testing purposes.
*/
static void getKeysImpl(const BSONObj& obj, const string& hashedFie
ld, HashSeed seed,
int hashVersion, bool isSparse, BSONObjSet*
keys);
private: private:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// Only one of our fields is hashed. This is the field name for it . // Only one of our fields is hashed. This is the field name for it .
string _hashedField; string _hashedField;
// _seed defaults to zero. // _seed defaults to zero.
HashSeed _seed; HashSeed _seed;
// _hashVersion defaults to zero. // _hashVersion defaults to zero.
int _hashVersion; int _hashVersion;
BSONObj _missingKey; BSONObj _missingKey;
shared_ptr<KeyGenerator> _keyGenerator;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
16 lines changed or deleted 4 lines changed or added


 haystack_access_method.h   haystack_access_method.h 
skipping to change at line 61 skipping to change at line 61
* bucketSize specifies the dimension of the square bucket for the da ta in pos. * bucketSize specifies the dimension of the square bucket for the da ta in pos.
* ALL fields are mandatory. * ALL fields are mandatory.
*/ */
class HaystackAccessMethod : public BtreeBasedAccessMethod { class HaystackAccessMethod : public BtreeBasedAccessMethod {
public: public:
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
HaystackAccessMethod(IndexCatalogEntry* btreeState); HaystackAccessMethod(IndexCatalogEntry* btreeState);
virtual ~HaystackAccessMethod() { } virtual ~HaystackAccessMethod() { }
virtual shared_ptr<KeyGenerator> getKeyGenerator() const { return _ keyGenerator; }
protected: protected:
friend class GeoHaystackSearchCommand; friend class GeoHaystackSearchCommand;
void searchCommand(const BSONObj& nearObj, double maxDistance, cons t BSONObj& search, void searchCommand(const BSONObj& nearObj, double maxDistance, cons t BSONObj& search,
BSONObjBuilder* result, unsigned limit); BSONObjBuilder* result, unsigned limit);
private: private:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// Helper methods called by getKeys:
int hash(const BSONElement& e) const;
string makeString(int hashedX, int hashedY) const;
void addKey(const string& root, const BSONElement& e, BSONObjSet* k
eys) const;
string _geoField; string _geoField;
vector<string> _otherFields; vector<string> _otherFields;
double _bucketSize; double _bucketSize;
shared_ptr<KeyGenerator> _keyGenerator;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
6 lines changed or deleted 3 lines changed or added


 idhack_runner.h   idhack_runner.h 
skipping to change at line 81 skipping to change at line 81
virtual const std::string& ns(); virtual const std::string& ns();
virtual void kill(); virtual void kill();
virtual const Collection* collection() { return _collection; } virtual const Collection* collection() { return _collection; }
virtual Status getInfo(TypeExplain** explain, virtual Status getInfo(TypeExplain** explain,
PlanInfo** planInfo) const; PlanInfo** planInfo) const;
/**
* ID Hack has a very strict criteria for the queries it supports.
*/
static bool supportsQuery(const CanonicalQuery& query);
private: private:
/**
* ID Hack will work with only one projection: {_id: 1}.
*/
static bool canUseProjection(const CanonicalQuery& query);
// Not owned here. // Not owned here.
const Collection* _collection; const Collection* _collection;
// The value to match against the _id field. // The value to match against the _id field.
BSONObj _key; BSONObj _key;
// TODO: When we combine the canonicalize and getRunner steps into one we can get rid of // TODO: When we combine the canonicalize and getRunner steps into one we can get rid of
// this. // this.
boost::scoped_ptr<CanonicalQuery> _query; boost::scoped_ptr<CanonicalQuery> _query;
 End of changes. 2 change blocks. 
0 lines changed or deleted 10 lines changed or added


 index_access_method.h   index_access_method.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/index/index_cursor.h" #include "mongo/db/index/index_cursor.h"
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/key_generator.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class UpdateTicket; class UpdateTicket;
class InsertTicket;
struct InsertDeleteOptions; struct InsertDeleteOptions;
struct PregeneratedKeysOnIndex;
/** /**
* An IndexAccessMethod is the interface through which all the mutation , lookup, and * An IndexAccessMethod is the interface through which all the mutation , lookup, and
* traversal of index entries is done. The class is designed so that th e underlying index * traversal of index entries is done. The class is designed so that th e underlying index
* data structure is opaque to the caller. * data structure is opaque to the caller.
* *
* IndexAccessMethods for existing indices are obtained through the sys tem catalog. * IndexAccessMethods for existing indices are obtained through the sys tem catalog.
* *
* We assume the caller has whatever locks required. This interface is not thread safe. * We assume the caller has whatever locks required. This interface is not thread safe.
* *
skipping to change at line 66 skipping to change at line 70
// Lookup, traversal, and mutation support // Lookup, traversal, and mutation support
// //
/** /**
* Internally generate the keys {k1, ..., kn} for 'obj'. For each key k, insert (k -> * Internally generate the keys {k1, ..., kn} for 'obj'. For each key k, insert (k ->
* 'loc') into the index. 'obj' is the object at the location 'loc '. If not NULL, * 'loc') into the index. 'obj' is the object at the location 'loc '. If not NULL,
* 'numInserted' will be set to the number of keys added to the ind ex for the document. If * 'numInserted' will be set to the number of keys added to the ind ex for the document. If
* there is more than one key for 'obj', either all keys will be in serted or none will. * there is more than one key for 'obj', either all keys will be in serted or none will.
* *
* The behavior of the insertion can be specified through 'options' . * The behavior of the insertion can be specified through 'options' .
*
* prepared: if you generated keys before, you can pass the generat
or you used
* and the keys you got. If the generator matches, the keys are us
ed. Otherwise we
* generate our own keys and you do not have to do anything.
*/ */
virtual Status insert(const BSONObj& obj, virtual Status insert(const BSONObj& obj,
const DiskLoc& loc, const DiskLoc& loc,
const InsertDeleteOptions& options, const InsertDeleteOptions& options,
int64_t* numInserted) = 0; int64_t* numInserted,
const PregeneratedKeysOnIndex* prepared = NUL
L ) = 0;
/** /**
* Analogous to above, but remove the records instead of inserting them. If not NULL, * Analogous to above, but remove the records instead of inserting them. If not NULL,
* numDeleted will be set to the number of keys removed from the in dex for the document. * numDeleted will be set to the number of keys removed from the in dex for the document.
*/ */
virtual Status remove(const BSONObj& obj, virtual Status remove(const BSONObj& obj,
const DiskLoc& loc, const DiskLoc& loc,
const InsertDeleteOptions& options, const InsertDeleteOptions& options,
int64_t* numDeleted) = 0; int64_t* numDeleted) = 0;
skipping to change at line 131 skipping to change at line 140
/** /**
* Try to page-in the pages that contain the keys generated from 'o bj'. * Try to page-in the pages that contain the keys generated from 'o bj'.
* This can be used to speed up future accesses to an index by tryi ng to ensure the * This can be used to speed up future accesses to an index by tryi ng to ensure the
* appropriate pages are not swapped out. * appropriate pages are not swapped out.
* See prefetch.cpp. * See prefetch.cpp.
*/ */
virtual Status touch(const BSONObj& obj) = 0; virtual Status touch(const BSONObj& obj) = 0;
/** /**
* Try to page-in the pages that contain the keys.
* This can be used to speed up future accesses to an index by tryi
ng to ensure the
* appropriate pages are not swapped out.
* See prefetch.cpp.
*/
virtual Status touch(const BSONObjSet& keys) = 0;
/**
* Walk the entire index, checking the internal structure for consi stency. * Walk the entire index, checking the internal structure for consi stency.
* Set numKeys to the number of keys in the index. * Set numKeys to the number of keys in the index.
* *
* Return OK if the index is valid. * Return OK if the index is valid.
* *
* Currently wasserts that the index is invalid. This could/should be changed in * Currently wasserts that the index is invalid. This could/should be changed in
* the future to return a Status. * the future to return a Status.
*/ */
virtual Status validate(int64_t* numKeys) = 0; virtual Status validate(int64_t* numKeys) = 0;
skipping to change at line 171 skipping to change at line 188
* After this method is called, the bulk index access method is inv alid * After this method is called, the bulk index access method is inv alid
* and should not be used. * and should not be used.
* @param bulk - something created from initiateBulk * @param bulk - something created from initiateBulk
* @param mayInterrupt - is this commit interruptable (will cancel) * @param mayInterrupt - is this commit interruptable (will cancel)
* @param dups - if NULL, error out on dups if not allowed * @param dups - if NULL, error out on dups if not allowed
* if not NULL, put the bad DiskLocs there * if not NULL, put the bad DiskLocs there
*/ */
virtual Status commitBulk( IndexAccessMethod* bulk, virtual Status commitBulk( IndexAccessMethod* bulk,
bool mayInterrupt, bool mayInterrupt,
std::set<DiskLoc>* dups ) = 0; std::set<DiskLoc>* dups ) = 0;
/**
* this returns a shared_ptr so that someone can get all the genera
tors in a lock,
* then unlock, generate keys, and then re-lock and use those keys
*/
virtual shared_ptr<KeyGenerator> getKeyGenerator() const = 0;
}; };
/** /**
* Updates are two steps: verify that it's a valid update, and perform it. * Updates are two steps: verify that it's a valid update, and perform it.
* validateUpdate fills out the UpdateStatus and update actually applie s it. * validateUpdate fills out the UpdateStatus and update actually applie s it.
*/ */
class UpdateTicket { class UpdateTicket {
public: public:
UpdateTicket() : _isValid(false) { } UpdateTicket() : _isValid(false) { }
 End of changes. 7 change blocks. 
1 lines changed or deleted 29 lines changed or added


 index_bounds.h   index_bounds.h 
skipping to change at line 54 skipping to change at line 54
OrderedIntervalList(const string& n) : name(n) { } OrderedIntervalList(const string& n) : name(n) { }
// Must be ordered according to the index order. // Must be ordered according to the index order.
vector<Interval> intervals; vector<Interval> intervals;
// TODO: We could drop this. Only used in IndexBounds::isValidFor. // TODO: We could drop this. Only used in IndexBounds::isValidFor.
string name; string name;
bool isValidFor(int expectedOrientation) const; bool isValidFor(int expectedOrientation) const;
std::string toString() const; std::string toString() const;
/**
* Complements the OIL. Used by the index bounds builder in order
* to create index bounds for $not predicates.
*
* Assumes the OIL is increasing, and therefore must be called prio
r to
* alignBounds(...).
*
* Example:
* The complement of [3, 6), [8, 10] is [MinKey, 3), [6, 8), (20,
MaxKey],
* where this OIL has direction==1.
*/
void complement();
}; };
/** /**
* Tied to an index. Permissible values for all fields in the index. Requires the index to * Tied to an index. Permissible values for all fields in the index. Requires the index to
* interpret. Previously known as FieldRangeVector. * interpret. Previously known as FieldRangeVector.
*/ */
struct IndexBounds { struct IndexBounds {
IndexBounds() : isSimpleRange(false) { } IndexBounds() : isSimpleRange(false) { }
// For each indexed field, the values that the field is allowed to take on. // For each indexed field, the values that the field is allowed to take on.
skipping to change at line 171 skipping to change at line 184
* that the key corresponds to. An example: If keyEltsToUse is 1, movePastKeyElts is * that the key corresponds to. An example: If keyEltsToUse is 1, movePastKeyElts is
* false, and the index we're iterating over has two fields, o ut[1] will have the value * false, and the index we're iterating over has two fields, o ut[1] will have the value
* for the second field. * for the second field.
* *
* incOut: If the i-th element is false, seek to the key *after* th e i-th element of out. * incOut: If the i-th element is false, seek to the key *after* th e i-th element of out.
* If the i-th element is true, seek to the i-th element of out. * If the i-th element is true, seek to the i-th element of out.
*/ */
KeyState checkKey(const BSONObj& key, int* keyEltsToUse, bool* move PastKeyElts, KeyState checkKey(const BSONObj& key, int* keyEltsToUse, bool* move PastKeyElts,
vector<const BSONElement*>* out, vector<bool>* in cOut); vector<const BSONElement*>* out, vector<bool>* in cOut);
private: /**
* Relative position of a key to an interval.
* Exposed for testing only.
*/
enum Location { enum Location {
BEHIND = -1, BEHIND = -1,
WITHIN = 0, WITHIN = 0,
AHEAD = 1, AHEAD = 1,
}; };
/** /**
* If 'elt' is in any interval, return WITHIN and set 'newIntervalI
ndex' to the index of the
* interval in the ordered interval list.
*
* If 'elt' is not in any interval but could be advanced to be in o
ne, return BEHIND and set
* 'newIntervalIndex' to the index of the interval that 'elt' could
be advanced to.
*
* If 'elt' cannot be advanced to any interval, return AHEAD.
*
* Exposed for testing only.
*
* TODO(efficiency): Start search from a given index.
*/
static Location findIntervalForField(const BSONElement &elt, const
OrderedIntervalList& oil,
const int expectedDirection, s
ize_t* newIntervalIndex);
private:
/**
* Find the first field in the key that isn't within the interval w e think it is. Returns * Find the first field in the key that isn't within the interval w e think it is. Returns
* false if every field is in the interval we think it is. Returns true and populates out * false if every field is in the interval we think it is. Returns true and populates out
* parameters if a field isn't in the interval we think it is. * parameters if a field isn't in the interval we think it is.
* *
* Out parameters set if we return true: * Out parameters set if we return true:
* 'where' is the leftmost field that isn't in the interval we thin k it is. * 'where' is the leftmost field that isn't in the interval we thin k it is.
* 'what' is the orientation of the field with respect to that inte rval. * 'what' is the orientation of the field with respect to that inte rval.
*/ */
bool findLeftmostProblem(const vector<BSONElement>& keyValues, size _t* where, bool findLeftmostProblem(const vector<BSONElement>& keyValues, size _t* where,
Location* what); Location* what);
/** /**
* Returns true if it's possible to advance any of the first 'field sToCheck' fields of the * Returns true if it's possible to advance any of the first 'field sToCheck' fields of the
* index key and still be within valid index bounds. * index key and still be within valid index bounds.
* *
* keyValues are the elements of the index key in order. * keyValues are the elements of the index key in order.
*/ */
bool spaceLeftToAdvance(size_t fieldsToCheck, const vector<BSONElem ent>& keyValues); bool spaceLeftToAdvance(size_t fieldsToCheck, const vector<BSONElem ent>& keyValues);
/**
* Returns BEHIND if the key is behind the interval.
* Returns WITHIN if the key is within the interval.
* Returns AHEAD if the key is ahead the interval.
*
* All directions are oriented along 'direction'.
*/
static Location intervalCmp(const Interval& interval, const BSONEle
ment& key,
const int expectedDirection);
/**
* If 'elt' is in any interval, return WITHIN and set 'newIntervalI
ndex' to the index of the
* interval in the ordered interval list.
*
* If 'elt' is not in any interval but could be advanced to be in o
ne, return BEHIND and set
* 'newIntervalIndex' to the index of the interval that 'elt' could
be advanced to.
*
* If 'elt' cannot be advanced to any interval, return AHEAD.
*
* TODO(efficiency): Start search from a given index.
* TODO(efficiency): Binary search for the answer.
*/
static Location findIntervalForField(const BSONElement &elt, const
OrderedIntervalList& oil,
const int expectedDirection, s
ize_t* newIntervalIndex);
// The actual bounds. Must outlive this object. Not owned by us. // The actual bounds. Must outlive this object. Not owned by us.
const IndexBounds* _bounds; const IndexBounds* _bounds;
// For each field, which interval are we currently in? // For each field, which interval are we currently in?
vector<size_t> _curInterval; vector<size_t> _curInterval;
// Direction of scan * direction of indexing. // Direction of scan * direction of indexing.
vector<int> _expectedDirection; vector<int> _expectedDirection;
}; };
 End of changes. 4 change blocks. 
32 lines changed or deleted 41 lines changed or added


 index_bounds_builder.h   index_bounds_builder.h 
skipping to change at line 173 skipping to change at line 173
* index described by 'keyPattern' in the default forward direction . * index described by 'keyPattern' in the default forward direction .
*/ */
static void allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds); static void allValuesBounds(const BSONObj& keyPattern, IndexBounds* bounds);
/** /**
* Assumes each OIL in 'bounds' is increasing. * Assumes each OIL in 'bounds' is increasing.
* *
* Aligns OILs (and bounds) according to the 'kp' direction * the s canDir. * Aligns OILs (and bounds) according to the 'kp' direction * the s canDir.
*/ */
static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir = 1); static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir = 1);
/**
* Returns 'true' if the bounds 'bounds' can be represented as one
interval between
* 'startKey' and 'endKey'. Inclusivity of each bound is set throu
gh the relevant
* (name)KeyInclusive parameter. Returns 'false' if otherwise.
*/
static bool isSingleInterval(const IndexBounds& bounds,
BSONObj* startKey,
bool* startKeyInclusive,
BSONObj* endKey,
bool* endKeyInclusive);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
0 lines changed or deleted 13 lines changed or added


 index_builder.h   index_builder.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/util/background.h" #include "mongo/util/background.h"
/** /**
* Forks off a thread to build an index. * Forks off a thread to build an index.
*/ */
namespace mongo { namespace mongo {
class Collection;
class IndexBuilder : public BackgroundJob { class IndexBuilder : public BackgroundJob {
public: public:
IndexBuilder(const BSONObj& index); IndexBuilder(const BSONObj& index);
virtual ~IndexBuilder(); virtual ~IndexBuilder();
virtual void run(); virtual void run();
/** /**
* name of the builder, not the index * name of the builder, not the index
*/ */
virtual std::string name() const; virtual std::string name() const;
Status build( Client::Context& context ) const; Status build( Client::Context& context ) const;
/** /**
* Kill all in-progress indexes matching criteria and, optionally, * Kill all in-progress indexes matching criteria, if non-empty:
store them in the * index ns, index name, and/or index key spec.
* indexes list. * Returns a vector of the indexes that were killed.
*/ */
static std::vector<BSONObj> killMatchingIndexBuilds(const BSONObj& static std::vector<BSONObj>
criteria); killMatchingIndexBuilds(Collection* collection,
const IndexCatalog::IndexKillCriteria&
criteria);
/** /**
* Retry all index builds in the list. Builds each index in a separ ate thread. If ns does * Retry all index builds in the list. Builds each index in a separ ate thread. If ns does
* not match the ns field in the indexes list, the BSONObj's ns fie ld is changed before the * not match the ns field in the indexes list, the BSONObj's ns fie ld is changed before the
* index is built (to handle rename). * index is built (to handle rename).
*/ */
static void restoreIndexes(const std::vector<BSONObj>& indexes); static void restoreIndexes(const std::vector<BSONObj>& indexes);
private: private:
const BSONObj _index; const BSONObj _index;
 End of changes. 4 change blocks. 
5 lines changed or deleted 10 lines changed or added


 index_catalog.h   index_catalog.h 
skipping to change at line 36 skipping to change at line 36
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <vector>
#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/catalog/index_pregen.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/platform/unordered_map.h"
namespace mongo { namespace mongo {
class Client;
class Collection; class Collection;
class NamespaceDetails; class NamespaceDetails;
class BtreeInMemoryState; class BtreeInMemoryState;
class IndexDescriptor; class IndexDescriptor;
class IndexDetails; class IndexDetails;
class IndexAccessMethod; class IndexAccessMethod;
class BtreeAccessMethod; class BtreeAccessMethod;
class BtreeBasedAccessMethod; class BtreeBasedAccessMethod;
skipping to change at line 99 skipping to change at line 102
IndexDescriptor* findIndexByKeyPattern( const BSONObj& key, IndexDescriptor* findIndexByKeyPattern( const BSONObj& key,
bool includeUnfinishedIndex es = false ) const; bool includeUnfinishedIndex es = false ) const;
/* Returns the index entry for the first index whose prefix contain s /* Returns the index entry for the first index whose prefix contain s
* 'keyPattern'. If 'requireSingleKey' is true, skip indices that c ontain * 'keyPattern'. If 'requireSingleKey' is true, skip indices that c ontain
* array attributes. Otherwise, returns NULL. * array attributes. Otherwise, returns NULL.
*/ */
IndexDescriptor* findIndexByPrefix( const BSONObj &keyPattern, IndexDescriptor* findIndexByPrefix( const BSONObj &keyPattern,
bool requireSingleKey ) const; bool requireSingleKey ) const;
void findIndexByType( const string& type , vector<IndexDescriptor*> void findIndexByType( const string& type,
& matches ) const; vector<IndexDescriptor*>& matches,
bool includeUnfinishedIndexes = false ) const
;
// never returns NULL // never returns NULL
IndexAccessMethod* getIndex( const IndexDescriptor* desc ); IndexAccessMethod* getIndex( const IndexDescriptor* desc );
const IndexAccessMethod* getIndex( const IndexDescriptor* desc ) co nst; const IndexAccessMethod* getIndex( const IndexDescriptor* desc ) co nst;
class IndexIterator { class IndexIterator {
public: public:
bool more(); bool more();
IndexDescriptor* next(); IndexDescriptor* next();
// returns the access method for the last return IndexDescripto r // returns the access method for the last return IndexDescripto r
IndexAccessMethod* accessMethod( IndexDescriptor* desc ); IndexAccessMethod* accessMethod( IndexDescriptor* desc );
IndexCatalogEntry* entry( IndexDescriptor* desc );
private: private:
IndexIterator( const IndexCatalog* cat, bool includeUnfinishedI ndexes ); IndexIterator( const IndexCatalog* cat, bool includeUnfinishedI ndexes );
void _advance(); void _advance();
bool _includeUnfinishedIndexes; bool _includeUnfinishedIndexes;
const IndexCatalog* _catalog; const IndexCatalog* _catalog;
IndexCatalogEntryContainer::const_iterator _iterator; IndexCatalogEntryContainer::const_iterator _iterator;
bool _start; // only true before we've called next() or more() bool _start; // only true before we've called next() or more()
skipping to change at line 146 skipping to change at line 153
enum ShutdownBehavior { enum ShutdownBehavior {
SHUTDOWN_CLEANUP, // fully clean up this build SHUTDOWN_CLEANUP, // fully clean up this build
SHUTDOWN_LEAVE_DIRTY // leave as if kill -9 happened, so have t o deal with on restart SHUTDOWN_LEAVE_DIRTY // leave as if kill -9 happened, so have t o deal with on restart
}; };
Status createIndex( BSONObj spec, Status createIndex( BSONObj spec,
bool mayInterrupt, bool mayInterrupt,
ShutdownBehavior shutdownBehavior = SHUTDOWN_CL EANUP ); ShutdownBehavior shutdownBehavior = SHUTDOWN_CL EANUP );
Status okToAddIndex( const BSONObj& spec ) const; StatusWith<BSONObj> prepareSpecForCreate( const BSONObj& original ) const;
Status dropAllIndexes( bool includingIdIndex ); Status dropAllIndexes( bool includingIdIndex );
Status dropIndex( IndexDescriptor* desc ); Status dropIndex( IndexDescriptor* desc );
/** /**
* will drop all incompleted indexes and return specs * will drop all incompleted indexes and return specs
* after this, the indexes can be rebuilt * after this, the indexes can be rebuilt
*/ */
vector<BSONObj> getAndClearUnfinishedIndexes(); vector<BSONObj> getAndClearUnfinishedIndexes();
struct IndexKillCriteria {
std::string ns;
std::string name;
BSONObj key;
};
/**
* Given some criteria, will search through all in-progress index b
uilds
* and will kill ones that match. (namespace, index name, and/or in
dex key spec)
* Returns the list of index specs that were killed, for use in res
tarting them later.
*/
std::vector<BSONObj> killMatchingIndexBuilds(const IndexKillCriteri
a& criteria);
// ---- modify single index // ---- modify single index
/* Updates the expireAfterSeconds field of the given index to the v alue in newExpireSecs. /* Updates the expireAfterSeconds field of the given index to the v alue in newExpireSecs.
* The specified index must already contain an expireAfterSeconds f ield, and the value in * The specified index must already contain an expireAfterSeconds f ield, and the value in
* that field and newExpireSecs must both be numeric. * that field and newExpireSecs must both be numeric.
*/ */
void updateTTLSetting( const IndexDescriptor* idx, long long newExp ireSeconds ); void updateTTLSetting( const IndexDescriptor* idx, long long newExp ireSeconds );
bool isMultikey( const IndexDescriptor* idex ); bool isMultikey( const IndexDescriptor* idex );
skipping to change at line 219 skipping to change at line 239
string _indexName; string _indexName;
string _indexNamespace; string _indexNamespace;
IndexCatalogEntry* _entry; IndexCatalogEntry* _entry;
bool _inProgress; bool _inProgress;
}; };
// ----- data modifiers ------ // ----- data modifiers ------
/**
* TODO: document
*/
void touch( const PregeneratedKeys* preGen ) const;
// this throws for now // this throws for now
void indexRecord( const BSONObj& obj, const DiskLoc &loc ); void indexRecord( const BSONObj& obj, const DiskLoc &loc,
const PregeneratedKeys* preGen = NULL );
void unindexRecord( const BSONObj& obj, const DiskLoc& loc, bool no Warn ); void unindexRecord( const BSONObj& obj, const DiskLoc& loc, bool no Warn );
/** /**
* checks all unique indexes and checks for conflicts * checks all unique indexes and checks for conflicts
* should not throw * should not throw
*/ */
Status checkNoIndexConflicts( const BSONObj& obj ); Status checkNoIndexConflicts( const BSONObj& obj, const Pregenerate dKeys* preGen );
// ------- temp internal ------- // ------- temp internal -------
string getAccessMethodName(const BSONObj& keyPattern) { string getAccessMethodName(const BSONObj& keyPattern) {
return _getAccessMethodName( keyPattern ); return _getAccessMethodName( keyPattern );
} }
// public static helpers Status _upgradeDatabaseMinorVersionIfNeeded( const string& newPlugi
nName );
static bool validKeyPattern( const BSONObj& obj );
static BSONObj fixIndexSpec( const BSONObj& spec ); // public static helpers
static BSONObj fixIndexKey( const BSONObj& key ); static BSONObj fixIndexKey( const BSONObj& key );
private: private:
typedef unordered_map<IndexDescriptor*, Client*> InProgressIndexesM ap;
// creates a new thing, no caching // creates a new thing, no caching
IndexAccessMethod* _createAccessMethod( const IndexDescriptor* desc , IndexAccessMethod* _createAccessMethod( const IndexDescriptor* desc ,
IndexCatalogEntry* entry ); IndexCatalogEntry* entry );
Status _upgradeDatabaseMinorVersionIfNeeded( const string& newPlugi
nName );
int _removeFromSystemIndexes( const StringData& indexName ); int _removeFromSystemIndexes( const StringData& indexName );
bool _shouldOverridePlugin( const BSONObj& keyPattern ) const; bool _shouldOverridePlugin( const BSONObj& keyPattern ) const;
/** /**
* This differs from IndexNames::findPluginName in that returns the plugin name we *should* * This differs from IndexNames::findPluginName in that returns the plugin name we *should*
* use, not the plugin name inside of the provided key pattern. To understand when these * use, not the plugin name inside of the provided key pattern. To understand when these
* differ, see shouldOverridePlugin. * differ, see shouldOverridePlugin.
*/ */
string _getAccessMethodName(const BSONObj& keyPattern) const; string _getAccessMethodName(const BSONObj& keyPattern) const;
IndexDetails* _getIndexDetails( const IndexDescriptor* descriptor ) const; IndexDetails* _getIndexDetails( const IndexDescriptor* descriptor ) const;
void _checkMagic() const; void _checkMagic() const;
// checks if there is anything in _leftOverIndexes // checks if there is anything in _leftOverIndexes
// meaning we shouldn't modify catalog // meaning we shouldn't modify catalog
Status _checkUnfinished() const; Status _checkUnfinished() const;
Status _indexRecord( IndexCatalogEntry* index, const BSONObj& obj, Status _indexRecord( IndexCatalogEntry* index,
const DiskLoc &loc ); const BSONObj& obj, const DiskLoc &loc,
const PregeneratedKeysOnIndex* pregen );
Status _unindexRecord( IndexCatalogEntry* index, const BSONObj& obj , const DiskLoc &loc, Status _unindexRecord( IndexCatalogEntry* index, const BSONObj& obj , const DiskLoc &loc,
bool logIfError ); bool logIfError );
/** /**
* this does no sanity checks * this does no sanity checks
*/ */
Status _dropIndex( IndexCatalogEntry* entry ); Status _dropIndex( IndexCatalogEntry* entry );
// just does disk hanges // just does disk hanges
// doesn't change memory state, etc... // doesn't change memory state, etc...
void _deleteIndexFromDisk( const string& indexName, void _deleteIndexFromDisk( const string& indexName,
const string& indexNamespace, const string& indexNamespace,
int idxNo ); int idxNo );
// descriptor ownership passes to _setupInMemoryStructures // descriptor ownership passes to _setupInMemoryStructures
IndexCatalogEntry* _setupInMemoryStructures( IndexDescriptor* descr iptor ); IndexCatalogEntry* _setupInMemoryStructures( IndexDescriptor* descr iptor );
static BSONObj _fixIndexSpec( const BSONObj& spec );
Status _isSpecOk( const BSONObj& spec ) const;
Status _doesSpecConflictWithExisting( const BSONObj& spec ) const;
int _magic; int _magic;
Collection* _collection; Collection* _collection;
NamespaceDetails* _details; NamespaceDetails* _details;
IndexCatalogEntryContainer _entries; IndexCatalogEntryContainer _entries;
// These are the index specs of indexes that were "leftover" // These are the index specs of indexes that were "leftover"
// "Leftover" means they were unfinished when a mongod shut down // "Leftover" means they were unfinished when a mongod shut down
// Certain operations are prohibted until someone fixes // Certain operations are prohibted until someone fixes
// get by calling getAndClearUnfinishedIndexes // get by calling getAndClearUnfinishedIndexes
std::vector<BSONObj> _unfinishedIndexes; std::vector<BSONObj> _unfinishedIndexes;
static const BSONObj _idObj; // { _id : 1 } static const BSONObj _idObj; // { _id : 1 }
// Track in-progress index builds, in order to find and stop them w
hen necessary.
InProgressIndexesMap _inProgressIndexes;
}; };
} }
 End of changes. 17 change blocks. 
14 lines changed or deleted 52 lines changed or added


 index_create.h   index_create.h 
skipping to change at line 34 skipping to change at line 34
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
#include "mongo/db/diskloc.h"
#include "mongo/db/index/index_access_method.h"
namespace mongo { namespace mongo {
class IndexCatalogEntry; class BSONObj;
class Collection; class Collection;
class IndexCatalogEntry;
// Build an index in the foreground // Build an index in the foreground
// If background is false, uses fast index builder // If background is false, uses fast index builder
// If background is true, uses background index builder; blocks until d one. // If background is true, uses background index builder; blocks until d one.
void buildAnIndex( Collection* collection, void buildAnIndex( Collection* collection,
IndexCatalogEntry* btreeState, IndexCatalogEntry* btreeState,
bool mayInterrupt ); bool mayInterrupt );
class MultiIndexBlock {
MONGO_DISALLOW_COPYING( MultiIndexBlock );
public:
MultiIndexBlock( Collection* collection );
~MultiIndexBlock();
Status init( std::vector<BSONObj>& specs );
Status insert( const BSONObj& doc,
const DiskLoc& loc,
const InsertDeleteOptions& options );
Status commit();
private:
Collection* _collection;
struct IndexState {
IndexState()
: block( NULL ), real( NULL ), bulk( NULL ) {
}
IndexAccessMethod* forInsert() { return bulk ? bulk : real; }
IndexCatalog::IndexBuildBlock* block;
IndexAccessMethod* real;
IndexAccessMethod* bulk;
};
std::vector<IndexState> _states;
};
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
1 lines changed or deleted 40 lines changed or added


 index_cursor.h   index_cursor.h 
skipping to change at line 57 skipping to change at line 57
* be simple (a key location for a Btree index) or rich ($within for a geo index). * be simple (a key location for a Btree index) or rich ($within for a geo index).
* *
* Locking is the responsibility of the caller. The IndexCursor keeps state. If the caller * Locking is the responsibility of the caller. The IndexCursor keeps state. If the caller
* wishes to yield or unlock, it must call savePosition() first. When it decides to unyield it * wishes to yield or unlock, it must call savePosition() first. When it decides to unyield it
* must call restorePosition(). The cursor may be EOF after a restoreP osition(). * must call restorePosition(). The cursor may be EOF after a restoreP osition().
*/ */
class IndexCursor { class IndexCursor {
public: public:
virtual ~IndexCursor() { } virtual ~IndexCursor() { }
// XXX SHORT TERM HACKS THAT MUST DIE: 2d index
virtual DiskLoc getBucket() const { return DiskLoc(); }
// XXX SHORT TERM HACKS THAT MUST DIE: 2d index
virtual int getKeyOfs() const { return 0; }
/** /**
* Set options on the cursor (direction). See CursorOptions below. * Set options on the cursor (direction). See CursorOptions below.
*/ */
virtual Status setOptions(const CursorOptions& options) = 0; virtual Status setOptions(const CursorOptions& options) = 0;
/** /**
* A cursor doesn't point anywhere by default. You must seek to th e start position. * A cursor doesn't point anywhere by default. You must seek to th e start position.
* The provided position must be a predicate that the index underst ands. The * The provided position must be a predicate that the index underst ands. The
* predicate must describe one value, though there may be several i nstances * predicate must describe one value, though there may be several i nstances
* *
skipping to change at line 134 skipping to change at line 128
* Restore the saved position. Errors if there is no saved positio n. * Restore the saved position. Errors if there is no saved positio n.
* The cursor may be EOF after a restore. * The cursor may be EOF after a restore.
*/ */
virtual Status restorePosition() = 0; virtual Status restorePosition() = 0;
// Return a string describing the cursor. // Return a string describing the cursor.
virtual string toString() = 0; virtual string toString() = 0;
/** /**
* Add debugging info to the provided builder. * Add debugging info to the provided builder.
* TODO(hk/alerner): We can do this better, perhaps with a more str uctured format. * TODO(hk): We can do this better, perhaps with a more structured format.
*/ */
virtual void explainDetails(BSONObjBuilder* b) { } virtual void explainDetails(BSONObjBuilder* b) { }
}; };
// All the options we might want to set on a cursor. // All the options we might want to set on a cursor.
struct CursorOptions { struct CursorOptions {
// Set the direction of the scan. Ignored if the cursor doesn't ha ve directions (geo). // Set the direction of the scan. Ignored if the cursor doesn't ha ve directions (geo).
enum Direction { enum Direction {
DECREASING = -1, DECREASING = -1,
INCREASING = 1, INCREASING = 1,
 End of changes. 2 change blocks. 
7 lines changed or deleted 1 lines changed or added


 index_descriptor.h   index_descriptor.h 
// index_descriptor.cpp
/** /**
* Copyright (C) 2013 10gen Inc. * Copyright (C) 2013 10gen Inc.
* *
* This program is free software: you can redistribute it and/or modify * This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3 , * it under the terms of the GNU Affero General Public License, version 3 ,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
skipping to change at line 59 skipping to change at line 61
* mutable "head" pointer which is index-specific. * mutable "head" pointer which is index-specific.
* *
* All synchronization is the responsibility of the caller. * All synchronization is the responsibility of the caller.
*/ */
class IndexDescriptor { class IndexDescriptor {
public: public:
/** /**
* OnDiskIndexData is a pointer to the memory mapped per-index data . * OnDiskIndexData is a pointer to the memory mapped per-index data .
* infoObj is a copy of the index-describing BSONObj contained in t he OnDiskIndexData. * infoObj is a copy of the index-describing BSONObj contained in t he OnDiskIndexData.
*/ */
IndexDescriptor(Collection* collection, BSONObj infoObj) IndexDescriptor(Collection* collection, const std::string& accessMe thodName, BSONObj infoObj)
: _magic(123987), : _magic(123987),
_collection(collection), _collection(collection),
_accessMethodName(accessMethodName),
_infoObj(infoObj.getOwned()), _infoObj(infoObj.getOwned()),
_numFields(infoObj.getObjectField("key").nFields()), _numFields(infoObj.getObjectField("key").nFields()),
_keyPattern(infoObj.getObjectField("key").getOwned()), _keyPattern(infoObj.getObjectField("key").getOwned()),
_indexName(infoObj.getStringField("name")), _indexName(infoObj.getStringField("name")),
_parentNS(infoObj.getStringField("ns")), _parentNS(infoObj.getStringField("ns")),
_isIdIndex(IndexDetails::isIdIndexPattern( _keyPattern )), _isIdIndex(IndexDetails::isIdIndexPattern( _keyPattern )),
_sparse(infoObj["sparse"].trueValue()), _sparse(infoObj["sparse"].trueValue()),
_dropDups(infoObj["dropDups"].trueValue()), _dropDups(infoObj["dropDups"].trueValue()),
_unique( _isIdIndex || infoObj["unique"].trueValue() ), _unique( _isIdIndex || infoObj["unique"].trueValue() ),
_cachedEntry( NULL ) _cachedEntry( NULL )
skipping to change at line 105 skipping to change at line 108
const BSONObj& keyPattern() const { _checkOk(); return _keyPattern; } const BSONObj& keyPattern() const { _checkOk(); return _keyPattern; }
// How many fields do we index / are in the key pattern? // How many fields do we index / are in the key pattern?
int getNumFields() const { _checkOk(); return _numFields; } int getNumFields() const { _checkOk(); return _numFields; }
// //
// Information about the index's namespace / collection. // Information about the index's namespace / collection.
// //
// Return the name of the index. // Return the name of the index.
const string& indexName() const { _checkOk(); return _indexName; } const std::string& indexName() const { _checkOk(); return _indexNam e; }
// Return the name of the indexed collection. // Return the name of the indexed collection.
const string& parentNS() const { return _parentNS; } const std::string& parentNS() const { return _parentNS; }
// Return the name of this index's storage area (database.table.$in dex) // Return the name of this index's storage area (database.table.$in dex)
const string& indexNamespace() const { return _indexNamespace; } const std::string& indexNamespace() const { return _indexNamespace;
}
// Return the name of the access method we must use to access this
index's data.
const std::string& getAccessMethodName() const { return _accessMeth
odName; }
// //
// Properties every index has // Properties every index has
// //
// Return what version of index this is. // Return what version of index this is.
int version() const { return _version; } int version() const { return _version; }
// May each key only occur once? // May each key only occur once?
bool unique() const { return _unique; } bool unique() const { return _unique; }
skipping to change at line 147 skipping to change at line 153
// Allow access to arbitrary fields in the per-index info object. Some indices stash // Allow access to arbitrary fields in the per-index info object. Some indices stash
// index-specific data there. // index-specific data there.
BSONElement getInfoElement(const string& name) const { return _info Obj[name]; } BSONElement getInfoElement(const string& name) const { return _info Obj[name]; }
// //
// "Internals" of accessing the index, used by IndexAccessMethod(s) . // "Internals" of accessing the index, used by IndexAccessMethod(s) .
// //
// Return a (rather compact) string representation. // Return a (rather compact) string representation.
string toString() const { _checkOk(); return _infoObj.toString(); } std::string toString() const { _checkOk(); return _infoObj.toString (); }
// Return the info object. // Return the info object.
const BSONObj& infoObj() const { _checkOk(); return _infoObj; } const BSONObj& infoObj() const { _checkOk(); return _infoObj; }
// this is the owner of this IndexDescriptor // this is the owner of this IndexDescriptor
IndexCatalog* getIndexCatalog() const { return _collection->getInde xCatalog(); } IndexCatalog* getIndexCatalog() const { return _collection->getInde xCatalog(); }
bool areIndexOptionsEquivalent( const IndexDescriptor* other ) cons
t;
private: private:
void _checkOk() const { void _checkOk() const {
if ( _magic == 123987 ) if ( _magic == 123987 )
return; return;
log() << "uh oh: " << (void*)(this) << " " << _magic; log() << "uh oh: " << (void*)(this) << " " << _magic;
verify(0); verify(0);
} }
int _magic; int _magic;
// Related catalog information of the parent collection // Related catalog information of the parent collection
Collection* _collection; Collection* _collection;
// What access method should we use for this index?
std::string _accessMethodName;
// The BSONObj describing the index. Accessed through the various members above. // The BSONObj describing the index. Accessed through the various members above.
const BSONObj _infoObj; const BSONObj _infoObj;
// --- cached data from _infoObj // --- cached data from _infoObj
int64_t _numFields; // How many fields are indexed? int64_t _numFields; // How many fields are indexed?
BSONObj _keyPattern; BSONObj _keyPattern;
string _indexName; std::string _indexName;
string _parentNS; std::string _parentNS;
string _indexNamespace; std::string _indexNamespace;
bool _isIdIndex; bool _isIdIndex;
bool _sparse; bool _sparse;
bool _dropDups; bool _dropDups;
bool _unique; bool _unique;
int _version; int _version;
// only used by IndexCatalogEntryContainer to do caching for perf // only used by IndexCatalogEntryContainer to do caching for perf
// users not allowed to touch, and not part of API // users not allowed to touch, and not part of API
IndexCatalogEntry* _cachedEntry; IndexCatalogEntry* _cachedEntry;
 End of changes. 10 change blocks. 
8 lines changed or deleted 23 lines changed or added


 index_details.h   index_details.h 
skipping to change at line 96 skipping to change at line 96
return info.obj().getObjectField("key"); return info.obj().getObjectField("key");
} }
/** /**
* @return offset into keyPattern for key * @return offset into keyPattern for key
-1 if doesn't exist -1 if doesn't exist
*/ */
int keyPatternOffset( const string& key ) const; int keyPatternOffset( const string& key ) const;
bool inKeyPattern( const string& key ) const { return keyPatternOff set( key ) >= 0; } bool inKeyPattern( const string& key ) const { return keyPatternOff set( key ) >= 0; }
/* true if the specified key is in the index */
bool hasKey(const BSONObj& key);
// returns name of this index's storage area (database.collection.$ index) // returns name of this index's storage area (database.collection.$ index)
string indexNamespace() const { string indexNamespace() const {
return indexNamespaceFromObj(info.obj()); return indexNamespaceFromObj(info.obj());
} }
// returns the name of an index's storage area (database.collection .$index) from a BSONObj // returns the name of an index's storage area (database.collection .$index) from a BSONObj
static string indexNamespaceFromObj(const BSONObj& io) { static string indexNamespaceFromObj(const BSONObj& io) {
string s; string s;
s.reserve(Namespace::MaxNsLen); s.reserve(Namespace::MaxNsLen);
s = io.getStringField("ns"); s = io.getStringField("ns");
skipping to change at line 182 skipping to change at line 179
/** delete this index. does NOT clean up the system catalog /** delete this index. does NOT clean up the system catalog
(system.indexes or system.namespaces) -- only NamespaceIndex. (system.indexes or system.namespaces) -- only NamespaceIndex.
*/ */
void _reset(); void _reset();
string toString() const { string toString() const {
return info.obj().toString(); return info.obj().toString();
} }
/**
* @param newSpec the new index specification to check.
*
* @return true if the given newSpec has the same options as the
* existing index assuming the key spec matches.
*/
bool areIndexOptionsEquivalent( const BSONObj& newSpec ) const;
/** @return true if supported. supported means we can use the inde x, including adding new keys. /** @return true if supported. supported means we can use the inde x, including adding new keys.
it may not mean we can build the index version in quest ion: we may not maintain building it may not mean we can build the index version in quest ion: we may not maintain building
of indexes in old formats in the future. of indexes in old formats in the future.
*/ */
static bool isASupportedIndexVersionNumber(int v) { return (v&1)==v ; } // v == 0 || v == 1 static bool isASupportedIndexVersionNumber(int v) { return (v&1)==v ; } // v == 0 || v == 1
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
11 lines changed or deleted 0 lines changed or added


 index_entry.h   index_entry.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
/** /**
* This name sucks, but every name involving 'index' is used somewhere. * This name sucks, but every name involving 'index' is used somewhere.
*/ */
struct IndexEntry { struct IndexEntry {
/**
* Use this constructor if you're making an IndexEntry from the cat
alog.
*/
IndexEntry(const BSONObj& kp, IndexEntry(const BSONObj& kp,
bool mk = false, const string& accessMethod,
bool sp = false, bool mk,
const string& n = "default_name", bool sp,
const BSONObj& io = BSONObj()) const string& n,
const BSONObj& io)
: keyPattern(kp), : keyPattern(kp),
multikey(mk), multikey(mk),
sparse(sp), sparse(sp),
name(n), name(n),
infoObj(io) { } infoObj(io) {
type = IndexNames::nameToType(accessMethod);
}
/**
* For testing purposes only.
*/
IndexEntry(const BSONObj& kp,
bool mk,
bool sp,
const string& n,
const BSONObj& io)
: keyPattern(kp),
multikey(mk),
sparse(sp),
name(n),
infoObj(io) {
type = IndexNames::nameToType(IndexNames::findPluginName(keyPat
tern));
}
/**
* For testing purposes only.
*/
IndexEntry(const BSONObj& kp)
: keyPattern(kp),
multikey(false),
sparse(false),
name("test_foo"),
infoObj(BSONObj()) {
type = IndexNames::nameToType(IndexNames::findPluginName(keyPat
tern));
}
BSONObj keyPattern; BSONObj keyPattern;
bool multikey; bool multikey;
bool sparse; bool sparse;
string name; string name;
// Geo indices have extra parameters. We need those available to p lan correctly. // Geo indices have extra parameters. We need those available to p lan correctly.
BSONObj infoObj; BSONObj infoObj;
// What type of index is this? (What access method can we use on t
he index described
// by the keyPattern?)
IndexType type;
std::string toString() const { std::string toString() const {
mongoutils::str::stream ss; mongoutils::str::stream ss;
ss << "kp: " << keyPattern.toString(); ss << "kp: " << keyPattern.toString();
if (multikey) { if (multikey) {
ss << " multikey"; ss << " multikey";
} }
if (sparse) { if (sparse) {
ss << " sparse"; ss << " sparse";
 End of changes. 5 change blocks. 
5 lines changed or deleted 51 lines changed or added


 index_legacy.h   index_legacy.h 
skipping to change at line 52 skipping to change at line 52
* index/index_access_pattern.h. Such behavior can't be changed in the current implementation of * index/index_access_pattern.h. Such behavior can't be changed in the current implementation of
* the code. * the code.
* *
* We grouped such exception/legacy behavior here. * We grouped such exception/legacy behavior here.
*/ */
class IndexLegacy { class IndexLegacy {
public: public:
/** /**
* Adjust the provided index spec BSONObj depending on the type of index obj describes. * Adjust the provided index spec BSONObj depending on the type of index obj describes.
* *
* This is a no-op unless the object describes a FTS index. To see * This is a no-op unless the object describes a TEXT or a GEO_2DSP
what FTS does, look in HERE index. TEXT and
* FTSSpec::fixSpec in fts/fts_spec.cpp. * GEO_2DSPHERE provide additional validation on the index spec, an
d tweak the index spec
* object to conform to their expected format.
*/ */
static BSONObj adjustIndexSpecObject(const BSONObj& obj); static BSONObj adjustIndexSpecObject(const BSONObj& obj);
/** /**
* Returns the BSONObj that is inserted into an index when the obje ct is missing the keys * Returns the BSONObj that is inserted into an index when the obje ct is missing the keys
* the index is over. * the index is over.
* *
* For every index *except hash*, this is the BSON equivalent of js tNULL. * For every index *except hash*, this is the BSON equivalent of js tNULL.
* For the hash index, it's the hash of BSON("" << BSONNULL). * For the hash index, it's the hash of BSON("" << BSONNULL).
* *
 End of changes. 1 change blocks. 
3 lines changed or deleted 5 lines changed or added


 index_names.h   index_names.h 
skipping to change at line 40 skipping to change at line 40
#include <string> #include <string>
namespace mongo { namespace mongo {
using std::string; using std::string;
class BSONObj; class BSONObj;
/** /**
* We need to know what 'type' an index is in order to plan correctly.
We can't entirely rely
* on the key pattern to tell us what kind of index we have.
*
* An example of the Bad Thing That We Must Avoid:
* 1. Create a 2dsphere index in 2.4, insert some docs.
* 2. Downgrade to 2.2. Insert some more docs into the collection w/th
e 2dsphere
* index. 2.2 treats the index as a normal btree index and creates
keys accordingly.
* 3. Using the 2dsphere index in 2.4 gives wrong results or assert-fai
ls or crashes as
* the data isn't what we expect.
*/
enum IndexType {
INDEX_BTREE,
INDEX_2D,
INDEX_HAYSTACK,
INDEX_2DSPHERE,
INDEX_TEXT,
INDEX_HASHED,
};
/**
* We use the string representation of index names all over the place, so we declare them all * We use the string representation of index names all over the place, so we declare them all
* once here. * once here.
*/ */
class IndexNames { class IndexNames {
public: public:
static const string GEO_2D; static const string GEO_2D;
static const string GEO_HAYSTACK; static const string GEO_HAYSTACK;
static const string GEO_2DSPHERE; static const string GEO_2DSPHERE;
static const string TEXT; static const string TEXT;
static const string HASHED; static const string HASHED;
static const string BTREE;
/** /**
* True if is a regular (non-plugin) index or uses a plugin that ex isted before 2.4. * True if is a regular (non-plugin) index or uses a plugin that ex isted before 2.4.
* These plugins are grandfathered in and allowed to exist in DBs w ith * These plugins are grandfathered in and allowed to exist in DBs w ith
* PDFILE_MINOR_VERSION_22_AND_OLDER * PDFILE_MINOR_VERSION_22_AND_OLDER
*/ */
static bool existedBefore24(const string& name) { static bool existedBefore24(const string& name);
return name.empty()
|| name == IndexNames::GEO_2D
|| name == IndexNames::GEO_HAYSTACK
|| name == IndexNames::HASHED;
}
/** /**
* Return the first string value in the provided object. For an in dex key pattern, * Return the first string value in the provided object. For an in dex key pattern,
* a field with a non-string value indicates a "special" (not strai ght Btree) index. * a field with a non-string value indicates a "special" (not strai ght Btree) index.
*/ */
static string findPluginName(const BSONObj& keyPattern); static string findPluginName(const BSONObj& keyPattern);
static bool isKnownName(const string& name) { /**
return name.empty() * Is the provided access method name one we recognize?
|| name == IndexNames::GEO_2D */
|| name == IndexNames::GEO_2DSPHERE static bool isKnownName(const string& name);
|| name == IndexNames::GEO_HAYSTACK
|| name == IndexNames::TEXT /**
|| name == IndexNames::HASHED; * Convert an index name to an IndexType.
} */
static IndexType nameToType(const string& accessMethod);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
14 lines changed or deleted 35 lines changed or added


 index_scan.h   index_scan.h 
skipping to change at line 73 skipping to change at line 73
size_t maxScan; size_t maxScan;
// Do we want to add the key as metadata? // Do we want to add the key as metadata?
bool addKeyMetadata; bool addKeyMetadata;
}; };
/** /**
* Stage scans over an index from startKey to endKey, returning results that pass the provided * Stage scans over an index from startKey to endKey, returning results that pass the provided
* filter. Internally dedups on DiskLoc. * filter. Internally dedups on DiskLoc.
* *
* XXX: we probably should split this into 2 stages: one btree-only "fa * TODO: we probably should split this into 2 stages: one btree-only "f
st" ixscan and one ast" ixscan and one that
* that strictly talks through the index API. Need to figure out * strictly talks through the index API. Need to figure out what we re
what we really want ally want to ship down
* to ship down through that API predicate-wise though, currently * through that API predicate-wise though, currently the language is a
is a BSONObj but that's BSONObj but that's
* not going to be enough. See SERVER-12397 for tracking. * clearly not enough (or we need different index scan exec nodes per i
ndex type?). See
* SERVER-12397 for tracking.
* *
* Sub-stage preconditions: None. Is a leaf and consumes no stage data . * Sub-stage preconditions: None. Is a leaf and consumes no stage data .
*/ */
class IndexScan : public PlanStage { class IndexScan : public PlanStage {
public: public:
IndexScan(const IndexScanParams& params, WorkingSet* workingSet, IndexScan(const IndexScanParams& params, WorkingSet* workingSet,
const MatchExpression* filter); const MatchExpression* filter);
virtual ~IndexScan() { } virtual ~IndexScan() { }
skipping to change at line 110 skipping to change at line 111
/** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */ /** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */
void checkEnd(); void checkEnd();
// The WorkingSet we annotate with results. Not owned by us. // The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet; WorkingSet* _workingSet;
// Index access. // Index access.
const IndexAccessMethod* _iam; // owned by Collection -> IndexCatal og const IndexAccessMethod* _iam; // owned by Collection -> IndexCatal og
scoped_ptr<IndexCursor> _indexCursor; scoped_ptr<IndexCursor> _indexCursor;
const IndexDescriptor* _descriptor; // owned by Collection -> Index Catalog BSONObj _keyPattern;
// Have we hit the end of the index scan? // Have we hit the end of the index scan?
bool _hitEnd; bool _hitEnd;
// Contains expressions only over fields in the index key. We assu me this is built // Contains expressions only over fields in the index key. We assu me this is built
// correctly by whoever creates this class. // correctly by whoever creates this class.
// The filter is not owned by us. // The filter is not owned by us.
const MatchExpression* _filter; const MatchExpression* _filter;
// Could our index have duplicates? If so, we use _returned to ded up. // Could our index have duplicates? If so, we use _returned to ded up.
 End of changes. 2 change blocks. 
8 lines changed or deleted 10 lines changed or added


 index_tag.h   index_tag.h 
skipping to change at line 67 skipping to change at line 67
// What index should we try to use for this leaf? // What index should we try to use for this leaf?
size_t index; size_t index;
// What position are we in the index? (Compound.) // What position are we in the index? (Compound.)
size_t pos; size_t pos;
}; };
// used internally // used internally
class RelevantTag : public MatchExpression::TagData { class RelevantTag : public MatchExpression::TagData {
public: public:
RelevantTag() { } RelevantTag() : elemMatchExpr(NULL), pathPrefix("") { }
std::vector<size_t> first; std::vector<size_t> first;
std::vector<size_t> notFirst; std::vector<size_t> notFirst;
// We don't know the full path from a node unless we keep notes as we traverse from the // We don't know the full path from a node unless we keep notes as we traverse from the
// root. We do this once and store it. // root. We do this once and store it.
// TODO: Do a FieldRef / StringData pass. // TODO: Do a FieldRef / StringData pass.
// TODO: We might want this inside of the MatchExpression. // TODO: We might want this inside of the MatchExpression.
string path; string path;
// Points to the innermost containing $elemMatch. If this tag is
// attached to an expression not contained in an $elemMatch, then
// 'elemMatchExpr' is NULL. Not owned here.
MatchExpression* elemMatchExpr;
// If not contained inside an elemMatch, 'pathPrefix' contains the
// part of 'path' prior to the first dot. For example, if 'path' is
// "a.b.c", then 'pathPrefix' is "a". If 'path' is just "a", then
// 'pathPrefix' is also "a".
//
// If tagging a predicate contained in an $elemMatch, 'pathPrefix'
// holds the prefix of the path *inside* the $elemMatch. If this
// tags predicate {a: {$elemMatch: {"b.c": {$gt: 1}}}}, then
// 'pathPrefix' is "b".
//
// Used by the plan enumerator to make sure that we never
// compound two predicates sharing a path prefix.
std::string pathPrefix;
virtual void debugString(StringBuilder* builder) const { virtual void debugString(StringBuilder* builder) const {
*builder << "First: "; *builder << " || First: ";
for (size_t i = 0; i < first.size(); ++i) { for (size_t i = 0; i < first.size(); ++i) {
*builder << first[i] << " "; *builder << first[i] << " ";
} }
*builder << "notFirst: "; *builder << "notFirst: ";
for (size_t i = 0; i < notFirst.size(); ++i) { for (size_t i = 0; i < notFirst.size(); ++i) {
*builder << notFirst[i] << " "; *builder << notFirst[i] << " ";
} }
*builder << "full path: " << path; *builder << "full path: " << path;
} }
 End of changes. 3 change blocks. 
2 lines changed or deleted 21 lines changed or added


 indexability.h   indexability.h 
skipping to change at line 53 skipping to change at line 53
*/ */
static bool nodeCanUseIndexOnOwnField(const MatchExpression* me) { static bool nodeCanUseIndexOnOwnField(const MatchExpression* me) {
if (me->path().empty()) { if (me->path().empty()) {
return false; return false;
} }
if (arrayUsesIndexOnOwnField(me)) { if (arrayUsesIndexOnOwnField(me)) {
return true; return true;
} }
return me->matchType() == MatchExpression::LTE return isIndexOnOwnFieldTypeNode(me);
|| me->matchType() == MatchExpression::LT
|| me->matchType() == MatchExpression::EQ
|| me->matchType() == MatchExpression::GT
|| me->matchType() == MatchExpression::GTE
|| me->matchType() == MatchExpression::REGEX
|| me->matchType() == MatchExpression::MOD
|| me->matchType() == MatchExpression::MATCH_IN
|| me->matchType() == MatchExpression::TYPE_OPERATOR
|| me->matchType() == MatchExpression::GEO
|| me->matchType() == MatchExpression::GEO_NEAR
|| me->matchType() == MatchExpression::TEXT;
} }
/** /**
* This array operator doesn't have any children with fields and ca n use an index. * This array operator doesn't have any children with fields and ca n use an index.
* *
* Example: a: {$elemMatch: {$gte: 1, $lte: 1}}. * Example: a: {$elemMatch: {$gte: 1, $lte: 1}}.
*/ */
static bool arrayUsesIndexOnOwnField(const MatchExpression* me) { static bool arrayUsesIndexOnOwnField(const MatchExpression* me) {
return me->isArray() && MatchExpression::ELEM_MATCH_VALUE == me if (!me->isArray()) {
->matchType(); return false;
}
if (MatchExpression::ELEM_MATCH_VALUE != me->matchType()) {
return false;
}
// We have an ELEM_MATCH_VALUE expression. In order to be
// considered "indexable" all children of the ELEM_MATCH_VALUE
// must be "indexable" type expressions as well.
for (size_t i = 0; i < me->numChildren(); i++) {
if (!isIndexOnOwnFieldTypeNode(me->getChild(i))) {
return false;
}
}
return true;
} }
/** /**
* Certain array operators require that the field for that operator is prepended * Certain array operators require that the field for that operator is prepended
* to all fields in that operator's children. * to all fields in that operator's children.
* *
* Example: a: {$elemMatch: {b:1, c:1}}. * Example: a: {$elemMatch: {b:1, c:1}}.
*/ */
static bool arrayUsesIndexOnChildren(const MatchExpression* me) { static bool arrayUsesIndexOnChildren(const MatchExpression* me) {
return me->isArray() && (MatchExpression::ELEM_MATCH_OBJECT == me->matchType() return me->isArray() && (MatchExpression::ELEM_MATCH_OBJECT == me->matchType()
|| MatchExpression::ALL == me->matchTy pe()); || MatchExpression::ALL == me->matchTy pe());
}; }
/**
* Returns true if 'me' is a NOT, and the child of the NOT can use
* an index on its own field.
*/
static bool isBoundsGeneratingNot(const MatchExpression* me) {
return MatchExpression::NOT == me->matchType() &&
nodeCanUseIndexOnOwnField(me->getChild(0));
}
/**
* Returns true if either 'me' is a bounds generating NOT,
* or 'me' can use an index on its own field.
*/
static bool isBoundsGenerating(const MatchExpression* me) {
return isBoundsGeneratingNot(me) || nodeCanUseIndexOnOwnField(m
e);
}
private:
/**
* Returns true if 'me' is "sargable" but is not a negation and
* is not an array node such as ELEM_MATCH_VALUE.
*
* Used as a helper for nodeCanUseIndexOnOwnField().
*/
static bool isIndexOnOwnFieldTypeNode(const MatchExpression* me) {
return me->matchType() == MatchExpression::LTE
|| me->matchType() == MatchExpression::LT
|| me->matchType() == MatchExpression::EQ
|| me->matchType() == MatchExpression::GT
|| me->matchType() == MatchExpression::GTE
|| me->matchType() == MatchExpression::REGEX
|| me->matchType() == MatchExpression::MOD
|| me->matchType() == MatchExpression::MATCH_IN
|| me->matchType() == MatchExpression::TYPE_OPERATOR
|| me->matchType() == MatchExpression::GEO
|| me->matchType() == MatchExpression::GEO_NEAR
|| me->matchType() == MatchExpression::EXISTS
|| me->matchType() == MatchExpression::TEXT;
}
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
15 lines changed or deleted 61 lines changed or added


 initialize_server_global_state.h   initialize_server_global_state.h 
skipping to change at line 54 skipping to change at line 54
* Call after processing the command line but before running mongo init ializers. * Call after processing the command line but before running mongo init ializers.
*/ */
void forkServerOrDie(); void forkServerOrDie();
/** /**
* Notify the parent that we forked from that we have successfully comp leted basic * Notify the parent that we forked from that we have successfully comp leted basic
* initialization so it can stop waiting and exit. * initialization so it can stop waiting and exit.
*/ */
void signalForkSuccess(); void signalForkSuccess();
void setupCoreSignals();
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
2 lines changed or deleted 0 lines changed or added


 interval.h   interval.h 
skipping to change at line 84 skipping to change at line 84
} }
/** /**
* Creates an interval that starts at the first field of 'base' and ends at the second * Creates an interval that starts at the first field of 'base' and ends at the second
* field of 'base'. (In other words, 'base' is a bsonobj with at le ast two elements, of * field of 'base'. (In other words, 'base' is a bsonobj with at le ast two elements, of
* which we don't care about field names.) * which we don't care about field names.)
* *
* The interval's extremities are closed or not depending on whethe r * The interval's extremities are closed or not depending on whethe r
* 'start'/'endIncluded' are true or not. * 'start'/'endIncluded' are true or not.
*/ */
Interval(BSONObj base, bool startIncluded, bool endInclued); Interval(BSONObj base, bool startIncluded, bool endIncluded);
/** Sets the current interval to the given values (see constructor) */ /** Sets the current interval to the given values (see constructor) */
void init(BSONObj base, bool startIncluded, bool endIncluded); void init(BSONObj base, bool startIncluded, bool endIncluded);
/** Returns true if an empty-constructed interval hasn't been init( /**
)-ialized yet */ * Returns true if an empty-constructed interval hasn't been init()
-ialized yet
*/
bool isEmpty() const; bool isEmpty() const;
bool isPoint() const { /**
return startInclusive && endInclusive && 0 == start.woCompare(e * Does this interval represent exactly one point?
nd, false); */
} bool isPoint() const;
/** Returns true if start is same as end and interval is open at ei /**
ther end */ * Returns true if start is same as end and interval is open at eit
bool isNull() const { her end
return (!startInclusive || !endInclusive) && 0 == start.woCompa */
re(end, false); bool isNull() const;
}
//
// Comparison with other intervals
//
/** Returns true if 'this' is the same interval as 'other' */ /**
* Returns true if 'this' is the same interval as 'other'
*/
bool equals(const Interval& other) const; bool equals(const Interval& other) const;
/** /**
* Swap start and end points of interval. * Returns true if 'this' overlaps with 'other', false otherwise.
*/ */
void reverse(); bool intersects(const Interval& rhs) const;
/**
* Returns true if 'this' is within 'other', false otherwise.
*/
bool within(const Interval& other) const;
/**
* Returns true if 'this' is located before 'other', false otherwis
e.
*/
bool precedes(const Interval& other) const;
/** Returns how 'this' compares to 'other' */ /** Returns how 'this' compares to 'other' */
enum IntervalComparison { enum IntervalComparison {
// //
// There is some intersection. // There is some intersection.
// //
// The two intervals are *exactly* equal. // The two intervals are *exactly* equal.
INTERVAL_EQUALS, INTERVAL_EQUALS,
skipping to change at line 151 skipping to change at line 170
INTERVAL_UNKNOWN INTERVAL_UNKNOWN
}; };
IntervalComparison compare(const Interval& other) const; IntervalComparison compare(const Interval& other) const;
/** /**
* toString for IntervalComparison * toString for IntervalComparison
*/ */
static string cmpstr(IntervalComparison c); static string cmpstr(IntervalComparison c);
//
// Mutation of intervals
//
/**
* Swap start and end points of interval.
*/
void reverse();
/** /**
* Updates 'this' with the intersection of 'this' and 'other'. If ' this' and 'other' * Updates 'this' with the intersection of 'this' and 'other'. If ' this' and 'other'
* have been compare()d before, that result can be optionally passe d in 'cmp' * have been compare()d before, that result can be optionally passe d in 'cmp'
*/ */
void intersect(const Interval& other, IntervalComparison cmp = INTE RVAL_UNKNOWN); void intersect(const Interval& other, IntervalComparison cmp = INTE RVAL_UNKNOWN);
/** /**
* Updates 'this' with the union of 'this' and 'other'. If 'this' a nd 'other' have * Updates 'this' with the union of 'this' and 'other'. If 'this' a nd 'other' have
* been compare()d before, that result can be optionally passed in ' cmp'. * been compare()d before, that result can be optionally passed in ' cmp'.
*/ */
 End of changes. 8 change blocks. 
16 lines changed or deleted 43 lines changed or added


 listen.h   listen.h 
skipping to change at line 31 skipping to change at line 31
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/platform/atomic_word.h" #include "mongo/platform/atomic_word.h"
#include "mongo/util/concurrency/ticketholder.h" #include "mongo/util/concurrency/ticketholder.h"
#include "mongo/util/log.h" #include "mongo/util/log.h"
#include "mongo/util/net/sock.h" #include "mongo/util/net/sock.h"
namespace mongo { namespace mongo {
const int DEFAULT_MAX_CONN = 20000; const int DEFAULT_MAX_CONN = 1000000;
class MessagingPort; class MessagingPort;
class Listener : boost::noncopyable { class Listener : boost::noncopyable {
public: public:
Listener(const std::string& name, const std::string &ip, int port, bool logConnect=true ); Listener(const std::string& name, const std::string &ip, int port, bool logConnect=true );
virtual ~Listener(); virtual ~Listener();
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 lite_parsed_query.h   lite_parsed_query.h 
skipping to change at line 54 skipping to change at line 54
int ntoskip, int ntoskip,
int ntoreturn, int ntoreturn,
int queryoptions, int queryoptions,
const BSONObj& query, const BSONObj& query,
const BSONObj& proj, const BSONObj& proj,
const BSONObj& sort, const BSONObj& sort,
const BSONObj& hint, const BSONObj& hint,
const BSONObj& minObj, const BSONObj& minObj,
const BSONObj& maxObj, const BSONObj& maxObj,
bool snapshot, bool snapshot,
bool explain,
LiteParsedQuery** out); LiteParsedQuery** out);
/** /**
* Helper functions to parse maxTimeMS from a command object. Retu rns the contained value, * Helper functions to parse maxTimeMS from a command object. Retu rns the contained value,
* or an error on parsing fail. When passed an EOO-type element, r eturns 0 (special value * or an error on parsing fail. When passed an EOO-type element, r eturns 0 (special value
* for "allow to run indefinitely"). * for "allow to run indefinitely").
*/ */
static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj) ; static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj) ;
/** /**
skipping to change at line 90 skipping to change at line 91
/** /**
* Helper function to validate a sort object. * Helper function to validate a sort object.
* Returns true if each element satisfies one of: * Returns true if each element satisfies one of:
* 1. a number with value 1 * 1. a number with value 1
* 2. a number with value -1 * 2. a number with value -1
* 3. isTextScoreMeta * 3. isTextScoreMeta
*/ */
static bool isValidSortOrder(const BSONObj& sortObj); static bool isValidSortOrder(const BSONObj& sortObj);
/** /**
* Returns true if the query described by "query" should execute
* at an elevated level of isolation (i.e., $isolated was specified
).
*/
static bool isQueryIsolated(const BSONObj& query);
/**
* Helper function to create a normalized sort object. * Helper function to create a normalized sort object.
* Each element of the object returned satisfies one of: * Each element of the object returned satisfies one of:
* 1. a number with value 1 * 1. a number with value 1
* 2. a number with value -1 * 2. a number with value -1
* 3. isTextScoreMeta * 3. isTextScoreMeta
*/ */
static BSONObj normalizeSortOrder(const BSONObj& sortObj); static BSONObj normalizeSortOrder(const BSONObj& sortObj);
// Names of the maxTimeMS command and query option. // Names of the maxTimeMS command and query option.
static const string cmdOptionMaxTimeMS; static const string cmdOptionMaxTimeMS;
 End of changes. 2 change blocks. 
0 lines changed or deleted 8 lines changed or added


 log.h   log.h 
skipping to change at line 129 skipping to change at line 129
// Used only by mongodump (mongo/tools/dump.cpp). Do not introduce new uses. // Used only by mongodump (mongo/tools/dump.cpp). Do not introduce new uses.
struct LogIndentLevel { struct LogIndentLevel {
LogIndentLevel(); LogIndentLevel();
~LogIndentLevel(); ~LogIndentLevel();
}; };
extern Tee* const warnings; // Things put here go in serverStatus extern Tee* const warnings; // Things put here go in serverStatus
extern Tee* const startupWarningsLog; // Things put here get reported i n MMS extern Tee* const startupWarningsLog; // Things put here get reported i n MMS
string errnoWithDescription(int errorcode = -1); string errnoWithDescription(int errorcode = -1);
void rawOut( const StringData &s );
/*
* Redirects the output of "rawOut" to stderr. The default is stdout.
*
* NOTE: This needs to be here because the tools such as mongoexport an
d mongodump sometimes
* send data to stdout and share this code, so they need to be able to
redirect output to
* stderr. Eventually rawOut should be replaced with something better
and our tools should not
* need to call internal server shutdown functions.
*
* NOTE: This function is not thread safe and should not be called from
a multithreaded context.
*/
void setRawOutToStderr();
/** /**
* Write the current context (backtrace), along with the optional "msg" . * Write the current context (backtrace), along with the optional "msg" .
*/ */
void logContext(const char *msg = NULL); void logContext(const char *msg = NULL);
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
17 lines changed or deleted 0 lines changed or added


 lru_key_value.h   lru_key_value.h 
skipping to change at line 57 skipping to change at line 57
* for protecting concurrent access to the LRU store if used in a threa ded * for protecting concurrent access to the LRU store if used in a threa ded
* context. * context.
* *
* Implemented as a doubly-linked list (std::list) with a hash map * Implemented as a doubly-linked list (std::list) with a hash map
* (boost::unordered_map) for quickly locating the kv-store entries. Th e * (boost::unordered_map) for quickly locating the kv-store entries. Th e
* add(), get(), and remove() operations are all O(1). * add(), get(), and remove() operations are all O(1).
* *
* The keys of generic type K map to values of type V*. The V* * The keys of generic type K map to values of type V*. The V*
* pointers are owned by the kv-store. * pointers are owned by the kv-store.
* *
* XXX: Move this into the util/ directory and do any cleanup necessary * TODO: We could move this into the util/ directory and do any cleanup
* to make if fully general. necessary to make it
* fully general.
*/ */
template<class K, class V> template<class K, class V>
class LRUKeyValue { class LRUKeyValue {
public: public:
LRUKeyValue(size_t maxSize) : _maxSize(maxSize), _currentSize(0) { }; LRUKeyValue(size_t maxSize) : _maxSize(maxSize), _currentSize(0) { };
~LRUKeyValue() { ~LRUKeyValue() {
clear(); clear();
} }
skipping to change at line 163 skipping to change at line 163
*entryOut = foundEntry; *entryOut = foundEntry;
return Status::OK(); return Status::OK();
} }
/** /**
* Remove the kv-store entry keyed by 'key'. * Remove the kv-store entry keyed by 'key'.
*/ */
Status remove(const K& key) { Status remove(const K& key) {
KVMapConstIt i = _kvMap.find(key); KVMapConstIt i = _kvMap.find(key);
if (i == _kvMap.end()) { if (i == _kvMap.end()) {
return Status(ErrorCodes::BadValue, "no such key in LRU key -value store"); return Status(ErrorCodes::NoSuchKey, "no such key in LRU ke y-value store");
} }
KVListIt found = i->second; KVListIt found = i->second;
delete found->second; delete found->second;
_kvMap.erase(i); _kvMap.erase(i);
_kvList.erase(found); _kvList.erase(found);
_currentSize--; _currentSize--;
return Status::OK(); return Status::OK();
} }
/** /**
skipping to change at line 186 skipping to change at line 186
void clear() { void clear() {
for (KVListIt i = _kvList.begin(); i != _kvList.end(); i++) { for (KVListIt i = _kvList.begin(); i != _kvList.end(); i++) {
delete i->second; delete i->second;
} }
_kvList.clear(); _kvList.clear();
_kvMap.clear(); _kvMap.clear();
_currentSize = 0; _currentSize = 0;
} }
/** /**
* Returns true if entry is found in the kv-store.
*/
bool hasKey(const K& key) const {
return _kvMap.find(key) != _kvMap.end();
}
/**
* Returns the number of entries currently in the kv-store. * Returns the number of entries currently in the kv-store.
*/ */
size_t size() const { return _currentSize; } size_t size() const { return _currentSize; }
/** /**
* XXX: The kv-store should implement its own iterator. Calling thr ough to the underlying * TODO: The kv-store should implement its own iterator. Calling th rough to the underlying
* iterator exposes the internals, and forces the caller to make a horrible type * iterator exposes the internals, and forces the caller to make a horrible type
* declaration. * declaration.
*/ */
KVListConstIt begin() const { return _kvList.begin(); } KVListConstIt begin() const { return _kvList.begin(); }
KVListConstIt end() const { return _kvList.end(); } KVListConstIt end() const { return _kvList.end(); }
private: private:
// The maximum allowable number of entries in the kv-store. // The maximum allowable number of entries in the kv-store.
const size_t _maxSize; const size_t _maxSize;
 End of changes. 4 change blocks. 
4 lines changed or deleted 12 lines changed or added


 merge_sort.h   merge_sort.h 
skipping to change at line 106 skipping to change at line 106
// If we receive an invalidate, we need to iterate over any cached state to see if the // If we receive an invalidate, we need to iterate over any cached state to see if the
// invalidate is relevant. // invalidate is relevant.
// //
// We can't iterate over a priority_queue, so we keep the actual ca ched state in a list and // We can't iterate over a priority_queue, so we keep the actual ca ched state in a list and
// have a priority_queue of iterators into that list. // have a priority_queue of iterators into that list.
// //
// Why an iterator instead of a pointer? We need to be able to use the information in the // Why an iterator instead of a pointer? We need to be able to use the information in the
// priority_queue to remove the item from the list and quickly. // priority_queue to remove the item from the list and quickly.
struct StageWithValue { struct StageWithValue {
StageWithValue() : id(WorkingSet::INVALID_ID), stage(NULL) { }
WorkingSetID id; WorkingSetID id;
PlanStage* stage; PlanStage* stage;
}; };
// We have a priority queue of these. // We have a priority queue of these.
typedef list<StageWithValue>::iterator MergingRef; typedef list<StageWithValue>::iterator MergingRef;
// The comparison function used in our priority queue. // The comparison function used in our priority queue.
class StageWithValueComparison { class StageWithValueComparison {
public: public:
 End of changes. 1 change blocks. 
0 lines changed or deleted 1 lines changed or added


 mock_multi_write_command.h   mock_multi_write_command.h 
skipping to change at line 56 skipping to change at line 56
MockWriteResult( const ConnectionString& endpoint, const WriteError Detail& error ) : MockWriteResult( const ConnectionString& endpoint, const WriteError Detail& error ) :
endpoint( endpoint ) { endpoint( endpoint ) {
WriteErrorDetail* errorCopy = new WriteErrorDetail; WriteErrorDetail* errorCopy = new WriteErrorDetail;
error.cloneTo( errorCopy ); error.cloneTo( errorCopy );
errorCopy->setIndex( 0 ); errorCopy->setIndex( 0 );
response.setOk(true); response.setOk(true);
response.setN(0); response.setN(0);
response.addToErrDetails( errorCopy ); response.addToErrDetails( errorCopy );
} }
MockWriteResult( const ConnectionString& endpoint,
const WriteErrorDetail& error,
int copies ) :
endpoint( endpoint ) {
response.setOk( true );
response.setN( 0 );
for ( int i = 0; i < copies; ++i ) {
WriteErrorDetail* errorCopy = new WriteErrorDetail;
error.cloneTo( errorCopy );
errorCopy->setIndex( i );
response.addToErrDetails( errorCopy );
}
}
MockWriteResult( const ConnectionString& endpoint, const BatchedCom mandResponse& response ) : MockWriteResult( const ConnectionString& endpoint, const BatchedCom mandResponse& response ) :
endpoint( endpoint ) { endpoint( endpoint ) {
response.cloneTo( &this->response ); response.cloneTo( &this->response );
} }
const ConnectionString endpoint; const ConnectionString endpoint;
BatchedCommandResponse response; BatchedCommandResponse response;
}; };
/** /**
 End of changes. 1 change blocks. 
0 lines changed or deleted 16 lines changed or added


 mock_ns_targeter.h   mock_ns_targeter.h 
skipping to change at line 95 skipping to change at line 95
} }
const NamespaceString& getNS() const { const NamespaceString& getNS() const {
return _nss; return _nss;
} }
/** /**
* Returns a ShardEndpoint for the doc from the mock ranges * Returns a ShardEndpoint for the doc from the mock ranges
*/ */
Status targetInsert( const BSONObj& doc, ShardEndpoint** endpoint ) const { Status targetInsert( const BSONObj& doc, ShardEndpoint** endpoint ) const {
std::vector<ShardEndpoint*> endpoints;
const std::vector<MockRange*>& ranges = getRanges(); Status status = targetQuery( doc, &endpoints );
for ( std::vector<MockRange*>::const_iterator it = ranges.begin if ( !status.isOK() )
(); it != ranges.end(); return status;
++it ) { if ( !endpoints.empty() )
*endpoint = endpoints.front();
const MockRange* range = *it; return Status::OK();
if ( rangeContains( range->range.minKey, range->range.maxKe
y, doc ) ) {
*endpoint = new ShardEndpoint( range->endpoint );
return Status::OK();
}
}
return Status( ErrorCodes::UnknownError, "no mock range found f
or document" );
} }
/** /**
* Returns the first ShardEndpoint for the query from the mock rang es. Only can handle * Returns the first ShardEndpoint for the query from the mock rang es. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }. * queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/ */
Status targetUpdate( const BatchedUpdateDocument& updateDoc, Status targetUpdate( const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints ) const { std::vector<ShardEndpoint*>* endpoints ) const {
return targetQuery( updateDoc.getQuery(), endpoints ); return targetQuery( updateDoc.getQuery(), endpoints );
} }
/** /**
* Returns the first ShardEndpoint for the query from the mock rang es. Only can handle * Returns the first ShardEndpoint for the query from the mock rang es. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }. * queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/ */
Status targetDelete( const BatchedDeleteDocument& deleteDoc, Status targetDelete( const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints ) const { std::vector<ShardEndpoint*>* endpoints ) const {
return targetQuery( deleteDoc.getQuery(), endpoints ); return targetQuery( deleteDoc.getQuery(), endpoints );
} }
Status targetAll( std::vector<ShardEndpoint*>* endpoints ) const { Status targetCollection( std::vector<ShardEndpoint*>* endpoints ) c onst {
// TODO: XXX // TODO: XXX
// No-op // No-op
return Status::OK(); return Status::OK();
} }
Status targetAllShards( std::vector<ShardEndpoint*>* endpoints ) co
nst {
const std::vector<MockRange*>& ranges = getRanges();
for ( std::vector<MockRange*>::const_iterator it = ranges.begin
(); it != ranges.end();
++it ) {
const MockRange* range = *it;
endpoints->push_back( new ShardEndpoint( range->endpoint )
);
}
return Status::OK();
}
void noteCouldNotTarget() { void noteCouldNotTarget() {
// No-op // No-op
} }
void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ) { void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ) {
// No-op // No-op
} }
Status refreshIfNeeded( bool* wasChanged ) { Status refreshIfNeeded( bool* wasChanged ) {
// No-op // No-op
skipping to change at line 159 skipping to change at line 164
const std::vector<MockRange*>& getRanges() const { const std::vector<MockRange*>& getRanges() const {
return _mockRanges.vector(); return _mockRanges.vector();
} }
private: private:
KeyRange parseRange( const BSONObj& query ) const { KeyRange parseRange( const BSONObj& query ) const {
ASSERT_EQUALS( query.nFields(), 1 ); ASSERT_EQUALS( query.nFields(), 1 );
ASSERT_EQUALS( query.firstElement().type(), Object );
string fieldName = query.firstElement().fieldName(); string fieldName = query.firstElement().fieldName();
BSONObj queryRange = query.firstElement().Obj();
ASSERT( !queryRange[GTE.l_].eoo() ); if ( query.firstElement().isNumber() ) {
ASSERT( !queryRange[LT.l_].eoo() );
BSONObjBuilder minKeyB; return KeyRange( "",
minKeyB.appendAs( queryRange[GTE.l_], fieldName ); BSON( fieldName << query.firstElement().nu
BSONObjBuilder maxKeyB; mberInt() ),
maxKeyB.appendAs( queryRange[LT.l_], fieldName ); BSON( fieldName << query.firstElement().nu
mberInt() + 1 ),
BSON( fieldName << 1 ) );
}
else if ( query.firstElement().type() == Object ) {
BSONObj queryRange = query.firstElement().Obj();
ASSERT( !queryRange[GTE.l_].eoo() );
ASSERT( !queryRange[LT.l_].eoo() );
BSONObjBuilder minKeyB;
minKeyB.appendAs( queryRange[GTE.l_], fieldName );
BSONObjBuilder maxKeyB;
maxKeyB.appendAs( queryRange[LT.l_], fieldName );
return KeyRange( "", minKeyB.obj(), maxKeyB.obj(), BSON( fi
eldName << 1 ) );
}
return KeyRange( "", minKeyB.obj(), maxKeyB.obj(), BSON( fieldN ASSERT( false );
ame << 1 ) ); return KeyRange( "", BSONObj(), BSONObj(), BSONObj() );
} }
/** /**
* Returns the first ShardEndpoint for the query from the mock rang es. Only can handle * Returns the first ShardEndpoint for the query from the mock rang es. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }. * queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/ */
Status targetQuery( const BSONObj& query, std::vector<ShardEndpoint *>* endpoints ) const { Status targetQuery( const BSONObj& query, std::vector<ShardEndpoint *>* endpoints ) const {
KeyRange queryRange = parseRange( query ); KeyRange queryRange = parseRange( query );
 End of changes. 8 change blocks. 
29 lines changed or deleted 48 lines changed or added


 mongobridge_options.h   mongobridge_options.h 
skipping to change at line 49 skipping to change at line 49
namespace optionenvironment { namespace optionenvironment {
class OptionSection; class OptionSection;
class Environment; class Environment;
} // namespace optionenvironment } // namespace optionenvironment
namespace moe = mongo::optionenvironment; namespace moe = mongo::optionenvironment;
struct MongoBridgeGlobalParams { struct MongoBridgeGlobalParams {
int port; int port;
int delay; int delay;
int connectTimeoutSec;
string destUri; string destUri;
MongoBridgeGlobalParams() : port(0), delay(0) { } MongoBridgeGlobalParams() : port(0), delay(0), connectTimeoutSec(15 ) {}
}; };
extern MongoBridgeGlobalParams mongoBridgeGlobalParams; extern MongoBridgeGlobalParams mongoBridgeGlobalParams;
Status addMongoBridgeOptions(moe::OptionSection* options); Status addMongoBridgeOptions(moe::OptionSection* options);
void printMongoBridgeHelp(std::ostream* out); void printMongoBridgeHelp(std::ostream* out);
/** /**
* Handle options that should come before validation, such as "help". * Handle options that should come before validation, such as "help".
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 mongod_options.h   mongod_options.h 
skipping to change at line 72 skipping to change at line 72
void printMongodHelp(const moe::OptionSection& options); void printMongodHelp(const moe::OptionSection& options);
/** /**
* Handle options that should come before validation, such as "help". * Handle options that should come before validation, such as "help".
* *
* Returns false if an option was found that implies we should prematur ely exit with success. * Returns false if an option was found that implies we should prematur ely exit with success.
*/ */
bool handlePreValidationMongodOptions(const moe::Environment& params, bool handlePreValidationMongodOptions(const moe::Environment& params,
const std::vector<std::string>& args); const std::vector<std::string>& args);
/**
* Handle custom validation of mongod options that can not currently be
done by using
* Constraints in the Environment. See the "validate" function in the
Environment class for
* more details.
*/
Status validateMongodOptions(const moe::Environment& params);
/**
* Canonicalize mongod options for the given environment.
*
* For example, the options "dur", "nodur", "journal", "nojournal", and
* "storage.journaling.enabled" should all be merged into "storage.jour
naling.enabled".
*/
Status canonicalizeMongodOptions(moe::Environment* params);
Status storeMongodOptions(const moe::Environment& params, const std::ve ctor<std::string>& args); Status storeMongodOptions(const moe::Environment& params, const std::ve ctor<std::string>& args);
} }
 End of changes. 1 change blocks. 
0 lines changed or deleted 18 lines changed or added


 mongorestore_options.h   mongorestore_options.h 
skipping to change at line 50 skipping to change at line 50
struct MongoRestoreGlobalParams { struct MongoRestoreGlobalParams {
bool drop; bool drop;
bool oplogReplay; bool oplogReplay;
std::string oplogLimit; std::string oplogLimit;
bool keepIndexVersion; bool keepIndexVersion;
bool restoreOptions; bool restoreOptions;
bool restoreIndexes; bool restoreIndexes;
bool restoreUsersAndRoles; bool restoreUsersAndRoles;
int w; int w;
std::string restoreDirectory; std::string restoreDirectory;
std::string tempUsersColl;
std::string tempRolesColl;
}; };
extern MongoRestoreGlobalParams mongoRestoreGlobalParams; extern MongoRestoreGlobalParams mongoRestoreGlobalParams;
Status addMongoRestoreOptions(moe::OptionSection* options); Status addMongoRestoreOptions(moe::OptionSection* options);
void printMongoRestoreHelp(std::ostream* out); void printMongoRestoreHelp(std::ostream* out);
/** /**
* Handle options that should come before validation, such as "help". * Handle options that should come before validation, such as "help".
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 mongos_options.h   mongos_options.h 
skipping to change at line 68 skipping to change at line 68
void printMongosHelp(const moe::OptionSection& options); void printMongosHelp(const moe::OptionSection& options);
/** /**
* Handle options that should come before validation, such as "help". * Handle options that should come before validation, such as "help".
* *
* Returns false if an option was found that implies we should prematur ely exit with success. * Returns false if an option was found that implies we should prematur ely exit with success.
*/ */
bool handlePreValidationMongosOptions(const moe::Environment& params, bool handlePreValidationMongosOptions(const moe::Environment& params,
const std::vector<std::string>& args); const std::vector<std::string>& args);
/**
* Handle custom validation of mongos options that can not currently be
done by using
* Constraints in the Environment. See the "validate" function in the
Environment class for
* more details.
*/
Status validateMongosOptions(const moe::Environment& params);
/**
* Canonicalize mongos options for the given environment.
*
* For example, the options "dur", "nodur", "journal", "nojournal", and
* "storage.journaling.enabled" should all be merged into "storage.jour
naling.enabled".
*/
Status canonicalizeMongosOptions(moe::Environment* params);
Status storeMongosOptions(const moe::Environment& params, const std::ve ctor<std::string>& args); Status storeMongosOptions(const moe::Environment& params, const std::ve ctor<std::string>& args);
bool isMongos(); bool isMongos();
} }
 End of changes. 1 change blocks. 
0 lines changed or deleted 18 lines changed or added


 mr.h   mr.h 
skipping to change at line 254 skipping to change at line 254
long long incomingDocuments(); long long incomingDocuments();
// ---- map stage ---- // ---- map stage ----
/** /**
* stages on in in-memory storage * stages on in in-memory storage
*/ */
void emit( const BSONObj& a ); void emit( const BSONObj& a );
/** /**
* if size is big, run a reduce * Checks the size of the transient in-memory results accumulate
* if its still big, dump to temp collection d so far and potentially
*/ * runs reduce in order to compact them. If the data is still to
void checkSize(); o large, it will be
* spilled to the output collection.
*
* NOTE: Make sure that no DB locks are held, when calling this
function, because it may
* try to acquire write DB lock for the write to the output coll
ection.
*/
void reduceAndSpillInMemoryStateIfNeeded();
/** /**
* run reduce on _temp * run reduce on _temp
*/ */
void reduceInMemory(); void reduceInMemory();
/** /**
* transfers in memory storage to temp collection * transfers in memory storage to temp collection
*/ */
void dumpToInc(); void dumpToInc();
skipping to change at line 328 skipping to change at line 332
bool jsMode() {return _jsMode;} bool jsMode() {return _jsMode;}
void switchMode(bool jsMode); void switchMode(bool jsMode);
void bailFromJS(); void bailFromJS();
const Config& _config; const Config& _config;
DBDirectClient _db; DBDirectClient _db;
bool _useIncremental; // use an incremental collection bool _useIncremental; // use an incremental collection
protected: protected:
void _add( InMemory* im , const BSONObj& a , long& size ); /**
* Appends a new document to the in-memory list of tuples, whic
h are under that
* document's key.
*
* @return estimated in-memory size occupied by the newly added
document.
*/
int _add(InMemory* im , const BSONObj& a);
scoped_ptr<Scope> _scope; scoped_ptr<Scope> _scope;
bool _onDisk; // if the end result of this map reduce is disk o r not bool _onDisk; // if the end result of this map reduce is disk o r not
scoped_ptr<InMemory> _temp; scoped_ptr<InMemory> _temp;
long _size; // bytes in _temp long _size; // bytes in _temp
long _dupCount; // number of duplicate key entries long _dupCount; // number of duplicate key entries
long long _numEmits; long long _numEmits;
 End of changes. 2 change blocks. 
5 lines changed or deleted 21 lines changed or added


 multi_plan_runner.h   multi_plan_runner.h 
skipping to change at line 82 skipping to change at line 82
*/ */
Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
virtual bool isEOF(); virtual bool isEOF();
/** /**
* Runs all plans added by addPlan, ranks them, and picks a best. Deletes all loser plans. * Runs all plans added by addPlan, ranks them, and picks a best. Deletes all loser plans.
* All further calls to getNext(...) will return results from the b est plan. * All further calls to getNext(...) will return results from the b est plan.
* *
* Returns true if a best plan was picked, false if there was an er ror. * Returns true if a best plan was picked, false if there was an er ror.
* If there was a failure in the underlying plan, *objOut may hold error details.
* *
* If out is not-NULL, set *out to the index of the picked plan. * If out is not-NULL, set *out to the index of the picked plan.
*/ */
bool pickBestPlan(size_t* out); bool pickBestPlan(size_t* out, BSONObj* objOut);
/**
* Returns true if a backup plan was picked.
* This is the case when the best plan has a blocking stage.
* Exposed for testing.
*/
bool hasBackupPlan() const;
/**
* Caching the best plan is (currently implemented as) a destructiv
e act so we separate it
* from ranking so that inspection of the winning solution is possi
ble. Also sets a backup
* plan if a backup plan is needed. Exposed for testing.
*/
void cacheBestPlan();
virtual void saveState(); virtual void saveState();
virtual bool restoreState(); virtual bool restoreState();
virtual void invalidate(const DiskLoc& dl, InvalidationType type); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual void setYieldPolicy(Runner::YieldPolicy policy); virtual void setYieldPolicy(Runner::YieldPolicy policy);
virtual const std::string& ns(); virtual const std::string& ns();
virtual void kill(); virtual void kill();
virtual const Collection* collection() { return _collection; } virtual const Collection* collection() { return _collection; }
/** /**
* Returns OK, allocating and filling in '*explain' and '*planInfo' with details of * Returns OK, allocating and filling in '*explain' and '*planInfo' with details of
* the "winner" plan. Caller takes ownership of '*explain' and '*pl anInfo'. Otherwise, * the "winner" plan. Caller takes ownership of '*explain' and '*pl anInfo'. Otherwise,
* return a status describing the error. * return a status describing the error.
*
* TOOD: fill in the explain of all candidate plans
*/ */
virtual Status getInfo(TypeExplain** explain, virtual Status getInfo(TypeExplain** explain,
PlanInfo** planInfo) const; PlanInfo** planInfo) const;
private: private:
/** /**
* Have all our candidate plans do something. * Have all our candidate plans do something.
* If all our candidate plans fail, *objOut will contain
* information on the failure.
*/ */
bool workAllPlans(); bool workAllPlans(BSONObj* objOut);
void allPlansSaveState(); void allPlansSaveState();
void allPlansRestoreState(); void allPlansRestoreState();
const Collection* _collection; const Collection* _collection;
// Were we killed by an invalidate? // Were we killed by an invalidate?
bool _killed; bool _killed;
// Did all plans fail while we were running them? Note that one pl an can fail // Did all plans fail while we were running them? Note that one pl an can fail
// during normal execution of the plan competition. Here is an exa mple: // during normal execution of the plan competition. Here is an exa mple:
skipping to change at line 159 skipping to change at line 174
// Candidate plans' stats. Owned here. // Candidate plans' stats. Owned here.
std::vector<PlanStageStats*> _candidateStats; std::vector<PlanStageStats*> _candidateStats;
// Yielding policy we use when we're running candidates. // Yielding policy we use when we're running candidates.
boost::scoped_ptr<RunnerYieldPolicy> _yieldPolicy; boost::scoped_ptr<RunnerYieldPolicy> _yieldPolicy;
// The query that we're trying to figure out the best solution to. // The query that we're trying to figure out the best solution to.
boost::scoped_ptr<CanonicalQuery> _query; boost::scoped_ptr<CanonicalQuery> _query;
// What's the ranking? Produced by pickBestPlan, consumed by cache
BestPlan.
auto_ptr<PlanRankingDecision> _ranking;
// What's the best child? Filled out by pickBestPlan, consumed by
cacheBestPlan.
size_t _bestChild;
// //
// Backup plan for sort // Backup plan for sort
// //
QuerySolution* _backupSolution; QuerySolution* _backupSolution;
PlanExecutor* _backupPlan; PlanExecutor* _backupPlan;
std::list<WorkingSetID> _backupAlreadyProduced; std::list<WorkingSetID> _backupAlreadyProduced;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
4 lines changed or deleted 29 lines changed or added


 multicmd.h   multicmd.h 
skipping to change at line 65 skipping to change at line 65
public: public:
BSONObj& cmd; BSONObj& cmd;
Target& d; Target& d;
_MultiCommandJob(BSONObj& _cmd, Target& _d) : cmd(_cmd), d(_d) { } _MultiCommandJob(BSONObj& _cmd, Target& _d) : cmd(_cmd), d(_d) { }
private: private:
string name() const { return "MultiCommandJob"; } string name() const { return "MultiCommandJob"; }
void run() { void run() {
try { try {
ScopedConn c(d.toHost); ScopedConn c(d.toHost);
LOG(1) << "multiCommand running on host " << d.toHost;
d.ok = c.runCommand("admin", cmd, d.result); d.ok = c.runCommand("admin", cmd, d.result);
LOG(1) << "multiCommand response: " << d.result;
} }
catch(DBException&) { catch (const DBException& e) {
DEV log() << "dev caught dbexception on multiCommand " << d LOG(1) << "dev caught " << e.what() << " on multiCommand to
.toHost << rsLog; " << d.toHost;
} }
} }
}; };
inline void multiCommand(BSONObj cmd, list<Target>& L) { inline void multiCommand(BSONObj cmd, list<Target>& L) {
list< shared_ptr<BackgroundJob> > jobs; list< shared_ptr<BackgroundJob> > jobs;
for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) { for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) {
Target& d = *i; Target& d = *i;
_MultiCommandJob *j = new _MultiCommandJob(cmd, d); _MultiCommandJob *j = new _MultiCommandJob(cmd, d);
 End of changes. 3 change blocks. 
3 lines changed or deleted 5 lines changed or added


 namespace_index.h   namespace_index.h 
skipping to change at line 56 skipping to change at line 56
*/ */
class NamespaceIndex { class NamespaceIndex {
public: public:
NamespaceIndex(const std::string &dir, const std::string &database) : NamespaceIndex(const std::string &dir, const std::string &database) :
_ht( 0 ), _dir( dir ), _database( database ) {} _ht( 0 ), _dir( dir ), _database( database ) {}
/* returns true if new db will be created if we init lazily */ /* returns true if new db will be created if we init lazily */
bool exists() const; bool exists() const;
void init() { void init() {
if( !_ht ) if ( !_ht.get() )
_init(); _init();
} }
void add_ns( const StringData& ns, const DiskLoc& loc, bool capped) ; void add_ns( const StringData& ns, const DiskLoc& loc, bool capped) ;
void add_ns( const StringData& ns, const NamespaceDetails* details ); void add_ns( const StringData& ns, const NamespaceDetails* details );
void add_ns( const Namespace& ns, const NamespaceDetails* details ) ; void add_ns( const Namespace& ns, const NamespaceDetails* details ) ;
NamespaceDetails* details(const StringData& ns); NamespaceDetails* details(const StringData& ns);
NamespaceDetails* details(const Namespace& ns); NamespaceDetails* details(const Namespace& ns);
void kill_ns(const StringData& ns); void kill_ns(const StringData& ns);
bool allocated() const { return _ht != 0; } bool allocated() const { return _ht.get() != 0; }
void getNamespaces( std::list<std::string>& tofill , bool onlyColle ctions = true ) const; void getNamespaces( std::list<std::string>& tofill , bool onlyColle ctions = true ) const;
boost::filesystem::path path() const; boost::filesystem::path path() const;
unsigned long long fileLength() const { return _f.length(); } unsigned long long fileLength() const { return _f.length(); }
private: private:
void _init(); void _init();
void maybeMkdir() const; void maybeMkdir() const;
DurableMappedFile _f; DurableMappedFile _f;
HashTable<Namespace,NamespaceDetails> *_ht; auto_ptr<HashTable<Namespace,NamespaceDetails> > _ht;
std::string _dir; std::string _dir;
std::string _database; std::string _database;
}; };
} }
 End of changes. 3 change blocks. 
3 lines changed or deleted 3 lines changed or added


 ns_targeter.h   ns_targeter.h 
skipping to change at line 109 skipping to change at line 109
* Returns OK and fills the endpoints; returns a status describing the error otherwise. * Returns OK and fills the endpoints; returns a status describing the error otherwise.
*/ */
virtual Status targetDelete( const BatchedDeleteDocument& deleteDoc , virtual Status targetDelete( const BatchedDeleteDocument& deleteDoc ,
std::vector<ShardEndpoint*>* endpoints ) const = 0; std::vector<ShardEndpoint*>* endpoints ) const = 0;
/** /**
* Returns a vector of ShardEndpoints for the entire collection. * Returns a vector of ShardEndpoints for the entire collection.
* *
* Returns !OK with message if the full collection could not be tar geted. * Returns !OK with message if the full collection could not be tar geted.
*/ */
virtual Status targetAll( std::vector<ShardEndpoint*>* endpoints ) virtual Status targetCollection( std::vector<ShardEndpoint*>* endpo
const = 0; ints ) const = 0;
/**
* Returns a vector of ShardEndpoints for all shards.
*
* Returns !OK with message if all shards could not be targeted.
*/
virtual Status targetAllShards( std::vector<ShardEndpoint*>* endpoi
nts ) const = 0;
/** /**
* Informs the targeter that a targeting failure occurred during on e of the last targeting * Informs the targeter that a targeting failure occurred during on e of the last targeting
* operations. If this is noted, we cannot note stale responses. * operations. If this is noted, we cannot note stale responses.
*/ */
virtual void noteCouldNotTarget() = 0; virtual void noteCouldNotTarget() = 0;
/** /**
* Informs the targeter of stale config responses for this namespac e from an endpoint, with * Informs the targeter of stale config responses for this namespac e from an endpoint, with
* further information available in the returned staleInfo. * further information available in the returned staleInfo.
 End of changes. 1 change blocks. 
2 lines changed or deleted 10 lines changed or added


 option_section.h   option_section.h 
skipping to change at line 112 skipping to change at line 112
*/ */
OptionDescription& addOptionChaining(const std::string& dottedName, OptionDescription& addOptionChaining(const std::string& dottedName,
const std::string& singleName, const std::string& singleName,
const OptionType type, const OptionType type,
const std::string& description ); const std::string& description );
// These functions are used by the OptionsParser to make calls into boost::program_options // These functions are used by the OptionsParser to make calls into boost::program_options
Status getBoostOptions(po::options_description* boostOptions, Status getBoostOptions(po::options_description* boostOptions,
bool visibleOnly = false, bool visibleOnly = false,
bool includeDefaults = false, bool includeDefaults = false,
OptionSources = SourceAll) const; OptionSources = SourceAll,
bool getEmptySections = true) const;
Status getBoostPositionalOptions( Status getBoostPositionalOptions(
po::positional_options_description* boostPositionalOptions) const; po::positional_options_description* boostPositionalOptions) const;
// This is needed so that the parser can iterate over all registere d options to get the // This is needed so that the parser can iterate over all registere d options to get the
// correct names when populating the Environment, as well as check that a parameter that was // correct names when populating the Environment, as well as check that a parameter that was
// found has been registered and has the correct type // found has been registered and has the correct type
Status getAllOptions(std::vector<OptionDescription>* options) const ; Status getAllOptions(std::vector<OptionDescription>* options) const ;
// Count the number of options in this section and all subsections
Status countOptions(int* numOptions, bool visibleOnly, OptionSource
s sources) const;
/** /**
* Populates the given map with all the default values for any opti ons in this option * Populates the given map with all the default values for any opti ons in this option
* section and all sub sections. * section and all sub sections.
*/ */
Status getDefaults(std::map<Key, Value>* values) const; Status getDefaults(std::map<Key, Value>* values) const;
/** /**
* Populates the given vector with all the constraints for all opti ons in this section and * Populates the given vector with all the constraints for all opti ons in this section and
* sub sections. * sub sections.
*/ */
 End of changes. 2 change blocks. 
1 lines changed or deleted 6 lines changed or added


 owned_pointer_map.h   owned_pointer_map.h 
skipping to change at line 27 skipping to change at line 27
#include <map> #include <map>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
namespace mongo { namespace mongo {
/** /**
* An std::map wrapper that deletes pointers within a vector on destruc tion. The objects * An std::map wrapper that deletes pointers within a vector on destruc tion. The objects
* referenced by the vector's pointers are 'owned' by an object of this class. * referenced by the vector's pointers are 'owned' by an object of this class.
* NOTE that an OwnedPointerMap<K,T> wraps an std::map<K,T*>. * NOTE that an OwnedPointerMap<K,T,Compare> wraps an std::map<K,T*,Com pare>.
*/ */
template<class K, class T> template<class K, class T, class Compare = std::less<K> >
class OwnedPointerMap { class OwnedPointerMap {
MONGO_DISALLOW_COPYING(OwnedPointerMap); MONGO_DISALLOW_COPYING(OwnedPointerMap);
public: public:
typedef typename std::map<K, T*, Compare> MapType;
OwnedPointerMap(); OwnedPointerMap();
~OwnedPointerMap(); ~OwnedPointerMap();
/** Access the map. */ /** Access the map. */
const std::map<K, T*>& map() { return _map; } const MapType& map() { return _map; }
std::map<K, T*>& mutableMap() { return _map; } MapType& mutableMap() { return _map; }
void clear(); void clear();
private: private:
std::map<K, T*> _map; MapType _map;
}; };
template<class K, class T> template<class K, class T, class Compare>
OwnedPointerMap<K, T>::OwnedPointerMap() { OwnedPointerMap<K, T, Compare>::OwnedPointerMap() {
} }
template<class K, class T> template<class K, class T, class Compare>
OwnedPointerMap<K, T>::~OwnedPointerMap() { OwnedPointerMap<K, T, Compare>::~OwnedPointerMap() {
clear(); clear();
} }
template<class K, class T> template<class K, class T, class Compare>
void OwnedPointerMap<K, T>::clear() { void OwnedPointerMap<K, T, Compare>::clear() {
for( typename std::map<K, T*>::iterator i = _map.begin(); i != _map for( typename MapType::iterator i = _map.begin(); i != _map.end();
.end(); ++i ) { ++i ) {
delete i->second; delete i->second;
+i ) {
} }
_map.clear(); _map.clear();
} }
} // namespace mongo } // namespace mongo
 End of changes. 9 change blocks. 
13 lines changed or deleted 16 lines changed or added


 parsed_projection.h   parsed_projection.h 
skipping to change at line 86 skipping to change at line 86
* Does the projection want geoNear metadata? If so any geoNear st age should include them. * Does the projection want geoNear metadata? If so any geoNear st age should include them.
*/ */
bool wantGeoNearDistance() const { bool wantGeoNearDistance() const {
return _wantGeoNearDistance; return _wantGeoNearDistance;
} }
bool wantGeoNearPoint() const { bool wantGeoNearPoint() const {
return _wantGeoNearPoint; return _wantGeoNearPoint;
} }
bool wantIndexKey() const {
return _returnKey;
}
private: private:
/** /**
* Must go through ::make * Must go through ::make
*/ */
ParsedProjection() : _requiresDocument(true) { } ParsedProjection() : _requiresDocument(true) { }
/** /**
* Returns true if field name refers to a positional projection.
*/
static bool _isPositionalOperator(const char* fieldName);
/**
* Returns true if the MatchExpression 'query' queries against * Returns true if the MatchExpression 'query' queries against
* the field named by 'matchfield'. This deeply traverses logical * the field named by 'matchfield'. This deeply traverses logical
* nodes in the matchfield and returns true if any of the children * nodes in the matchfield and returns true if any of the children
* have the field (so if 'query' is {$and: [{a: 1}, {b: 1}]} and * have the field (so if 'query' is {$and: [{a: 1}, {b: 1}]} and
* 'matchfield' is "b", the return value is true). * 'matchfield' is "b", the return value is true).
* *
* Does not take ownership of 'query'. * Does not take ownership of 'query'.
*/ */
static bool _hasPositionalOperatorMatch(const MatchExpression* cons t query, static bool _hasPositionalOperatorMatch(const MatchExpression* cons t query,
const std::string& matchfie ld); const std::string& matchfie ld);
// XXX stringdata? // TODO: stringdata?
vector<string> _requiredFields; vector<string> _requiredFields;
bool _requiresDocument; bool _requiresDocument;
BSONObj _source; BSONObj _source;
bool _wantGeoNearDistance; bool _wantGeoNearDistance;
bool _wantGeoNearPoint; bool _wantGeoNearPoint;
bool _returnKey;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
1 lines changed or deleted 12 lines changed or added


 pdfile.h   pdfile.h 
skipping to change at line 43 skipping to change at line 43
database.1 - data files database.1 - data files
database.2 database.2
... ...
*/ */
#pragma once #pragma once
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobjmanipulator.h"
#include "mongo/db/storage/data_file.h" #include "mongo/db/storage/data_file.h"
#include "mongo/db/storage/durable_mapped_file.h" #include "mongo/db/storage/durable_mapped_file.h"
#include "mongo/db/storage/extent.h" #include "mongo/db/storage/extent.h"
#include "mongo/db/structure/catalog/namespace_details-inl.h" #include "mongo/db/structure/catalog/namespace_details-inl.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/db/pdfile_version.h" #include "mongo/db/pdfile_version.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
#include "mongo/util/log.h" #include "mongo/util/log.h"
#include "mongo/util/mmap.h" #include "mongo/util/mmap.h"
namespace mongo { namespace mongo {
class DataFileHeader; class DataFileHeader;
class Extent; class Extent;
class OpDebug; class OpDebug;
class Record; class Record;
void dropDatabase(const std::string& db); void dropDatabase(const std::string& db);
bool repairDatabase(string db, string &errmsg, bool preserveClonedFiles OnFailure = false, bool backupOriginalFiles = false);
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForRe bool userCreateNS(const char *ns, BSONObj j, string& err,
plication, bool *deferIdIndex = 0); bool logForReplication, bool createDefaultIndexes = t
rue );
/*--------------------------------------------------------------------- */ /*--------------------------------------------------------------------- */
boost::intmax_t dbSize( const char *database );
inline NamespaceIndex* nsindex(const StringData& ns) { inline NamespaceIndex* nsindex(const StringData& ns) {
Database *database = cc().database(); Database *database = cc().database();
verify( database ); verify( database );
DEV { DEV {
StringData dbname = nsToDatabaseSubstring( ns ); StringData dbname = nsToDatabaseSubstring( ns );
if ( database->name() != dbname ) { if ( database->name() != dbname ) {
out() << "ERROR: attempt to write to wrong database\n"; out() << "ERROR: attempt to write to wrong database\n";
out() << " ns:" << ns << '\n'; out() << " ns:" << ns << '\n';
out() << " database->name:" << database->name() << endl; out() << " database->name:" << database->name() << endl;
verify( database->name() == dbname ); verify( database->name() == dbname );
 End of changes. 4 change blocks. 
6 lines changed or deleted 3 lines changed or added


 pipeline_d.h   pipeline_d.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify file( s) * all of the code used other than as permitted herein. If you modify file( s)
* with this exception, you may extend this exception to your version of th e * with this exception, you may extend this exception to your version of th e
* file(s), but you are not obligated to do so. If you do not wish to do so , * file(s), but you are not obligated to do so. If you do not wish to do so ,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also dele te * exception statement from all source files in the program, then also dele te
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include <boost/smart_ptr.hpp>
namespace mongo { namespace mongo {
class Collection;
class DocumentSourceCursor; class DocumentSourceCursor;
struct ExpressionContext; struct ExpressionContext;
class Pipeline; class Pipeline;
class Runner;
/* /*
PipelineD is an extension of the Pipeline class, but with additional PipelineD is an extension of the Pipeline class, but with additional
material that references symbols that are not available in mongos, material that references symbols that are not available in mongos,
where the remainder of the Pipeline class also functions. PipelineD where the remainder of the Pipeline class also functions. PipelineD
is a friend of Pipeline so that it can have equal access to Pipeline' s is a friend of Pipeline so that it can have equal access to Pipeline' s
members. members.
See the friend declaration in Pipeline. See the friend declaration in Pipeline.
*/ */
skipping to change at line 63 skipping to change at line 65
* will feed the execution of the pipeline. * will feed the execution of the pipeline.
* *
* This method looks for early pipeline stages that can be folded i nto * This method looks for early pipeline stages that can be folded i nto
* the underlying cursor, and when a cursor can absorb those, they * the underlying cursor, and when a cursor can absorb those, they
* are removed from the head of the pipeline. For example, an * are removed from the head of the pipeline. For example, an
* early match can be removed and replaced with a Cursor that will * early match can be removed and replaced with a Cursor that will
* do an index scan. * do an index scan.
* *
* The cursor is added to the front of the pipeline's sources. * The cursor is added to the front of the pipeline's sources.
* *
* Must have a ReadContext before entering.
*
* If the returned Runner is non-null, you are responsible for ensu
ring
* it receives appropriate invalidate and kill messages.
*
* @param pPipeline the logical "this" for this operation * @param pPipeline the logical "this" for this operation
* @param pExpCtx the expression context for this pipeline * @param pExpCtx the expression context for this pipeline
*/ */
static void prepareCursorSource( static boost::shared_ptr<Runner> prepareCursorSource(
const intrusive_ptr<Pipeline> &pPipeline, const intrusive_ptr<Pipeline> &pPipeline,
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
private: private:
PipelineD(); // does not exist: prevent instantiation PipelineD(); // does not exist: prevent instantiation
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
2 lines changed or deleted 10 lines changed or added


 plan_cache.h   plan_cache.h 
skipping to change at line 49 skipping to change at line 49
#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_params.h"
#include "mongo/platform/atomic_word.h" #include "mongo/platform/atomic_word.h"
namespace mongo { namespace mongo {
struct PlanRankingDecision; struct PlanRankingDecision;
struct QuerySolution; struct QuerySolution;
struct QuerySolutionNode; struct QuerySolutionNode;
/** /**
* TODO HK notes
* cache should be LRU with some cap on size
* {x:1} and {x:{$gt:7}} not same shape for now -- operator matters
*/
/**
* When the CachedPlanRunner runs a cached query, it can provide feedba ck to the cache. This * When the CachedPlanRunner runs a cached query, it can provide feedba ck to the cache. This
* feedback is available to anyone who retrieves that query in the futu re. * feedback is available to anyone who retrieves that query in the futu re.
*/ */
struct PlanCacheEntryFeedback { struct PlanCacheEntryFeedback {
// How well did the cached plan perform? // How well did the cached plan perform?
boost::scoped_ptr<PlanStageStats> stats; boost::scoped_ptr<PlanStageStats> stats;
// The "goodness" score produced by the plan ranker // The "goodness" score produced by the plan ranker
// corresponding to 'stats'. // corresponding to 'stats'.
double score; double score;
skipping to change at line 247 skipping to change at line 239
// Data provided to the planner to allow it to recreate the solutio ns this entry // Data provided to the planner to allow it to recreate the solutio ns this entry
// represents. Each SolutionCacheData is fully owned here, so in or der to return // represents. Each SolutionCacheData is fully owned here, so in or der to return
// it from the cache a deep copy is made and returned inside Cached Solution. // it from the cache a deep copy is made and returned inside Cached Solution.
std::vector<SolutionCacheData*> plannerData; std::vector<SolutionCacheData*> plannerData;
// An index into plannerData indicating the SolutionCacheData which should be // An index into plannerData indicating the SolutionCacheData which should be
// used to produce a backup solution in the case of a blocking sort . // used to produce a backup solution in the case of a blocking sort .
boost::optional<size_t> backupSoln; boost::optional<size_t> backupSoln;
// XXX: Replace with copy of canonical query? // TODO: Do we really want to just hold a copy of the CanonicalQuer
y? For now we just
// extract the data we need.
//
// Used by the plan cache commands to display an example query // Used by the plan cache commands to display an example query
// of the appropriate shape. // of the appropriate shape.
BSONObj query; BSONObj query;
BSONObj sort; BSONObj sort;
BSONObj projection; BSONObj projection;
// //
// Performance stats // Performance stats
// //
skipping to change at line 272 skipping to change at line 266
// Annotations from cached runs. The CachedSolutionRunner provides these stats about its // Annotations from cached runs. The CachedSolutionRunner provides these stats about its
// runs when they complete. // runs when they complete.
std::vector<PlanCacheEntryFeedback*> feedback; std::vector<PlanCacheEntryFeedback*> feedback;
// The average score of all stored feedback. // The average score of all stored feedback.
boost::optional<double> averageScore; boost::optional<double> averageScore;
// The standard deviation of the scores from stored as feedback. // The standard deviation of the scores from stored as feedback.
boost::optional<double> stddevScore; boost::optional<double> stddevScore;
// Determines the amount of feedback that we are willing to store. // In order to justify eviction, the deviation from the mean must e
Must be >= 1. xceed a
// TODO: how do we tune this? // minimum threshold.
static const size_t kMaxFeedback; static const double kMinDeviation;
// The number of standard deviations which must be exceeded
// in order to determine that the cache entry should be removed.
// Must be positive. TODO how do we tune this?
static const double kStdDevThreshold;
}; };
/** /**
* Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution) * Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
* mapping, the cache contains information on why that mapping was made and statistics on the * mapping, the cache contains information on why that mapping was made and statistics on the
* cache entry's actual performance on subsequent runs. * cache entry's actual performance on subsequent runs.
* *
*/ */
class PlanCache { class PlanCache {
private: private:
MONGO_DISALLOW_COPYING(PlanCache); MONGO_DISALLOW_COPYING(PlanCache);
public: public:
/** /**
* Flush cache when the number of write operations since last
* clear() reaches this limit.
*/
static const int kPlanCacheMaxWriteOperations;
/**
* The maximum number of plan cache entries allowed.
*/
static const int kMaxCacheSize;
/**
* We don't want to cache every possible query. This function * We don't want to cache every possible query. This function
* encapsulates the criteria for what makes a canonical query * encapsulates the criteria for what makes a canonical query
* suitable for lookup/inclusion in the cache. * suitable for lookup/inclusion in the cache.
*/ */
static bool shouldCacheQuery(const CanonicalQuery& query); static bool shouldCacheQuery(const CanonicalQuery& query);
/** /**
* If omitted, namespace set to empty string. * If omitted, namespace set to empty string.
*/ */
PlanCache(); PlanCache();
skipping to change at line 395 skipping to change at line 372
/** /**
* Returns a vector of all cache entries. * Returns a vector of all cache entries.
* Caller owns the result vector and is responsible for cleaning up * Caller owns the result vector and is responsible for cleaning up
* the cache entry copies. * the cache entry copies.
* Used by planCacheListQueryShapes and index_filter_commands_test. cpp. * Used by planCacheListQueryShapes and index_filter_commands_test. cpp.
*/ */
std::vector<PlanCacheEntry*> getAllEntries() const; std::vector<PlanCacheEntry*> getAllEntries() const;
/** /**
* Returns true if there is an entry in the cache for the 'query'.
* Internally calls hasKey() on the LRU cache.
*/
bool contains(const CanonicalQuery& cq) const;
/**
* Returns number of entries in cache. * Returns number of entries in cache.
* Used for testing. * Used for testing.
*/ */
size_t size() const; size_t size() const;
/** /**
* You must notify the cache if you are doing writes, as query pla n utility will change. * You must notify the cache if you are doing writes, as query pla n utility will change.
* Cache is flushed after every 1000 notifications. * Cache is flushed after every 1000 notifications.
*/ */
void notifyOfWriteOp(); void notifyOfWriteOp();
 End of changes. 5 change blocks. 
30 lines changed or deleted 14 lines changed or added


 plan_cache_commands.h   plan_cache_commands.h 
skipping to change at line 123 skipping to change at line 123
/** /**
* Looks up cache keys for collection's plan cache. * Looks up cache keys for collection's plan cache.
* Inserts keys for query into BSON builder. * Inserts keys for query into BSON builder.
*/ */
static Status list(const PlanCache& planCache, BSONObjBuilder* bob) ; static Status list(const PlanCache& planCache, BSONObjBuilder* bob) ;
}; };
/** /**
* planCacheClear * planCacheClear
* *
* { planCacheClear: <collection> } * {
* planCacheClear: <collection>,
* query: <query>,
* sort: <sort>,
* projection: <projection>
* }
* *
*/ */
class PlanCacheClear : public PlanCacheCommand { class PlanCacheClear : public PlanCacheCommand {
public: public:
PlanCacheClear(); PlanCacheClear();
virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj, BSONObjBuilder* bob); virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj, BSONObjBuilder* bob);
/** /**
* Clears collection's plan cache. * Clears collection's plan cache.
* If query shape is provided, clears plans for that single query s hape only.
*/ */
static Status clear(const std::string& ns, PlanCache* planCache); static Status clear(PlanCache* planCache, const std::string& ns, co
}; nst BSONObj& cmdObj);
/**
* planCacheDrop
*
* { planCacheDrop: <collection>, key: <key> } }
*
*/
class PlanCacheDrop : public PlanCacheCommand {
public:
PlanCacheDrop();
virtual Status runPlanCacheCommand(const std::string& ns, BSONObj&
cmdObj,
BSONObjBuilder* bob);
/**
* Drops using a cache key.
*/
static Status drop(PlanCache* planCache,const std::string& ns, con
st BSONObj& cmdObj);
}; };
/** /**
* planCacheListPlans * planCacheListPlans
* *
* { planCacheListPlans: <collection>, key: <key> } } * {
* planCacheListPlans: <collection>,
* query: <query>,
* sort: <sort>,
* projection: <projection>
* }
* *
*/ */
class PlanCacheListPlans : public PlanCacheCommand { class PlanCacheListPlans : public PlanCacheCommand {
public: public:
PlanCacheListPlans(); PlanCacheListPlans();
virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj, virtual Status runPlanCacheCommand(const std::string& ns, BSONObj& cmdObj,
BSONObjBuilder* bob); BSONObjBuilder* bob);
/** /**
* Displays the cached plans for a query shape. * Displays the cached plans for a query shape.
 End of changes. 4 change blocks. 
23 lines changed or deleted 15 lines changed or added


 plan_enumerator.h   plan_enumerator.h 
skipping to change at line 38 skipping to change at line 38
#pragma once #pragma once
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h" #include "mongo/db/query/index_entry.h"
#include "mongo/db/query/index_tag.h" #include "mongo/db/query/index_tag.h"
#include "mongo/db/query/query_knobs.h"
namespace mongo { namespace mongo {
struct PlanEnumeratorParams { struct PlanEnumeratorParams {
PlanEnumeratorParams() : intersect(false) { }
PlanEnumeratorParams() : intersect(false),
maxSolutionsPerOr(internalQueryEnumeration
MaxOrSolutions),
maxIntersectPerAnd(internalQueryEnumeratio
nMaxIntersectPerAnd) { }
// Do we provide solutions that use more indices than the minimum r
equired to provide
// an indexed solution?
bool intersect; bool intersect;
// Not owned here. // Not owned here.
MatchExpression* root; MatchExpression* root;
// Not owned here. // Not owned here.
const vector<IndexEntry>* indices; const vector<IndexEntry>* indices;
// How many plans are we willing to ouput from an OR? We currently
consider
// all possibly OR plans, which means the product of the number of
possibilities
// for each clause of the OR. This could grow disastrously large.
size_t maxSolutionsPerOr;
// How many intersect plans are we willing to output from an AND?
Given that we pursue an
// all-pairs approach, we could wind up creating a lot of enumerati
on possibilities for
// certain inputs.
size_t maxIntersectPerAnd;
}; };
/** /**
* Provides elements from the power set of possible indices to use. Us es the available * Provides elements from the power set of possible indices to use. Us es the available
* predicate information to make better decisions about what indices ar e best. * predicate information to make better decisions about what indices ar e best.
*/ */
class PlanEnumerator { class PlanEnumerator {
MONGO_DISALLOW_COPYING(PlanEnumerator); MONGO_DISALLOW_COPYING(PlanEnumerator);
public: public:
/** /**
skipping to change at line 107 skipping to change at line 123
// An ID we use to index into _memo. An entry in _memo is a NodeAs signment. // An ID we use to index into _memo. An entry in _memo is a NodeAs signment.
typedef size_t MemoID; typedef size_t MemoID;
// An index in _indices. // An index in _indices.
typedef size_t IndexID; typedef size_t IndexID;
// The position of a field in a possibly compound index. // The position of a field in a possibly compound index.
typedef size_t IndexPosition; typedef size_t IndexPosition;
struct PrepMemoContext {
PrepMemoContext() : elemMatchExpr(NULL) { }
MatchExpression* elemMatchExpr;
};
/** /**
* Traverses the match expression and generates the memo structure from it. * Traverses the match expression and generates the memo structure from it.
* Returns true if the provided node uses an index, false otherwise . * Returns true if the provided node uses an index, false otherwise .
*/ */
bool prepMemo(MatchExpression* node); bool prepMemo(MatchExpression* node, PrepMemoContext context);
/** /**
* Traverses the memo structure and annotates the tree with IndexTa gs for the chosen * Traverses the memo structure and annotates the tree with IndexTa gs for the chosen
* indices. * indices.
*/ */
void tagMemo(MemoID id); void tagMemo(MemoID id);
/** /**
* Move to the next enumeration state. Each assignment stores its own enumeration state. * Move to the next enumeration state. Each assignment stores its own enumeration state.
* See the various ____Assignment classes below for details on enum eration state. * See the various ____Assignment classes below for details on enum eration state.
skipping to change at line 168 skipping to change at line 189
// Not owned here. // Not owned here.
MatchExpression* expr; MatchExpression* expr;
// Enumeration state. An indexed predicate's possible states a re the indices that the // Enumeration state. An indexed predicate's possible states a re the indices that the
// predicate can directly use (the 'first' indices). As such t his value ranges from 0 // predicate can directly use (the 'first' indices). As such t his value ranges from 0
// to first.size()-1 inclusive. // to first.size()-1 inclusive.
size_t indexToAssign; size_t indexToAssign;
}; };
struct OrAssignment { struct OrAssignment {
OrAssignment() : counter(0) { }
// Each child of an OR must be indexed for the OR to be indexed
. When an OR moves to a
// subsequent state it just asks all its children to move their
states forward.
// Must use all of subnodes. // Must use all of subnodes.
vector<MemoID> subnodes; vector<MemoID> subnodes;
// No enumeration state. Each child of an OR must be indexed f // The number of OR states that we've enumerated so far.
or the OR to be indexed. size_t counter;
// When an OR moves to a subsequent state it just asks all its
children to move their
// states forward.
}; };
// This is used by AndAssignment and is not an actual assignment. // This is used by AndAssignment and is not an actual assignment.
struct OneIndexAssignment { struct OneIndexAssignment {
// 'preds[i]' is uses index 'index' at position 'positions[i]' // 'preds[i]' is uses index 'index' at position 'positions[i]'
vector<MatchExpression*> preds; vector<MatchExpression*> preds;
vector<IndexPosition> positions; vector<IndexPosition> positions;
IndexID index; IndexID index;
}; };
skipping to change at line 224 skipping to change at line 249
/** /**
* Allocates a NodeAssignment and associates it with the provided ' expr'. * Allocates a NodeAssignment and associates it with the provided ' expr'.
* *
* The unique MemoID of the new assignment is outputted in '*id'. * The unique MemoID of the new assignment is outputted in '*id'.
* The out parameter '*slot' points to the newly allocated NodeAssi gnment. * The out parameter '*slot' points to the newly allocated NodeAssi gnment.
*/ */
void allocateAssignment(MatchExpression* expr, NodeAssignment** slo t, MemoID* id); void allocateAssignment(MatchExpression* expr, NodeAssignment** slo t, MemoID* id);
/** /**
* Predicates inside $elemMatch's that are semantically "$and of $a
nd"
* predicates are not rewritten to the top-level during normalizati
on.
* However, we would like to make predicates inside $elemMatch avai
lable
* for combining index bounds with the top-level $and predicates.
*
* This function deeply traverses $and and $elemMatch expressions o
f
* the tree rooted at 'node', adding all preds that can use an inde
x
* to the output vector 'indexOut'. At the same time, $elemMatch
* context information is stashed in the tags so that we don't lose
* information due to flattening.
*
* Nodes that cannot be deeply traversed are returned via the outpu
t
* vectors 'subnodesOut' and 'mandatorySubnodes'. Subnodes are "man
datory"
* if they *must* use an index (TEXT and GEO).
*
* Does not take ownership of arguments.
*
* Returns false if the AND cannot be indexed. Otherwise returns tr
ue.
*/
bool partitionPreds(MatchExpression* node,
PrepMemoContext context,
vector<MatchExpression*>* indexOut,
vector<MemoID>* subnodesOut,
vector<MemoID>* mandatorySubnodes);
/**
* Finds a set of predicates that can be safely compounded with 'as
signed',
* under the assumption that we are assignining predicates to a com
pound,
* multikey index.
*
* The list of candidate predicates that we could compound is passe
d
* in 'couldCompound'. A subset of these predicates that is safe to
* combine by compounding is returned in the out-parameter 'out'.
*
* Does not take ownership of its arguments.
*
* The rules for when to compound for multikey indices are reasonab
ly
* complex, and are dependent on the structure of $elemMatch's used
* in the query. Ignoring $elemMatch for the time being, the rule i
s this:
*
* "Any set of predicates for which no two predicates share a pat
h
* prefix can be compounded."
*
* Suppose we have predicates over paths 'a.b' and 'a.c'. These can
not
* be compounded because they share the prefix 'a'. Similarly, the
bounds
* for 'a' and 'a.b' cannot be compounded (in the case of multikey
index
* {a: 1, 'a.b': 1}). You *can* compound predicates over the paths
'a.b.c',
* 'd', and 'e.b.c', because there is no shared prefix.
*
* The rules are different in the presence of $elemMatch. For $elem
Match
* {a: {$elemMatch: {<pred1>, ..., <predN>}}}, we are allowed to co
mpound
* bounds for pred1 through predN, even though these predicates sha
re the
* path prefix 'a'. However, we still cannot compound in the case o
f
* {a: {$elemMatch: {'b.c': {$gt: 1}, 'b.d': 5}}} because 'b.c' and
'b.d'
* share a prefix. In other words, what matters inside an $elemMatc
h is not
* the absolute prefix, but rather the "relative prefix" after the
shared
* $elemMatch part of the path.
*
* A few more examples:
* 1) {'a.b': {$elemMatch: {c: {$gt: 1}, d: 5}}}. In this case,
we can
* compound, because the $elemMatch is applied to the shared par
t of
* the path 'a.b'.
*
* 2) {'a.b': 1, a: {$elemMatch: {b: {$gt: 0}}}}. We cannot comb
ine the
* bounds here because the prefix 'a' is shared by two predicate
s which
* are not joined together by an $elemMatch.
*/
void getMultikeyCompoundablePreds(const MatchExpression* assigned,
const vector<MatchExpression*>& c
ouldCompound,
vector<MatchExpression*>* out);
/**
* 'andAssignment' contains assignments that we've already committe
d to outputting,
* including both single index assignments and ixisect assignments.
*
* 'ixisectAssigned' is a set of predicates that we are about to ad
d to 'andAssignment'
* as an index intersection assignment.
*
* Returns true if an single index assignment which is already in '
andAssignment'
* contains a superset of the predicates in 'ixisectAssigned'. This
means that we
* can assign the same preds to a compound index rather than using
index intersection.
*
* Ex.
* Suppose we have indices {a: 1}, {b: 1}, and {a: 1, b: 1} with
query
* {a: 2, b: 2}. When we try to intersect {a: 1} and {b: 1} the p
redicates
* a==2 and b==2 will get assigned to respective indices. But the
n we will
* call this function with ixisectAssigned equal to the set {'a==
2', 'b==2'},
* and notice that we have already assigned this same set of pred
icates to
* the single index {a: 1, b: 1} via compounding.
*/
bool alreadyCompounded(const set<MatchExpression*>& ixisectAssigned
,
const AndAssignment* andAssignment);
/**
* Output index intersection assignments inside of an AND node.
*/
typedef unordered_map<IndexID, vector<MatchExpression*> > IndexToPr
edMap;
/**
* Generate index intersection assignments given the predicate/inde
x structure in idxToFirst
* and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs th
e assignments in
* 'andAssignment'.
*/
void enumerateAndIntersect(const IndexToPredMap& idxToFirst,
const IndexToPredMap& idxToNotFirst,
const vector<MemoID>& subnodes,
AndAssignment* andAssignment);
/**
* Generate one-index-at-once assignments given the predicate/index
structure in idxToFirst
* and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs th
e assignments into
* 'andAssignment'.
*/
void enumerateOneIndex(const IndexToPredMap& idxToFirst,
const IndexToPredMap& idxToNotFirst,
const vector<MemoID>& subnodes,
AndAssignment* andAssignment);
/**
* Try to assign predicates in 'tryCompound' to 'thisIndex' as comp ound assignments. * Try to assign predicates in 'tryCompound' to 'thisIndex' as comp ound assignments.
* Output the assignments in 'assign'. * Output the assignments in 'assign'.
*/ */
void compound(const vector<MatchExpression*>& tryCompound, void compound(const vector<MatchExpression*>& tryCompound,
const IndexEntry& thisIndex, const IndexEntry& thisIndex,
OneIndexAssignment* assign); OneIndexAssignment* assign);
void dumpMemo(); std::string dumpMemo();
// Map from expression to its MemoID. // Map from expression to its MemoID.
unordered_map<MatchExpression*, MemoID> _nodeToId; unordered_map<MatchExpression*, MemoID> _nodeToId;
// Map from MemoID to its precomputed solution info. // Map from MemoID to its precomputed solution info.
unordered_map<MemoID, NodeAssignment*> _memo; unordered_map<MemoID, NodeAssignment*> _memo;
// If true, there are no further enumeration states, and getNext sh ould return false. // If true, there are no further enumeration states, and getNext sh ould return false.
// We could be _done immediately after init if we're unable to outp ut an indexed plan. // We could be _done immediately after init if we're unable to outp ut an indexed plan.
bool _done; bool _done;
skipping to change at line 255 skipping to change at line 398
// //
// Match expression we're planning for. Not owned by us. // Match expression we're planning for. Not owned by us.
MatchExpression* _root; MatchExpression* _root;
// Indices we're allowed to enumerate with. Not owned here. // Indices we're allowed to enumerate with. Not owned here.
const vector<IndexEntry>* _indices; const vector<IndexEntry>* _indices;
// Do we output >1 index per AND (index intersection)? // Do we output >1 index per AND (index intersection)?
bool _ixisect; bool _ixisect;
// How many enumerations are we willing to produce from each OR?
size_t _orLimit;
// How many things do we want from each AND?
size_t _intersectLimit;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 11 change blocks. 
8 lines changed or deleted 210 lines changed or added


 plan_ranker.h   plan_ranker.h 
skipping to change at line 62 skipping to change at line 62
* Populates 'why' with information relevant to how each plan fared in the ranking process. * Populates 'why' with information relevant to how each plan fared in the ranking process.
* Caller owns pointers in 'why'. * Caller owns pointers in 'why'.
* 'candidateOrder' holds indices into candidates ordered by score (winner in first element). * 'candidateOrder' holds indices into candidates ordered by score (winner in first element).
*/ */
static size_t pickBestPlan(const vector<CandidatePlan>& candidates, static size_t pickBestPlan(const vector<CandidatePlan>& candidates,
PlanRankingDecision* why); PlanRankingDecision* why);
/** /**
* Assign the stats tree a 'goodness' score. The higher the score, the better * Assign the stats tree a 'goodness' score. The higher the score, the better
* the plan. The exact value isn't meaningful except for imposing a ranking. * the plan. The exact value isn't meaningful except for imposing a ranking.
*
* XXX: consider moving out of PlanRanker so that the plan
* cache can use directly.
*/ */
static double scoreTree(const PlanStageStats* stats); static double scoreTree(const PlanStageStats* stats);
}; };
/** /**
* A container holding one to-be-ranked plan and its associated/relevan t data. * A container holding one to-be-ranked plan and its associated/relevan t data.
* Does not own any of its pointers. * Does not own any of its pointers.
*/ */
struct CandidatePlan { struct CandidatePlan {
CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w) CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w)
 End of changes. 1 change blocks. 
3 lines changed or deleted 0 lines changed or added


 plan_stage.h   plan_stage.h 
skipping to change at line 127 skipping to change at line 127
IS_EOF, IS_EOF,
// work(...) needs more time to product a result. Call work(.. .) again. There is // work(...) needs more time to product a result. Call work(.. .) again. There is
// nothing output in the out parameter. // nothing output in the out parameter.
NEED_TIME, NEED_TIME,
// Something went wrong but it's not an internal error. Perhap s our collection was // Something went wrong but it's not an internal error. Perhap s our collection was
// dropped or state deleted. // dropped or state deleted.
DEAD, DEAD,
// Something has gone unrecoverably wrong. Stop running this q // Something has gone unrecoverably wrong. Stop running this q
uery. There is nothing uery.
// output in the out parameter. // If the out parameter does not refer to an invalid working se
t member,
// call WorkingSetCommon::getStatusMemberObject() to get detail
s on the failure.
// Any class implementing this interface must set the WSID out
parameter to
// INVALID_ID or a valid WSM ID if FAILURE is returned.
FAILURE, FAILURE,
// Something isn't in memory. Fetch it. // Something isn't in memory. Fetch it.
// //
// Full fetch semantics: // Full fetch semantics:
// The fetch-requesting stage populates the out parameter of wo rk(...) with a WSID that // The fetch-requesting stage populates the out parameter of wo rk(...) with a WSID that
// refers to a WSM with a valid loc. Each stage that receives a NEED_FETCH from a child // refers to a WSM with a valid loc. Each stage that receives a NEED_FETCH from a child
// must propagate the NEED_FETCH up and perform no work. The p lan runner is responsible // must propagate the NEED_FETCH up and perform no work. The p lan runner is responsible
// for paging in the data upon receipt of a NEED_FETCH. The pla n runner does NOT free // for paging in the data upon receipt of a NEED_FETCH. The pla n runner does NOT free
// the WSID of the requested fetch. The stage that requested t he fetch holds the WSID // the WSID of the requested fetch. The stage that requested t he fetch holds the WSID
 End of changes. 1 change blocks. 
3 lines changed or deleted 9 lines changed or added


 plan_stats.h   plan_stats.h 
skipping to change at line 38 skipping to change at line 38
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <cstdlib> #include <cstdlib>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/geo/hash.h"
#include "mongo/db/query/stage_types.h" #include "mongo/db/query/stage_types.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
namespace mongo { namespace mongo {
/** /**
* The interface all specific-to-stage stats provide. * The interface all specific-to-stage stats provide.
*/ */
struct SpecificStats { struct SpecificStats {
virtual ~SpecificStats() { } virtual ~SpecificStats() { }
skipping to change at line 131 skipping to change at line 132
// The stats of the node's children. // The stats of the node's children.
std::vector<PlanStageStats*> children; std::vector<PlanStageStats*> children;
private: private:
MONGO_DISALLOW_COPYING(PlanStageStats); MONGO_DISALLOW_COPYING(PlanStageStats);
}; };
struct AndHashStats : public SpecificStats { struct AndHashStats : public SpecificStats {
AndHashStats() : flaggedButPassed(0), AndHashStats() : flaggedButPassed(0),
flaggedInProgress(0) { } flaggedInProgress(0),
memUsage(0),
memLimit(0) { }
virtual ~AndHashStats() { } virtual ~AndHashStats() { }
virtual SpecificStats* clone() const { virtual SpecificStats* clone() const {
AndHashStats* specific = new AndHashStats(*this); AndHashStats* specific = new AndHashStats(*this);
return specific; return specific;
} }
// Invalidation counters. // Invalidation counters.
// How many results had the AND fully evaluated but were invalidate d? // How many results had the AND fully evaluated but were invalidate d?
skipping to change at line 154 skipping to change at line 157
// How many results were mid-AND but got flagged? // How many results were mid-AND but got flagged?
size_t flaggedInProgress; size_t flaggedInProgress;
// How many entries are in the map after each child? // How many entries are in the map after each child?
// child 'i' produced children[i].common.advanced DiskLocs, of whic h mapAfterChild[i] were // child 'i' produced children[i].common.advanced DiskLocs, of whic h mapAfterChild[i] were
// intersections. // intersections.
std::vector<size_t> mapAfterChild; std::vector<size_t> mapAfterChild;
// mapAfterChild[mapAfterChild.size() - 1] WSMswere match tested. // mapAfterChild[mapAfterChild.size() - 1] WSMswere match tested.
// commonstats.advanced is how many passed. // commonstats.advanced is how many passed.
// What's our current memory usage?
size_t memUsage;
// What's our memory limit?
size_t memLimit;
}; };
struct AndSortedStats : public SpecificStats { struct AndSortedStats : public SpecificStats {
AndSortedStats() : flagged(0), AndSortedStats() : flagged(0),
matchTested(0) { } matchTested(0) { }
virtual ~AndSortedStats() { } virtual ~AndSortedStats() { }
virtual SpecificStats* clone() const { virtual SpecificStats* clone() const {
AndSortedStats* specific = new AndSortedStats(*this); AndSortedStats* specific = new AndSortedStats(*this);
skipping to change at line 257 skipping to change at line 266
// name of the index being used // name of the index being used
std::string indexName; std::string indexName;
BSONObj keyPattern; BSONObj keyPattern;
// A BSON (opaque, ie. hands off other than toString() it) represen tation of the bounds // A BSON (opaque, ie. hands off other than toString() it) represen tation of the bounds
// used. // used.
BSONObj indexBounds; BSONObj indexBounds;
// Contains same information as indexBounds with the addition of in
clusivity of bounds.
std::string indexBoundsVerbose;
// >1 if we're traversing the index along with its order. <1 if we' re traversing it // >1 if we're traversing the index along with its order. <1 if we' re traversing it
// against the order. // against the order.
int direction; int direction;
// Whether this index is over a field that contain array values. // Whether this index is over a field that contain array values.
bool isMultiKey; bool isMultiKey;
size_t yieldMovedCursor; size_t yieldMovedCursor;
size_t dupsTested; size_t dupsTested;
size_t dupsDropped; size_t dupsDropped;
skipping to change at line 302 skipping to change at line 314
size_t dupsDropped; size_t dupsDropped;
// How many calls to invalidate(...) actually removed a DiskLoc fro m our deduping map? // How many calls to invalidate(...) actually removed a DiskLoc fro m our deduping map?
size_t locsForgotten; size_t locsForgotten;
// We know how many passed (it's the # of advanced) and therefore h ow many failed. // We know how many passed (it's the # of advanced) and therefore h ow many failed.
std::vector<size_t> matchTested; std::vector<size_t> matchTested;
}; };
struct SortStats : public SpecificStats { struct SortStats : public SpecificStats {
SortStats() : forcedFetches(0) { } SortStats() : forcedFetches(0), memUsage(0), memLimit(0) { }
virtual ~SortStats() { } virtual ~SortStats() { }
virtual SpecificStats* clone() const { virtual SpecificStats* clone() const {
SortStats* specific = new SortStats(*this); SortStats* specific = new SortStats(*this);
return specific; return specific;
} }
// How many records were we forced to fetch as the result of an inv alidation? // How many records were we forced to fetch as the result of an inv alidation?
size_t forcedFetches; size_t forcedFetches;
// What's our current memory usage?
size_t memUsage;
// What's our memory limit?
size_t memLimit;
}; };
struct MergeSortStats : public SpecificStats { struct MergeSortStats : public SpecificStats {
MergeSortStats() : dupsTested(0), MergeSortStats() : dupsTested(0),
dupsDropped(0), dupsDropped(0),
forcedFetches(0) { } forcedFetches(0) { }
virtual ~MergeSortStats() { } virtual ~MergeSortStats() { }
virtual SpecificStats* clone() const { virtual SpecificStats* clone() const {
skipping to change at line 345 skipping to change at line 363
ShardingFilterStats() : chunkSkips(0) { } ShardingFilterStats() : chunkSkips(0) { }
virtual SpecificStats* clone() const { virtual SpecificStats* clone() const {
ShardingFilterStats* specific = new ShardingFilterStats(*this); ShardingFilterStats* specific = new ShardingFilterStats(*this);
return specific; return specific;
} }
size_t chunkSkips; size_t chunkSkips;
}; };
struct TwoDStats : public SpecificStats {
TwoDStats() { }
virtual SpecificStats* clone() const {
TwoDStats* specific = new TwoDStats(*this);
return specific;
}
// Type of GeoBrowse (box, circle, ...)
std::string type;
// Field name in 2d index.
std::string field;
// Geo hash converter parameters.
// Used to construct a geo hash converter to generate
// explain-style index bounds from geo hashes.
GeoHashConverter::Parameters converterParams;
// Geo hashes generated by GeoBrowse::fillStack.
// Raw data for explain index bounds.
std::vector<GeoHash> expPrefixes;
};
struct TwoDNearStats : public SpecificStats { struct TwoDNearStats : public SpecificStats {
TwoDNearStats() : objectsLoaded(0), nscanned(0) { } TwoDNearStats() : objectsLoaded(0), nscanned(0) { }
virtual SpecificStats* clone() const { virtual SpecificStats* clone() const {
TwoDNearStats* specific = new TwoDNearStats(*this); TwoDNearStats* specific = new TwoDNearStats(*this);
return specific; return specific;
} }
size_t objectsLoaded; size_t objectsLoaded;
// Since 2d's near does all its work in one go we can't divine the real nscanned from // Since 2d's near does all its work in one go we can't divine the real nscanned from
// anything else. // anything else.
size_t nscanned; size_t nscanned;
}; };
struct TextStats : public SpecificStats { struct TextStats : public SpecificStats {
TextStats() : keysExamined(0), fetches(0) { } TextStats() : keysExamined(0), fetches(0), parsedTextQuery() { }
virtual SpecificStats* clone() const { virtual SpecificStats* clone() const {
TextStats* specific = new TextStats(*this); TextStats* specific = new TextStats(*this);
return specific; return specific;
} }
size_t keysExamined; size_t keysExamined;
size_t fetches; size_t fetches;
// Human-readable form of the FTSQuery associated with the text sta
ge.
BSONObj parsedTextQuery;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 9 change blocks. 
3 lines changed or deleted 50 lines changed or added


 planner_access.h   planner_access.h 
skipping to change at line 39 skipping to change at line 39
#pragma once #pragma once
#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/index_bounds_builder.h"
#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_solution.h"
namespace mongo { namespace mongo {
/** /**
* MULTIKEY INDEX BOUNDS RULES
*
* 1. In general for a multikey index, we cannot intersect bounds
* even if the index is not compound.
* Example:
* Let's say we have the document {a: [5, 7]}.
* This document satisfies the query {$and: [ {a: 5}, {a: 7} ] }
* For the index {a:1} we have the keys {"": 5} and {"": 7}.
* Each child of the AND is tagged with the index {a: 1}
* The interval for the {a: 5} branch is [5, 5]. It is exact.
* The interval for the {a: 7} branch is [7, 7]. It is exact.
* The intersection of the intervals is {}.
* If we scan over {}, the intersection of the intervals, we will ret
rieve nothing.
*
* 2. In general for a multikey compound index, we *can* compound the b
ounds.
* For example, if we have multikey index {a: 1, b: 1} and query {a: 2,
b: 3},
* we can use the bounds {a: [[2, 2]], b: [[3, 3]]}.
*
* 3. Despite rule #2, if fields in the compound index share a prefix,
then it
* is not safe to compound the bounds. We can only specify bounds for t
he first
* field.
* Example:
* Let's say we have the document {a: [ {b: 3}, {c: 4} ] }
* This document satisfies the query {'a.b': 3, 'a.c': 4}.
* For the index {'a.b': 1, 'a.c': 1} we have the keys {"": 3, "": nu
ll} and
* {"": null, "":
4}.
* Let's use the aforementioned index to answer the query.
* The bounds for 'a.b' are [3,3], and the bounds for 'a.c' are [4,4]
.
* If we combine the bounds, we would only look at keys {"": 3, "":4
}.
* Therefore we wouldn't look at the document's keys in the index.
* Therefore we don't combine bounds.
*
* 4. There is an exception to rule #1, and that is when we're evaluati
ng
* an $elemMatch.
* Example:
* Let's say that we have the same document from (1), {a: [5, 7]}.
* This document satisfies {a: {$lte: 5, $gte: 7}}, but it does not
* satisfy {a: {$elemMatch: {$lte: 5, $gte: 7}}}. The $elemMatch indi
cates
* that we are allowed to intersect the bounds, which means that we w
ill
* scan over the empty interval {} and retrieve nothing. This is the
* expected result because there is no entry in the array "a" that
* simultaneously satisfies the predicates a<=5 and a>=7.
*
* 5. There is also an exception to rule #3, and that is when we're eva
luating
* an $elemMatch. The bounds can be compounded for predicates that shar
e a prefix
* so long as the shared prefix is the path for which there is an $elem
Match.
* Example:
* Suppose we have the same document from (3), {a: [{b: 3}, {c: 4}]}.
As discussed
* above, we cannot compound the index bounds for query {'a.b': 1, 'a
.c': 1}.
* However, for the query {a: {$elemMatch: {b: 1, c: 1}} we can compo
und the
* bounds because the $elemMatch is applied to the shared prefix "a".
*/
/**
* Methods for creating a QuerySolutionNode tree that accesses the data required by the query. * Methods for creating a QuerySolutionNode tree that accesses the data required by the query.
*/ */
class QueryPlannerAccess { class QueryPlannerAccess {
public: public:
/** /**
* Return a CollectionScanNode that scans as requested in 'query'. * Return a CollectionScanNode that scans as requested in 'query'.
*/ */
static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query, static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query,
bool tailable, bool tailable,
const QueryPlannerPara ms& params); const QueryPlannerPara ms& params);
skipping to change at line 108 skipping to change at line 162
/** /**
* Takes ownership of 'root'. * Takes ownership of 'root'.
*/ */
static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& quer y, static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& quer y,
MatchExpression* root, MatchExpression* root,
bool inArrayOperator, bool inArrayOperator,
const vector<IndexEntry>& indices); const vector<IndexEntry>& indices);
/** /**
* Traverses the tree rooted at the $elemMatch expression 'node',
* finding all predicates that can use an index directly and return
ing
* them in the out-parameter vector 'out'.
*
* Traverses only through $and and $elemMatch nodes, not through ot
her
* logical or array nodes like $or and $all.
*/
static void findElemMatchChildren(const MatchExpression* node,
vector<MatchExpression*>* out);
/**
* Helper used by buildIndexedAnd and buildIndexedOr. * Helper used by buildIndexedAnd and buildIndexedOr.
* *
* The children of AND and OR nodes are sorted by the index that th e subtree rooted at * The children of AND and OR nodes are sorted by the index that th e subtree rooted at
* that node uses. Child nodes that use the same index are adjacen t to one another to * that node uses. Child nodes that use the same index are adjacen t to one another to
* facilitate grouping of index scans. As such, the processing for AND and OR is * facilitate grouping of index scans. As such, the processing for AND and OR is
* almost identical. * almost identical.
* *
* See tagForSort and sortUsingTags in index_tag.h for details on o rdering the children * See tagForSort and sortUsingTags in index_tag.h for details on o rdering the children
* of OR and AND. * of OR and AND.
* *
skipping to change at line 141 skipping to change at line 206
* Create a new data access node. * Create a new data access node.
* *
* If the node is an index scan, the bounds for 'expr' are computed and placed into the * If the node is an index scan, the bounds for 'expr' are computed and placed into the
* first field's OIL position. The rest of the OILs are allocated but uninitialized. * first field's OIL position. The rest of the OILs are allocated but uninitialized.
* *
* If the node is a geo node, grab the geo data from 'expr' and stu ff it into the * If the node is a geo node, grab the geo data from 'expr' and stu ff it into the
* geo solution node of the appropriate type. * geo solution node of the appropriate type.
*/ */
static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query, static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query,
const IndexEntry& index, const IndexEntry& index,
size_t pos,
MatchExpression* expr, MatchExpression* expr,
IndexBoundsBuilder::BoundsTi ghtness* tightnessOut); IndexBoundsBuilder::BoundsTi ghtness* tightnessOut);
/** /**
* Merge the predicate 'expr' with the leaf node 'node'. * Merge the predicate 'expr' with the leaf node 'node'.
*/ */
static void mergeWithLeafNode(MatchExpression* expr, static void mergeWithLeafNode(MatchExpression* expr,
const IndexEntry& index, const IndexEntry& index,
size_t pos, size_t pos,
IndexBoundsBuilder::BoundsTightness* tightnessOut, IndexBoundsBuilder::BoundsTightness* tightnessOut,
QuerySolutionNode* node, QuerySolutionNode* node,
MatchExpression::MatchType mergeType) ; MatchExpression::MatchType mergeType) ;
/** /**
* Determines whether it is safe to merge the expression 'expr' wit
h
* the leaf node of the query solution, 'node'.
*
* 'index' provides information about the index used by 'node'.
* 'pos' gives the position in the index (for compound indices) tha
t
* 'expr' needs to use. Finally, 'mergeType' indicates whether we
* will try to merge using an AND or OR.
*
* Does not take ownership of its arguments.
*/
static bool shouldMergeWithLeaf(const MatchExpression* expr,
const IndexEntry& index,
size_t pos,
QuerySolutionNode* node,
MatchExpression::MatchType mergeTyp
e);
/**
* If index scan (regular or expression index), fill in any bounds that are missing in * If index scan (regular or expression index), fill in any bounds that are missing in
* 'node' with the "all values for this field" interval. * 'node' with the "all values for this field" interval.
* *
* If geo, do nothing. * If geo, do nothing.
* If text, punt to finishTextNode.
*/ */
static void finishLeafNode(QuerySolutionNode* node, const IndexEntr y& index); static void finishLeafNode(QuerySolutionNode* node, const IndexEntr y& index);
static void finishTextNode(QuerySolutionNode* node, const IndexEntr
y& index);
private: private:
/** /**
* Add the filter 'match' to the query solution node 'node'. Takes * Add the filter 'match' to the query solution node 'node'. Takes
* ownership of 'match'. * ownership of 'match'.
* *
* The MatchType, 'type', indicates whether 'match' is a child of a n * The MatchType, 'type', indicates whether 'match' is a child of a n
* AND or an OR match expression. * AND or an OR match expression.
*/ */
static void _addFilterToSolutionNode(QuerySolutionNode* node, Match Expression* match, static void _addFilterToSolutionNode(QuerySolutionNode* node, Match Expression* match,
MatchExpression::MatchType typ e); MatchExpression::MatchType typ e);
 End of changes. 6 change blocks. 
0 lines changed or deleted 110 lines changed or added


 planner_ixselect.h   planner_ixselect.h 
skipping to change at line 89 skipping to change at line 89
* *
* If an index is prefixed by the predicate's path, it's always use ful. * If an index is prefixed by the predicate's path, it's always use ful.
* *
* If an index is compound but not prefixed by a predicate's path, it's only useful if * If an index is compound but not prefixed by a predicate's path, it's only useful if
* there exists another predicate that 1. will use that index and 2 . is related to the * there exists another predicate that 1. will use that index and 2 . is related to the
* original predicate by having an AND as a parent. * original predicate by having an AND as a parent.
*/ */
static void rateIndices(MatchExpression* node, static void rateIndices(MatchExpression* node,
string prefix, string prefix,
const vector<IndexEntry>& indices); const vector<IndexEntry>& indices);
/**
* Amend the RelevantTag lists for all predicates in the subtree ro
oted at 'node' to remove
* invalid assignments to text and geo indices.
*
* See the body of this function and the specific stripInvalidAssig
nments functions for details.
*/
static void stripInvalidAssignments(MatchExpression* node,
const vector<IndexEntry>& indic
es);
private:
/**
* Amend the RelevantTag lists for all predicates in the subtree ro
oted at 'node' to remove
* invalid assignments to text indexes.
*
* A predicate on a field from a compound text index with a non-emp
ty index prefix
* (e.g. pred {a: 1, b: 1} on index {a: 1, b: 1, c: "text"}) is onl
y considered valid to
* assign to the text index if it is a direct child of an AND with
the following properties:
* - it has a TEXT child
* - for every index prefix component, it has an EQ child on that c
omponent's path
*
* Note that compatible() enforces the precondition that only EQ no
des are considered
* relevant to text index prefixes.
* If there is a relevant compound text index with a non-empty "ind
ex prefix" (e.g. the
* prefix {a: 1, b: 1} for the index {a: 1, b: 1, c: "text"}), amen
d the RelevantTag(s)
* created above to remove assignments to the text index where the
query does not have
* predicates over each indexed field of the prefix.
*
* This is necessary because text indices do not obey the normal ru
les of sparseness, in
* that they generate no index keys for documents without indexable
text data in at least
* one text field (in fact, text indices ignore the sparse option e
ntirely). For example,
* given the text index {a: 1, b: 1, c: "text"}:
*
* - Document {a: 1, b: 6, c: "hello world"} generates 2 index keys
* - Document {a: 1, b: 7, c: {d: 1}} generates 0 index keys
* - Document {a: 1, b: 8} generates 0 index keys
*
* As a result, the query {a: 1} *cannot* be satisfied by the text
index {a: 1, b: 1, c:
* "text"}, since documents without indexed text data would not be
returned by the query.
* rateIndices() above will eagerly annotate the pred {a: 1} as rel
evant to the text index;
* those annotations get removed here.
*/
static void stripInvalidAssignmentsToTextIndexes(MatchExpression* n
ode,
const vector<Index
Entry>& indices);
/**
* For V1 2dsphere indices we ignore the sparse option. As such we
can use an index
* like {nongeo: 1, geo: "2dsphere"} to answer queries only involvi
ng nongeo.
*
* For V2 2dsphere indices also ignore the sparse flag but indexing
behavior as compared to
* V1 is different. If all of the geo fields are missing from the
document we do not index
* it. As such we cannot use V2 sparse indices unless we have a pr
edicate over a geo
* field.
*
* 2dsphere indices V2 are "geo-sparse." That is, if there aren't
any geo-indexed fields in
* a document it won't be indexed. As such we can't use an index l
ike {foo:1, geo:
* "2dsphere"} to answer a query on 'foo' if the index is V2 as it
will not contain the
* document {foo:1}.
*
* We *can* use it to answer a query on 'foo' if the predicate on '
foo' is AND-related to a
* predicate on every geo field in the index.
*/
static void stripInvalidAssignmentsTo2dsphereIndices(MatchExpressio
n* node,
const vector<I
ndexEntry>& indices);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
0 lines changed or deleted 95 lines changed or added


 privilege_parser.h   privilege_parser.h 
skipping to change at line 157 skipping to change at line 157
static bool parsedPrivilegeToPrivilege(const ParsedPrivilege& parse dPrivilege, static bool parsedPrivilegeToPrivilege(const ParsedPrivilege& parse dPrivilege,
Privilege* result, Privilege* result,
std::string* errmsg); std::string* errmsg);
/** /**
* Takes a Privilege object and turns it into a ParsedPrivilege. * Takes a Privilege object and turns it into a ParsedPrivilege.
*/ */
static bool privilegeToParsedPrivilege(const Privilege& privilege, static bool privilegeToParsedPrivilege(const Privilege& privilege,
ParsedPrivilege* result, ParsedPrivilege* result,
std::string* errmsg); std::string* errmsg);
/** Copies all the fields present in 'this' to 'other'. */
void cloneTo(ParsedPrivilege* other) const;
// //
// bson serializable interface implementation // bson serializable interface implementation
// //
bool isValid(std::string* errMsg) const; bool isValid(std::string* errMsg) const;
BSONObj toBSON() const; BSONObj toBSON() const;
bool parseBSON(const BSONObj& source, std::string* errMsg); bool parseBSON(const BSONObj& source, std::string* errMsg);
void clear(); void clear();
std::string toString() const; std::string toString() const;
 End of changes. 1 change blocks. 
3 lines changed or deleted 0 lines changed or added


 projection_exec.h   projection_exec.h 
skipping to change at line 143 skipping to change at line 143
/** /**
* Appends the element 'e' to the builder 'bob', possibly descendin g into sub-fields of 'e' * Appends the element 'e' to the builder 'bob', possibly descendin g into sub-fields of 'e'
* if needed. * if needed.
*/ */
Status append(BSONObjBuilder* bob, Status append(BSONObjBuilder* bob,
const BSONElement& elt, const BSONElement& elt,
const MatchDetails* details = NULL, const MatchDetails* details = NULL,
const ArrayOpType arrayOpType = ARRAY_OP_NORMAL) cons t; const ArrayOpType arrayOpType = ARRAY_OP_NORMAL) cons t;
// XXX document /**
* Like append, but for arrays.
* Deals with slice and calls appendArray to preserve the array-nes
s.
*/
void appendArray(BSONObjBuilder* bob, const BSONObj& array, bool ne sted = false) const; void appendArray(BSONObjBuilder* bob, const BSONObj& array, bool ne sted = false) const;
// True if default at this level is to include. // True if default at this level is to include.
bool _include; bool _include;
// True if this level can't be skipped or included without recursin g. // True if this level can't be skipped or included without recursin g.
bool _special; bool _special;
// We must group projections with common prefixes together. // We must group projections with common prefixes together.
// TODO: benchmark vector<pair> vs map // TODO: benchmark vector<pair> vs map
// XXX: document //
// Projection is a rooted tree. If we have {a.b: 1, a.c: 1} we don
't want to
// double-traverse the document when we're projecting it. Instead,
we have an entry in
// _fields for 'a' with two sub projections: b:1 and c:1.
FieldMap _fields; FieldMap _fields;
// The raw projection spec. that is passed into init(...) // The raw projection spec. that is passed into init(...)
BSONObj _source; BSONObj _source;
// Should we include the _id field? // Should we include the _id field?
bool _includeID; bool _includeID;
// Arguments from the $slice operator. // Arguments from the $slice operator.
int _skip; int _skip;
int _limit; int _limit;
// Used for $elemMatch and positional operator ($) // Used for $elemMatch and positional operator ($)
Matchers _matchers; Matchers _matchers;
// The matchers above point into BSONObjs and this is where those o bjs live. // The matchers above point into BSONObjs and this is where those o bjs live.
vector<BSONObj> _elemMatchObjs; vector<BSONObj> _elemMatchObjs;
ArrayOpType _arrayOpType; ArrayOpType _arrayOpType;
// Is there an elemMatch or positional operator? // Is there an slice, elemMatch or meta operator?
bool _hasNonSimple; bool _hasNonSimple;
// Is there a projection over a dotted field? // Is there a projection over a dotted field or a $ positional oper ator?
bool _hasDottedField; bool _hasDottedField;
// The full query expression. Used when we need MatchDetails. // The full query expression. Used when we need MatchDetails.
const MatchExpression* _queryExpression; const MatchExpression* _queryExpression;
// Projections that aren't sourced from the document or index keys. // Projections that aren't sourced from the document or index keys.
MetaMap _meta; MetaMap _meta;
// Do we have a returnKey projection? If so we *only* output the i ndex key metadata. If // Do we have a returnKey projection? If so we *only* output the i ndex key metadata. If
// it's not found we output nothing. // it's not found we output nothing.
 End of changes. 4 change blocks. 
4 lines changed or deleted 13 lines changed or added


 qlog.h   qlog.h 
skipping to change at line 38 skipping to change at line 38
#pragma once #pragma once
#include <ostream> #include <ostream>
namespace mongo { namespace mongo {
extern bool verboseQueryLogging; extern bool verboseQueryLogging;
// With a #define like this, we don't evaluate the costly toString()s that are QLOG'd // With a #define like this, we don't evaluate the costly toString()s that are QLOG'd
#define QLOG() if (verboseQueryLogging) log() #define QLOG() if (verboseQueryLogging) log() << "[QLOG] "
bool qlogOff(); bool qlogOff();
bool qlogOn(); bool qlogOn();
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 query_planner_common.h   query_planner_common.h 
skipping to change at line 29 skipping to change at line 29
* linked combinations including the program with the OpenSSL library. Y ou * linked combinations including the program with the OpenSSL library. Y ou
* must comply with the GNU Affero General Public License in all respect s for * must comply with the GNU Affero General Public License in all respect s for
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_solution.h"
#include "mongo/db/query/qlog.h" #include "mongo/db/query/qlog.h"
namespace mongo { namespace mongo {
/** /**
* Methods used by several parts of the planning process. * Methods used by several parts of the planning process.
*/ */
skipping to change at line 107 skipping to change at line 109
// Step 1: reverse the list. // Step 1: reverse the list.
std::reverse(iv.begin(), iv.end()); std::reverse(iv.begin(), iv.end());
// Step 2: reverse each interval. // Step 2: reverse each interval.
for (size_t j = 0; j < iv.size(); ++j) { for (size_t j = 0; j < iv.size(); ++j) {
iv[j].reverse(); iv[j].reverse();
} }
} }
} }
if (!isn->bounds.isValidFor(isn->indexKeyPattern, isn->dire ction)) { if (!isn->bounds.isValidFor(isn->indexKeyPattern, isn->dire ction)) {
QLOG() << "invalid bounds: " << isn->bounds.toString() << endl; QLOG() << "Invalid bounds: " << isn->bounds.toString() << endl;
verify(0); verify(0);
} }
// TODO: we can just negate every value in the already comp uted properties. // TODO: we can just negate every value in the already comp uted properties.
isn->computeProperties(); isn->computeProperties();
} }
else if (STAGE_SORT_MERGE == type) { else if (STAGE_SORT_MERGE == type) {
// reverse direction of comparison for merge // reverse direction of comparison for merge
MergeSortNode* msn = static_cast<MergeSortNode*>(node); MergeSortNode* msn = static_cast<MergeSortNode*>(node);
msn->sort = reverseSortObj(msn->sort); msn->sort = reverseSortObj(msn->sort);
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 query_planner_params.h   query_planner_params.h 
skipping to change at line 35 skipping to change at line 35
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <vector>
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/query/index_entry.h" #include "mongo/db/query/index_entry.h"
#include "mongo/db/query/query_knobs.h"
namespace mongo { namespace mongo {
struct QueryPlannerParams { struct QueryPlannerParams {
QueryPlannerParams() : options(DEFAULT), indexFiltersApplied(false)
{ } QueryPlannerParams() : options(DEFAULT),
indexFiltersApplied(false),
maxIndexedSolutions(internalQueryPlannerMaxI
ndexedSolutions) { }
enum Options { enum Options {
// You probably want to set this. // You probably want to set this.
DEFAULT = 0, DEFAULT = 0,
// Set this if you don't want a table scan. // Set this if you don't want a table scan.
// See http://docs.mongodb.org/manual/reference/parameters/ // See http://docs.mongodb.org/manual/reference/parameters/
NO_TABLE_SCAN = 1, NO_TABLE_SCAN = 1,
// Set this if you want a collscan outputted even if there's an // Set this if you *always* want a collscan outputted, even if
ixscan. there's an ixscan. This
// makes ranking less accurate, especially in the presence of b
locking stages.
INCLUDE_COLLSCAN = 1 << 1, INCLUDE_COLLSCAN = 1 << 1,
// Set this if you're running on a sharded cluster. We'll add a "drop all docs that // Set this if you're running on a sharded cluster. We'll add a "drop all docs that
// shouldn't be on this shard" stage before projection. // shouldn't be on this shard" stage before projection.
// //
// In order to set this, you must check // In order to set this, you must check
// shardingState.needCollectionMetadata(current_namespace) in t he same lock that you use // shardingState.needCollectionMetadata(current_namespace) in t he same lock that you use
// to build the query runner. You must also wrap the Runner in a ClientCursor within the // to build the query runner. You must also wrap the Runner in a ClientCursor within the
// same lock. See the comment on ShardFilterStage for details. // same lock. See the comment on ShardFilterStage for details.
INCLUDE_SHARD_FILTER = 1 << 2, INCLUDE_SHARD_FILTER = 1 << 2,
skipping to change at line 75 skipping to change at line 80
// Set this if you want to turn on index intersection. // Set this if you want to turn on index intersection.
INDEX_INTERSECTION = 1 << 4, INDEX_INTERSECTION = 1 << 4,
// Set this if you want to try to keep documents deleted or mut ated during the execution // Set this if you want to try to keep documents deleted or mut ated during the execution
// of the query in the query results. // of the query in the query results.
KEEP_MUTATIONS = 1 << 5, KEEP_MUTATIONS = 1 << 5,
// Nobody should set this above the getRunner interface. Inter nal flag set as a hint to // Nobody should set this above the getRunner interface. Inter nal flag set as a hint to
// the planner that the caller is actually the count command. // the planner that the caller is actually the count command.
PRIVATE_IS_COUNT = 1 << 6, PRIVATE_IS_COUNT = 1 << 6,
// Set this if you want to handle batchSize properly with sort(
). If limits on SORT
// stages are always actually limits, then this should be left
off. If they are
// sometimes to be interpreted as batchSize, then this should b
e turned on.
SPLIT_LIMITED_SORT = 1 << 7
}; };
// See Options enum above. // See Options enum above.
size_t options; size_t options;
// What indices are available for planning? // What indices are available for planning?
vector<IndexEntry> indices; vector<IndexEntry> indices;
// What's our shard key? If INCLUDE_SHARD_FILTER is set we will cr eate a shard filtering // What's our shard key? If INCLUDE_SHARD_FILTER is set we will cr eate a shard filtering
// stage. If we know the shard key, we can perform covering analys is instead of always // stage. If we know the shard key, we can perform covering analys is instead of always
// forcing a fetch. // forcing a fetch.
BSONObj shardKey; BSONObj shardKey;
// Were index filters applied to indices? // Were index filters applied to indices?
bool indexFiltersApplied; bool indexFiltersApplied;
// What's the max number of indexed solutions we want to output? I
t's expensive to compare
// plans via the MultiPlanRunner, and the set of possible plans is
very large for certain
// index+query combinations.
size_t maxIndexedSolutions;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
4 lines changed or deleted 25 lines changed or added


 query_planner_test_lib.h   query_planner_test_lib.h 
skipping to change at line 55 skipping to change at line 55
class QueryPlannerTestLib { class QueryPlannerTestLib {
public: public:
/** /**
* @param testSoln -- a BSON representation of a query solution * @param testSoln -- a BSON representation of a query solution
* @param trueSoln -- the root node of a query solution tree * @param trueSoln -- the root node of a query solution tree
* *
* Returns true if the BSON representation matches the actual * Returns true if the BSON representation matches the actual
* tree, otherwise returns false. * tree, otherwise returns false.
*/ */
static bool solutionMatches(const BSONObj& testSoln, const QuerySol utionNode* trueSoln); static bool solutionMatches(const BSONObj& testSoln, const QuerySol utionNode* trueSoln);
static bool solutionMatches(const string& testSoln, const QuerySolu
tionNode* trueSoln) {
return solutionMatches(fromjson(testSoln), trueSoln);
}
}; };
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
0 lines changed or deleted 5 lines changed or added


 query_solution.h   query_solution.h 
skipping to change at line 128 skipping to change at line 128
* Return a BSONObjSet representing the possible sort orders of the data stream from this * Return a BSONObjSet representing the possible sort orders of the data stream from this
* node. If the data is not sorted in any particular fashion, retu rns an empty set. * node. If the data is not sorted in any particular fashion, retu rns an empty set.
* *
* Usage: * Usage:
* 1. If our plan gives us a sort order, we don't have to add a sor t stage. * 1. If our plan gives us a sort order, we don't have to add a sor t stage.
* 2. If all the children of an OR have the same sort order, we can maintain that * 2. If all the children of an OR have the same sort order, we can maintain that
* sort order with a STAGE_SORT_MERGE instead of STAGE_OR. * sort order with a STAGE_SORT_MERGE instead of STAGE_OR.
*/ */
virtual const BSONObjSet& getSort() const = 0; virtual const BSONObjSet& getSort() const = 0;
/**
* Make a deep copy.
*/
virtual QuerySolutionNode* clone() const = 0;
/**
* Copy base query solution data from 'this' to 'other'.
*/
void cloneBaseData(QuerySolutionNode* other) const {
for (size_t i = 0; i < this->children.size(); i++) {
other->children.push_back(this->children[i]->clone());
}
if (NULL != this->filter) {
other->filter.reset(this->filter->shallowClone());
}
}
// These are owned here. // These are owned here.
vector<QuerySolutionNode*> children; vector<QuerySolutionNode*> children;
// If a stage has a non-NULL filter all values outputted from that stage must pass that // If a stage has a non-NULL filter all values outputted from that stage must pass that
// filter. // filter.
scoped_ptr<MatchExpression> filter; scoped_ptr<MatchExpression> filter;
protected: protected:
/** /**
* Formatting helper used by toString(). * Formatting helper used by toString().
skipping to change at line 158 skipping to change at line 175
MONGO_DISALLOW_COPYING(QuerySolutionNode); MONGO_DISALLOW_COPYING(QuerySolutionNode);
}; };
/** /**
* A QuerySolution must be entirely self-contained and own everything i nside of it. * A QuerySolution must be entirely self-contained and own everything i nside of it.
* *
* A tree of stages may be built from a QuerySolution. The QuerySoluti on must outlive the tree * A tree of stages may be built from a QuerySolution. The QuerySoluti on must outlive the tree
* of stages. * of stages.
*/ */
struct QuerySolution { struct QuerySolution {
QuerySolution() : hasSortStage(false) { } QuerySolution() : hasBlockingStage(false), indexFilterApplied(false ) { }
// Owned here. // Owned here.
scoped_ptr<QuerySolutionNode> root; scoped_ptr<QuerySolutionNode> root;
// Any filters in root or below point into this object. Must be ow ned. // Any filters in root or below point into this object. Must be ow ned.
BSONObj filterData; BSONObj filterData;
string ns; string ns;
// XXX temporary: if it has a sort stage the sort wasn't provided b // There are two known scenarios in which a query solution might po
y an index, tentially block:
// so we use that index (if it exists) to provide a sort. //
bool hasSortStage; // Sort stage:
// If the solution has a sort stage, the sort wasn't provided by an
index, so we might want
// to scan an index to provide that sort in a non-blocking fashion.
//
// Hashed AND stage:
// The hashed AND stage buffers data from multiple index scans and
could block. In that case,
// we would want to fall back on an alternate non-blocking solution
.
bool hasBlockingStage;
// Runner executing this solution might be interested in knowing
// if the planning process for this solution was based on filtered
indices.
bool indexFilterApplied;
// Owned here. Used by the plan cache. // Owned here. Used by the plan cache.
boost::scoped_ptr<SolutionCacheData> cacheData; boost::scoped_ptr<SolutionCacheData> cacheData;
/** /**
* Output a human-readable string representing the plan. * Output a human-readable string representing the plan.
*/ */
string toString() { string toString() {
if (NULL == root) { if (NULL == root) {
return "empty query solution"; return "empty query solution";
skipping to change at line 205 skipping to change at line 233
virtual StageType getType() const { return STAGE_TEXT; } virtual StageType getType() const { return STAGE_TEXT; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
// text's return is LOC_AND_UNOWNED_OBJ so it's fetched and has all fields. // text's return is LOC_AND_UNOWNED_OBJ so it's fetched and has all fields.
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
QuerySolutionNode* clone() const;
BSONObjSet _sort; BSONObjSet _sort;
BSONObj _indexKeyPattern; BSONObj indexKeyPattern;
std::string _query; std::string query;
std::string _language; std::string language;
// "Prefix" fields of a text index can handle equality predicates.
We group them with the
// text node while creating the text leaf node and convert them int
o a BSONObj index prefix
// when we finish the text leaf node.
BSONObj indexPrefix;
}; };
struct CollectionScanNode : public QuerySolutionNode { struct CollectionScanNode : public QuerySolutionNode {
CollectionScanNode(); CollectionScanNode();
virtual ~CollectionScanNode() { } virtual ~CollectionScanNode() { }
virtual StageType getType() const { return STAGE_COLLSCAN; } virtual StageType getType() const { return STAGE_COLLSCAN; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
QuerySolutionNode* clone() const;
BSONObjSet _sort; BSONObjSet _sort;
// Name of the namespace. // Name of the namespace.
string name; string name;
// Should we make a tailable cursor? // Should we make a tailable cursor?
bool tailable; bool tailable;
int direction; int direction;
skipping to change at line 252 skipping to change at line 289
virtual StageType getType() const { return STAGE_AND_HASH; } virtual StageType getType() const { return STAGE_AND_HASH; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return children.back()->getSort (); } const BSONObjSet& getSort() const { return children.back()->getSort (); }
QuerySolutionNode* clone() const;
BSONObjSet _sort; BSONObjSet _sort;
}; };
struct AndSortedNode : public QuerySolutionNode { struct AndSortedNode : public QuerySolutionNode {
AndSortedNode(); AndSortedNode();
virtual ~AndSortedNode(); virtual ~AndSortedNode();
virtual StageType getType() const { return STAGE_AND_SORTED; } virtual StageType getType() const { return STAGE_AND_SORTED; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return true; } bool sortedByDiskLoc() const { return true; }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
QuerySolutionNode* clone() const;
BSONObjSet _sort; BSONObjSet _sort;
}; };
struct OrNode : public QuerySolutionNode { struct OrNode : public QuerySolutionNode {
OrNode(); OrNode();
virtual ~OrNode(); virtual ~OrNode();
virtual StageType getType() const { return STAGE_OR; } virtual StageType getType() const { return STAGE_OR; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { bool sortedByDiskLoc() const {
// Even if our children are sorted by their diskloc or other fi elds, we don't maintain // Even if our children are sorted by their diskloc or other fi elds, we don't maintain
// any order on the output. // any order on the output.
return false; return false;
} }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
QuerySolutionNode* clone() const;
BSONObjSet _sort; BSONObjSet _sort;
bool dedup; bool dedup;
}; };
struct MergeSortNode : public QuerySolutionNode { struct MergeSortNode : public QuerySolutionNode {
MergeSortNode(); MergeSortNode();
virtual ~MergeSortNode(); virtual ~MergeSortNode();
virtual StageType getType() const { return STAGE_SORT_MERGE; } virtual StageType getType() const { return STAGE_SORT_MERGE; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
QuerySolutionNode* clone() const;
virtual void computeProperties() { virtual void computeProperties() {
for (size_t i = 0; i < children.size(); ++i) { for (size_t i = 0; i < children.size(); ++i) {
children[i]->computeProperties(); children[i]->computeProperties();
} }
_sorts.clear(); _sorts.clear();
_sorts.insert(sort); _sorts.insert(sort);
} }
BSONObjSet _sorts; BSONObjSet _sorts;
skipping to change at line 334 skipping to change at line 379
virtual StageType getType() const { return STAGE_FETCH; } virtual StageType getType() const { return STAGE_FETCH; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
QuerySolutionNode* clone() const;
BSONObjSet _sorts; BSONObjSet _sorts;
}; };
struct IndexScanNode : public QuerySolutionNode { struct IndexScanNode : public QuerySolutionNode {
IndexScanNode(); IndexScanNode();
virtual ~IndexScanNode() { } virtual ~IndexScanNode() { }
virtual void computeProperties(); virtual void computeProperties();
virtual StageType getType() const { return STAGE_IXSCAN; } virtual StageType getType() const { return STAGE_IXSCAN; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return false; } bool fetched() const { return false; }
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const; bool sortedByDiskLoc() const;
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
QuerySolutionNode* clone() const;
BSONObjSet _sorts; BSONObjSet _sorts;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
bool indexIsMultiKey; bool indexIsMultiKey;
int direction; int direction;
// maxScan option to .find() limits how many docs we look at. // maxScan option to .find() limits how many docs we look at.
int maxScan; int maxScan;
skipping to change at line 373 skipping to change at line 422
bool addKeyMetadata; bool addKeyMetadata;
// BIG NOTE: // BIG NOTE:
// If you use simple bounds, we'll use whatever index access method the keypattern implies. // If you use simple bounds, we'll use whatever index access method the keypattern implies.
// If you use the complex bounds, we force Btree access. // If you use the complex bounds, we force Btree access.
// The complex bounds require Btree access. // The complex bounds require Btree access.
IndexBounds bounds; IndexBounds bounds;
}; };
struct ProjectionNode : public QuerySolutionNode { struct ProjectionNode : public QuerySolutionNode {
ProjectionNode() { } /**
* We have a few implementations of the projection functionality.
The most general
* implementation 'DEFAULT' is much slower than the fast-path imple
mentations
* below. We only really have all the information available to cho
ose a projection
* implementation at planning time.
*/
enum ProjectionType {
// This is the most general implementation of the projection fu
nctionality. It handles
// every case.
DEFAULT,
// This is a fast-path for when the projection is fully covered
by one index.
COVERED_ONE_INDEX,
// This is a fast-path for when the projection only has inclusi
ons on non-dotted fields.
SIMPLE_DOC,
};
ProjectionNode() : fullExpression(NULL), projType(DEFAULT) { }
virtual ~ProjectionNode() { } virtual ~ProjectionNode() { }
virtual StageType getType() const { return STAGE_PROJECTION; } virtual StageType getType() const { return STAGE_PROJECTION; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
/** /**
* This node changes the type to OWNED_OBJ. There's no fetching po ssible after this. * This node changes the type to OWNED_OBJ. There's no fetching po ssible after this.
*/ */
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { bool hasField(const string& field) const {
// XXX XXX: perhaps have the QueryProjection pre-allocated and // TODO: Returning false isn't always the right answer -- we ma
defer to it? we don't y either be including
// know what we're dropping. Until we push projection down thi // certain fields, or we may be dropping fields (in which case
s doesn't matter. hasField returns true).
//
// Given that projection sits on top of everything else in .fin
d() it doesn't matter
// what we do here.
return false; return false;
} }
bool sortedByDiskLoc() const { bool sortedByDiskLoc() const {
// Projections destroy the DiskLoc. By returning true here, th is kind of implies that a // Projections destroy the DiskLoc. By returning true here, th is kind of implies that a
// fetch could still be done upstream. // fetch could still be done upstream.
// //
// Perhaps this should be false to not imply that there *is* a DiskLoc? Kind of a // Perhaps this should be false to not imply that there *is* a DiskLoc? Kind of a
// corner case. // corner case.
return children[0]->sortedByDiskLoc(); return children[0]->sortedByDiskLoc();
} }
const BSONObjSet& getSort() const { const BSONObjSet& getSort() const {
// TODO: If we're applying a projection that maintains sort ord er, the prefix of the // TODO: If we're applying a projection that maintains sort ord er, the prefix of the
// sort order we project is the sort order. // sort order we project is the sort order.
return _sorts; return _sorts;
} }
QuerySolutionNode* clone() const;
BSONObjSet _sorts; BSONObjSet _sorts;
// The full query tree. Needed when we have positional operators. // The full query tree. Needed when we have positional operators.
// Owned in the CanonicalQuery, not here. // Owned in the CanonicalQuery, not here.
MatchExpression* fullExpression; MatchExpression* fullExpression;
// Given that we don't yet have a MatchExpression analogue for the expression language, we // Given that we don't yet have a MatchExpression analogue for the expression language, we
// use a BSONObj. // use a BSONObj.
BSONObj projection; BSONObj projection;
// What implementation of the projection algorithm should we use?
ProjectionType projType;
// Only meaningful if projType == COVERED_ONE_INDEX. This is the k
ey pattern of the index
// supplying our covered data. We can pre-compute which fields to
include and cache that
// data for later if we know we only have one index.
BSONObj coveredKeyObj;
}; };
struct SortNode : public QuerySolutionNode { struct SortNode : public QuerySolutionNode {
SortNode() : limit(0) { } SortNode() : limit(0) { }
virtual ~SortNode() { } virtual ~SortNode() { }
virtual StageType getType() const { return STAGE_SORT; } virtual StageType getType() const { return STAGE_SORT; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
QuerySolutionNode* clone() const;
virtual void computeProperties() { virtual void computeProperties() {
for (size_t i = 0; i < children.size(); ++i) { for (size_t i = 0; i < children.size(); ++i) {
children[i]->computeProperties(); children[i]->computeProperties();
} }
_sorts.clear(); _sorts.clear();
_sorts.insert(pattern); _sorts.insert(pattern);
} }
BSONObjSet _sorts; BSONObjSet _sorts;
skipping to change at line 462 skipping to change at line 545
virtual StageType getType() const { return STAGE_LIMIT; } virtual StageType getType() const { return STAGE_LIMIT; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
QuerySolutionNode* clone() const;
int limit; int limit;
}; };
struct SkipNode : public QuerySolutionNode { struct SkipNode : public QuerySolutionNode {
SkipNode() { } SkipNode() { }
virtual ~SkipNode() { } virtual ~SkipNode() { }
virtual StageType getType() const { return STAGE_SKIP; } virtual StageType getType() const { return STAGE_SKIP; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
QuerySolutionNode* clone() const;
int skip; int skip;
}; };
// //
// Geo nodes. A thin wrapper above an IXSCAN until we can yank functio nality out of // Geo nodes. A thin wrapper above an IXSCAN until we can yank functio nality out of
// the IXSCAN layer into the stage layer. // the IXSCAN layer into the stage layer.
// //
// TODO: This is probably an expression index. // TODO: This is probably an expression index.
struct Geo2DNode : public QuerySolutionNode { struct Geo2DNode : public QuerySolutionNode {
skipping to change at line 499 skipping to change at line 586
virtual StageType getType() const { return STAGE_GEO_2D; } virtual StageType getType() const { return STAGE_GEO_2D; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return false; } bool fetched() const { return false; }
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
BSONObjSet _sorts; BSONObjSet _sorts;
QuerySolutionNode* clone() const;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
GeoQuery gq; GeoQuery gq;
}; };
// This is a standalone stage. // This is a standalone stage.
struct GeoNear2DNode : public QuerySolutionNode { struct GeoNear2DNode : public QuerySolutionNode {
GeoNear2DNode() : numWanted(100), addPointMeta(false), addDistMeta( false) { } GeoNear2DNode() : numWanted(100), addPointMeta(false), addDistMeta( false) { }
virtual ~GeoNear2DNode() { } virtual ~GeoNear2DNode() { }
virtual StageType getType() const { return STAGE_GEO_NEAR_2D; } virtual StageType getType() const { return STAGE_GEO_NEAR_2D; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
QuerySolutionNode* clone() const;
BSONObjSet _sorts; BSONObjSet _sorts;
NearQuery nq; NearQuery nq;
int numWanted; int numWanted;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
bool addPointMeta; bool addPointMeta;
bool addDistMeta; bool addDistMeta;
}; };
// This is actually its own standalone stage. // This is actually its own standalone stage.
skipping to change at line 537 skipping to change at line 629
virtual ~GeoNear2DSphereNode() { } virtual ~GeoNear2DSphereNode() { }
virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; } virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
QuerySolutionNode* clone() const;
BSONObjSet _sorts; BSONObjSet _sorts;
NearQuery nq; NearQuery nq;
IndexBounds baseBounds; IndexBounds baseBounds;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
bool addPointMeta; bool addPointMeta;
bool addDistMeta; bool addDistMeta;
}; };
skipping to change at line 568 skipping to change at line 662
ShardingFilterNode() { } ShardingFilterNode() { }
virtual ~ShardingFilterNode() { } virtual ~ShardingFilterNode() { }
virtual StageType getType() const { return STAGE_SHARDING_FILTER; } virtual StageType getType() const { return STAGE_SHARDING_FILTER; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
QuerySolutionNode* clone() const;
}; };
/** /**
* If documents mutate or are deleted during a query, we can (in some c ases) fetch them * If documents mutate or are deleted during a query, we can (in some c ases) fetch them
* and still return them. This stage merges documents that have been m utated or deleted * and still return them. This stage merges documents that have been m utated or deleted
* into the query result stream. * into the query result stream.
*/ */
struct KeepMutationsNode : public QuerySolutionNode { struct KeepMutationsNode : public QuerySolutionNode {
KeepMutationsNode() { } KeepMutationsNode() { }
virtual ~KeepMutationsNode() { } virtual ~KeepMutationsNode() { }
skipping to change at line 591 skipping to change at line 687
// Any flagged results are OWNED_OBJ and therefore we're covered if our child is. // Any flagged results are OWNED_OBJ and therefore we're covered if our child is.
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
// Any flagged results are OWNED_OBJ and as such they'll have any f ield we need. // Any flagged results are OWNED_OBJ and as such they'll have any f ield we need.
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; } const BSONObjSet& getSort() const { return sorts; }
QuerySolutionNode* clone() const;
// Since we merge in flagged results we have no sort order. // Since we merge in flagged results we have no sort order.
BSONObjSet sorts; BSONObjSet sorts;
}; };
/** /**
* Distinct queries only want one value for a given field. We run an i ndex scan but * Distinct queries only want one value for a given field. We run an i ndex scan but
* *always* skip over the current key to the next key. * *always* skip over the current key to the next key.
*/ */
struct DistinctNode : public QuerySolutionNode { struct DistinctNode : public QuerySolutionNode {
DistinctNode() { } DistinctNode() { }
virtual ~DistinctNode() { } virtual ~DistinctNode() { }
virtual StageType getType() const { return STAGE_DISTINCT; } virtual StageType getType() const { return STAGE_DISTINCT; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
// This stage is created "on top" of normal planning and as such th e properties // This stage is created "on top" of normal planning and as such th e properties
// below don't really matter. // below don't really matter.
bool fetched() const { return true; } bool fetched() const { return false; }
bool hasField(const string& field) const { return !indexKeyPattern[ field].eoo(); } bool hasField(const string& field) const { return !indexKeyPattern[ field].eoo(); }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; } const BSONObjSet& getSort() const { return sorts; }
QuerySolutionNode* clone() const;
BSONObjSet sorts; BSONObjSet sorts;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
int direction; int direction;
IndexBounds bounds; IndexBounds bounds;
// We are distinct-ing over the 'fieldNo'-th field of 'indexKeyPatt ern'. // We are distinct-ing over the 'fieldNo'-th field of 'indexKeyPatt ern'.
int fieldNo; int fieldNo;
}; };
/** /**
skipping to change at line 636 skipping to change at line 737
CountNode() { } CountNode() { }
virtual ~CountNode() { } virtual ~CountNode() { }
virtual StageType getType() const { return STAGE_COUNT; } virtual StageType getType() const { return STAGE_COUNT; }
virtual void appendToString(mongoutils::str::stream* ss, int indent ) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; } const BSONObjSet& getSort() const { return sorts; }
QuerySolutionNode* clone() const;
BSONObjSet sorts; BSONObjSet sorts;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
BSONObj startKey; BSONObj startKey;
bool startKeyInclusive; bool startKeyInclusive;
BSONObj endKey; BSONObj endKey;
bool endKeyInclusive; bool endKeyInclusive;
}; };
 End of changes. 27 change blocks. 
14 lines changed or deleted 133 lines changed or added


 replica_set_monitor.h   replica_set_monitor.h 
skipping to change at line 93 skipping to change at line 93
*/ */
void failedHost(const HostAndPort& host); void failedHost(const HostAndPort& host);
/** /**
* Returns true if this node is the master based ONLY on local data . Be careful, return may * Returns true if this node is the master based ONLY on local data . Be careful, return may
* be stale. * be stale.
*/ */
bool isPrimary(const HostAndPort& host) const; bool isPrimary(const HostAndPort& host) const;
/** /**
* Returns true if host is part of this set and is considered up (m
eaning it can accept
* queries).
*/
bool isHostUp(const HostAndPort& host) const;
/**
* How may times in a row have we tried to refresh without successf ully contacting any hosts * How may times in a row have we tried to refresh without successf ully contacting any hosts
* who claim to be members of this set? * who claim to be members of this set?
*/ */
int getConsecutiveFailedScans() const; int getConsecutiveFailedScans() const;
/** /**
* The name of the set. * The name of the set.
*/ */
std::string getName() const; std::string getName() const;
 End of changes. 1 change blocks. 
0 lines changed or deleted 7 lines changed or added


 request.h   request.h 
skipping to change at line 64 skipping to change at line 64
} }
bool expectResponse() const { bool expectResponse() const {
return op() == dbQuery || op() == dbGetMore; return op() == dbQuery || op() == dbGetMore;
} }
bool isCommand() const; bool isCommand() const;
MSGID id() const { MSGID id() const {
return _id; return _id;
} }
DBConfigPtr getConfig() const {
verify( _didInit );
return _config;
}
bool isShardingEnabled() const {
verify( _didInit );
return _config->isShardingEnabled();
}
ChunkManagerPtr getChunkManager() const {
verify( _didInit );
return _chunkManager;
}
ClientInfo * getClientInfo() const { ClientInfo * getClientInfo() const {
return _clientInfo; return _clientInfo;
} }
// ---- remote location info -----
Shard primaryShard() const ;
// ---- low level access ---- // ---- low level access ----
void reply( Message & response , const string& fromServer ); void reply( Message & response , const string& fromServer );
Message& m() { return _m; } Message& m() { return _m; }
DbMessage& d() { return _d; } DbMessage& d() { return _d; }
AbstractMessagingPort* p() const { return _p; } AbstractMessagingPort* p() const { return _p; }
void process( int attempt = 0 ); void process( int attempt = 0 );
void gotInsert();
void init(); void init();
void reset(); void reset();
private: private:
Message& _m; Message& _m;
DbMessage _d; DbMessage _d;
AbstractMessagingPort* _p; AbstractMessagingPort* _p;
MSGID _id; MSGID _id;
DBConfigPtr _config;
ChunkManagerPtr _chunkManager;
ClientInfo * _clientInfo; ClientInfo * _clientInfo;
OpCounters* _counter; OpCounters* _counter;
bool _didInit; bool _didInit;
}; };
} }
 End of changes. 4 change blocks. 
22 lines changed or deleted 0 lines changed or added


 rs.h   rs.h 
skipping to change at line 72 skipping to change at line 72
namespace mongo { namespace mongo {
class Cloner; class Cloner;
class DBClientConnection; class DBClientConnection;
struct HowToFixUp; struct HowToFixUp;
class ReplSetImpl; class ReplSetImpl;
struct Target; struct Target;
extern bool replSet; // true if using repl sets extern bool replSet; // true if using repl sets
extern class ReplSet *theReplSet; // null until initialized extern class ReplSet *theReplSet; // null until initialized
extern Tee *rsLog; extern Tee *rsLog;
extern int maxSyncSourceLagSecs;
class ReplSetCmdline; class ReplSetCmdline;
// Main entry point for replica sets // Main entry point for replica sets
void startReplSets(ReplSetCmdline *replSetCmdline); void startReplSets(ReplSetCmdline *replSetCmdline);
class ReplicationStartSynchronizer { class ReplicationStartSynchronizer {
public: public:
ReplicationStartSynchronizer() : indexRebuildDone(false) {} ReplicationStartSynchronizer() : indexRebuildDone(false) {}
boost::mutex mtx; boost::mutex mtx;
bool indexRebuildDone; bool indexRebuildDone;
skipping to change at line 199 skipping to change at line 201
Guarded<LastYea,lyMutex> ly; Guarded<LastYea,lyMutex> ly;
unsigned yea(unsigned memberId); // throws VoteException unsigned yea(unsigned memberId); // throws VoteException
void electionFailed(unsigned meid); void electionFailed(unsigned meid);
void _electSelf(); void _electSelf();
bool weAreFreshest(bool& allUp, int& nTies); bool weAreFreshest(bool& allUp, int& nTies);
bool sleptLast; // slept last elect() pass bool sleptLast; // slept last elect() pass
// This is a unique id that is changed each time we transition to P RIMARY, as the // This is a unique id that is changed each time we transition to P RIMARY, as the
// result of an election. // result of an election.
OID _electionId; OID _electionId;
// PRIMARY server's time when the election to primary occurred
OpTime _electionTime;
public: public:
Consensus(ReplSetImpl *t) : rs(*t) { Consensus(ReplSetImpl *t) : rs(*t) {
sleptLast = false; sleptLast = false;
steppedDown = 0; steppedDown = 0;
} }
/* if we've stepped down, this is when we are allowed to try to ele ct ourself again. /* if we've stepped down, this is when we are allowed to try to ele ct ourself again.
todo: handle possible weirdnesses at clock skews etc. todo: handle possible weirdnesses at clock skews etc.
*/ */
time_t steppedDown; time_t steppedDown;
int totalVotes() const; int totalVotes() const;
bool aMajoritySeemsToBeUp() const; bool aMajoritySeemsToBeUp() const;
bool shouldRelinquish() const; bool shouldRelinquish() const;
void electSelf(); void electSelf();
void electCmdReceived(BSONObj, BSONObjBuilder*); void electCmdReceived(BSONObj, BSONObjBuilder*);
void multiCommand(BSONObj cmd, list<Target>& L); void multiCommand(BSONObj cmd, list<Target>& L);
OID getElectionId() const { return _electionId; } OID getElectionId() const { return _electionId; }
void setElectionId(OID oid) { _electionId = oid; } void setElectionId(OID oid) { _electionId = oid; }
OpTime getElectionTime() const { return _electionTime; }
void setElectionTime(OpTime electionTime) { _electionTime = electio
nTime; }
}; };
/** /**
* most operations on a ReplSet object should be done while locked. tha t * most operations on a ReplSet object should be done while locked. tha t
* logic implemented here. * logic implemented here.
* *
* Order of locking: lock the replica set, then take a rwlock. * Order of locking: lock the replica set, then take a rwlock.
*/ */
class RSBase : boost::noncopyable { class RSBase : boost::noncopyable {
public: public:
skipping to change at line 396 skipping to change at line 402
/** /**
* Find the closest member (using ping time) with a higher latest o ptime. * Find the closest member (using ping time) with a higher latest o ptime.
*/ */
const Member* getMemberToSyncTo(); const Member* getMemberToSyncTo();
void veto(const string& host, unsigned secs=10); void veto(const string& host, unsigned secs=10);
bool gotForceSync(); bool gotForceSync();
void goStale(const Member* m, const BSONObj& o); void goStale(const Member* m, const BSONObj& o);
OID getElectionId() const { return elect.getElectionId(); } OID getElectionId() const { return elect.getElectionId(); }
OpTime getElectionTime() const { return elect.getElectionTime(); }
private: private:
set<ReplSetHealthPollTask*> healthTasks; set<ReplSetHealthPollTask*> healthTasks;
void endOldHealthTasks(); void endOldHealthTasks();
void startHealthTaskFor(Member *m); void startHealthTaskFor(Member *m);
Consensus elect; Consensus elect;
void relinquish(); void relinquish();
void forgetPrimary(); void forgetPrimary();
protected: protected:
bool _stepDown(int secs); bool _stepDown(int secs);
skipping to change at line 602 skipping to change at line 609
} }
IndexPrefetchConfig getIndexPrefetchConfig() { IndexPrefetchConfig getIndexPrefetchConfig() {
return _indexPrefetchConfig; return _indexPrefetchConfig;
} }
static const int replWriterThreadCount; static const int replWriterThreadCount;
static const int replPrefetcherThreadCount; static const int replPrefetcherThreadCount;
threadpool::ThreadPool& getPrefetchPool() { return _prefetcherPool; } threadpool::ThreadPool& getPrefetchPool() { return _prefetcherPool; }
threadpool::ThreadPool& getWriterPool() { return _writerPool; } threadpool::ThreadPool& getWriterPool() { return _writerPool; }
static const int maxSyncSourceLagSecs;
const ReplSetConfig::MemberCfg& myConfig() const { return _config; } const ReplSetConfig::MemberCfg& myConfig() const { return _config; }
bool tryToGoLiveAsASecondary(OpTime&); // readlocks bool tryToGoLiveAsASecondary(OpTime&); // readlocks
void syncRollback(OplogReader& r); void syncRollback(OplogReader& r);
void syncThread(); void syncThread();
const OpTime lastOtherOpTime() const; const OpTime lastOtherOpTime() const;
/** /**
* The most up to date electable replica * The most up to date electable replica
*/ */
const OpTime lastOtherElectableOpTime() const; const OpTime lastOtherElectableOpTime() const;
 End of changes. 5 change blocks. 
2 lines changed or deleted 8 lines changed or added


 rs_member.h   rs_member.h 
skipping to change at line 105 skipping to change at line 105
// This is the last time we got a heartbeat request from a given me mber. // This is the last time we got a heartbeat request from a given me mber.
time_t lastHeartbeatRecv; time_t lastHeartbeatRecv;
DiagStr lastHeartbeatMsg; DiagStr lastHeartbeatMsg;
DiagStr syncingTo; DiagStr syncingTo;
OpTime opTime; OpTime opTime;
int skew; int skew;
bool authIssue; bool authIssue;
unsigned int ping; // milliseconds unsigned int ping; // milliseconds
static unsigned int numPings; static unsigned int numPings;
// Time node was elected primary
OpTime electionTime;
bool up() const { return health > 0; } bool up() const { return health > 0; }
/** health is set to -1 on startup. that means we haven't even che cked yet. 0 means we checked and it failed. */ /** health is set to -1 on startup. that means we haven't even che cked yet. 0 means we checked and it failed. */
bool maybeUp() const { return health != 0; } bool maybeUp() const { return health != 0; }
long long timeDown() const; // ms long long timeDown() const; // ms
/* true if changed in a way of interest to the repl set manager. */ /* true if changed in a way of interest to the repl set manager. */
bool changed(const HeartbeatInfo& old) const; bool changed(const HeartbeatInfo& old) const;
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 runner.h   runner.h 
skipping to change at line 61 skipping to change at line 61
RUNNER_ADVANCED, RUNNER_ADVANCED,
// We're EOF. We won't return any more results (edge case exce ption: capped+tailable). // We're EOF. We won't return any more results (edge case exce ption: capped+tailable).
RUNNER_EOF, RUNNER_EOF,
// We were killed or had an error. // We were killed or had an error.
RUNNER_DEAD, RUNNER_DEAD,
// getNext was asked for data it cannot provide, or the underly ing PlanStage had an // getNext was asked for data it cannot provide, or the underly ing PlanStage had an
// unrecoverable error. // unrecoverable error.
// If the underlying PlanStage has any information on the error
, it will be available in
// the objOut parameter. Call WorkingSetCommon::toStatusString(
) to retrieve the error
// details from the output BSON object.
RUNNER_ERROR, RUNNER_ERROR,
}; };
static string statestr(RunnerState s) { static string statestr(RunnerState s) {
if (RUNNER_ADVANCED == s) { if (RUNNER_ADVANCED == s) {
return "RUNNER_ADVANCED"; return "RUNNER_ADVANCED";
} }
else if (RUNNER_EOF == s) { else if (RUNNER_EOF == s) {
return "RUNNER_EOF"; return "RUNNER_EOF";
} }
skipping to change at line 152 skipping to change at line 155
* If the caller is running a query, they probably only care about the object. * If the caller is running a query, they probably only care about the object.
* If the caller is an internal client, they may only care about Di skLocs (index scan), or * If the caller is an internal client, they may only care about Di skLocs (index scan), or
* about object + DiskLocs (collection scan). * about object + DiskLocs (collection scan).
* *
* Some notes on objOut and ownership: * Some notes on objOut and ownership:
* *
* objOut may be an owned object in certain cases: invalidation of the underlying DiskLoc, * objOut may be an owned object in certain cases: invalidation of the underlying DiskLoc,
* the object is created from covered index key data, the object is projected or otherwise * the object is created from covered index key data, the object is projected or otherwise
* the result of a computation. * the result of a computation.
* *
* objOut will also be owned when the underlying PlanStage has prov
ided error details in the
* event of a RUNNER_ERROR. Call WorkingSetCommon::toStatusString()
to convert the object
* to a loggable format.
*
* objOut will be unowned if it's the result of a fetch or a collec tion scan. * objOut will be unowned if it's the result of a fetch or a collec tion scan.
*/ */
virtual RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut) = 0; virtual RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut) = 0;
/** /**
* Will the next call to getNext() return EOF? It's useful to know if the runner is done * Will the next call to getNext() return EOF? It's useful to know if the runner is done
* without having to take responsibility for a result. * without having to take responsibility for a result.
*/ */
virtual bool isEOF() = 0; virtual bool isEOF() = 0;
 End of changes. 2 change blocks. 
0 lines changed or deleted 11 lines changed or added


 runner_yield_policy.h   runner_yield_policy.h 
skipping to change at line 63 skipping to change at line 63
} }
/** /**
* Yield the provided runner, registering and deregistering it appr opriately. Deal with * Yield the provided runner, registering and deregistering it appr opriately. Deal with
* deletion during a yield by setting _runnerYielding to ensure der egistration. * deletion during a yield by setting _runnerYielding to ensure der egistration.
* *
* Provided runner MUST be YIELD_MANUAL. * Provided runner MUST be YIELD_MANUAL.
*/ */
bool yieldAndCheckIfOK(Runner* runner, Record* record = NULL) { bool yieldAndCheckIfOK(Runner* runner, Record* record = NULL) {
invariant(runner); invariant(runner);
invariant(runner->collection()); // XXX: should this just retur n true? invariant(runner->collection());
int micros = ClientCursor::suggestYieldMicros(); int micros = ClientCursor::suggestYieldMicros();
// If micros is not positive, no point in yielding, nobody wait ing. // If micros is not positive, no point in yielding, nobody wait ing.
// XXX: Do we want to yield anyway if record is not NULL? //
// TODO: Do we want to yield anyway if record is not NULL?
//
// TODO: Track how many times we actually yield, how many times
micros is <0, etc.
if (micros <= 0) { return true; } if (micros <= 0) { return true; }
// If micros > 0, we should yield. // If micros > 0, we should yield.
runner->saveState(); runner->saveState();
_runnerYielding = runner; _runnerYielding = runner;
runner->collection()->cursorCache()->registerRunner( _runnerYie lding ); runner->collection()->cursorCache()->registerRunner( _runnerYie lding );
// Note that this call checks for interrupt, and thus can throw if interrupt flag is set
staticYield(micros, record); staticYield(micros, record);
if ( runner->collection() ) { if ( runner->collection() ) {
// if the runner was killed, runner->collection() will retu rn NULL // if the runner was killed, runner->collection() will retu rn NULL
// so we don't deregister as it was done when killed // so we don't deregister as it was done when killed
runner->collection()->cursorCache()->deregisterRunner( _run nerYielding ); runner->collection()->cursorCache()->deregisterRunner( _run nerYielding );
} }
_runnerYielding = NULL; _runnerYielding = NULL;
_elapsedTracker.resetLastTime(); _elapsedTracker.resetLastTime();
return runner->restoreState(); return runner->restoreState();
skipping to change at line 102 skipping to change at line 106
* *
* Used for YIELD_AUTO runners. * Used for YIELD_AUTO runners.
*/ */
void yield(Record* rec = NULL) { void yield(Record* rec = NULL) {
int micros = ClientCursor::suggestYieldMicros(); int micros = ClientCursor::suggestYieldMicros();
// If there is anyone waiting on us or if there's a record to p age-in, yield. TODO: Do // If there is anyone waiting on us or if there's a record to p age-in, yield. TODO: Do
// we want to page in the record in the lock even if nobody is waiting for the lock? // we want to page in the record in the lock even if nobody is waiting for the lock?
if (micros > 0 || (NULL != rec)) { if (micros > 0 || (NULL != rec)) {
staticYield(micros, rec); staticYield(micros, rec);
// XXX: when do we really want to reset this? // TODO: When do we really want to reset this? Currently
// we reset it when we
// Currently we reset it when we actually yield. As such w // actually yield. As such we'll keep on trying to yield o
e'll keep on trying nce the tracker has
// to yield once the tracker has elapsed. // elapsed. If we reset it even if we don't yield, we'll w
// ait until the time
// If we reset it even if we don't yield, we'll wait until // interval elapses again to try yielding.
the time interval
// elapses again to try yielding.
_elapsedTracker.resetLastTime(); _elapsedTracker.resetLastTime();
} }
} }
static void staticYield(int micros, const Record* rec = NULL) { static void staticYield(int micros, const Record* rec = NULL) {
ClientCursor::staticYield(micros, "", rec); ClientCursor::staticYield(micros, "", rec);
} }
private: private:
ElapsedTracker _elapsedTracker; ElapsedTracker _elapsedTracker;
 End of changes. 4 change blocks. 
11 lines changed or deleted 14 lines changed or added


 s2_access_method.h   s2_access_method.h 
skipping to change at line 49 skipping to change at line 49
class IndexCursor; class IndexCursor;
struct S2IndexingParams; struct S2IndexingParams;
class S2AccessMethod : public BtreeBasedAccessMethod { class S2AccessMethod : public BtreeBasedAccessMethod {
public: public:
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
S2AccessMethod(IndexCatalogEntry* btreeState); S2AccessMethod(IndexCatalogEntry* btreeState);
virtual ~S2AccessMethod() { } virtual ~S2AccessMethod() { }
/**
* Takes an index spec object for this index and returns a copy twe
aked to conform to the
* expected format. When an index build is initiated, this functio
n is called on the spec
* object the user provides, and the return value of this function
is the final spec object
* that gets saved in the index catalog. Throws a UserException if
'specObj' is invalid.
*/
static BSONObj fixSpec(const BSONObj& specObj);
virtual shared_ptr<KeyGenerator> getKeyGenerator() const { return _
keyGenerator; }
private: private:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// getKeys calls the helper methods below.
void getGeoKeys(const BSONObj& document, const BSONElementSet& elem
ents,
BSONObjSet* out) const;
void getLiteralKeys(const BSONElementSet& elements, BSONObjSet* out
) const;
void getLiteralKeysArray(const BSONObj& obj, BSONObjSet* out) const
;
void getOneLiteralKey(const BSONElement& elt, BSONObjSet *out) cons
t;
S2IndexingParams _params; S2IndexingParams _params;
shared_ptr<KeyGenerator> _keyGenerator;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
11 lines changed or deleted 15 lines changed or added


 s2common.h   s2common.h 
skipping to change at line 42 skipping to change at line 42
#include "third_party/s2/s2regioncoverer.h" #include "third_party/s2/s2regioncoverer.h"
#include "third_party/s2/s2cell.h" #include "third_party/s2/s2cell.h"
#include "third_party/s2/s2polyline.h" #include "third_party/s2/s2polyline.h"
#include "third_party/s2/s2polygon.h" #include "third_party/s2/s2polygon.h"
#include "third_party/s2/s2regioncoverer.h" #include "third_party/s2/s2regioncoverer.h"
#pragma once #pragma once
namespace mongo { namespace mongo {
// An enum describing the version of an S2 index.
enum S2IndexVersion {
// The first version of the S2 index, introduced in MongoDB 2.4.0.
Compatible with MongoDB
// 2.4.0 and later. Supports the following GeoJSON objects: Point,
LineString, Polygon.
S2_INDEX_VERSION_1 = 1,
// The current version of the S2 index, introduced in MongoDB 2.6.0
. Compatible with
// MongoDB 2.6.0 and later. Introduced support for the following G
eoJSON objects:
// MultiPoint, MultiLineString, MultiPolygon, GeometryCollection.
S2_INDEX_VERSION_2 = 2
};
struct S2IndexingParams { struct S2IndexingParams {
// Since we take the cartesian product when we generate keys for an insert, // Since we take the cartesian product when we generate keys for an insert,
// we need a cap. // we need a cap.
size_t maxKeysPerInsert; size_t maxKeysPerInsert;
// This is really an advisory parameter that we pass to the cover g enerator. The // This is really an advisory parameter that we pass to the cover g enerator. The
// finest/coarsest index level determine the required # of cells. // finest/coarsest index level determine the required # of cells.
int maxCellsInCovering; int maxCellsInCovering;
// What's the finest grained level that we'll index? When we query for a point // What's the finest grained level that we'll index? When we query for a point
// we start at that -- we index nothing finer than this. // we start at that -- we index nothing finer than this.
int finestIndexedLevel; int finestIndexedLevel;
// And, what's the coarsest? When we search in larger coverings we know we // And, what's the coarsest? When we search in larger coverings we know we
// can stop here -- we index nothing coarser than this. // can stop here -- we index nothing coarser than this.
int coarsestIndexedLevel; int coarsestIndexedLevel;
// Version of this index (specific to the index type).
S2IndexVersion indexVersion;
double radius; double radius;
string toString() const { string toString() const {
stringstream ss; stringstream ss;
ss << "maxKeysPerInsert: " << maxKeysPerInsert << endl; ss << "maxKeysPerInsert: " << maxKeysPerInsert << endl;
ss << "maxCellsInCovering: " << maxCellsInCovering << endl; ss << "maxCellsInCovering: " << maxCellsInCovering << endl;
ss << "finestIndexedLevel: " << finestIndexedLevel << endl; ss << "finestIndexedLevel: " << finestIndexedLevel << endl;
ss << "coarsestIndexedLevel: " << coarsestIndexedLevel << endl; ss << "coarsestIndexedLevel: " << coarsestIndexedLevel << endl;
ss << "indexVersion: " << indexVersion << endl;
return ss.str(); return ss.str();
} }
void configureCoverer(S2RegionCoverer *coverer) const { void configureCoverer(S2RegionCoverer *coverer) const {
coverer->set_min_level(coarsestIndexedLevel); coverer->set_min_level(coarsestIndexedLevel);
coverer->set_max_level(finestIndexedLevel); coverer->set_max_level(finestIndexedLevel);
// This is advisory; the two above are strict. // This is advisory; the two above are strict.
coverer->set_max_cells(maxCellsInCovering); coverer->set_max_cells(maxCellsInCovering);
} }
}; };
class S2SearchUtil { class S2SearchUtil {
public: public:
// Given a coverer, region, and field name, generate a BSONObj that we can pass to a // Given a coverer, region, and field name, generate a BSONObj that we can pass to a
// FieldRangeSet so that we only examine the keys that the provided region may intersect. // FieldRangeSet so that we only examine the keys that the provided region may intersect.
static BSONObj coverAsBSON(const vector<S2CellId> &cover, const str ing& field, static BSONObj coverAsBSON(const vector<S2CellId> &cover, const str ing& field,
const int coarsestIndexedLevel); const int coarsestIndexedLevel);
static void setCoverLimitsBasedOnArea(double area, S2RegionCoverer *coverer, int coarsestIndexedLevel); static void setCoverLimitsBasedOnArea(double area, S2RegionCoverer *coverer, int coarsestIndexedLevel);
static bool getKeysForObject(const BSONObj& obj, const S2IndexingPa
rams& params,
vector<string>* out);
static bool distanceBetween(const S2Point& us, const BSONObj& them, double *out); static bool distanceBetween(const S2Point& us, const BSONObj& them, double *out);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
3 lines changed or deleted 19 lines changed or added


 s2near.h   s2near.h 
skipping to change at line 94 skipping to change at line 94
S2NearParams _params; S2NearParams _params;
WorkingSet* _ws; WorkingSet* _ws;
// This is the "array index" of the key field that is the near fiel d. We use this to do // This is the "array index" of the key field that is the near fiel d. We use this to do
// cheap is-this-doc-in-the-annulus testing. We also need to know where to stuff the index // cheap is-this-doc-in-the-annulus testing. We also need to know where to stuff the index
// bounds for the various annuluses/annuli. // bounds for the various annuluses/annuli.
int _nearFieldIndex; int _nearFieldIndex;
// Geo filter in index scan (which is owned by fetch stage in _chil
d).
scoped_ptr<MatchExpression> _keyGeoFilter;
scoped_ptr<PlanStage> _child; scoped_ptr<PlanStage> _child;
// The S2 machinery that represents the search annulus. We keep th is around after bounds // The S2 machinery that represents the search annulus. We keep th is around after bounds
// generation to check for intersection. // generation to check for intersection.
S2Cap _innerCap; S2Cap _innerCap;
S2Cap _outerCap; S2Cap _outerCap;
S2RegionIntersection _annulus; S2RegionIntersection _annulus;
// We use this to hold on to the results in an annulus. Results ar e sorted to have // We use this to hold on to the results in an annulus. Results ar e sorted to have
// decreasing distance. // decreasing distance.
skipping to change at line 116 skipping to change at line 119
bool operator<(const Result& other) const { bool operator<(const Result& other) const {
// We want increasing distance, not decreasing, so we rever se the <. // We want increasing distance, not decreasing, so we rever se the <.
return distance > other.distance; return distance > other.distance;
} }
WorkingSetID id; WorkingSetID id;
double distance; double distance;
}; };
// Our index scans aren't deduped so we might see the same doc twic
e in a given
// annulus.
unordered_set<DiskLoc, DiskLoc::Hasher> _seenInScan;
// We compute an annulus of results and cache it here. // We compute an annulus of results and cache it here.
priority_queue<Result> _results; priority_queue<Result> _results;
// For fast invalidation. Perhaps not worth it. // For fast invalidation. Perhaps not worth it.
unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> _invalidation Map; unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> _invalidation Map;
// Geo-related variables. // Geo-related variables.
// At what min distance (arc length) do we start looking for result s? // At what min distance (arc length) do we start looking for result s?
double _minDistance; double _minDistance;
// What's the max distance (arc length) we're willing to look for r esults? // What's the max distance (arc length) we're willing to look for r esults?
 End of changes. 2 change blocks. 
0 lines changed or deleted 9 lines changed or added


 security_key.h   security_key.h 
skipping to change at line 43 skipping to change at line 43
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
namespace mongo { namespace mongo {
/** /**
* @return true if internal authentication parameters has been set up * @return true if internal authentication parameters has been set up
*/ */
extern bool isInternalAuthSet(); extern bool isInternalAuthSet();
/** /**
* This method initializes the internalSecurity object with authenticat ion * This method initializes the internalSecurity object with authenticat ion
* credentials to be used by authenticateInternalUser. This method shou * credentials to be used by authenticateInternalUser.
ld
* only be called once when setting up authentication method for the sy
stem.
*/ */
extern bool setInternalUserAuthParams(BSONObj authParams); extern void setInternalUserAuthParams(const BSONObj& authParamsIn);
/** /**
* This method authenticates to another cluster member using appropriat e * This method authenticates to another cluster member using appropriat e
* authentication data * authentication data
* @return true if the authentication was succesful * @return true if the authentication was succesful
*/ */
extern bool authenticateInternalUser(DBClientWithCommands* conn); extern bool authenticateInternalUser(DBClientWithCommands* conn);
/** /**
* This method checks the validity of filename as a security key, hashe s its * This method checks the validity of filename as a security key, hashe s its
 End of changes. 2 change blocks. 
5 lines changed or deleted 2 lines changed or added


 server_options_helpers.h   server_options_helpers.h 
skipping to change at line 50 skipping to change at line 50
} // namespace optionenvironment } // namespace optionenvironment
namespace moe = mongo::optionenvironment; namespace moe = mongo::optionenvironment;
Status addGeneralServerOptions(moe::OptionSection* options); Status addGeneralServerOptions(moe::OptionSection* options);
Status addWindowsServerOptions(moe::OptionSection* options); Status addWindowsServerOptions(moe::OptionSection* options);
Status addSSLServerOptions(moe::OptionSection* options); Status addSSLServerOptions(moe::OptionSection* options);
/**
* Handle custom validation of server options that can not currently be
done by using
* Constraints in the Environment. See the "validate" function in the
Environment class for
* more details.
*/
Status validateServerOptions(const moe::Environment& params);
/**
* Canonicalize server options for the given environment.
*
* For example, the options "objcheck", "noobjcheck", and "net.wireObje
ctCheck" should all be
* merged into "net.wireObjectCheck".
*/
Status canonicalizeServerOptions(moe::Environment* params);
Status storeServerOptions(const moe::Environment& params, Status storeServerOptions(const moe::Environment& params,
const std::vector<std::string>& args); const std::vector<std::string>& args);
void printCommandLineOpts(); void printCommandLineOpts();
// This function should eventually go away, but needs to be here now be cause we have a lot of // This function should eventually go away, but needs to be here now be cause we have a lot of
// code that is shared between mongod and mongos that must know at runt ime which binary it is in // code that is shared between mongod and mongos that must know at runt ime which binary it is in
bool isMongos(); bool isMongos();
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
0 lines changed or deleted 18 lines changed or added


 shard_filter.h   shard_filter.h 
skipping to change at line 72 skipping to change at line 72
* As implementation details, collection metadata is used to determine the ranges of all data * As implementation details, collection metadata is used to determine the ranges of all data
* not actively migrated (or orphaned). CursorIds are currently used t o establish "active" * not actively migrated (or orphaned). CursorIds are currently used t o establish "active"
* queries before migration commit. * queries before migration commit.
* *
* Combining all this: if a query is started in a db lock and acquires in that (same) lock the * Combining all this: if a query is started in a db lock and acquires in that (same) lock the
* collection metadata and a cursorId, the query will return results fo r exactly the ranges in * collection metadata and a cursorId, the query will return results fo r exactly the ranges in
* the metadata (though of arbitrary staleness). This is the sharded c ollection query contract. * the metadata (though of arbitrary staleness). This is the sharded c ollection query contract.
* *
* END NOTE FROM GREG * END NOTE FROM GREG
* *
* Preconditions: Child must be fetched. TODO XXX: when covering analy * Preconditions: Child must be fetched. TODO: when covering analysis
sis is in just build doc is in just build doc
* and check that against shard key. * and check that against shard key. See SERVER-5022.
*/ */
class ShardFilterStage : public PlanStage { class ShardFilterStage : public PlanStage {
public: public:
ShardFilterStage(const CollectionMetadataPtr& metadata, WorkingSet* ws, PlanStage* child); ShardFilterStage(const CollectionMetadataPtr& metadata, WorkingSet* ws, PlanStage* child);
virtual ~ShardFilterStage(); virtual ~ShardFilterStage();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
 End of changes. 1 change blocks. 
3 lines changed or deleted 3 lines changed or added


 shell_options.h   shell_options.h 
skipping to change at line 56 skipping to change at line 56
std::string url; std::string url;
std::string dbhost; std::string dbhost;
std::string port; std::string port;
std::vector<std::string> files; std::vector<std::string> files;
std::string username; std::string username;
std::string password; std::string password;
bool usingPassword; bool usingPassword;
std::string authenticationMechanism; std::string authenticationMechanism;
std::string authenticationDatabase; std::string authenticationDatabase;
std::string gssapiServiceName;
std::string gssapiHostName;
bool runShell; bool runShell;
bool nodb; bool nodb;
bool norc; bool norc;
std::string script; std::string script;
bool autoKillOp; bool autoKillOp;
bool useWriteCommandsDefault; bool useWriteCommandsDefault;
std::string writeMode;
ShellGlobalParams() : autoKillOp(false), useWriteCommandsDefault(tr ShellGlobalParams() : autoKillOp(false),
ue) { } useWriteCommandsDefault(true),
writeMode("commands") {
}
}; };
extern ShellGlobalParams shellGlobalParams; extern ShellGlobalParams shellGlobalParams;
Status addMongoShellOptions(moe::OptionSection* options); Status addMongoShellOptions(moe::OptionSection* options);
std::string getMongoShellHelp(const StringData& name, const moe::Option Section& options); std::string getMongoShellHelp(const StringData& name, const moe::Option Section& options);
/** /**
* Handle options that should come before validation, such as "help". * Handle options that should come before validation, such as "help".
 End of changes. 3 change blocks. 
2 lines changed or deleted 7 lines changed or added


 signal_handlers.h   signal_handlers.h 
skipping to change at line 36 skipping to change at line 36
* wish to do so, delete this exception statement from your version. If y ou * wish to do so, delete this exception statement from your version. If y ou
* delete this exception statement from all source files in the program, * delete this exception statement from all source files in the program,
* then also delete it in the license file. * then also delete it in the license file.
*/ */
#pragma once #pragma once
namespace mongo { namespace mongo {
/** /**
* Obtains the log file handler and writes the current thread's stack t * Sets up handlers for signals and other events like terminate and new
race to _handler.
* it. This call issues an exit(). The function can safely be called fr
om within a
* signal handler.
* *
* @param signal that this hadler is called for * This must be called very early in main, before runGlobalInitializers ().
*/ */
void printStackAndExit( int signalNum ); void setupSignalHandlers();
/**
* Starts the thread to handle asynchronous signals.
*
* This must be the first thread started from the main thread. Call thi
s immediately after
* initializeServerGlobalState().
*/
void startSignalProcessingThread();
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
7 lines changed or deleted 13 lines changed or added


 sort.h   sort.h 
skipping to change at line 79 skipping to change at line 79
* *
* 'queryObj' is the BSONObj in the .find(...) clause. For multike y arrays we have to * 'queryObj' is the BSONObj in the .find(...) clause. For multike y arrays we have to
* ensure that the value we select to sort by is within bounds gene rated by * ensure that the value we select to sort by is within bounds gene rated by
* executing 'queryObj' using the virtual index with key pattern 's ortSpec'. * executing 'queryObj' using the virtual index with key pattern 's ortSpec'.
*/ */
SortStageKeyGenerator(const BSONObj& sortSpec, const BSONObj& query Obj); SortStageKeyGenerator(const BSONObj& sortSpec, const BSONObj& query Obj);
/** /**
* Returns the key used to sort 'member'. * Returns the key used to sort 'member'.
*/ */
BSONObj getSortKey(const WorkingSetMember& member) const; Status getSortKey(const WorkingSetMember& member, BSONObj* objOut) const;
/** /**
* Passed to std::sort and used to sort the keys that are returned from getSortKey. * Passed to std::sort and used to sort the keys that are returned from getSortKey.
* *
* Returned reference lives as long as 'this'. * Returned reference lives as long as 'this'.
*/ */
const BSONObj& getSortComparator() const { return _comparatorObj; } const BSONObj& getSortComparator() const { return _comparatorObj; }
private: private:
BSONObj getBtreeKey(const BSONObj& memberObj) const; Status getBtreeKey(const BSONObj& memberObj, BSONObj* objOut) const ;
/** /**
* In order to emulate the existing sort behavior we must make unin dexed sort behavior as * In order to emulate the existing sort behavior we must make unin dexed sort behavior as
* consistent as possible with indexed sort behavior. As such, we must only consider index * consistent as possible with indexed sort behavior. As such, we must only consider index
* keys that we would encounter if we were answering the query usin g the sort-providing * keys that we would encounter if we were answering the query usin g the sort-providing
* index. * index.
* *
* Populates _hasBounds and _bounds. * Populates _hasBounds and _bounds.
*/ */
void getBoundsForSort(const BSONObj& queryObj, const BSONObj& sortO bj); void getBoundsForSort(const BSONObj& queryObj, const BSONObj& sortO bj);
skipping to change at line 248 skipping to change at line 248
typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap; typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap;
DataMap _wsidByDiskLoc; DataMap _wsidByDiskLoc;
// //
// Stats // Stats
// //
CommonStats _commonStats; CommonStats _commonStats;
SortStats _specificStats; SortStats _specificStats;
// The usage in bytes of all bufered data that we're sorting. // The usage in bytes of all buffered data that we're sorting.
size_t _memUsage; size_t _memUsage;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
3 lines changed or deleted 3 lines changed or added


 ssl_options.h   ssl_options.h 
skipping to change at line 79 skipping to change at line 79
}; };
extern MONGO_CLIENT_API SSLGlobalParams sslGlobalParams; extern MONGO_CLIENT_API SSLGlobalParams sslGlobalParams;
Status addSSLServerOptions(moe::OptionSection* options); Status addSSLServerOptions(moe::OptionSection* options);
Status addSSLClientOptions(moe::OptionSection* options); Status addSSLClientOptions(moe::OptionSection* options);
Status storeSSLServerOptions(const moe::Environment& params); Status storeSSLServerOptions(const moe::Environment& params);
/**
* Canonicalize SSL options for the given environment that have differe
nt representations with
* the same logical meaning
*/
Status canonicalizeSSLServerOptions(moe::Environment* params);
Status storeSSLClientOptions(const moe::Environment& params); Status storeSSLClientOptions(const moe::Environment& params);
} }
 End of changes. 1 change blocks. 
0 lines changed or deleted 7 lines changed or added


 stage_builder.h   stage_builder.h 
skipping to change at line 43 skipping to change at line 43
#include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_solution.h"
namespace mongo { namespace mongo {
/** /**
* The StageBuilder converts a QuerySolution to an executable tree of P lanStage(s). * The StageBuilder converts a QuerySolution to an executable tree of P lanStage(s).
*/ */
class StageBuilder { class StageBuilder {
public: public:
/** /**
* Turns 'solution' into an executable tree of PlanStage(s). * Turns 'solution' into an executable tree of PlanStage(s). This
function accesses cc()
* and catalog information and as such the caller must have a lock.
* *
* Returns true if the PlanStage tree was built successfully. The root of the tree is in * Returns true if the PlanStage tree was built successfully. The root of the tree is in
* *rootOut and the WorkingSet that the tree uses is in *wsOut. * *rootOut and the WorkingSet that the tree uses is in *wsOut.
* *
* Returns false otherwise. *rootOut and *wsOut are invalid. * Returns false otherwise. *rootOut and *wsOut are invalid.
*/ */
static bool build(const QuerySolution& solution, PlanStage** rootOu t, WorkingSet** wsOut); static bool build(const QuerySolution& solution, PlanStage** rootOu t, WorkingSet** wsOut);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
1 lines changed or deleted 3 lines changed or added


 status-inl.h   status-inl.h 
skipping to change at line 48 skipping to change at line 48
} }
inline bool Status::isOK() const { inline bool Status::isOK() const {
return code() == ErrorCodes::OK; return code() == ErrorCodes::OK;
} }
inline ErrorCodes::Error Status::code() const { inline ErrorCodes::Error Status::code() const {
return _error ? _error->code : ErrorCodes::OK; return _error ? _error->code : ErrorCodes::OK;
} }
inline const char* Status::codeString() const { inline std::string Status::codeString() const {
return ErrorCodes::errorString(code()); return ErrorCodes::errorString(code());
} }
inline std::string Status::reason() const { inline std::string Status::reason() const {
return _error ? _error->reason : std::string(); return _error ? _error->reason : std::string();
} }
inline int Status::location() const { inline int Status::location() const {
return _error ? _error->location : 0; return _error ? _error->location : 0;
} }
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 status.h   status.h 
skipping to change at line 92 skipping to change at line 92
bool operator!=(const ErrorCodes::Error other) const; bool operator!=(const ErrorCodes::Error other) const;
// //
// accessors // accessors
// //
inline bool isOK() const; inline bool isOK() const;
inline ErrorCodes::Error code() const; inline ErrorCodes::Error code() const;
inline const char* codeString() const; inline std::string codeString() const;
inline std::string reason() const; inline std::string reason() const;
inline int location() const; inline int location() const;
std::string toString() const; std::string toString() const;
// //
// Below interface used for testing code only. // Below interface used for testing code only.
// //
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 strategy.h   strategy.h 
skipping to change at line 80 skipping to change at line 80
/** /**
* Executes a command represented in the Request on the sharded clu ster. * Executes a command represented in the Request on the sharded clu ster.
* *
* DEPRECATED: should not be used by new code. * DEPRECATED: should not be used by new code.
*/ */
void clientCommandOp( Request& r ); void clientCommandOp( Request& r );
protected: protected:
void doIndexQuery( Request& r , const Shard& shard );
bool handleSpecialNamespaces( Request& r , QueryMessage& q ); bool handleSpecialNamespaces( Request& r , QueryMessage& q );
}; };
extern Strategy* STRATEGY; extern Strategy* STRATEGY;
} }
 End of changes. 1 change blocks. 
2 lines changed or deleted 0 lines changed or added


 sync_source_feedback.h   sync_source_feedback.h 
skipping to change at line 43 skipping to change at line 43
namespace mongo { namespace mongo {
class Member; class Member;
class SyncSourceFeedback : public BackgroundJob { class SyncSourceFeedback : public BackgroundJob {
public: public:
SyncSourceFeedback() : BackgroundJob(false /*don't selfdelete*/), SyncSourceFeedback() : BackgroundJob(false /*don't selfdelete*/),
_syncTarget(NULL), _syncTarget(NULL),
_oplogReader(new OplogReader()), _oplogReader(new OplogReader()),
_supportsUpdater(false), _supportsUpdater(true),
_positionChanged(false), _positionChanged(false),
_handshakeNeeded(false) {} _handshakeNeeded(false) {}
~SyncSourceFeedback() { ~SyncSourceFeedback() {
delete _oplogReader; delete _oplogReader;
} }
/// Adds an entry to _member for a secondary that has connected to us. /// Adds an entry to _member for a secondary that has connected to us.
void associateMember(const BSONObj& id, const int memberId); void associateMember(const BSONObj& id, const int memberId);
skipping to change at line 133 skipping to change at line 133
} }
void tailCheck() { void tailCheck() {
_oplogReader->tailCheck(); _oplogReader->tailCheck();
} }
void tailingQueryGTE(const char *ns, OpTime t, const BSONObj* field s=0) { void tailingQueryGTE(const char *ns, OpTime t, const BSONObj* field s=0) {
_oplogReader->tailingQueryGTE(ns, t, fields); _oplogReader->tailingQueryGTE(ns, t, fields);
} }
/**
* this mutex protects the _conn field of _oplogReader in that we ca
nnot mix the functions
* which check _conn for null (commonConnect() and connect() do this
) with the function that
* sets the pointer to null (resetConnection()). All other uses of t
he _oplogReader's _conn
* do not need the mutex locked, due to the threading logic that pre
vents _connect()
* from being called concurrently.
*/
boost::mutex oplock;
private: private:
/** /**
* Authenticates _connection using the server's cluster-membership credentials. * Authenticates _connection using the server's cluster-membership credentials.
* *
* Returns true on successful authentication. * Returns true on successful authentication.
*/ */
bool replAuthenticate(); bool replAuthenticate();
/* Sends initialization information to our sync target, also determ ines whether or not they /* Sends initialization information to our sync target, also determ ines whether or not they
* support the updater command. * support the updater command.
 End of changes. 2 change blocks. 
1 lines changed or deleted 14 lines changed or added


 tool_options.h   tool_options.h 
skipping to change at line 58 skipping to change at line 58
ToolGlobalParams() : canUseStdout(true), hostSet(false), portSet(fa lse) { } ToolGlobalParams() : canUseStdout(true), hostSet(false), portSet(fa lse) { }
std::string name; std::string name;
std::string db; std::string db;
std::string coll; std::string coll;
std::string username; std::string username;
std::string password; std::string password;
std::string authenticationDatabase; std::string authenticationDatabase;
std::string authenticationMechanism; std::string authenticationMechanism;
std::string gssapiServiceName;
std::string gssapiHostName;
bool quiet; bool quiet;
bool canUseStdout; bool canUseStdout;
bool noconnection; bool noconnection;
std::vector<std::string> fields; std::vector<std::string> fields;
bool fieldsSpecified; bool fieldsSpecified;
std::string host; // --host std::string host; // --host
bool hostSet; bool hostSet;
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 type_explain.h   type_explain.h 
skipping to change at line 81 skipping to change at line 81
static const BSONField<long long> nScannedObjectsAllPlans; static const BSONField<long long> nScannedObjectsAllPlans;
static const BSONField<long long> nScannedAllPlans; static const BSONField<long long> nScannedAllPlans;
static const BSONField<bool> scanAndOrder; static const BSONField<bool> scanAndOrder;
static const BSONField<bool> indexOnly; static const BSONField<bool> indexOnly;
static const BSONField<long long> nYields; static const BSONField<long long> nYields;
static const BSONField<long long> nChunkSkips; static const BSONField<long long> nChunkSkips;
static const BSONField<long long> millis; static const BSONField<long long> millis;
static const BSONField<BSONObj> indexBounds; static const BSONField<BSONObj> indexBounds;
static const BSONField<std::vector<TypeExplain*> > allPlans; static const BSONField<std::vector<TypeExplain*> > allPlans;
static const BSONField<TypeExplain*> oldPlan; static const BSONField<TypeExplain*> oldPlan;
static const BSONField<bool> indexFilterApplied;
static const BSONField<std::string> server; static const BSONField<std::string> server;
// //
// construction / destruction // construction / destruction
// //
TypeExplain(); TypeExplain();
virtual ~TypeExplain(); virtual ~TypeExplain();
/** Copies all the fields present in 'this' to 'other'. */ /** Copies all the fields present in 'this' to 'other'. */
skipping to change at line 165 skipping to change at line 166
void setIndexOnly(bool indexOnly); void setIndexOnly(bool indexOnly);
void unsetIndexOnly(); void unsetIndexOnly();
bool isIndexOnlySet() const; bool isIndexOnlySet() const;
bool getIndexOnly() const; bool getIndexOnly() const;
void setIDHack(bool idhack); void setIDHack(bool idhack);
void unsetIDHack(); void unsetIDHack();
bool isIDHackSet() const; bool isIDHackSet() const;
bool getIDHack() const; bool getIDHack() const;
void setIndexFilterApplied(bool indexFilterApplied);
void unsetIndexFilterApplied();
bool isIndexFilterAppliedSet() const;
bool getIndexFilterApplied() const;
void setNYields(long long nYields); void setNYields(long long nYields);
void unsetNYields(); void unsetNYields();
bool isNYieldsSet() const; bool isNYieldsSet() const;
long long getNYields() const; long long getNYields() const;
void setNChunkSkips(long long nChunkSkips); void setNChunkSkips(long long nChunkSkips);
void unsetNChunkSkips(); void unsetNChunkSkips();
bool isNChunkSkipsSet() const; bool isNChunkSkipsSet() const;
long long getNChunkSkips() const; long long getNChunkSkips() const;
skipping to change at line 252 skipping to change at line 258
bool _isScanAndOrderSet; bool _isScanAndOrderSet;
// (O) number of entries retrieved either from an index or collect ion across all plans // (O) number of entries retrieved either from an index or collect ion across all plans
bool _indexOnly; bool _indexOnly;
bool _isIndexOnlySet; bool _isIndexOnlySet;
// (O) whether the idhack was used to answer this query // (O) whether the idhack was used to answer this query
bool _idHack; bool _idHack;
bool _isIDHackSet; bool _isIDHackSet;
// (O) whether index filters were used in planning this query
bool _indexFilterApplied;
bool _isIndexFilterAppliedSet;
// (O) number times this plan released and reacquired its lock // (O) number times this plan released and reacquired its lock
long long _nYields; long long _nYields;
bool _isNYieldsSet; bool _isNYieldsSet;
// (O) number times this plan skipped over migrated data // (O) number times this plan skipped over migrated data
long long _nChunkSkips; long long _nChunkSkips;
bool _isNChunkSkipsSet; bool _isNChunkSkipsSet;
// (O) elapsed time this plan took running, in milliseconds // (O) elapsed time this plan took running, in milliseconds
long long _millis; long long _millis;
 End of changes. 3 change blocks. 
0 lines changed or deleted 10 lines changed or added


 update.h   update.h 
skipping to change at line 43 skipping to change at line 43
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/curop.h" #include "mongo/db/curop.h"
#include "mongo/db/ops/update_request.h" #include "mongo/db/ops/update_request.h"
#include "mongo/db/ops/update_result.h" #include "mongo/db/ops/update_result.h"
namespace mongo { namespace mongo {
class CanonicalQuery; class CanonicalQuery;
class UpdateDriver; class UpdateDriver;
/**
* Utility method to execute an update described by "request".
*
* Caller must hold the appropriate database locks.
*/
UpdateResult update(const UpdateRequest& request, OpDebug* opDebug); UpdateResult update(const UpdateRequest& request, OpDebug* opDebug);
/**
* Execute the update described by "request", using the given already-p
arsed
* driver and canonical query.
*
* NOTE: This function is really a utility method for UpdateExecutor.
*
* TODO: Move this into a private method of UpdateExecutor.
*/
UpdateResult update(const UpdateRequest& request, UpdateResult update(const UpdateRequest& request,
OpDebug* opDebug, OpDebug* opDebug,
UpdateDriver* driver, UpdateDriver* driver,
CanonicalQuery* cq); CanonicalQuery* cq);
/** /**
* takes the from document and returns a new document * takes the from document and returns a new document
* after apply all the operators * after apply all the operators
* e.g. * e.g.
* applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) ); * applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
 End of changes. 2 change blocks. 
0 lines changed or deleted 14 lines changed or added


 update_driver.h   update_driver.h 
skipping to change at line 58 skipping to change at line 58
struct Options; struct Options;
UpdateDriver(const Options& opts); UpdateDriver(const Options& opts);
~UpdateDriver(); ~UpdateDriver();
/** /**
* Returns OK and fills in '_mods' if 'updateExpr' is correct. Othe rwise returns an * Returns OK and fills in '_mods' if 'updateExpr' is correct. Othe rwise returns an
* error status with a corresponding description. * error status with a corresponding description.
*/ */
Status parse(const BSONObj& updateExpr); Status parse(const BSONObj& updateExpr, const bool multi = false);
/** /**
* Fills in document with any fields in the query which are valid. * Fills in document with any fields in the query which are valid.
* *
* Valid fields include equality matches like "a":1, or "a.b":false * Valid fields include equality matches like "a":1, or "a.b":false
* *
* Each valid field will be expanded (from dot notation) and confli cts will be * Each valid field will be expanded (from dot notation) and confli cts will be
* checked for all fields added to the underlying document. * checked for all fields added to the underlying document.
* *
* Returns Status::OK() if the document can be used. If there are a ny error or * Returns Status::OK() if the document can be used. If there are a ny error or
skipping to change at line 112 skipping to change at line 112
// Accessors // Accessors
// //
size_t numMods() const; size_t numMods() const;
bool isDocReplacement() const; bool isDocReplacement() const;
bool modsAffectIndices() const; bool modsAffectIndices() const;
void refreshIndexKeys(const IndexPathSet* indexedFields); void refreshIndexKeys(const IndexPathSet* indexedFields);
bool multi() const;
void setMulti(bool multi);
bool upsert() const;
void setUpsert(bool upsert);
bool logOp() const; bool logOp() const;
void setLogOp(bool logOp); void setLogOp(bool logOp);
ModifierInterface::Options modOptions() const; ModifierInterface::Options modOptions() const;
void setModOptions(ModifierInterface::Options modOpts); void setModOptions(ModifierInterface::Options modOpts);
ModifierInterface::ExecInfo::UpdateContext context() const; ModifierInterface::ExecInfo::UpdateContext context() const;
void setContext(ModifierInterface::ExecInfo::UpdateContext context) ; void setContext(ModifierInterface::ExecInfo::UpdateContext context) ;
mutablebson::Document& getDocument() { mutablebson::Document& getDocument() {
skipping to change at line 168 skipping to change at line 162
// What are the list of fields in the collection over which the upd ate is going to be // What are the list of fields in the collection over which the upd ate is going to be
// applied that participate in indices? // applied that participate in indices?
// //
// NOTE: Owned by the collection's info cache!. // NOTE: Owned by the collection's info cache!.
const IndexPathSet* _indexedFields; const IndexPathSet* _indexedFields;
// //
// mutable properties after parsing // mutable properties after parsing
// //
// May this driver apply updates to several documents?
bool _multi;
// May this driver construct a new object if an update for a non-ex
isting one is sent?
bool _upsert;
// Should this driver generate an oplog record when it applies the update? // Should this driver generate an oplog record when it applies the update?
bool _logOp; bool _logOp;
// The options to initiate the mods with // The options to initiate the mods with
ModifierInterface::Options _modOptions; ModifierInterface::Options _modOptions;
// Are any of the fields mentioned in the mods participating in any index? Is set anew // Are any of the fields mentioned in the mods participating in any index? Is set anew
// at each call to update. // at each call to update.
bool _affectIndices; bool _affectIndices;
skipping to change at line 198 skipping to change at line 186
ModifierInterface::ExecInfo::UpdateContext _context; ModifierInterface::ExecInfo::UpdateContext _context;
// The document used to represent or store the object being updated . // The document used to represent or store the object being updated .
mutablebson::Document _objDoc; mutablebson::Document _objDoc;
// The document used to build the oplog entry for the update. // The document used to build the oplog entry for the update.
mutablebson::Document _logDoc; mutablebson::Document _logDoc;
}; };
struct UpdateDriver::Options { struct UpdateDriver::Options {
bool multi;
bool upsert;
bool logOp; bool logOp;
ModifierInterface::Options modOptions; ModifierInterface::Options modOptions;
Options() : multi(false), upsert(false), logOp(false), modOptions() {} Options() : logOp(false), modOptions() {}
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
17 lines changed or deleted 2 lines changed or added


 user_management_commands_parser.h   user_management_commands_parser.h 
skipping to change at line 219 skipping to change at line 219
/** /**
* Takes a BSONArray of name,db pair documents, parses that array and r eturns (via the * Takes a BSONArray of name,db pair documents, parses that array and r eturns (via the
* output param parsedUserNames) a list of the usernames in the input a rray. * output param parsedUserNames) a list of the usernames in the input a rray.
* Performs syntactic validation of "usersArray", only. * Performs syntactic validation of "usersArray", only.
*/ */
Status parseUserNamesFromBSONArray(const BSONArray& usersArray, Status parseUserNamesFromBSONArray(const BSONArray& usersArray,
const StringData& dbname, const StringData& dbname,
std::vector<UserName>* parsedUserNam es); std::vector<UserName>* parsedUserNam es);
struct MergeAuthzCollectionsArgs {
std::string usersCollName;
std::string rolesCollName;
bool drop;
BSONObj writeConcern;
MergeAuthzCollectionsArgs() : drop(false) {}
};
/**
* Takes a command object describing an invocation of the "_mergeAuthzC
ollections" command and
* parses out the name of the temporary collections to use for user and
role data, whether or
* not to drop the existing users/roles, and the writeConcern.
*/
Status parseMergeAuthzCollectionsCommand(const BSONObj& cmdObj,
MergeAuthzCollectionsArgs* par
sedArgs);
} // namespace auth } // namespace auth
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 user_set.h   user_set.h 
skipping to change at line 87 skipping to change at line 87
// Returns the User with the given name, or NULL if not found. // Returns the User with the given name, or NULL if not found.
// Ownership of the returned User remains with the UserSet. The po inter // Ownership of the returned User remains with the UserSet. The po inter
// returned is only guaranteed to remain valid until the next non-c onst method is called // returned is only guaranteed to remain valid until the next non-c onst method is called
// on the UserSet. // on the UserSet.
User* lookup(const UserName& name) const; User* lookup(const UserName& name) const;
// Gets the user whose authentication credentials came from dbname, or NULL if none // Gets the user whose authentication credentials came from dbname, or NULL if none
// exist. There should be at most one such user. // exist. There should be at most one such user.
User* lookupByDBName(const StringData& dbname) const; User* lookupByDBName(const StringData& dbname) const;
// Returns how many users are in the set.
size_t size() const { return _users.size(); };
// Gets an iterator over the names of the users stored in the set. The iterator is // Gets an iterator over the names of the users stored in the set. The iterator is
// valid until the next non-const method is called on the UserSet. // valid until the next non-const method is called on the UserSet.
UserNameIterator getNames() const; UserNameIterator getNames() const;
iterator begin() const { return _users.begin(); } iterator begin() const { return _users.begin(); }
iterator end() const { return _usersEnd; } iterator end() const { return _usersEnd; }
private: private:
typedef std::vector<User*>::iterator mutable_iterator; typedef std::vector<User*>::iterator mutable_iterator;
 End of changes. 1 change blocks. 
3 lines changed or deleted 0 lines changed or added


 v8_db.h   v8_db.h 
skipping to change at line 109 skipping to change at line 109
// UUID constructor // UUID constructor
v8::Handle<v8::Value> uuidInit(V8Scope* scope, const v8::Arguments& arg s); v8::Handle<v8::Value> uuidInit(V8Scope* scope, const v8::Arguments& arg s);
// MD5 constructor // MD5 constructor
v8::Handle<v8::Value> md5Init(V8Scope* scope, const v8::Arguments& args ); v8::Handle<v8::Value> md5Init(V8Scope* scope, const v8::Arguments& args );
// HexData constructor // HexData constructor
v8::Handle<v8::Value> hexDataInit(V8Scope* scope, const v8::Arguments& args); v8::Handle<v8::Value> hexDataInit(V8Scope* scope, const v8::Arguments& args);
// Object.invalidForStorage()
v8::Handle<v8::Value> v8ObjectInvalidForStorage(V8Scope* scope, const v
8::Arguments& args);
// Object.bsonsize() // Object.bsonsize()
v8::Handle<v8::Value> bsonsize(V8Scope* scope, const v8::Arguments& arg s); v8::Handle<v8::Value> bsonsize(V8Scope* scope, const v8::Arguments& arg s);
// global method // global method
// Accepts 2 objects, converts them to BSONObj and calls woCompare on t he first against the // Accepts 2 objects, converts them to BSONObj and calls woCompare on t he first against the
// second. // second.
v8::Handle<v8::Value> bsonWoCompare(V8Scope* scope, const v8::Arguments & args); v8::Handle<v8::Value> bsonWoCompare(V8Scope* scope, const v8::Arguments & args);
// 'db.collection' property handlers // 'db.collection' property handlers
v8::Handle<v8::Value> collectionGetter(v8::Local<v8::String> name, v8::Handle<v8::Value> collectionGetter(v8::Local<v8::String> name,
 End of changes. 1 change blocks. 
0 lines changed or deleted 4 lines changed or added


 working_set.h   working_set.h 
skipping to change at line 73 skipping to change at line 73
/** /**
* Allocate a new query result and return the ID used to get and fr ee it. * Allocate a new query result and return the ID used to get and fr ee it.
*/ */
WorkingSetID allocate(); WorkingSetID allocate();
/** /**
* Get the i-th mutable query result. The pointer will be valid for this id until freed. * Get the i-th mutable query result. The pointer will be valid for this id until freed.
* Do not delete the returned pointer as the WorkingSet retains own ership. Call free() to * Do not delete the returned pointer as the WorkingSet retains own ership. Call free() to
* release it. * release it.
*/ */
WorkingSetMember* get(const WorkingSetID& i) { WorkingSetMember* get(const WorkingSetID& i) const {
dassert(i < _data.size()); // ID has been allocated. dassert(i < _data.size()); // ID has been allocated.
dassert(_data[i].nextFreeOrSelf == i); // ID currently in use. dassert(_data[i].nextFreeOrSelf == i); // ID currently in use.
return _data[i].member; return _data[i].member;
} }
/** /**
* Deallocate the i-th query result and release its resources. * Deallocate the i-th query result and release its resources.
*/ */
void free(const WorkingSetID& i); void free(const WorkingSetID& i);
skipping to change at line 251 skipping to change at line 251
* getFieldDotted uses its state (obj or index data) to produce the field with the provided * getFieldDotted uses its state (obj or index data) to produce the field with the provided
* name. * name.
* *
* Returns true if there is the element is in an index key or in an (owned or unowned) * Returns true if there is the element is in an index key or in an (owned or unowned)
* object. *out is set to the element if so. * object. *out is set to the element if so.
* *
* Returns false otherwise. Returning false indicates a query plan ning error. * Returns false otherwise. Returning false indicates a query plan ning error.
*/ */
bool getFieldDotted(const string& field, BSONElement* out) const; bool getFieldDotted(const string& field, BSONElement* out) const;
/**
* Returns expected memory usage of working set member.
*/
size_t getMemUsage() const;
private: private:
boost::scoped_ptr<WorkingSetComputedData> _computed[WSM_COMPUTED_NU M_TYPES]; boost::scoped_ptr<WorkingSetComputedData> _computed[WSM_COMPUTED_NU M_TYPES];
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 6 lines changed or added


 working_set_common.h   working_set_common.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
namespace mongo { #include "mongo/db/exec/working_set.h"
class WorkingSetMember; namespace mongo {
class WorkingSetCommon { class WorkingSetCommon {
public: public:
/** /**
* Get an owned copy of the BSONObj the WSM refers to. * Get an owned copy of the BSONObj the WSM refers to.
* Requires either a valid BSONObj or valid DiskLoc. * Requires either a valid BSONObj or valid DiskLoc.
* Returns true if the fetch and invalidate succeeded, false otherw ise. * Returns true if the fetch and invalidate succeeded, false otherw ise.
*/ */
static bool fetchAndInvalidateLoc(WorkingSetMember* member); static bool fetchAndInvalidateLoc(WorkingSetMember* member);
/** /**
* Initialize the fields in 'dest' from 'src', creating copies of o wned objects as needed. * Initialize the fields in 'dest' from 'src', creating copies of o wned objects as needed.
*/ */
static void initFrom(WorkingSetMember* dest, const WorkingSetMember & src); static void initFrom(WorkingSetMember* dest, const WorkingSetMember & src);
/**
* Allocate a new WSM and initialize it with
* the code and reason from the status.
* Owned BSON object will have the following layout:
* {
* ok: <ok>, // 1 for OK; 0 otherwise.
* code: <code>, // Status::code()
* errmsg: <errmsg> // Status::reason()
* }
*/
static WorkingSetID allocateStatusMember(WorkingSet* ws, const Stat
us& status);
/**
* Returns true if object was created by allocateStatusMember().
*/
static bool isValidStatusMemberObject(const BSONObj& obj);
/**
* Returns object in working set member created with allocateStatus
Member().
* Does not assume isValidStatusMemberObject.
* If the WSID is invalid or the working set member is created by
* allocateStatusMember, objOut will not be updated.
*/
static void getStatusMemberObject(const WorkingSet& ws, WorkingSetI
D wsid,
BSONObj* objOut);
/**
* Formats working set member object created with allocateStatusMem
ber().
*/
static std::string toStatusString(const BSONObj& obj);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
2 lines changed or deleted 37 lines changed or added


 write_commands.h   write_commands.h 
skipping to change at line 60 skipping to change at line 60
virtual ~WriteCmd() {} virtual ~WriteCmd() {}
protected: protected:
/** /**
* Instantiates a command that can be invoked by "name", which will be capable of issuing * Instantiates a command that can be invoked by "name", which will be capable of issuing
* write batches of type "writeType", and will require privilege "a ction" to run. * write batches of type "writeType", and will require privilege "a ction" to run.
*/ */
WriteCmd( const StringData& name, BatchedCommandRequest::BatchType writeType ); WriteCmd( const StringData& name, BatchedCommandRequest::BatchType writeType );
// Full log of write command can be quite large.
static void redactTooLongLog( mutablebson::Document* cmdObj, const
StringData& fieldName );
private: private:
virtual bool logTheOp(); virtual bool logTheOp();
virtual bool slaveOk() const; virtual bool slaveOk() const;
virtual LockType locktype() const; virtual LockType locktype() const;
virtual Status checkAuthForCommand( ClientBasic* client, virtual Status checkAuthForCommand( ClientBasic* client,
const std::string& dbname, const std::string& dbname,
const BSONObj& cmdObj ); const BSONObj& cmdObj );
skipping to change at line 89 skipping to change at line 92
bool fromRepl); bool fromRepl);
// Type of batch (e.g. insert). // Type of batch (e.g. insert).
BatchedCommandRequest::BatchType _writeType; BatchedCommandRequest::BatchType _writeType;
}; };
class CmdInsert : public WriteCmd { class CmdInsert : public WriteCmd {
MONGO_DISALLOW_COPYING(CmdInsert); MONGO_DISALLOW_COPYING(CmdInsert);
public: public:
CmdInsert(); CmdInsert();
void redactForLogging(mutablebson::Document* cmdObj);
private: private:
virtual void help(stringstream& help) const; virtual void help(stringstream& help) const;
}; };
class CmdUpdate : public WriteCmd { class CmdUpdate : public WriteCmd {
MONGO_DISALLOW_COPYING(CmdUpdate); MONGO_DISALLOW_COPYING(CmdUpdate);
public: public:
CmdUpdate(); CmdUpdate();
void redactForLogging(mutablebson::Document* cmdObj);
private: private:
virtual void help(stringstream& help) const; virtual void help(stringstream& help) const;
}; };
class CmdDelete : public WriteCmd { class CmdDelete : public WriteCmd {
MONGO_DISALLOW_COPYING(CmdDelete); MONGO_DISALLOW_COPYING(CmdDelete);
public: public:
CmdDelete(); CmdDelete();
void redactForLogging(mutablebson::Document* cmdObj);
private: private:
virtual void help(stringstream& help) const; virtual void help(stringstream& help) const;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
0 lines changed or deleted 7 lines changed or added


 write_concern.h   write_concern.h 
skipping to change at line 53 skipping to change at line 53
} }
void reset() { void reset() {
syncMillis = -1; syncMillis = -1;
fsyncFiles = -1; fsyncFiles = -1;
wTimedOut = false; wTimedOut = false;
wTime = -1; wTime = -1;
err = ""; err = "";
} }
void appendTo( BSONObjBuilder* result ) const; void appendTo( const WriteConcernOptions& writeConcern, BSONObjBuil der* result ) const;
int syncMillis; int syncMillis;
int fsyncFiles; int fsyncFiles;
bool wTimedOut; bool wTimedOut;
int wTime; int wTime;
vector<BSONObj> writtenTo; vector<BSONObj> writtenTo;
string err; // this is the old err field, should deprecate string err; // this is the old err field, should deprecate
}; };
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 write_op.h   write_op.h 
skipping to change at line 128 skipping to change at line 128
* The ShardTargeter determines the ShardEndpoints to send child wr ites to, but is not * The ShardTargeter determines the ShardEndpoints to send child wr ites to, but is not
* modified by this operation. * modified by this operation.
* *
* Returns !OK if the targeting process itself fails * Returns !OK if the targeting process itself fails
* (no TargetedWrites will be added, state unchanged) * (no TargetedWrites will be added, state unchanged)
*/ */
Status targetWrites( const NSTargeter& targeter, Status targetWrites( const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites ); std::vector<TargetedWrite*>* targetedWrites );
/** /**
* Returns the number of child writes that were last targeted.
*/
size_t getNumTargeted();
/**
* Resets the state of this write op to _Ready and stops waiting fo r any outstanding * Resets the state of this write op to _Ready and stops waiting fo r any outstanding
* TargetedWrites. Optional error can be provided for reporting. * TargetedWrites. Optional error can be provided for reporting.
* *
* Can only be called when state is _Pending, or is a no-op if call ed when the state * Can only be called when state is _Pending, or is a no-op if call ed when the state
* is still _Ready (and therefore no writes are pending). * is still _Ready (and therefore no writes are pending).
*/ */
void cancelWrites( const WriteErrorDetail* why ); void cancelWrites( const WriteErrorDetail* why );
/** /**
* Marks the targeted write as finished for this write op. * Marks the targeted write as finished for this write op.
 End of changes. 1 change blocks. 
0 lines changed or deleted 5 lines changed or added

This html diff was produced by rfcdiff 1.41. The latest version is available from http://tools.ietf.org/tools/rfcdiff/