2d_access_method.h | 2d_access_method.h | |||
---|---|---|---|---|
skipping to change at line 42 | skipping to change at line 42 | |||
#include "mongo/db/index/2d_common.h" | #include "mongo/db/index/2d_common.h" | |||
#include "mongo/db/index/btree_access_method_internal.h" | #include "mongo/db/index/btree_access_method_internal.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
namespace mongo { | namespace mongo { | |||
class IndexCursor; | class IndexCursor; | |||
class IndexDescriptor; | class IndexDescriptor; | |||
struct TwoDIndexingParams; | struct TwoDIndexingParams; | |||
namespace twod_exec { | ||||
class GeoPoint; | ||||
class GeoAccumulator; | ||||
class GeoBrowse; | ||||
class GeoHopper; | ||||
class GeoSearch; | ||||
class GeoCircleBrowse; | ||||
class GeoBoxBrowse; | ||||
class GeoPolygonBrowse; | ||||
class TwoDGeoNearRunner; | ||||
} | ||||
namespace twod_internal { | namespace twod_internal { | |||
class GeoPoint; | class GeoPoint; | |||
class GeoAccumulator; | class GeoAccumulator; | |||
class GeoBrowse; | class GeoBrowse; | |||
class GeoHopper; | class GeoHopper; | |||
class GeoSearch; | class GeoSearch; | |||
class GeoCircleBrowse; | class GeoCircleBrowse; | |||
class GeoBoxBrowse; | class GeoBoxBrowse; | |||
class GeoPolygonBrowse; | class GeoPolygonBrowse; | |||
class TwoDGeoNearRunner; | class TwoDGeoNearRunner; | |||
skipping to change at line 75 | skipping to change at line 87 | |||
private: | private: | |||
friend class TwoDIndexCursor; | friend class TwoDIndexCursor; | |||
friend class twod_internal::GeoPoint; | friend class twod_internal::GeoPoint; | |||
friend class twod_internal::GeoAccumulator; | friend class twod_internal::GeoAccumulator; | |||
friend class twod_internal::GeoBrowse; | friend class twod_internal::GeoBrowse; | |||
friend class twod_internal::GeoHopper; | friend class twod_internal::GeoHopper; | |||
friend class twod_internal::GeoSearch; | friend class twod_internal::GeoSearch; | |||
friend class twod_internal::GeoCircleBrowse; | friend class twod_internal::GeoCircleBrowse; | |||
friend class twod_internal::GeoBoxBrowse; | friend class twod_internal::GeoBoxBrowse; | |||
friend class twod_internal::GeoPolygonBrowse; | friend class twod_internal::GeoPolygonBrowse; | |||
friend class twod_exec::GeoPoint; | ||||
friend class twod_exec::GeoAccumulator; | ||||
friend class twod_exec::GeoBrowse; | ||||
friend class twod_exec::GeoHopper; | ||||
friend class twod_exec::GeoSearch; | ||||
friend class twod_exec::GeoCircleBrowse; | ||||
friend class twod_exec::GeoBoxBrowse; | ||||
friend class twod_exec::GeoPolygonBrowse; | ||||
friend class twod_internal::TwoDGeoNearRunner; | friend class twod_internal::TwoDGeoNearRunner; | |||
BtreeInterface* getInterface() { return _interface; } | BtreeInterface* getInterface() { return _interface; } | |||
IndexDescriptor* getDescriptor() { return _descriptor; } | IndexDescriptor* getDescriptor() { return _descriptor; } | |||
TwoDIndexingParams& getParams() { return _params; } | TwoDIndexingParams& getParams() { return _params; } | |||
// This really gets the 'locs' from the provided obj. | // This really gets the 'locs' from the provided obj. | |||
void getKeys(const BSONObj& obj, vector<BSONObj>& locs) const; | void getKeys(const BSONObj& obj, vector<BSONObj>& locs) const; | |||
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); | virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); | |||
End of changes. 2 change blocks. | ||||
0 lines changed or deleted | 22 lines changed or added | |||
2d_index_cursor.h | 2d_index_cursor.h | |||
---|---|---|---|---|
skipping to change at line 45 | skipping to change at line 45 | |||
#include "mongo/db/index/2d_common.h" | #include "mongo/db/index/2d_common.h" | |||
#include "mongo/db/index/index_cursor.h" | #include "mongo/db/index/index_cursor.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/pdfile.h" | #include "mongo/db/pdfile.h" | |||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
namespace mongo { | namespace mongo { | |||
class TwoDAccessMethod; | class TwoDAccessMethod; | |||
class GeoNearArguments; | class GeoNearArguments; | |||
class IndexDescriptor; | ||||
namespace twod_internal { | namespace twod_internal { | |||
class GeoCursorBase; | class GeoCursorBase; | |||
class TwoDGeoNearRunner { | class TwoDGeoNearRunner { | |||
public: | public: | |||
static bool run2DGeoNear(NamespaceDetails* nsd, int idxNo, const BSONObj& cmdObj, | static bool run2DGeoNear(IndexCatalog* catalog, IndexDescriptor* descriptor, const BSONObj& cmdObj, | |||
const GeoNearArguments &parsedArgs, string& errmsg, | const GeoNearArguments &parsedArgs, string& errmsg, | |||
BSONObjBuilder& result, unordered_map<string, double>* stats); | BSONObjBuilder& result, unordered_map<string, double>* stats); | |||
}; | }; | |||
} | } | |||
class TwoDIndexCursor : public IndexCursor { | class TwoDIndexCursor : public IndexCursor { | |||
public: | public: | |||
TwoDIndexCursor(TwoDAccessMethod* accessMethod); | TwoDIndexCursor(TwoDAccessMethod* accessMethod); | |||
/** | /** | |||
End of changes. 2 change blocks. | ||||
2 lines changed or deleted | 4 lines changed or added | |||
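Note on the change above: run2DGeoNear is now handed the index as an IndexCatalog/IndexDescriptor pair instead of a NamespaceDetails pointer plus index number. A minimal sketch of an updated call site, assuming the catalog and descriptor for the 2d index are already in scope (their lookup is not part of this header):

    // Hypothetical call site; 'catalog' and 'descriptor' are assumed to be obtained
    // from the collection's index catalog by the surrounding command code.
    unordered_map<string, double> stats;
    BSONObjBuilder result;
    string errmsg;
    bool ok = twod_internal::TwoDGeoNearRunner::run2DGeoNear(
            catalog, descriptor, cmdObj, parsedArgs, errmsg, result, &stats);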
accumulator.h | accumulator.h | |||
---|---|---|---|---|
skipping to change at line 128 | skipping to change at line 128 | |||
class AccumulatorSum : public Accumulator { | class AccumulatorSum : public Accumulator { | |||
public: | public: | |||
virtual void processInternal(const Value& input, bool merging); | virtual void processInternal(const Value& input, bool merging); | |||
virtual Value getValue(bool toBeMerged) const; | virtual Value getValue(bool toBeMerged) const; | |||
virtual const char* getOpName() const; | virtual const char* getOpName() const; | |||
virtual void reset(); | virtual void reset(); | |||
static intrusive_ptr<Accumulator> create(); | static intrusive_ptr<Accumulator> create(); | |||
protected: /* reused by AccumulatorAvg */ | private: | |||
AccumulatorSum(); | AccumulatorSum(); | |||
BSONType totalType; | BSONType totalType; | |||
long long longTotal; | long long longTotal; | |||
double doubleTotal; | double doubleTotal; | |||
// count is only used by AccumulatorAvg, but lives here to avoid counting non-numeric values | ||||
long long count; | ||||
}; | }; | |||
class AccumulatorMinMax : public Accumulator { | class AccumulatorMinMax : public Accumulator { | |||
public: | public: | |||
virtual void processInternal(const Value& input, bool merging); | virtual void processInternal(const Value& input, bool merging); | |||
virtual Value getValue(bool toBeMerged) const; | virtual Value getValue(bool toBeMerged) const; | |||
virtual const char* getOpName() const; | virtual const char* getOpName() const; | |||
virtual void reset(); | virtual void reset(); | |||
static intrusive_ptr<Accumulator> createMin(); | static intrusive_ptr<Accumulator> createMin(); | |||
skipping to change at line 170 | skipping to change at line 168 | |||
virtual void reset(); | virtual void reset(); | |||
static intrusive_ptr<Accumulator> create(); | static intrusive_ptr<Accumulator> create(); | |||
private: | private: | |||
AccumulatorPush(); | AccumulatorPush(); | |||
vector<Value> vpValue; | vector<Value> vpValue; | |||
}; | }; | |||
class AccumulatorAvg : public AccumulatorSum { | class AccumulatorAvg : public Accumulator { | |||
typedef AccumulatorSum Super; | ||||
public: | public: | |||
virtual void processInternal(const Value& input, bool merging); | virtual void processInternal(const Value& input, bool merging); | |||
virtual Value getValue(bool toBeMerged) const; | virtual Value getValue(bool toBeMerged) const; | |||
virtual const char* getOpName() const; | virtual const char* getOpName() const; | |||
virtual void reset(); | virtual void reset(); | |||
static intrusive_ptr<Accumulator> create(); | static intrusive_ptr<Accumulator> create(); | |||
private: | private: | |||
AccumulatorAvg(); | AccumulatorAvg(); | |||
double _total; | ||||
long long _count; | ||||
}; | }; | |||
} | } | |||
End of changes. 4 change blocks. | ||||
6 lines changed or deleted | 5 lines changed or added | |||
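The change above makes AccumulatorAvg a direct Accumulator subclass that carries its own _total and _count instead of borrowing AccumulatorSum's protected state. A self-contained sketch (not the real class) of how an averaging accumulator with that member layout typically behaves, including merging of partial results; the merge protocol here is an assumption for illustration:

    // Illustrative only: mirrors the double _total / long long _count layout above.
    struct AvgSketch {
        double total;
        long long count;
        AvgSketch() : total(0.0), count(0) {}
        void process(double v) { total += v; ++count; }   // accumulate one input value
        void merge(const AvgSketch& partial) {            // combine a partial result
            total += partial.total;
            count += partial.count;
        }
        double value() const { return count ? total / count : 0.0; }
    };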
action_set.h | action_set.h | |||
---|---|---|---|---|
skipping to change at line 29 | skipping to change at line 29 | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/auth/action_type.h" | #include "mongo/db/auth/action_type.h" | |||
namespace mongo { | namespace mongo { | |||
/* | /* | |||
* An ActionSet is a bitmask of ActionTypes that represents a set of actions. | * An ActionSet is a bitmask of ActionTypes that represents a set of actions. | |||
* These are the actions that a Privilege can grant a user to perform on a resource. | * These are the actions that a Privilege can grant a user to perform on a resource. | |||
* If the special ActionType::anyAction is granted to this set, it automatically sets all bits | ||||
* in the bitmask, indicating that it contains all possible actions. | ||||
*/ | */ | |||
class ActionSet { | class ActionSet { | |||
public: | public: | |||
ActionSet() : _actions(0) {} | ActionSet() : _actions(0) {} | |||
void addAction(const ActionType& action); | void addAction(const ActionType& action); | |||
void addAllActionsFromSet(const ActionSet& actionSet); | void addAllActionsFromSet(const ActionSet& actionSet); | |||
void addAllActions(); | void addAllActions(); | |||
// Removes action from the set.  Also removes the "anyAction" action, if present. | ||||
// Note: removing the "anyAction" action does *not* remove all other actions. | ||||
void removeAction(const ActionType& action); | void removeAction(const ActionType& action); | |||
void removeAllActionsFromSet(const ActionSet& actionSet); | void removeAllActionsFromSet(const ActionSet& actionSet); | |||
void removeAllActions(); | void removeAllActions(); | |||
bool empty() const { return _actions.none(); } | bool empty() const { return _actions.none(); } | |||
bool equals(const ActionSet& other) const { return this->_actions == other._actions; } | bool equals(const ActionSet& other) const { return this->_actions == other._actions; } | |||
bool contains(const ActionType& action) const; | bool contains(const ActionType& action) const; | |||
skipping to change at line 63 | skipping to change at line 67 | |||
// Returns the string representation of this ActionSet | // Returns the string representation of this ActionSet | |||
std::string toString() const; | std::string toString() const; | |||
// Returns a vector of strings representing the actions in the ActionSet. | // Returns a vector of strings representing the actions in the ActionSet. | |||
std::vector<std::string> getActionsAsStrings() const; | std::vector<std::string> getActionsAsStrings() const; | |||
// Takes a comma-separated string of action type string representations and returns | // Takes a comma-separated string of action type string representations and returns | |||
// an int bitmask of the actions. | // an int bitmask of the actions. | |||
static Status parseActionSetFromString(const std::string& actionsString, ActionSet* result); | static Status parseActionSetFromString(const std::string& actionsString, ActionSet* result); | |||
// Takes a vector of action type string representations and returns an ActionSet of the | ||||
// actions. | ||||
static Status parseActionSetFromStringVector(const std::vector<std::string>& actionsVector, | ||||
                                             ActionSet* result); | ||||
private: | private: | |||
// bitmask of actions this privilege grants | // bitmask of actions this privilege grants | |||
std::bitset<ActionType::NUM_ACTION_TYPES> _actions; | std::bitset<ActionType::NUM_ACTION_TYPES> _actions; | |||
}; | }; | |||
static inline bool operator==(const ActionSet& lhs, const ActionSet& rh s) { | static inline bool operator==(const ActionSet& lhs, const ActionSet& rh s) { | |||
return lhs.equals(rhs); | return lhs.equals(rhs); | |||
} | } | |||
End of changes. 3 change blocks. | ||||
0 lines changed or deleted | 14 lines changed or added | |||
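Taken together, the additions above document ActionSet's bitmask behaviour (anyAction sets every bit; removeAction also clears anyAction) and add a vector-based parser. A short usage sketch built only from the members declared above; the action spellings in the parse string are assumed to match their ActionType constant names:

    ActionSet actions;
    actions.addAction(ActionType::find);
    actions.addAction(ActionType::insert);
    bool canInsert = actions.contains(ActionType::insert);   // true

    ActionSet everything;
    everything.addAllActions();                    // per the new comment, sets all bits
    everything.removeAction(ActionType::shutdown); // also clears anyAction, keeps the rest

    ActionSet parsed;
    Status status = ActionSet::parseActionSetFromString("find,insert,update", &parsed);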
action_type.h | action_type.h | |||
---|---|---|---|---|
skipping to change at line 51 | skipping to change at line 51 | |||
std::string toString() const; | std::string toString() const; | |||
// Takes the string representation of a single action type and returns the corresponding | // Takes the string representation of a single action type and returns the corresponding | |||
// ActionType enum. | // ActionType enum. | |||
static Status parseActionFromString(const std::string& actionString, ActionType* result); | static Status parseActionFromString(const std::string& actionString, ActionType* result); | |||
// Takes an ActionType and returns the string representation | // Takes an ActionType and returns the string representation | |||
static std::string actionToString(const ActionType& action); | static std::string actionToString(const ActionType& action); | |||
static const ActionType addShard; | static const ActionType addShard; | |||
static const ActionType anyAction; | ||||
static const ActionType applicationMessage; | static const ActionType applicationMessage; | |||
static const ActionType auditLogRotate; | static const ActionType auditLogRotate; | |||
static const ActionType authCheck; | static const ActionType authCheck; | |||
static const ActionType authSchemaUpgrade; | ||||
static const ActionType authenticate; | static const ActionType authenticate; | |||
static const ActionType captrunc; | static const ActionType captrunc; | |||
static const ActionType changeAnyCustomData; | static const ActionType changeCustomData; | |||
static const ActionType changeAnyPassword; | static const ActionType changePassword; | |||
static const ActionType changeOwnPassword; | static const ActionType changeOwnPassword; | |||
static const ActionType changeOwnCustomData; | static const ActionType changeOwnCustomData; | |||
static const ActionType clean; | static const ActionType clean; | |||
static const ActionType cleanupOrphaned; | static const ActionType cleanupOrphaned; | |||
static const ActionType clone; | ||||
static const ActionType cloneCollectionLocalSource; | ||||
static const ActionType cloneCollectionTarget; | ||||
static const ActionType closeAllDatabases; | static const ActionType closeAllDatabases; | |||
static const ActionType collMod; | static const ActionType collMod; | |||
static const ActionType collStats; | static const ActionType collStats; | |||
static const ActionType compact; | static const ActionType compact; | |||
static const ActionType connPoolStats; | static const ActionType connPoolStats; | |||
static const ActionType connPoolSync; | static const ActionType connPoolSync; | |||
static const ActionType convertToCapped; | static const ActionType convertToCapped; | |||
static const ActionType copyDBTarget; | ||||
static const ActionType cpuProfiler; | static const ActionType cpuProfiler; | |||
static const ActionType createCollection; | static const ActionType createCollection; | |||
static const ActionType createDatabase; | static const ActionType createDatabase; | |||
static const ActionType createIndex; | static const ActionType createIndex; | |||
static const ActionType createRole; | static const ActionType createRole; | |||
static const ActionType createUser; | static const ActionType createUser; | |||
static const ActionType cursorInfo; | static const ActionType cursorInfo; | |||
static const ActionType dbHash; | static const ActionType dbHash; | |||
static const ActionType dbStats; | static const ActionType dbStats; | |||
static const ActionType diagLogging; | static const ActionType diagLogging; | |||
static const ActionType dropAllRolesForDatabase; | static const ActionType dropAllRolesFromDatabase; | |||
static const ActionType dropAllUsersFromDatabase; | static const ActionType dropAllUsersFromDatabase; | |||
static const ActionType dropCollection; | static const ActionType dropCollection; | |||
static const ActionType dropDatabase; | static const ActionType dropDatabase; | |||
static const ActionType dropIndex; | static const ActionType dropIndex; | |||
static const ActionType dropRole; | static const ActionType dropRole; | |||
static const ActionType dropUser; | static const ActionType dropUser; | |||
static const ActionType emptycapped; | static const ActionType emptycapped; | |||
static const ActionType enableProfiler; | ||||
static const ActionType enableSharding; | static const ActionType enableSharding; | |||
static const ActionType find; | static const ActionType find; | |||
static const ActionType flushRouterConfig; | static const ActionType flushRouterConfig; | |||
static const ActionType fsync; | static const ActionType fsync; | |||
static const ActionType getCmdLineOpts; | static const ActionType getCmdLineOpts; | |||
static const ActionType getLog; | static const ActionType getLog; | |||
static const ActionType getParameter; | static const ActionType getParameter; | |||
static const ActionType getShardMap; | static const ActionType getShardMap; | |||
static const ActionType getShardVersion; | static const ActionType getShardVersion; | |||
static const ActionType grantAnyRole; | static const ActionType grantRole; | |||
static const ActionType grantPrivilegesToRole; | static const ActionType grantPrivilegesToRole; | |||
static const ActionType grantRolesToRole; | static const ActionType grantRolesToRole; | |||
static const ActionType grantRolesToUser; | static const ActionType grantRolesToUser; | |||
static const ActionType handshake; | static const ActionType handshake; | |||
static const ActionType hostInfo; | static const ActionType hostInfo; | |||
static const ActionType indexRead; | ||||
static const ActionType indexStats; | static const ActionType indexStats; | |||
static const ActionType inprog; | static const ActionType inprog; | |||
static const ActionType insert; | static const ActionType insert; | |||
static const ActionType invalidateUserCache; | static const ActionType invalidateUserCache; | |||
static const ActionType killCursors; | static const ActionType killCursors; | |||
static const ActionType killop; | static const ActionType killop; | |||
static const ActionType listDatabases; | static const ActionType listDatabases; | |||
static const ActionType listShards; | static const ActionType listShards; | |||
static const ActionType logRotate; | static const ActionType logRotate; | |||
static const ActionType mapReduceShardedFinish; | static const ActionType mapReduceShardedFinish; | |||
static const ActionType mergeChunks; | ||||
static const ActionType moveChunk; | static const ActionType moveChunk; | |||
static const ActionType movePrimary; | ||||
static const ActionType netstat; | static const ActionType netstat; | |||
static const ActionType profileEnable; | ||||
static const ActionType profileRead; | ||||
static const ActionType reIndex; | static const ActionType reIndex; | |||
static const ActionType remove; | static const ActionType remove; | |||
static const ActionType removeShard; | static const ActionType removeShard; | |||
static const ActionType renameCollection; | static const ActionType renameCollection; | |||
static const ActionType renameCollectionSameDB; | static const ActionType renameCollectionSameDB; | |||
static const ActionType repairDatabase; | static const ActionType repairDatabase; | |||
static const ActionType replSetConfigure; | ||||
static const ActionType replSetElect; | static const ActionType replSetElect; | |||
static const ActionType replSetFreeze; | ||||
static const ActionType replSetFresh; | static const ActionType replSetFresh; | |||
static const ActionType replSetGetRBID; | static const ActionType replSetGetRBID; | |||
static const ActionType replSetGetStatus; | static const ActionType replSetGetStatus; | |||
static const ActionType replSetHeartbeat; | static const ActionType replSetHeartbeat; | |||
static const ActionType replSetInitiate; | ||||
static const ActionType replSetMaintenance; | ||||
static const ActionType replSetReconfig; | static const ActionType replSetReconfig; | |||
static const ActionType replSetStepDown; | static const ActionType replSetStateChange; | |||
static const ActionType replSetSyncFrom; | ||||
static const ActionType replSetUpdatePosition; | static const ActionType replSetUpdatePosition; | |||
static const ActionType resync; | static const ActionType resync; | |||
static const ActionType revokeAnyRole; | static const ActionType revokeRole; | |||
static const ActionType revokePrivilegesFromRole; | static const ActionType revokePrivilegesFromRole; | |||
static const ActionType revokeRolesFromRole; | static const ActionType revokeRolesFromRole; | |||
static const ActionType revokeRolesFromUser; | static const ActionType revokeRolesFromUser; | |||
static const ActionType serverStatus; | static const ActionType serverStatus; | |||
static const ActionType setParameter; | static const ActionType setParameter; | |||
static const ActionType setShardVersion; | static const ActionType setShardVersion; | |||
static const ActionType shardCollection; | ||||
static const ActionType shardingState; | static const ActionType shardingState; | |||
static const ActionType shutdown; | static const ActionType shutdown; | |||
static const ActionType split; | ||||
static const ActionType splitChunk; | static const ActionType splitChunk; | |||
static const ActionType splitVector; | static const ActionType splitVector; | |||
static const ActionType storageDetails; | static const ActionType storageDetails; | |||
static const ActionType top; | static const ActionType top; | |||
static const ActionType touch; | static const ActionType touch; | |||
static const ActionType unlock; | static const ActionType unlock; | |||
static const ActionType unsetSharding; | static const ActionType unsetSharding; | |||
static const ActionType update; | static const ActionType update; | |||
static const ActionType updateRole; | static const ActionType updateRole; | |||
static const ActionType updateUser; | static const ActionType updateUser; | |||
static const ActionType userAdmin; | ||||
static const ActionType userAdminV1; | ||||
static const ActionType validate; | static const ActionType validate; | |||
static const ActionType viewRole; | static const ActionType viewRole; | |||
static const ActionType viewUser; | static const ActionType viewUser; | |||
static const ActionType writebacklisten; | static const ActionType writebacklisten; | |||
static const ActionType writeBacksQueued; | static const ActionType writeBacksQueued; | |||
static const ActionType _migrateClone; | static const ActionType _migrateClone; | |||
static const ActionType _recvChunkAbort; | static const ActionType _recvChunkAbort; | |||
static const ActionType _recvChunkCommit; | static const ActionType _recvChunkCommit; | |||
static const ActionType _recvChunkStart; | static const ActionType _recvChunkStart; | |||
static const ActionType _recvChunkStatus; | static const ActionType _recvChunkStatus; | |||
static const ActionType _transferMods; | static const ActionType _transferMods; | |||
enum ActionTypeIdentifier { | enum ActionTypeIdentifier { | |||
addShardValue, | addShardValue, | |||
anyActionValue, | ||||
applicationMessageValue, | applicationMessageValue, | |||
auditLogRotateValue, | auditLogRotateValue, | |||
authCheckValue, | authCheckValue, | |||
authSchemaUpgradeValue, | ||||
authenticateValue, | authenticateValue, | |||
captruncValue, | captruncValue, | |||
changeAnyCustomDataValue, | changeCustomDataValue, | |||
changeAnyPasswordValue, | changePasswordValue, | |||
changeOwnPasswordValue, | changeOwnPasswordValue, | |||
changeOwnCustomDataValue, | changeOwnCustomDataValue, | |||
cleanValue, | cleanValue, | |||
cleanupOrphanedValue, | cleanupOrphanedValue, | |||
cloneValue, | ||||
cloneCollectionLocalSourceValue, | ||||
cloneCollectionTargetValue, | ||||
closeAllDatabasesValue, | closeAllDatabasesValue, | |||
collModValue, | collModValue, | |||
collStatsValue, | collStatsValue, | |||
compactValue, | compactValue, | |||
connPoolStatsValue, | connPoolStatsValue, | |||
connPoolSyncValue, | connPoolSyncValue, | |||
convertToCappedValue, | convertToCappedValue, | |||
copyDBTargetValue, | ||||
cpuProfilerValue, | cpuProfilerValue, | |||
createCollectionValue, | createCollectionValue, | |||
createDatabaseValue, | createDatabaseValue, | |||
createIndexValue, | createIndexValue, | |||
createRoleValue, | createRoleValue, | |||
createUserValue, | createUserValue, | |||
cursorInfoValue, | cursorInfoValue, | |||
dbHashValue, | dbHashValue, | |||
dbStatsValue, | dbStatsValue, | |||
diagLoggingValue, | diagLoggingValue, | |||
dropAllRolesForDatabaseValue, | dropAllRolesFromDatabaseValue, | |||
dropAllUsersFromDatabaseValue, | dropAllUsersFromDatabaseValue, | |||
dropCollectionValue, | dropCollectionValue, | |||
dropDatabaseValue, | dropDatabaseValue, | |||
dropIndexValue, | dropIndexValue, | |||
dropRoleValue, | dropRoleValue, | |||
dropUserValue, | dropUserValue, | |||
emptycappedValue, | emptycappedValue, | |||
enableProfilerValue, | ||||
enableShardingValue, | enableShardingValue, | |||
findValue, | findValue, | |||
flushRouterConfigValue, | flushRouterConfigValue, | |||
fsyncValue, | fsyncValue, | |||
getCmdLineOptsValue, | getCmdLineOptsValue, | |||
getLogValue, | getLogValue, | |||
getParameterValue, | getParameterValue, | |||
getShardMapValue, | getShardMapValue, | |||
getShardVersionValue, | getShardVersionValue, | |||
grantAnyRoleValue, | grantRoleValue, | |||
grantPrivilegesToRoleValue, | grantPrivilegesToRoleValue, | |||
grantRolesToRoleValue, | grantRolesToRoleValue, | |||
grantRolesToUserValue, | grantRolesToUserValue, | |||
handshakeValue, | handshakeValue, | |||
hostInfoValue, | hostInfoValue, | |||
indexReadValue, | ||||
indexStatsValue, | indexStatsValue, | |||
inprogValue, | inprogValue, | |||
insertValue, | insertValue, | |||
invalidateUserCacheValue, | invalidateUserCacheValue, | |||
killCursorsValue, | killCursorsValue, | |||
killopValue, | killopValue, | |||
listDatabasesValue, | listDatabasesValue, | |||
listShardsValue, | listShardsValue, | |||
logRotateValue, | logRotateValue, | |||
mapReduceShardedFinishValue, | mapReduceShardedFinishValue, | |||
mergeChunksValue, | ||||
moveChunkValue, | moveChunkValue, | |||
movePrimaryValue, | ||||
netstatValue, | netstatValue, | |||
profileEnableValue, | ||||
profileReadValue, | ||||
reIndexValue, | reIndexValue, | |||
removeValue, | removeValue, | |||
removeShardValue, | removeShardValue, | |||
renameCollectionValue, | renameCollectionValue, | |||
renameCollectionSameDBValue, | renameCollectionSameDBValue, | |||
repairDatabaseValue, | repairDatabaseValue, | |||
replSetConfigureValue, | ||||
replSetElectValue, | replSetElectValue, | |||
replSetFreezeValue, | ||||
replSetFreshValue, | replSetFreshValue, | |||
replSetGetRBIDValue, | replSetGetRBIDValue, | |||
replSetGetStatusValue, | replSetGetStatusValue, | |||
replSetHeartbeatValue, | replSetHeartbeatValue, | |||
replSetInitiateValue, | ||||
replSetMaintenanceValue, | ||||
replSetReconfigValue, | replSetReconfigValue, | |||
replSetStepDownValue, | replSetStateChangeValue, | |||
replSetSyncFromValue, | ||||
replSetUpdatePositionValue, | replSetUpdatePositionValue, | |||
resyncValue, | resyncValue, | |||
revokeAnyRoleValue, | revokeRoleValue, | |||
revokePrivilegesFromRoleValue, | revokePrivilegesFromRoleValue, | |||
revokeRolesFromRoleValue, | revokeRolesFromRoleValue, | |||
revokeRolesFromUserValue, | revokeRolesFromUserValue, | |||
serverStatusValue, | serverStatusValue, | |||
setParameterValue, | setParameterValue, | |||
setShardVersionValue, | setShardVersionValue, | |||
shardCollectionValue, | ||||
shardingStateValue, | shardingStateValue, | |||
shutdownValue, | shutdownValue, | |||
splitValue, | ||||
splitChunkValue, | splitChunkValue, | |||
splitVectorValue, | splitVectorValue, | |||
storageDetailsValue, | storageDetailsValue, | |||
topValue, | topValue, | |||
touchValue, | touchValue, | |||
unlockValue, | unlockValue, | |||
unsetShardingValue, | unsetShardingValue, | |||
updateValue, | updateValue, | |||
updateRoleValue, | updateRoleValue, | |||
updateUserValue, | updateUserValue, | |||
userAdminValue, | ||||
userAdminV1Value, | ||||
validateValue, | validateValue, | |||
viewRoleValue, | viewRoleValue, | |||
viewUserValue, | viewUserValue, | |||
writebacklistenValue, | writebacklistenValue, | |||
writeBacksQueuedValue, | writeBacksQueuedValue, | |||
_migrateCloneValue, | _migrateCloneValue, | |||
_recvChunkAbortValue, | _recvChunkAbortValue, | |||
_recvChunkCommitValue, | _recvChunkCommitValue, | |||
_recvChunkStartValue, | _recvChunkStartValue, | |||
_recvChunkStatusValue, | _recvChunkStatusValue, | |||
End of changes. 40 change blocks. | ||||
46 lines changed or deleted | 20 lines changed or added | |||
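Despite the long list of renamed and added constants, the parsing helpers declared near the top of the class are unchanged. A small round-trip sketch, assuming the string form of an action matches its constant name (e.g. "find" for ActionType::find):

    ActionType parsed;
    Status status = ActionType::parseActionFromString("find", &parsed);
    if (status.isOK()) {
        std::string name = ActionType::actionToString(parsed);   // expected: "find"
    }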
and_common-inl.h | and_common-inl.h | |||
---|---|---|---|---|
skipping to change at line 53 | skipping to change at line 53 | |||
for (size_t i = 0; i < src->keyData.size(); ++i) { | for (size_t i = 0; i < src->keyData.size(); ++i) { | |||
bool found = false; | bool found = false; | |||
for (size_t j = 0; j < dest->keyData.size(); ++j) { | for (size_t j = 0; j < dest->keyData.size(); ++j) { | |||
if (dest->keyData[j].indexKeyPattern == src->keyData[i].indexKeyPattern) { | if (dest->keyData[j].indexKeyPattern == src->keyData[i].indexKeyPattern) { | |||
found = true; | found = true; | |||
break; | break; | |||
} | } | |||
} | } | |||
if (!found) { dest->keyData.push_back(src->keyData[i]); } | if (!found) { dest->keyData.push_back(src->keyData[i]); } | |||
} | } | |||
// Merge computed data. | ||||
if (!dest->hasComputed(WSM_COMPUTED_TEXT_SCORE) && src->hasComputed(WSM_COMPUTED_TEXT_SCORE)) { | ||||
    dest->addComputed(src->getComputed(WSM_COMPUTED_TEXT_SCORE)->clone()); | ||||
} | ||||
if (!dest->hasComputed(WSM_COMPUTED_GEO_DISTANCE) && src->hasComputed(WSM_COMPUTED_GEO_DISTANCE)) { | ||||
    dest->addComputed(src->getComputed(WSM_COMPUTED_GEO_DISTANCE)->clone()); | ||||
} | ||||
} | } | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 13 lines changed or added | |||
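The block added above copies computed annotations (text score, geo distance) from the source WorkingSetMember onto the destination during an AND merge, but only when the destination does not already carry them. A self-contained illustration of that "fill in missing annotations" pattern, with generic names standing in for the WorkingSetMember API:

    #include <map>
    #include <string>

    typedef std::map<std::string, double> Annotations;   // stand-in for computed data

    // Copy any annotation the destination is missing; keep its own value otherwise.
    void mergeComputed(Annotations& dest, const Annotations& src) {
        for (Annotations::const_iterator it = src.begin(); it != src.end(); ++it) {
            if (dest.find(it->first) == dest.end()) {
                dest.insert(*it);
            }
        }
    }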
assert_util.h | assert_util.h | |||
---|---|---|---|---|
skipping to change at line 116 | skipping to change at line 116 | |||
_ei.msg = str + causedBy( _ei.msg ); | _ei.msg = str + causedBy( _ei.msg ); | |||
} | } | |||
// Utilities for the migration to Status objects | // Utilities for the migration to Status objects | |||
static ErrorCodes::Error convertExceptionCode(int exCode); | static ErrorCodes::Error convertExceptionCode(int exCode); | |||
Status toStatus(const std::string& context) const { | Status toStatus(const std::string& context) const { | |||
return Status(convertExceptionCode(getCode()), context + causedBy(*this)); | return Status(convertExceptionCode(getCode()), context + causedBy(*this)); | |||
} | } | |||
Status toStatus() const { | Status toStatus() const { | |||
return Status(convertExceptionCode(getCode()), this->toString()); | return Status(convertExceptionCode(getCode()), this->what()); | |||
} | } | |||
// context when applicable. otherwise "" | // context when applicable. otherwise "" | |||
std::string _shard; | std::string _shard; | |||
virtual std::string toString() const; | virtual std::string toString() const; | |||
const ExceptionInfo& getInfo() const { return _ei; } | const ExceptionInfo& getInfo() const { return _ei; } | |||
private: | private: | |||
static void traceIfNeeded( const DBException& e ); | static void traceIfNeeded( const DBException& e ); | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 1 lines changed or added | |||
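The right-hand change swaps toString() for what() when building the Status message. Either way, the usual pattern at a Status-returning boundary looks like the sketch below; workThatMayThrow() is a hypothetical helper, not part of this header:

    Status doWork() {
        try {
            workThatMayThrow();               // assumed to throw DBException on error
            return Status::OK();
        }
        catch (const DBException& ex) {
            return ex.toStatus("while doing work");   // context + causedBy(ex) message
        }
    }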
audit.h | audit.h | |||
---|---|---|---|---|
skipping to change at line 166 | skipping to change at line 166 | |||
bool isMulti, | bool isMulti, | |||
ErrorCodes::Error result); | ErrorCodes::Error result); | |||
/** | /** | |||
* Logs the result of a createUser command. | * Logs the result of a createUser command. | |||
*/ | */ | |||
void logCreateUser(ClientBasic* client, | void logCreateUser(ClientBasic* client, | |||
const UserName& username, | const UserName& username, | |||
bool password, | bool password, | |||
const BSONObj* customData, | const BSONObj* customData, | |||
const std::vector<User::RoleData>& roles); | const std::vector<RoleName>& roles); | |||
/** | /** | |||
* Logs the result of a dropUser command. | * Logs the result of a dropUser command. | |||
*/ | */ | |||
void logDropUser(ClientBasic* client, | void logDropUser(ClientBasic* client, | |||
const UserName& username); | const UserName& username); | |||
/** | /** | |||
* Logs the result of a dropAllUsersFromDatabase command. | * Logs the result of a dropAllUsersFromDatabase command. | |||
*/ | */ | |||
void logDropAllUsersFromDatabase(ClientBasic* client, | void logDropAllUsersFromDatabase(ClientBasic* client, | |||
const StringData& dbname); | const StringData& dbname); | |||
/** | /** | |||
* Logs the result of a updateUser command. | * Logs the result of a updateUser command. | |||
*/ | */ | |||
void logUpdateUser(ClientBasic* client, | void logUpdateUser(ClientBasic* client, | |||
const UserName& username, | const UserName& username, | |||
bool password, | bool password, | |||
const BSONObj* customData, | const BSONObj* customData, | |||
const std::vector<User::RoleData>* roles); | const std::vector<RoleName>* roles); | |||
/** | /** | |||
* Logs the result of a grantRolesToUser command. | * Logs the result of a grantRolesToUser command. | |||
*/ | */ | |||
void logGrantRolesToUser(ClientBasic* client, | void logGrantRolesToUser(ClientBasic* client, | |||
const UserName& username, | const UserName& username, | |||
const std::vector<RoleName>& roles); | const std::vector<RoleName>& roles); | |||
/** | /** | |||
* Logs the result of a revokeRolesFromUser command. | * Logs the result of a revokeRolesFromUser command. | |||
skipping to change at line 228 | skipping to change at line 228 | |||
/** | /** | |||
* Logs the result of a dropRole command. | * Logs the result of a dropRole command. | |||
*/ | */ | |||
void logDropRole(ClientBasic* client, | void logDropRole(ClientBasic* client, | |||
const RoleName& role); | const RoleName& role); | |||
/** | /** | |||
* Logs the result of a dropAllRolesForDatabase command. | * Logs the result of a dropAllRolesForDatabase command. | |||
*/ | */ | |||
void logDropAllRolesForDatabase(ClientBasic* client, | void logDropAllRolesFromDatabase(ClientBasic* client, | |||
const StringData& dbname); | const StringData& dbname); | |||
/** | /** | |||
* Logs the result of a grantRolesToRole command. | * Logs the result of a grantRolesToRole command. | |||
*/ | */ | |||
void logGrantRolesToRole(ClientBasic* client, | void logGrantRolesToRole(ClientBasic* client, | |||
const RoleName& role, | const RoleName& role, | |||
const std::vector<RoleName>& roles); | const std::vector<RoleName>& roles); | |||
/** | /** | |||
* Logs the result of a revokeRolesFromRole command. | * Logs the result of a revokeRolesFromRole command. | |||
skipping to change at line 278 | skipping to change at line 278 | |||
*/ | */ | |||
void logApplicationMessage(ClientBasic* client, | void logApplicationMessage(ClientBasic* client, | |||
const StringData& msg); | const StringData& msg); | |||
/** | /** | |||
* Logs the result of a shutdown command. | * Logs the result of a shutdown command. | |||
*/ | */ | |||
void logShutdown(ClientBasic* client); | void logShutdown(ClientBasic* client); | |||
/** | /** | |||
* Logs the result of an AuditLogRotate command. | ||||
*/ | ||||
void logAuditLogRotate(ClientBasic* client, | ||||
const StringData& file); | ||||
/** | ||||
* Logs the result of a createIndex command. | * Logs the result of a createIndex command. | |||
*/ | */ | |||
void logCreateIndex(ClientBasic* client, | void logCreateIndex(ClientBasic* client, | |||
const BSONObj* indexSpec, | const BSONObj* indexSpec, | |||
const StringData& indexname, | const StringData& indexname, | |||
const StringData& nsname); | const StringData& nsname); | |||
/** | /** | |||
* Logs the result of a createCollection command. | * Logs the result of a createCollection command. | |||
*/ | */ | |||
skipping to change at line 328 | skipping to change at line 322 | |||
*/ | */ | |||
void logDropDatabase(ClientBasic* client, | void logDropDatabase(ClientBasic* client, | |||
const StringData& dbname); | const StringData& dbname); | |||
/** | /** | |||
* Logs a collection rename event. | * Logs a collection rename event. | |||
*/ | */ | |||
void logRenameCollection(ClientBasic* client, | void logRenameCollection(ClientBasic* client, | |||
const StringData& source, | const StringData& source, | |||
const StringData& target); | const StringData& target); | |||
/** | ||||
* Logs the result of a enableSharding command. | ||||
*/ | ||||
void logEnableSharding(ClientBasic* client, | ||||
const StringData& dbname); | ||||
/** | ||||
* Logs the result of a addShard command. | ||||
*/ | ||||
void logAddShard(ClientBasic* client, | ||||
const StringData& name, | ||||
const std::string& servers, | ||||
long long maxsize); | ||||
/** | ||||
* Logs the result of a removeShard command. | ||||
*/ | ||||
void logRemoveShard(ClientBasic* client, | ||||
const StringData& shardname); | ||||
/** | ||||
* Logs the result of a shardCollection command. | ||||
*/ | ||||
void logShardCollection(ClientBasic* client, | ||||
const StringData& ns, | ||||
const BSONObj& keyPattern, | ||||
bool unique); | ||||
} // namespace audit | } // namespace audit | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 5 change blocks. | ||||
10 lines changed or deleted | 33 lines changed or added | |||
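The user-management hooks above now take std::vector<RoleName> rather than std::vector<User::RoleData>. A sketch of an updated logCreateUser call; the RoleName/UserName constructor argument order (name, then database) and the example names are assumptions:

    std::vector<RoleName> roles;
    roles.push_back(RoleName("readWrite", "test"));
    audit::logCreateUser(client,                      // current ClientBasic*, assumed in scope
                         UserName("alice", "test"),
                         true,                        // a password was supplied
                         NULL,                        // no custom data document
                         roles);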
auth_index_d.h | auth_index_d.h | |||
---|---|---|---|---|
skipping to change at line 31 | skipping to change at line 31 | |||
* all of the code used other than as permitted herein. If you modify file(s) | * all of the code used other than as permitted herein. If you modify file(s) | |||
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/string_data.h" | ||||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
namespace mongo { | namespace mongo { | |||
namespace authindex { | namespace authindex { | |||
/** | /** | |||
* Ensures that exactly the appropriate indexes are present on system collections supporting | ||||
* authentication and authorization in database "dbname". | ||||
* | ||||
* It is appropriate to call this function on new or existing databases, though it is primarily | ||||
* intended for use on existing databases.  Under no circumstances may it be called on databases | ||||
* with running operations. | ||||
*/ | ||||
void configureSystemIndexes(const StringData& dbname); | ||||
/** | ||||
* Creates the appropriate indexes on _new_ system collections supporting authentication and | * Creates the appropriate indexes on _new_ system collections supporting authentication and | |||
* authorization. | * authorization. | |||
*/ | */ | |||
void createSystemIndexes(const NamespaceString& ns); | void createSystemIndexes(const NamespaceString& ns); | |||
} // namespace authindex | } // namespace authindex | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 2 change blocks. | ||||
14 lines changed or deleted | 0 lines changed or added | |||
authentication_commands.h | authentication_commands.h | |||
---|---|---|---|---|
skipping to change at line 41 | skipping to change at line 41 | |||
#include <string> | #include <string> | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/auth/user_name.h" | #include "mongo/db/auth/user_name.h" | |||
#include "mongo/db/commands.h" | #include "mongo/db/commands.h" | |||
namespace mongo { | namespace mongo { | |||
class CmdAuthenticate : public Command { | class CmdAuthenticate : public Command { | |||
public: | public: | |||
static void disableCommand(); | static void disableAuthMechanism(std::string authMechanism); | |||
virtual bool logTheOp() { | virtual bool logTheOp() { | |||
return false; | return false; | |||
} | } | |||
virtual bool slaveOk() const { | virtual bool slaveOk() const { | |||
return true; | return true; | |||
} | } | |||
virtual LockType locktype() const { return NONE; } | virtual LockType locktype() const { return NONE; } | |||
virtual void help(stringstream& ss) const { ss << "internal"; } | virtual void help(stringstream& ss) const { ss << "internal"; } | |||
virtual void addRequiredPrivileges(const std::string& dbname, | virtual void addRequiredPrivileges(const std::string& dbname, | |||
const BSONObj& cmdObj, | const BSONObj& cmdObj, | |||
std::vector<Privilege>* out) {} // No auth required | std::vector<Privilege>* out) {} // No auth required | |||
virtual void redactForLogging(mutablebson::Document* cmdObj); | ||||
CmdAuthenticate() : Command("authenticate") {} | CmdAuthenticate() : Command("authenticate") {} | |||
bool run(const string& dbname, | bool run(const string& dbname, | |||
BSONObj& cmdObj, | BSONObj& cmdObj, | |||
int options, | int options, | |||
string& errmsg, | string& errmsg, | |||
BSONObjBuilder& result, | BSONObjBuilder& result, | |||
bool fromRepl); | bool fromRepl); | |||
private: | private: | |||
/** | /** | |||
End of changes. 2 change blocks. | ||||
1 lines changed or deleted | 3 lines changed or added | |||
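In the CmdAuthenticate change above, the static hook moves from disabling the whole authenticate command to disabling a single mechanism by name. A one-line sketch; the mechanism string is an example, not something this header defines:

    CmdAuthenticate::disableAuthMechanism("MONGODB-CR");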
authorization_manager.h | authorization_manager.h | |||
---|---|---|---|---|
skipping to change at line 35 | skipping to change at line 35 | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/function.hpp> | #include <boost/function.hpp> | |||
#include <boost/scoped_ptr.hpp> | #include <boost/scoped_ptr.hpp> | |||
#include <boost/thread/condition_variable.hpp> | #include <boost/thread/condition_variable.hpp> | |||
#include <boost/thread/mutex.hpp> | #include <boost/thread/mutex.hpp> | |||
#include <memory> | ||||
#include <string> | #include <string> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/bson/mutable/element.h" | #include "mongo/bson/mutable/element.h" | |||
#include "mongo/db/auth/action_set.h" | #include "mongo/db/auth/action_set.h" | |||
#include "mongo/db/auth/resource_pattern.h" | #include "mongo/db/auth/resource_pattern.h" | |||
#include "mongo/db/auth/role_graph.h" | #include "mongo/db/auth/role_graph.h" | |||
#include "mongo/db/auth/user.h" | #include "mongo/db/auth/user.h" | |||
#include "mongo/db/auth/user_name.h" | #include "mongo/db/auth/user_name.h" | |||
skipping to change at line 77 | skipping to change at line 78 | |||
class AuthorizationManager { | class AuthorizationManager { | |||
MONGO_DISALLOW_COPYING(AuthorizationManager); | MONGO_DISALLOW_COPYING(AuthorizationManager); | |||
public: | public: | |||
// The newly constructed AuthorizationManager takes ownership of "externalState" | // The newly constructed AuthorizationManager takes ownership of "externalState" | |||
explicit AuthorizationManager(AuthzManagerExternalState* externalState); | explicit AuthorizationManager(AuthzManagerExternalState* externalState); | |||
~AuthorizationManager(); | ~AuthorizationManager(); | |||
static const std::string USER_NAME_FIELD_NAME; | static const std::string USER_NAME_FIELD_NAME; | |||
static const std::string USER_SOURCE_FIELD_NAME; | static const std::string USER_DB_FIELD_NAME; | |||
static const std::string ROLE_NAME_FIELD_NAME; | static const std::string ROLE_NAME_FIELD_NAME; | |||
static const std::string ROLE_SOURCE_FIELD_NAME; | static const std::string ROLE_SOURCE_FIELD_NAME; | |||
static const std::string PASSWORD_FIELD_NAME; | static const std::string PASSWORD_FIELD_NAME; | |||
static const std::string V1_USER_NAME_FIELD_NAME; | static const std::string V1_USER_NAME_FIELD_NAME; | |||
static const std::string V1_USER_SOURCE_FIELD_NAME; | static const std::string V1_USER_SOURCE_FIELD_NAME; | |||
static const NamespaceString adminCommandNamespace; | static const NamespaceString adminCommandNamespace; | |||
static const NamespaceString rolesCollectionNamespace; | static const NamespaceString rolesCollectionNamespace; | |||
static const NamespaceString usersAltCollectionNamespace; | ||||
static const NamespaceString usersBackupCollectionNamespace; | ||||
static const NamespaceString usersCollectionNamespace; | static const NamespaceString usersCollectionNamespace; | |||
static const NamespaceString versionCollectionNamespace; | static const NamespaceString versionCollectionNamespace; | |||
/** | ||||
* Query to match the auth schema version document in the versionCollectionNamespace. | ||||
*/ | ||||
static const BSONObj versionDocumentQuery; | ||||
/** | ||||
* Name of the server parameter used to report the auth schema version (via getParameter). | ||||
*/ | ||||
static const std::string schemaVersionServerParameter; | ||||
/** | ||||
* Name of the field in the auth schema version document containing the current schema | ||||
* version. | ||||
*/ | ||||
static const std::string schemaVersionFieldName; | ||||
/** | ||||
* Value used to represent that the schema version is not cached or invalid. | ||||
*/ | ||||
static const int schemaVersionInvalid = 0; | ||||
/** | ||||
* Auth schema version for MongoDB v2.4 and prior. | ||||
*/ | ||||
static const int schemaVersion24 = 1; | ||||
/** | ||||
* Auth schema version for MongoDB v2.6 during the upgrade process.  Same as | ||||
* schemaVersion26Final, except that user documents are found in admin.new.users, and user | ||||
* management commands are disabled. | ||||
*/ | ||||
static const int schemaVersion26Upgrade = 2; | ||||
/** | ||||
* Auth schema version for MongoDB 2.6.  Users are stored in admin.system.users, | ||||
* roles in admin.system.roles. | ||||
*/ | ||||
static const int schemaVersion26Final = 3; | ||||
// TODO: Make the following functions no longer static. | // TODO: Make the following functions no longer static. | |||
/** | /** | |||
* Sets whether or not we allow old style (pre v2.4) privilege documents for this whole | * Sets whether or not we allow old style (pre v2.4) privilege documents for this whole | |||
* server. | * server. Only relevant prior to upgrade. | |||
*/ | */ | |||
static void setSupportOldStylePrivilegeDocuments(bool enabled); | static void setSupportOldStylePrivilegeDocuments(bool enabled); | |||
/** | /** | |||
* Returns true if we allow old style privilege documents for this whole server. | * Returns true if we allow old style privilege documents for this whole server. | |||
*/ | */ | |||
static bool getSupportOldStylePrivilegeDocuments(); | static bool getSupportOldStylePrivilegeDocuments(); | |||
/** | /** | |||
* Takes a vector of privileges and fills the output param "resultArray" with a BSON array | * Takes a vector of privileges and fills the output param "resultArray" with a BSON array | |||
skipping to change at line 132 | skipping to change at line 174 | |||
* Sets whether or not access control enforcement is enabled for this manager. | * Sets whether or not access control enforcement is enabled for this manager. | |||
*/ | */ | |||
void setAuthEnabled(bool enabled); | void setAuthEnabled(bool enabled); | |||
/** | /** | |||
* Returns true if access control is enabled for this manager . | * Returns true if access control is enabled for this manager . | |||
*/ | */ | |||
bool isAuthEnabled() const; | bool isAuthEnabled() const; | |||
/** | /** | |||
* Sets the version number of the authorization system.  Returns an invalid status if the | ||||
* version number is not recognized. | ||||
*/ | ||||
Status setAuthorizationVersion(int version); | ||||
/** | ||||
* Returns the version number of the authorization system. | * Returns the version number of the authorization system. | |||
*/ | */ | |||
int getAuthorizationVersion(); | int getAuthorizationVersion(); | |||
// Returns true if there exists at least one privilege document in the system. | // Returns true if there exists at least one privilege document in the system. | |||
bool hasAnyPrivilegeDocuments() const; | bool hasAnyPrivilegeDocuments() const; | |||
/** | /** | |||
* Updates the auth schema version document to reflect that the system is upgraded to | ||||
* schemaVersion26Final. | ||||
* | ||||
* Do not call if getAuthorizationVersion() reports a value other than schemaVersion26Final. | ||||
*/ | ||||
Status writeAuthSchemaVersionIfNeeded(); | ||||
/** | ||||
* Creates the given user object in the given database. | * Creates the given user object in the given database. | |||
* 'writeConcern' contains the arguments to be passed to getLastError to block for | * 'writeConcern' contains the arguments to be passed to getLastError to block for | |||
* successful completion of the write. | * successful completion of the write. | |||
*/ | */ | |||
Status insertPrivilegeDocument(const std::string& dbname, | Status insertPrivilegeDocument(const std::string& dbname, | |||
const BSONObj& userObj, | const BSONObj& userObj, | |||
const BSONObj& writeConcern) const; | const BSONObj& writeConcern) const; | |||
/** | /** | |||
* Updates the given user object with the given update modifier. | * Updates the given user object with the given update modifier. | |||
skipping to change at line 248 | skipping to change at line 292 | |||
* (indirect roles). In the event that some of this information is inconsistent, the | * (indirect roles). In the event that some of this information is inconsistent, the | |||
* document will contain a "warnings" array, with string messages describing | * document will contain a "warnings" array, with string messages describing | |||
* inconsistencies. | * inconsistencies. | |||
* | * | |||
* If the user does not exist, returns ErrorCodes::UserNotFound. | * If the user does not exist, returns ErrorCodes::UserNotFound. | |||
*/ | */ | |||
Status getUserDescription(const UserName& userName, BSONObj* result ); | Status getUserDescription(const UserName& userName, BSONObj* result ); | |||
/** | /** | |||
* Writes into "result" a document describing the named role and re turns Status::OK(). The | * Writes into "result" a document describing the named role and re turns Status::OK(). The | |||
* description includes the role's in which the named role has membership, a full list of | * description includes the roles in which the named role has membership and a full list of | |||
* the role's privileges, and a full list of the roles of which the named role is a member, | * the roles of which the named role is a member, including those roles memberships held | |||
* including those roles memberships held implicitly through other roles (indirect roles). | * implicitly through other roles (indirect roles). If "showPrivileges" is true, then the | |||
* description documents will also include a full list of the role's privileges. | ||||
* In the event that some of this information is inconsistent, the document will contain a | * In the event that some of this information is inconsistent, the document will contain a | |||
* "warnings" array, with string messages describing inconsistencie s. | * "warnings" array, with string messages describing inconsistencie s. | |||
* | * | |||
* If the role does not exist, returns ErrorCodes::RoleNotFound. | * If the role does not exist, returns ErrorCodes::RoleNotFound. | |||
*/ | */ | |||
Status getRoleDescription(const RoleName& roleName, BSONObj* result); | Status getRoleDescription(const RoleName& roleName, bool showPrivileges, BSONObj* result); | |||
/** | ||||
* Writes into "result" documents describing the roles that are def | ||||
ined on the given | ||||
* database. Each role description document includes the other role | ||||
s in which the role has | ||||
* membership and a full list of the roles of which the named role | ||||
is a member, | ||||
* including those roles memberships held implicitly through other | ||||
roles (indirect roles). | ||||
* If showPrivileges is true, then the description documents will a | ||||
lso include a full list | ||||
* of the role's privileges. If showBuiltinRoles is true, then the | ||||
result array will | ||||
* contain description documents for all the builtin roles for the | ||||
given database, if it | ||||
* is false the result will just include user defined roles. | ||||
* In the event that some of the information in a given role descri | ||||
ption is inconsistent, | ||||
* the document will contain a "warnings" array, with string messag | ||||
es describing | ||||
* inconsistencies. | ||||
*/ | ||||
Status getRoleDescriptionsForDB(const std::string dbname, | ||||
bool showPrivileges, | ||||
bool showBuiltinRoles, | ||||
vector<BSONObj>* result); | ||||
/** | /** | |||
* Returns the User object for the given userName in the out parameter "acquiredUser". | * Returns the User object for the given userName in the out parameter "acquiredUser". | |||
* If the user cache already has a user object for this user, it increments the refcount | * If the user cache already has a user object for this user, it increments the refcount | |||
* on that object and gives out a pointer to it. If no user object for this user name | * on that object and gives out a pointer to it. If no user object for this user name | |||
* exists yet in the cache, reads the user's privilege document from disk, builds up | * exists yet in the cache, reads the user's privilege document from disk, builds up | |||
* a User object, sets the refcount to 1, and gives that out. The returned user may | * a User object, sets the refcount to 1, and gives that out. The returned user may | |||
* be invalid by the time the caller gets access to it. | * be invalid by the time the caller gets access to it. | |||
* The AuthorizationManager retains ownership of the returned User object. | * The AuthorizationManager retains ownership of the returned User object. | |||
* On non-OK Status return values, acquiredUser will not be modified. | * On non-OK Status return values, acquiredUser will not be modified. | |||
*/ | */ | |||
Status acquireUser(const UserName& userName, User** acquiredUser); | Status acquireUser(const UserName& userName, User** acquiredUser); | |||
/** | /** | |||
* Decrements the refcount of the given User object. If the refcount has gone to zero, | * Decrements the refcount of the given User object. If the refcount has gone to zero, | |||
* deletes the User. Caller must stop using its pointer to "user" after calling this. | * deletes the User. Caller must stop using its pointer to "user" after calling this. | |||
*/ | */ | |||
void releaseUser(User* user); | void releaseUser(User* user); | |||
/** | /** | |||
* Returns a User object for a V1-style user with the given "userName" in "*acquiredUser", | ||||
* On success, "acquiredUser" will have any privileges that the named user has on | ||||
* database "dbname". | ||||
* | ||||
* Bumps the returned **acquiredUser's reference count on success. | ||||
*/ | ||||
Status acquireV1UserProbedForDb( | ||||
const UserName& userName, const StringData& dbname, User** acquiredUser); | ||||
/** | ||||
* Marks the given user as invalid and removes it from the user cache. | * Marks the given user as invalid and removes it from the user cache. | |||
*/ | */ | |||
void invalidateUserByName(const UserName& user); | void invalidateUserByName(const UserName& user); | |||
/** | /** | |||
* Invalidates all users whose source is "dbname" and removes them from the user cache. | * Invalidates all users whose source is "dbname" and removes them from the user cache. | |||
*/ | */ | |||
void invalidateUsersFromDB(const std::string& dbname); | void invalidateUsersFromDB(const std::string& dbname); | |||
/** | /** | |||
* Inserts the given user directly into the _userCache. Used to add the internalSecurity
* user into the cache at process startup.
*/
void addInternalUser(User* user);
/** | ||||
* Initializes the authorization manager. Depending on what version the authorization | * Initializes the authorization manager. Depending on what version the authorization
* system is at, this may involve building up the user cache and/or the roles graph. | * system is at, this may involve building up the user cache and/or the roles graph.
* This function should be called once at startup and never again after that. | * Call this function at startup and after resynchronizing a slave/secondary.
*/ | */ | |||
Status initialize(); | Status initialize(); | |||
/** | /** | |||
* Invalidates all of the contents of the user cache. | * Invalidates all of the contents of the user cache. | |||
*/ | */ | |||
void invalidateUserCache(); | void invalidateUserCache(); | |||
/** | /** | |||
* Parses privDoc and fully initializes the user object (credential s, roles, and privileges) | * Parses privDoc and fully initializes the user object (credential s, roles, and privileges) | |||
skipping to change at line 327 | skipping to change at line 394 | |||
*/ | */ | |||
bool tryAcquireAuthzUpdateLock(const StringData& why); | bool tryAcquireAuthzUpdateLock(const StringData& why); | |||
/** | /** | |||
* Releases the lock guarding modifications to persistent authoriza tion data, which must | * Releases the lock guarding modifications to persistent authoriza tion data, which must | |||
* already be held. | * already be held. | |||
*/ | */ | |||
void releaseAuthzUpdateLock(); | void releaseAuthzUpdateLock(); | |||
/** | /** | |||
* Upgrades authorization data stored in collections from the v1 form (one system.users | * Perform one step in the process of upgrading the stored authorization data to the
* collection per database) to the v2 form (a single admin.system.users collection). | * newest schema.
* | *
* Returns Status::OK() if the AuthorizationManager and the admin.system.version collection | * On success, returns Status::OK(), and *isDone will indicate whether there are more
* agree that the system is already upgraded, or if the upgrade completes successfully. | * steps to perform.
* | *
* This method will create and destroy an admin._newusers collection in addition to writing | * If the authorization data is already fully upgraded, returns Status::OK and sets *isDone
* to admin.system.users and admin.system.version. | * to true, so this is safe to call on a fully upgraded system.
* | *
* User information is taken from the in-memory user cache, constructed at start-up. This | * On failure, returns a status other than Status::OK(). In this case, it is typically safe
* is safe to do because MongoD and MongoS build complete copies of the data stored in | * to try again.
* *.system.users at start-up if they detect that the upgrade has not yet completed. | */
*/ | Status upgradeSchemaStep(const BSONObj& writeConcern, bool* isDone);
Status upgradeAuthCollections(); |
/** | /** | |||
* Hook called by replication code to let the AuthorizationManager observe changes | * Hook called by replication code to let the AuthorizationManager observe changes | |||
* to relevant collections. | * to relevant collections. | |||
*/ | */ | |||
void logOp(const char* opstr, | void logOp(const char* opstr, | |||
const char* ns, | const char* ns, | |||
const BSONObj& obj, | const BSONObj& obj, | |||
BSONObj* patt, | BSONObj* patt, | |||
bool* b, | bool* b); | |||
bool fromMigrate, | ||||
const BSONObj* fullObj); | ||||
private: | private: | |||
/** | /** | |||
* Type used to guard accesses and updates to the user cache. | * Type used to guard accesses and updates to the user cache. | |||
*/ | */ | |||
class CacheGuard; | class CacheGuard; | |||
friend class AuthorizationManager::CacheGuard; | friend class AuthorizationManager::CacheGuard; | |||
/** | /** | |||
* Returns the current version number of the authorization system. Should only be called | * Invalidates all User objects in the cache and removes them from the cache.
* when holding _userCacheMutex. | * Should only be called when already holding _cacheMutex.
*/ | */
int _getVersion_inlock() const { return _version; } | void _invalidateUserCache_inlock();
/** | /**
* Invalidates all User objects in the cache and removes them from the cache. | * Fetches user information from a v2-schema user document for the named user,
* Should only be called when already holding _userCacheMutex. | * and stores a pointer to a new user object into *acquiredUser on success.
*/ | */
void _invalidateUserCache_inlock(); | Status _fetchUserV2(const UserName& userName, std::auto_ptr<User>* acquiredUser);
/** | /**
* Initializes the user cache with User objects for every v0 and v1 user document in the | * Fetches user information from a v1-schema user document for the named user, possibly
* system, by reading the system.users collection of every database. If this function | * examining system.users collections from userName.getDB() and admin.system.users in the
* returns a non-ok Status, the _userCache should be considered corrupt and must be | * process. Stores a pointer to a new user object into *acquiredUser on success.
* discarded. This function should be called once at startup (only if the system hasn't yet | */
* been upgraded to V2 user data format) and never again after that. | Status _fetchUserV1(const UserName& userName, std::auto_ptr<User>* acquiredUser);
*/ |
Status _initializeAllV1UserData(); |
static bool _doesSupportOldStylePrivileges; | static bool _doesSupportOldStylePrivileges; | |||
/** | /** | |||
* True if access control enforcement is enabled in this Authorizat ionManager. | * True if access control enforcement is enabled in this Authorizat ionManager. | |||
* | * | |||
* Defaults to false. Changes to its value are not synchronized, s o it should only be set | * Defaults to false. Changes to its value are not synchronized, s o it should only be set | |||
* at initialization-time. | * at initialization-time.
*/ | */ | |||
bool _authEnabled; | bool _authEnabled; | |||
scoped_ptr<AuthzManagerExternalState> _externalState; | ||||
/** | /** | |||
* Integer that represents what format version the privilege documents in the system are. | * Cached value of the authorization schema version.
* The current version is 2. When upgrading to v2.6 or later from v2.4 or prior, the | *
* version is 1. After running the upgrade process to upgrade to the new privilege document | * May be set by acquireUser() and getAuthorizationVersion(). Invalidated by
* format, the version will be 2. | * invalidateUserCache().
* All reads/writes to _version must be done within _userCacheMutex. | *
 | * Reads and writes guarded by CacheGuard.
*/ | */
int _version; | int _version;
scoped_ptr<AuthzManagerExternalState> _externalState; | ||||
/** | /** | |||
* Caches User objects with information about user privileges, to a void the need to | * Caches User objects with information about user privileges, to a void the need to | |||
* go to disk to read user privilege documents whenever possible. Every User object | * go to disk to read user privilege documents whenever possible. Every User object | |||
* has a reference count - the AuthorizationManager must not delete a User object in the | * has a reference count - the AuthorizationManager must not delete a User object in the | |||
* cache unless its reference count is zero. | * cache unless its reference count is zero. | |||
*/ | */ | |||
unordered_map<UserName, User*> _userCache; | unordered_map<UserName, User*> _userCache; | |||
/** | /** | |||
* Current generation of cached data. Bumped every time part of the cache gets
* invalidated.
*/
uint64_t _cacheGeneration;
/** | ||||
* True if there is an update to the _userCache in progress, and th at update is currently in | * True if there is an update to the _userCache in progress, and th at update is currently in | |||
* the "fetch phase", during which it does not hold the _userCacheM utex. | * the "fetch phase", during which it does not hold the _cacheMutex . | |||
* | * | |||
* Manipulated via CacheGuard. | * Manipulated via CacheGuard. | |||
*/ | */ | |||
bool _isFetchPhaseBusy; | bool _isFetchPhaseBusy; | |||
/** | /** | |||
* Protects _userCache, _version and _isFetchPhaseBusy. Manipulated via CacheGuard. | * Protects _userCache, _cacheGeneration, _version and _isFetchPhaseBusy. Manipulated
 | * via CacheGuard.
*/ | */
boost::mutex _userCacheMutex; | boost::mutex _cacheMutex;
/** | /** | |||
* Condition used to signal that it is OK for another CacheGuard to enter a fetch phase. | * Condition used to signal that it is OK for another CacheGuard to enter a fetch phase. | |||
* Manipulated via CacheGuard. | * Manipulated via CacheGuard. | |||
*/ | */ | |||
boost::condition_variable _fetchPhaseIsReady; | boost::condition_variable _fetchPhaseIsReady; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 31 change blocks. | ||||
80 lines changed or deleted | 163 lines changed or added | |||
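The two blocks above pair naturally: acquireUser()/releaseUser() are reference counted, and upgradeSchemaStep() is meant to be driven in a loop until *isDone becomes true. The following is a minimal usage sketch, not code from this release: it assumes an already-initialized AuthorizationManager, a UserName constructed from user and database strings, and an empty BSONObj as an illustrative write concern.

    // Sketch only: look up a user, then step the stored-schema upgrade to completion.
    Status demoAuthzManagerUsage(AuthorizationManager* authzManager) {
        User* user = NULL;
        Status status = authzManager->acquireUser(UserName("admin", "admin"), &user);
        if (status.isOK()) {
            // ... consult 'user' for privilege checks ...
            authzManager->releaseUser(user);   // every acquireUser() must be balanced
        }

        bool isDone = false;
        while (!isDone) {                      // safe to call even on a fully upgraded system
            status = authzManager->upgradeSchemaStep(BSONObj(), &isDone);
            if (!status.isOK())
                return status;                 // per the comment above, typically safe to retry
        }
        return Status::OK();
    }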
authorization_session.h | authorization_session.h | |||
---|---|---|---|---|
skipping to change at line 54 | skipping to change at line 54 | |||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* Contains all the authorization logic for a single client connection. It contains a set of | * Contains all the authorization logic for a single client connection. It contains a set of | |||
* the users which have been authenticated, as well as a set of privile ges that have been | * the users which have been authenticated, as well as a set of privile ges that have been | |||
* granted to those users to perform various actions. | * granted to those users to perform various actions. | |||
* | * | |||
* An AuthorizationSession object is present within every mongo::Client Basic object. | * An AuthorizationSession object is present within every mongo::Client Basic object. | |||
* | * | |||
* Predicate methods for checking authorization may in the worst case acquire read locks | * Users in the _authenticatedUsers cache may get marked as invalid by the AuthorizationManager,
* on the admin database. | * for instance if their privileges are changed by a user or role modification command. At the
 | * beginning of every user-initiated operation startRequest() gets called which updates
 | * the cached information about any users who have been marked as invalid. This guarantees that
 | * every operation looks at one consistent view of each user for every auth check required over
 | * the lifetime of the operation.
*/ | */ | |||
class AuthorizationSession { | class AuthorizationSession { | |||
MONGO_DISALLOW_COPYING(AuthorizationSession); | MONGO_DISALLOW_COPYING(AuthorizationSession); | |||
public: | public: | |||
// Takes ownership of the externalState. | // Takes ownership of the externalState. | |||
explicit AuthorizationSession(AuthzSessionExternalState* externalSt ate); | explicit AuthorizationSession(AuthzSessionExternalState* externalSt ate); | |||
~AuthorizationSession(); | ~AuthorizationSession(); | |||
AuthorizationManager& getAuthorizationManager(); | AuthorizationManager& getAuthorizationManager(); | |||
skipping to change at line 181 | skipping to change at line 185 | |||
// Utility function for | // Utility function for | |||
// isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), action). | // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), action). | |||
bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, A ctionType action); | bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, A ctionType action); | |||
// Utility function for | // Utility function for | |||
// isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), actions). | // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), actions). | |||
bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, c onst ActionSet& actions); | bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, c onst ActionSet& actions); | |||
private: | private: | |||
// If any users authenticated on this session are marked as invalid this updates them with
// up-to-date information. May require a read lock on the "admin" db to read the user data.
void _refreshUserInfoAsNeeded();
// Checks if this connection is authorized for the given Privilege, ignoring whether or not | // Checks if this connection is authorized for the given Privilege, ignoring whether or not | |||
// we should even be doing authorization checks in general. Note: this may acquire a read | // we should even be doing authorization checks in general. Note: this may acquire a read | |||
// lock on the admin database (to update out-of-date user privilege information). | // lock on the admin database (to update out-of-date user privilege information). | |||
bool _isAuthorizedForPrivilege(const Privilege& privilege); | bool _isAuthorizedForPrivilege(const Privilege& privilege); | |||
scoped_ptr<AuthzSessionExternalState> _externalState; | scoped_ptr<AuthzSessionExternalState> _externalState; | |||
// All Users who have been authenticated on this connection | // All Users who have been authenticated on this connection | |||
UserSet _authenticatedUsers; | UserSet _authenticatedUsers; | |||
}; | }; | |||
End of changes. 2 change blocks. | ||||
3 lines changed or deleted | 17 lines changed or added | |||
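Because startRequest() refreshes any invalidated users before an operation begins, callers of AuthorizationSession normally just ask a yes/no question per namespace and action. A hedged sketch follows; the namespace string and ActionType::insert are illustrative choices, not taken from this diff.

    // Sketch only: gate an insert on the session's cached privileges.
    bool canInsertIntoTestFoo(AuthorizationSession* authSession) {
        NamespaceString ns("test.foo");        // example namespace
        return authSession->isAuthorizedForActionsOnNamespace(ns, ActionType::insert);
    }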
authz_manager_external_state.h | authz_manager_external_state.h | |||
---|---|---|---|---|
skipping to change at line 64 | skipping to change at line 64 | |||
virtual ~AuthzManagerExternalState(); | virtual ~AuthzManagerExternalState(); | |||
/** | /** | |||
* Initializes the external state object. Must be called after construction and before | * Initializes the external state object. Must be called after construction and before
* calling other methods. Object may not be used after this method returns something other | * calling other methods. Object may not be used after this method returns something other | |||
* than Status::OK(). | * than Status::OK(). | |||
*/ | */ | |||
virtual Status initialize() = 0; | virtual Status initialize() = 0; | |||
/** | /** | |||
* Retrieves the schema version of the persistent data describing users and roles.
*/ | ||||
virtual Status getStoredAuthorizationVersion(int* outVersion) = 0; | ||||
/** | ||||
* Writes into "result" a document describing the named user and re turns Status::OK(). The | * Writes into "result" a document describing the named user and re turns Status::OK(). The | |||
* description includes the user credentials, if present, the user' s role membership and | * description includes the user credentials, if present, the user' s role membership and | |||
* delegation information, a full list of the user's privileges, an d a full list of the | * delegation information, a full list of the user's privileges, an d a full list of the | |||
* user's roles, including those roles held implicitly through othe r roles (indirect roles). | * user's roles, including those roles held implicitly through othe r roles (indirect roles). | |||
* In the event that some of this information is inconsistent, the document will contain a | * In the event that some of this information is inconsistent, the document will contain a | |||
* "warnings" array, with string messages describing inconsistencie s. | * "warnings" array, with string messages describing inconsistencie s. | |||
* | * | |||
* If the user does not exist, returns ErrorCodes::UserNotFound. | * If the user does not exist, returns ErrorCodes::UserNotFound. | |||
*/ | */ | |||
virtual Status getUserDescription(const UserName& userName, BSONObj * result) = 0; | virtual Status getUserDescription(const UserName& userName, BSONObj * result) = 0; | |||
/** | /** | |||
* Writes into "result" a document describing the named role and re turns Status::OK(). The | * Writes into "result" a document describing the named role and re turns Status::OK(). The | |||
* description includes the role's in which the named role has membership, a full list of | * description includes the roles in which the named role has membership and a full list of
* the role's privileges, and a full list of the roles of which the named role is a member, | * the roles of which the named role is a member, including those roles memberships held
* including those roles memberships held implicitly through other roles (indirect roles). | * implicitly through other roles (indirect roles). If "showPrivileges" is true, then the
 | * description documents will also include a full list of the role's privileges.
* In the event that some of this information is inconsistent, the document will contain a | * In the event that some of this information is inconsistent, the document will contain a
* "warnings" array, with string messages describing inconsistencies. | * "warnings" array, with string messages describing inconsistencies.
* | *
* If the role does not exist, returns ErrorCodes::RoleNotFound. | * If the role does not exist, returns ErrorCodes::RoleNotFound.
*/ | */
virtual Status getRoleDescription(const RoleName& roleName, BSONObj* result) = 0; | virtual Status getRoleDescription(const RoleName& roleName,
 | bool showPrivileges,
 | BSONObj* result) = 0;
/** | /** | |||
* Gets the privilege information document for "userName". authzVersion indicates what | * Writes into "result" documents describing the roles that are defined on the given
* version of the privilege document format is being used, which is needed to know how to | * database. Each role description document includes the other roles in which the role has
* query for the user's privilege document. | * membership and a full list of the roles of which the named role is a member,
* | * including those roles memberships held implicitly through other roles (indirect roles).
 | * If showPrivileges is true, then the description documents will also include a full list
 | * of the role's privileges. If showBuiltinRoles is true, then the result array will
 | * contain description documents for all the builtin roles for the given database, if it
 | * is false the result will just include user defined roles.
 | * In the event that some of the information in a given role description is inconsistent,
 | * the document will contain a "warnings" array, with string messages describing
 | * inconsistencies.
 | */
 | virtual Status getRoleDescriptionsForDB(const std::string dbname,
 | bool showPrivileges,
 | bool showBuiltinRoles,
 | vector<BSONObj>* result) = 0;
 | /**
 | * Gets the privilege document for "userName" stored in the system.users collection of
 | * database "dbname". Useful only for schemaVersion24 user documents. For newer schema
 | * versions, use getUserDescription().
 | *
* On success, returns Status::OK() and stores a shared-ownership copy of the document into | * On success, returns Status::OK() and stores a shared-ownership copy of the document into
* "result". | * "result".
*/ | */
Status getPrivilegeDocument(const UserName& userName, | Status getPrivilegeDocumentV1(
int authzVersion, | const StringData& dbname, const UserName& userName, BSONObj* result);
BSONObj* result); |
/** | /** | |||
* Returns true if there exists at least one privilege document in the system. | * Returns true if there exists at least one privilege document in the system. | |||
*/ | */ | |||
bool hasAnyPrivilegeDocuments(); | bool hasAnyPrivilegeDocuments(); | |||
/** | /** | |||
* Creates the given user object in the given database. | * Creates the given user object in the given database. | |||
* | * | |||
* TODO(spencer): remove dbname argument once users are only writte n into the admin db | * TODO(spencer): remove dbname argument once users are only writte n into the admin db | |||
*/ | */ | |||
virtual Status insertPrivilegeDocument(const std::string& dbname, | Status insertPrivilegeDocument(const std::string& dbname, | |||
const BSONObj& userObj, | const BSONObj& userObj, | |||
const BSONObj& writeConcern) | const BSONObj& writeConcern); | |||
; | ||||
/** | /** | |||
* Updates the given user object with the given update modifier. | * Updates the given user object with the given update modifier. | |||
*/ | */ | |||
virtual Status updatePrivilegeDocument(const UserName& user, | Status updatePrivilegeDocument(const UserName& user, | |||
const BSONObj& updateObj, | const BSONObj& updateObj, | |||
const BSONObj& writeConcern) | const BSONObj& writeConcern); | |||
; | ||||
/** | /** | |||
* Removes users for the given database matching the given query. | * Removes users for the given database matching the given query. | |||
* Writes into *numRemoved the number of user documents that were m odified. | * Writes into *numRemoved the number of user documents that were m odified. | |||
*/ | */ | |||
virtual Status removePrivilegeDocuments(const BSONObj& query, | Status removePrivilegeDocuments(const BSONObj& query, | |||
const BSONObj& writeConcern | const BSONObj& writeConcern, | |||
, | int* numRemoved); | |||
int* numRemoved); | ||||
/** | /** | |||
* Puts into the *dbnames vector the name of every database in the cluster. | * Puts into the *dbnames vector the name of every database in the cluster. | |||
* May take a global lock, so should only be called during startup. | * May take a global lock, so should only be called during startup. | |||
*/ | */ | |||
virtual Status getAllDatabaseNames(std::vector<std::string>* dbname s) = 0; | virtual Status getAllDatabaseNames(std::vector<std::string>* dbname s) = 0; | |||
/** | /** | |||
* Puts into the *privDocs vector every privilege document from the given database's
* system.users collection.
*/
virtual Status getAllV1PrivilegeDocsForDB(const std::string& dbname,
std::vector<BSONObj>* privDocs) = 0;
/** | ||||
* Finds a document matching "query" in "collectionName", and store a shared-ownership | * Finds a document matching "query" in "collectionName", and store a shared-ownership | |||
* copy into "result". | * copy into "result". | |||
* | * | |||
* Returns Status::OK() on success. If no match is found, returns | * Returns Status::OK() on success. If no match is found, returns | |||
* ErrorCodes::NoMatchingDocument. Other errors returned as approp riate. | * ErrorCodes::NoMatchingDocument. Other errors returned as approp riate. | |||
*/ | */ | |||
virtual Status findOne(const NamespaceString& collectionName, | virtual Status findOne(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
BSONObj* result) = 0; | BSONObj* result) = 0; | |||
skipping to change at line 213 | skipping to change at line 230 | |||
/** | /** | |||
* Creates an index with the given pattern on "collectionName". | * Creates an index with the given pattern on "collectionName". | |||
*/ | */ | |||
virtual Status createIndex(const NamespaceString& collectionName, | virtual Status createIndex(const NamespaceString& collectionName, | |||
const BSONObj& pattern, | const BSONObj& pattern, | |||
bool unique, | bool unique, | |||
const BSONObj& writeConcern) = 0; | const BSONObj& writeConcern) = 0; | |||
/** | /** | |||
* Drops the named collection. | * Drops indexes other than the _id index on "collectionName".
*/ | */
virtual Status dropCollection(const NamespaceString& collectionName, | virtual Status dropIndexes(const NamespaceString& collectionName,
const BSONObj& writeConcern) = 0; | const BSONObj& writeConcern) = 0;
/** |
* Renames collection "oldName" to "newName", possibly dropping the previous |
* collection named "newName". |
*/ |
virtual Status renameCollection(const NamespaceString& oldName, |
const NamespaceString& newName, |
const BSONObj& writeConcern) = 0; |
/** |
* Copies the contents of collection "fromName" into "toName". Fails |
* if "toName" is already a collection. |
*/ |
virtual Status copyCollection(const NamespaceString& fromName, |
const NamespaceString& toName, |
const BSONObj& writeConcern) = 0; |
/** | /** | |||
* Tries to acquire the global lock guarding modifications to all p ersistent data related | * Tries to acquire the global lock guarding modifications to all p ersistent data related | |||
* to authorization, namely the admin.system.users, admin.system.ro les, and | * to authorization, namely the admin.system.users, admin.system.ro les, and | |||
* admin.system.version collections. This serializes all writers t o the authorization | * admin.system.version collections. This serializes all writers t o the authorization | |||
* documents, but does not impact readers. | * documents, but does not impact readers. | |||
*/ | */ | |||
virtual bool tryAcquireAuthzUpdateLock(const StringData& why) = 0; | virtual bool tryAcquireAuthzUpdateLock(const StringData& why) = 0; | |||
/** | /** | |||
* Releases the lock guarding modifications to persistent authoriza tion data, which must | * Releases the lock guarding modifications to persistent authoriza tion data, which must | |||
* already be held. | * already be held. | |||
*/ | */ | |||
virtual void releaseAuthzUpdateLock() = 0; | virtual void releaseAuthzUpdateLock() = 0; | |||
virtual void logOp( | virtual void logOp( | |||
const char* op, | const char* op, | |||
const char* ns, | const char* ns, | |||
const BSONObj& o, | const BSONObj& o, | |||
BSONObj* o2, | BSONObj* o2, | |||
bool* b, | bool* b) {} | |||
bool fromMigrateUnused, | ||||
const BSONObj* fullObjUnused) {} | ||||
protected: | protected: | |||
AuthzManagerExternalState(); // This class should never be instantiated directly. | AuthzManagerExternalState(); // This class should never be instantiated directly.
/** | static const long long _authzUpdateLockAcquisitionTimeoutMillis = 5000;
* Queries the userNamespace with the given query and returns the privilegeDocument found |
* in *result. Returns Status::OK if it finds a document matching the query. If it doesn't |
* find a document matching the query, returns a Status with code UserNotFound. Other |
* errors may return other Status codes. |
*/ |
virtual Status _findUser(const std::string& usersNamespace, |
const BSONObj& query, |
BSONObj* result) = 0; |
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 13 change blocks. | ||||
76 lines changed or deleted | 68 lines changed or added | |||
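tryAcquireAuthzUpdateLock() and releaseAuthzUpdateLock() above are a manual acquire/release pair, so call sites typically wrap them in a small scope guard. The class below is a sketch of that pattern and is not part of these headers; it relies only on the two virtual methods declared above.

    // Sketch only: RAII guard for the authorization update lock (hypothetical helper).
    class AuthzUpdateLockGuard {
    public:
        AuthzUpdateLockGuard(AuthzManagerExternalState* state, const StringData& why)
            : _state(state), _locked(state->tryAcquireAuthzUpdateLock(why)) {}
        ~AuthzUpdateLockGuard() {
            if (_locked)
                _state->releaseAuthzUpdateLock();  // release only if the acquire succeeded
        }
        bool locked() const { return _locked; }
    private:
        AuthzManagerExternalState* _state;
        bool _locked;
    };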
authz_manager_external_state_d.h | authz_manager_external_state_d.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/function.hpp> | #include <boost/function.hpp> | |||
#include <boost/thread/mutex.hpp> | #include <boost/thread/mutex.hpp> | |||
#include <string> | #include <string> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/auth/authz_manager_external_state.h" | #include "mongo/db/auth/authz_manager_external_state_local.h" | |||
#include "mongo/db/auth/role_graph.h" | #include "mongo/db/auth/role_graph.h" | |||
#include "mongo/db/auth/user_name.h" | #include "mongo/db/auth/user_name.h" | |||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* The implementation of AuthzManagerExternalState functionality for mo ngod. | * The implementation of AuthzManagerExternalState functionality for mo ngod. | |||
*/ | */ | |||
class AuthzManagerExternalStateMongod : public AuthzManagerExternalState { | class AuthzManagerExternalStateMongod : public AuthzManagerExternalStateLocal {
MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongod); | MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongod); | |||
public: | public: | |||
AuthzManagerExternalStateMongod(); | AuthzManagerExternalStateMongod(); | |||
virtual ~AuthzManagerExternalStateMongod(); | virtual ~AuthzManagerExternalStateMongod(); | |||
virtual Status initialize(); | ||||
virtual Status getUserDescription(const UserName& userName, BSONObj* result); |
virtual Status getRoleDescription(const RoleName& roleName, BSONObj* result); |
virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames); | virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames);
virtual Status getAllV1PrivilegeDocsForDB(const std::string& dbname, |
std::vector<BSONObj>* privDocs); |
virtual Status findOne(const NamespaceString& collectionName, | virtual Status findOne(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
BSONObj* result); | BSONObj* result); | |||
virtual Status query(const NamespaceString& collectionName, | virtual Status query(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
const BSONObj& projection, | const BSONObj& projection, | |||
const boost::function<void(const BSONObj&)>& r esultProcessor); | const boost::function<void(const BSONObj&)>& r esultProcessor); | |||
virtual Status insert(const NamespaceString& collectionName, | virtual Status insert(const NamespaceString& collectionName, | |||
const BSONObj& document, | const BSONObj& document, | |||
const BSONObj& writeConcern); | const BSONObj& writeConcern); | |||
skipping to change at line 88 | skipping to change at line 80 | |||
const BSONObj& writeConcern, | const BSONObj& writeConcern, | |||
int* numUpdated); | int* numUpdated); | |||
virtual Status remove(const NamespaceString& collectionName, | virtual Status remove(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
const BSONObj& writeConcern, | const BSONObj& writeConcern, | |||
int* numRemoved); | int* numRemoved); | |||
virtual Status createIndex(const NamespaceString& collectionName, | virtual Status createIndex(const NamespaceString& collectionName, | |||
const BSONObj& pattern, | const BSONObj& pattern, | |||
bool unique, | bool unique, | |||
const BSONObj& writeConcern); | const BSONObj& writeConcern); | |||
virtual Status dropCollection(const NamespaceString& collectionName, | virtual Status dropIndexes(const NamespaceString& collectionName,
const BSONObj& writeConcern); | const BSONObj& writeConcern);
virtual Status renameCollection(const NamespaceString& oldName, |
const NamespaceString& newName, |
const BSONObj& writeConcern); |
virtual Status copyCollection(const NamespaceString& fromName, |
const NamespaceString& toName, |
const BSONObj& writeConcern); |
virtual bool tryAcquireAuthzUpdateLock(const StringData& why); | virtual bool tryAcquireAuthzUpdateLock(const StringData& why); | |||
virtual void releaseAuthzUpdateLock(); | virtual void releaseAuthzUpdateLock(); | |||
virtual void logOp( | ||||
const char* op, | ||||
const char* ns, | ||||
const BSONObj& o, | ||||
BSONObj* o2, | ||||
bool* b, | ||||
bool fromMigrateUnused, | ||||
const BSONObj* fullObjUnused); | ||||
protected: | ||||
virtual Status _findUser(const string& usersNamespace, | ||||
const BSONObj& query, | ||||
BSONObj* result); | ||||
private: | private: | |||
enum RoleGraphState { | virtual Status _getUserDocument(const UserName& userName, BSONObj* userDoc);
roleGraphStateInitial = 0, |
roleGraphStateConsistent, |
roleGraphStateHasCycle |
}; |
/** |
* Initializes the role graph from the contents of the admin.system.roles collection. |
*/ |
Status _initializeRoleGraph(); |
/** |
* Eventually consistent, in-memory representation of all roles in the system (both |
* user-defined and built-in). Synchronized via _roleGraphMutex. |
*/ |
RoleGraph _roleGraph; |
/** |
* State of _roleGraph, one of "initial", "consistent" and "has cycle". Synchronized via |
* _roleGraphMutex. |
*/ |
RoleGraphState _roleGraphState; |
/** |
* Guards _roleGraphState and _roleGraph. |
*/ |
boost::mutex _roleGraphMutex; |
boost::mutex _authzDataUpdateLock; | boost::timed_mutex _authzDataUpdateLock;
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 8 change blocks. | ||||
69 lines changed or deleted | 7 lines changed or added | |||
authz_manager_external_state_mock.h | authz_manager_external_state_mock.h | |||
---|---|---|---|---|
skipping to change at line 38 | skipping to change at line 38 | |||
#pragma once | #pragma once | |||
#include <boost/function.hpp> | #include <boost/function.hpp> | |||
#include <string> | #include <string> | |||
#include <map> | #include <map> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/auth/authz_manager_external_state.h" | #include "mongo/db/auth/authz_manager_external_state_local.h" | |||
#include "mongo/db/auth/role_graph.h" | #include "mongo/db/auth/role_graph.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
namespace mongo { | namespace mongo { | |||
class AuthorizationManager; | ||||
/** | /** | |||
* Mock of the AuthzManagerExternalState class used only for testing. | * Mock of the AuthzManagerExternalState class used only for testing. | |||
*/ | */ | |||
class AuthzManagerExternalStateMock : public AuthzManagerExternalState { | class AuthzManagerExternalStateMock : public AuthzManagerExternalStateLocal {
MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMock); | MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMock); | |||
public: | public: | |||
AuthzManagerExternalStateMock() {}; | AuthzManagerExternalStateMock();
virtual ~AuthzManagerExternalStateMock(); |
virtual Status initialize(); |
virtual Status getUserDescription(const UserName& userName, BSONObj* result); |
virtual Status getRoleDescription(const RoleName& roleName, BSONObj* result); |
virtual Status insertPrivilegeDocument(const std::string& dbname, |
const BSONObj& userObj, |
const BSONObj& writeConcern); |
virtual Status updatePrivilegeDocument(const UserName& user, |
const BSONObj& updateObj, |
const BSONObj& writeConcern); |
// no-op for the mock |
virtual Status removePrivilegeDocuments(const BSONObj& query, |
const BSONObj& writeConcern, |
int* numRemoved); |
void clearPrivilegeDocuments(); | void setAuthorizationManager(AuthorizationManager* authzManager);
 | void setAuthzVersion(int version);
virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames); | virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames);
virtual Status getAllV1PrivilegeDocsForDB(const std::string& dbname, |
std::vector<BSONObj>* privDocs); |
virtual Status _findUser(const std::string& usersNamespace, |
const BSONObj& query, |
BSONObj* result); |
virtual Status findOne(const NamespaceString& collectionName, | virtual Status findOne(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
BSONObj* result); | BSONObj* result); | |||
virtual Status query(const NamespaceString& collectionName, | virtual Status query(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
const BSONObj& projection, // Currently unused in mock | const BSONObj& projection, // Currently unused in mock | |||
const boost::function<void(const BSONObj&)>& r esultProcessor); | const boost::function<void(const BSONObj&)>& r esultProcessor); | |||
// This implementation does not understand uniqueness constraints. | // This implementation does not understand uniqueness constraints. | |||
skipping to change at line 119 | skipping to change at line 99 | |||
const BSONObj& writeConcern, | const BSONObj& writeConcern, | |||
int* numUpdated); | int* numUpdated); | |||
virtual Status remove(const NamespaceString& collectionName, | virtual Status remove(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
const BSONObj& writeConcern, | const BSONObj& writeConcern, | |||
int* numRemoved); | int* numRemoved); | |||
virtual Status createIndex(const NamespaceString& collectionName, | virtual Status createIndex(const NamespaceString& collectionName, | |||
const BSONObj& pattern, | const BSONObj& pattern, | |||
bool unique, | bool unique, | |||
const BSONObj& writeConcern); | const BSONObj& writeConcern); | |||
virtual Status dropCollection(const NamespaceString& collectionName, | virtual Status dropIndexes(const NamespaceString& collectionName,
const BSONObj& writeConcern); | const BSONObj& writeConcern);
virtual Status renameCollection(const NamespaceString& oldName, |
const NamespaceString& newName, |
const BSONObj& writeConcern); |
virtual Status copyCollection(const NamespaceString& fromName, |
const NamespaceString& toName, |
const BSONObj& writeConcern); |
virtual bool tryAcquireAuthzUpdateLock(const StringData& why); | virtual bool tryAcquireAuthzUpdateLock(const StringData& why); | |||
virtual void releaseAuthzUpdateLock(); | virtual void releaseAuthzUpdateLock(); | |||
Status _findUser(const std::string& usersNamespace, | ||||
const BSONObj& query, | ||||
BSONObj* result); | ||||
std::vector<BSONObj> getCollectionContents(const NamespaceString& c ollectionName); | std::vector<BSONObj> getCollectionContents(const NamespaceString& c ollectionName); | |||
private: | private: | |||
typedef std::vector<BSONObj> BSONObjCollection; | typedef std::vector<BSONObj> BSONObjCollection; | |||
typedef std::map<NamespaceString, BSONObjCollection> NamespaceDocum entMap; | typedef std::map<NamespaceString, BSONObjCollection> NamespaceDocum entMap; | |||
virtual Status _getUserDocument(const UserName& userName, BSONObj* userDoc);
Status _findOneIter(const NamespaceString& collectionName, | Status _findOneIter(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
BSONObjCollection::iterator* result); | BSONObjCollection::iterator* result); | |||
Status _queryVector(const NamespaceString& collectionName, | Status _queryVector(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
std::vector<BSONObjCollection::iterator>* resul t); | std::vector<BSONObjCollection::iterator>* resul t); | |||
AuthorizationManager* _authzManager; // For reporting logOps. | ||||
NamespaceDocumentMap _documents; // Mock database. | NamespaceDocumentMap _documents; // Mock database. | |||
RoleGraph _roleGraph; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 11 change blocks. | ||||
45 lines changed or deleted | 17 lines changed or added | |||
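Since the mock stores its collections in an in-memory NamespaceDocumentMap, a test can seed a document and read it back without touching disk. The sketch below is written under assumptions: it presumes the mock exposes the same insert() overload as the other external-state classes in this diff, and the namespace and document contents are invented test fixtures.

    // Sketch only: round-trip a document through the mock's in-memory store.
    Status demoMockRoundTrip(AuthzManagerExternalStateMock* mock) {
        NamespaceString usersNS("admin.system.users");                  // example namespace
        BSONObj userDoc = BSON("user" << "spencer" << "db" << "test");  // example document
        Status status = mock->insert(usersNS, userDoc, BSONObj());      // empty write concern
        if (!status.isOK())
            return status;

        BSONObj found;
        return mock->findOne(usersNS, BSON("user" << "spencer"), &found);
    }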
authz_manager_external_state_s.h | authz_manager_external_state_s.h | |||
---|---|---|---|---|
skipping to change at line 55 | skipping to change at line 55 | |||
* The implementation of AuthzManagerExternalState functionality for mo ngos. | * The implementation of AuthzManagerExternalState functionality for mo ngos. | |||
*/ | */ | |||
class AuthzManagerExternalStateMongos : public AuthzManagerExternalState { | class AuthzManagerExternalStateMongos : public AuthzManagerExternalState {
MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongos); | MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongos); | |||
public: | public: | |||
AuthzManagerExternalStateMongos(); | AuthzManagerExternalStateMongos(); | |||
virtual ~AuthzManagerExternalStateMongos(); | virtual ~AuthzManagerExternalStateMongos(); | |||
virtual Status initialize(); | virtual Status initialize(); | |||
virtual Status getStoredAuthorizationVersion(int* outVersion); | ||||
virtual Status getUserDescription(const UserName& userName, BSONObj * result); | virtual Status getUserDescription(const UserName& userName, BSONObj * result); | |||
virtual Status getRoleDescription(const RoleName& roleName, BSONObj* result); | virtual Status getRoleDescription(const RoleName& roleName,
 | bool showPrivileges,
 | BSONObj* result);
 | virtual Status getRoleDescriptionsForDB(const std::string dbname,
 | bool showPrivileges,
 | bool showBuiltinRoles,
 | vector<BSONObj>* result);
virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames); | virtual Status getAllDatabaseNames(std::vector<std::string>* dbnames);
virtual Status getAllV1PrivilegeDocsForDB(const std::string& dbname, |
std::vector<BSONObj>* privDocs); |
virtual Status findOne(const NamespaceString& collectionName, | virtual Status findOne(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
BSONObj* result); | BSONObj* result); | |||
virtual Status query(const NamespaceString& collectionName, | virtual Status query(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
const BSONObj& projection, | const BSONObj& projection, | |||
const boost::function<void(const BSONObj&)>& r esultProcessor); | const boost::function<void(const BSONObj&)>& r esultProcessor); | |||
virtual Status insert(const NamespaceString& collectionName, | virtual Status insert(const NamespaceString& collectionName, | |||
const BSONObj& document, | const BSONObj& document, | |||
const BSONObj& writeConcern); | const BSONObj& writeConcern); | |||
skipping to change at line 89 | skipping to change at line 92 | |||
const BSONObj& writeConcern, | const BSONObj& writeConcern, | |||
int* numUpdated); | int* numUpdated); | |||
virtual Status remove(const NamespaceString& collectionName, | virtual Status remove(const NamespaceString& collectionName, | |||
const BSONObj& query, | const BSONObj& query, | |||
const BSONObj& writeConcern, | const BSONObj& writeConcern, | |||
int* numRemoved); | int* numRemoved); | |||
virtual Status createIndex(const NamespaceString& collectionName, | virtual Status createIndex(const NamespaceString& collectionName, | |||
const BSONObj& pattern, | const BSONObj& pattern, | |||
bool unique, | bool unique, | |||
const BSONObj& writeConcern); | const BSONObj& writeConcern); | |||
virtual Status dropCollection(const NamespaceString& collectionName, | virtual Status dropIndexes(const NamespaceString& collectionName,
const BSONObj& writeConcern); | const BSONObj& writeConcern);
virtual Status renameCollection(const NamespaceString& oldName, |
const NamespaceString& newName, |
const BSONObj& writeConcern); |
virtual Status copyCollection(const NamespaceString& fromName, |
const NamespaceString& toName, |
const BSONObj& writeConcern); |
virtual bool tryAcquireAuthzUpdateLock(const StringData& why); | virtual bool tryAcquireAuthzUpdateLock(const StringData& why); | |||
virtual void releaseAuthzUpdateLock(); | virtual void releaseAuthzUpdateLock(); | |||
protected: | ||||
virtual Status _findUser(const string& usersNamespace, | ||||
const BSONObj& query, | ||||
BSONObj* result); | ||||
private: | private: | |||
boost::mutex _distLockGuard; // Guards access to _authzDataUpdateLo ck | boost::mutex _distLockGuard; // Guards access to _authzDataUpdateLo ck | |||
scoped_ptr<ScopedDistributedLock> _authzDataUpdateLock; | scoped_ptr<ScopedDistributedLock> _authzDataUpdateLock; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 5 change blocks. | ||||
22 lines changed or deleted | 10 lines changed or added | |||
authz_session_external_state_mock.h | authz_session_external_state_mock.h | |||
---|---|---|---|---|
skipping to change at line 56 | skipping to change at line 56 | |||
AuthzSessionExternalState(authzManager), _returnValue(false) {} | AuthzSessionExternalState(authzManager), _returnValue(false) {} | |||
virtual bool shouldIgnoreAuthChecks() const { | virtual bool shouldIgnoreAuthChecks() const { | |||
return _returnValue; | return _returnValue; | |||
} | } | |||
void setReturnValueForShouldIgnoreAuthChecks(bool returnValue) { | void setReturnValueForShouldIgnoreAuthChecks(bool returnValue) { | |||
_returnValue = returnValue; | _returnValue = returnValue; | |||
} | } | |||
virtual Status _findUser(const std::string& usersNamespace, | ||||
const BSONObj& query, | ||||
BSONObj* result) const { | ||||
return Status(ErrorCodes::UserNotFound, "User not found"); | ||||
} | ||||
virtual void startRequest() {} | virtual void startRequest() {} | |||
private: | private: | |||
bool _returnValue; | bool _returnValue; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
6 lines changed or deleted | 0 lines changed or added | |||
batch_executor.h | batch_executor.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/s/batched_command_request.h" | #include "mongo/s/write_ops/batched_command_request.h" | |||
#include "mongo/s/batched_command_response.h" | #include "mongo/s/write_ops/batched_command_response.h" | |||
#include "mongo/s/batched_delete_document.h" | #include "mongo/s/write_ops/batched_delete_document.h" | |||
#include "mongo/s/batched_update_document.h" | #include "mongo/s/write_ops/batched_update_document.h" | |||
namespace mongo { | namespace mongo { | |||
class BSONObjBuilder; | class BSONObjBuilder; | |||
class Client; | class Client; | |||
class CurOp; | class CurOp; | |||
class OpCounters; | class OpCounters; | |||
class OpDebug; | class OpDebug; | |||
struct LastError; | struct LastError; | |||
/** | /** | |||
* An instance of WriteBatchExecutor is an object capable of issuing a write batch. | * An instance of WriteBatchExecutor is an object capable of issuing a write batch. | |||
*/ | */ | |||
class WriteBatchExecutor { | class WriteBatchExecutor { | |||
MONGO_DISALLOW_COPYING(WriteBatchExecutor); | MONGO_DISALLOW_COPYING(WriteBatchExecutor); | |||
public: | public: | |||
WriteBatchExecutor( Client* client, OpCounters* opCounters, LastError* le ); | WriteBatchExecutor( const BSONObj& defaultWriteConcern,
 | Client* client,
 | OpCounters* opCounters,
 | LastError* le );
/** | /** | |||
* Issues writes with requested write concern. Fills response with errors if problems | * Issues writes with requested write concern. Fills response with errors if problems | |||
* occur. | * occur. | |||
*/ | */ | |||
void executeBatch( const BatchedCommandRequest& request, BatchedCom mandResponse* response ); | void executeBatch( const BatchedCommandRequest& request, BatchedCom mandResponse* response ); | |||
private: | private: | |||
// TODO: This will change in the near future, but keep like this fo r now | // TODO: This will change in the near future, but keep like this fo r now | |||
skipping to change at line 78 | skipping to change at line 81 | |||
numInserted( 0 ), numUpdated( 0 ), numUpserted( 0 ), nu mDeleted( 0 ) { | numInserted( 0 ), numUpdated( 0 ), numUpserted( 0 ), nu mDeleted( 0 ) { | |||
} | } | |||
int numInserted; | int numInserted; | |||
int numUpdated; | int numUpdated; | |||
int numUpserted; | int numUpserted; | |||
int numDeleted; | int numDeleted; | |||
}; | }; | |||
/** | /** | |||
* Issues a single write. Fills "results" with write result. | * Issues the single write 'itemRef'. Returns true iff write item was issued
* Returns true iff write item was issued successfully and increments stats, populates error | * successfully and increments 'stats'. If the item is an upsert, fills in the
* if not successful. | * 'upsertedID' also, with the '_id' chosen for that update. If the write failed,
 | * returns false and populates 'error'.
*/ | */ | |||
bool applyWriteItem( const BatchItemRef& itemRef, | bool applyWriteItem( const BatchItemRef& itemRef, | |||
WriteStats* stats, | WriteStats* stats, | |||
BSONObj* upsertedID, | ||||
BatchedErrorDetail* error ); | BatchedErrorDetail* error ); | |||
// | // | |||
// Helpers to issue underlying write. | // Helpers to issue underlying write. | |||
// Returns true iff write item was issued successfully and increments stats, populates error | // Returns true iff write item was issued successfully and increments stats, populates error
// if not successful. | // if not successful. | |||
// | // | |||
bool doWrite( const string& ns, | bool doWrite( const string& ns, | |||
const BatchItemRef& itemRef, | const BatchItemRef& itemRef, | |||
CurOp* currentOp, | CurOp* currentOp, | |||
WriteStats* stats, | WriteStats* stats, | |||
BSONObj* upsertedID, | ||||
BatchedErrorDetail* error ); | BatchedErrorDetail* error ); | |||
bool doInsert( const std::string& ns, | bool doInsert( const std::string& ns, | |||
const BSONObj& insertOp, | const BSONObj& insertOp, | |||
CurOp* currentOp, | CurOp* currentOp, | |||
WriteStats* stats, | WriteStats* stats, | |||
BatchedErrorDetail* error ); | BatchedErrorDetail* error ); | |||
bool doUpdate( const std::string& ns, | bool doUpdate( const std::string& ns, | |||
const BatchedUpdateDocument& updateOp, | const BatchedUpdateDocument& updateOp, | |||
CurOp* currentOp, | CurOp* currentOp, | |||
WriteStats* stats, | WriteStats* stats, | |||
BSONObj* upsertedID, | ||||
BatchedErrorDetail* error ); | BatchedErrorDetail* error ); | |||
bool doDelete( const std::string& ns, | bool doDelete( const std::string& ns, | |||
const BatchedDeleteDocument& deleteOp, | const BatchedDeleteDocument& deleteOp, | |||
CurOp* currentOp, | CurOp* currentOp, | |||
WriteStats* stats, | WriteStats* stats, | |||
BatchedErrorDetail* error ); | BatchedErrorDetail* error ); | |||
// Default write concern, if one isn't provided in the batches.
const BSONObj _defaultWriteConcern; | ||||
// Client object to issue writes on behalf of. | // Client object to issue writes on behalf of. | |||
// Not owned here. | // Not owned here. | |||
Client* _client; | Client* _client; | |||
// OpCounters object to update. | // OpCounters object to update. | |||
// Not owned here. | // Not owned here. | |||
OpCounters* _opCounters; | OpCounters* _opCounters; | |||
// LastError object to use for preparing write results. | // LastError object to use for preparing write results. | |||
// Not owned here. | // Not owned here. | |||
LastError* _le; | LastError* _le; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 8 change blocks. | ||||
10 lines changed or deleted | 22 lines changed or added | |||
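WriteBatchExecutor is constructed per request from the default write concern plus the client, op-counter, and last-error context, and is then handed one parsed batch. A rough call-site sketch follows; the surrounding objects are assumed to come from the normal request path, and only the constructor and executeBatch() shown above are used.

    // Sketch only: run one parsed write batch through the executor.
    void runBatch(Client* client,
                  OpCounters* opCounters,
                  LastError* lastError,
                  const BatchedCommandRequest& request) {
        BSONObj defaultWriteConcern;                     // empty: use the server default
        WriteBatchExecutor executor(defaultWriteConcern, client, opCounters, lastError);

        BatchedCommandResponse response;
        executor.executeBatch(request, &response);
        // Any per-item problems are reported through 'response'.
    }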
batch_write_exec.h | batch_write_exec.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/scoped_ptr.hpp> | #include <boost/scoped_ptr.hpp> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/s/batched_command_request.h" | ||||
#include "mongo/s/batched_command_response.h" | ||||
#include "mongo/s/ns_targeter.h" | #include "mongo/s/ns_targeter.h" | |||
#include "mongo/s/multi_command_dispatch.h" | #include "mongo/s/multi_command_dispatch.h" | |||
#include "mongo/s/shard_resolver.h" | ||||
#include "mongo/s/write_ops/batched_command_request.h" | ||||
#include "mongo/s/write_ops/batched_command_response.h" | ||||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* The BatchWriteExec is able to execute client batch write requests, r esulting in a batch | * The BatchWriteExec is able to execute client batch write requests, r esulting in a batch | |||
* response to send back to the client. | * response to send back to the client. | |||
* | * | |||
* There are two main interfaces the exec uses to "run" the batch: | * There are two main interfaces the exec uses to "run" the batch: | |||
* | * | |||
* - the "targeter" used to generate child batch operations to send to particular shards | * - the "targeter" used to generate child batch operations to send to particular shards | |||
skipping to change at line 60 | skipping to change at line 61 | |||
* results | * results | |||
* | * | |||
* Both the targeter and dispatcher are assumed to be dedicated to this particular | * Both the targeter and dispatcher are assumed to be dedicated to this particular | |||
* BatchWriteExec instance. | * BatchWriteExec instance. | |||
* | * | |||
*/ | */ | |||
class BatchWriteExec { | class BatchWriteExec { | |||
MONGO_DISALLOW_COPYING (BatchWriteExec); | MONGO_DISALLOW_COPYING (BatchWriteExec); | |||
public: | public: | |||
BatchWriteExec( NSTargeter* targeter, MultiCommandDispatch* dispatcher ) : | BatchWriteExec( NSTargeter* targeter,
 | ShardResolver* resolver,
 | MultiCommandDispatch* dispatcher ) :
_targeter( targeter ), _dispatcher( dispatcher ) { | _targeter( targeter ), _resolver( resolver ), _dispatcher( dispatcher ) {
} | } | |||
/** | /** | |||
* Executes a client batch write request by sending child batches t o several shard | * Executes a client batch write request by sending child batches t o several shard | |||
* endpoints, and returns a client batch write response. | * endpoints, and returns a client batch write response. | |||
* | * | |||
* Several network round-trips are generally required to execute a write batch. | * Several network round-trips are generally required to execute a write batch. | |||
* | * | |||
* This function does not throw, any errors are reported via the cl ientResponse. | * This function does not throw, any errors are reported via the cl ientResponse. | |||
* | * | |||
skipping to change at line 83 | skipping to change at line 86 | |||
*/ | */ | |||
void executeBatch( const BatchedCommandRequest& clientRequest, | void executeBatch( const BatchedCommandRequest& clientRequest, | |||
BatchedCommandResponse* clientResponse ); | BatchedCommandResponse* clientResponse ); | |||
private: | private: | |||
// Not owned here | // Not owned here | |||
NSTargeter* _targeter; | NSTargeter* _targeter; | |||
// Not owned here | // Not owned here | |||
ShardResolver* _resolver; | ||||
// Not owned here | ||||
MultiCommandDispatch* _dispatcher; | MultiCommandDispatch* _dispatcher; | |||
}; | }; | |||
} | } | |||
End of changes. 4 change blocks. | ||||
5 lines changed or deleted | 11 lines changed or added | |||
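As the comments above note, the exec owns neither the targeter, the resolver, nor the dispatcher; the caller wires dedicated instances together and drives a single batch. A hedged sketch, with the three dependencies treated as opaque pointers supplied elsewhere in the tree:

    // Sketch only: execute one client batch against the shards.
    void dispatchBatch(NSTargeter* targeter,
                       ShardResolver* resolver,
                       MultiCommandDispatch* dispatcher,
                       const BatchedCommandRequest& clientRequest) {
        BatchWriteExec exec(targeter, resolver, dispatcher);

        BatchedCommandResponse clientResponse;
        exec.executeBatch(clientRequest, &clientResponse);
        // executeBatch() does not throw; errors arrive via 'clientResponse'.
    }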
batch_write_op.h | batch_write_op.h | |||
---|---|---|---|---|
skipping to change at line 38 | skipping to change at line 38 | |||
#pragma once | #pragma once | |||
#include <set> | #include <set> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/base/owned_pointer_vector.h" | #include "mongo/base/owned_pointer_vector.h" | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
#include "mongo/s/batched_command_request.h" | ||||
#include "mongo/s/batched_command_response.h" | ||||
#include "mongo/s/batched_error_detail.h" | ||||
#include "mongo/s/ns_targeter.h" | #include "mongo/s/ns_targeter.h" | |||
#include "mongo/s/write_op.h" | #include "mongo/s/write_ops/batched_command_request.h" | |||
#include "mongo/s/write_ops/batched_command_response.h" | ||||
#include "mongo/s/write_ops/batched_error_detail.h" | ||||
#include "mongo/s/write_ops/write_op.h" | ||||
namespace mongo { | namespace mongo { | |||
class TargetedWriteBatch; | class TargetedWriteBatch; | |||
struct ShardError; | struct ShardError; | |||
class TrackedErrors; | class TrackedErrors; | |||
class BatchWriteStats; | ||||
/** | /** | |||
* The BatchWriteOp class manages the lifecycle of a batched write received by mongos. Each | * The BatchWriteOp class manages the lifecycle of a batched write received by mongos. Each | |||
* item in a batch is tracked via a WriteOp, and the function of the BatchWriteOp is to | * item in a batch is tracked via a WriteOp, and the function of the BatchWriteOp is to | |||
* aggregate the dispatched requests and responses for the underlying WriteOps. | * aggregate the dispatched requests and responses for the underlying WriteOps. | |||
* | * | |||
* Overall, the BatchWriteOp lifecycle is similar to the WriteOp lifecycle, with the following | * Overall, the BatchWriteOp lifecycle is similar to the WriteOp lifecycle, with the following | |||
* stages: | * stages: | |||
* | * | |||
* 0) Client request comes in, batch write op is initialized | * 0) Client request comes in, batch write op is initialized | |||
skipping to change at line 80 | skipping to change at line 81 | |||
* 4) If the batch write is not finished, goto 0 | * 4) If the batch write is not finished, goto 0 | |||
* | * | |||
* 5) When all responses come back for all write ops, errors are aggregated and returned in | * 5) When all responses come back for all write ops, errors are aggregated and returned in | |||
* a client response | * a client response | |||
* | * | |||
*/ | */ | |||
class BatchWriteOp { | class BatchWriteOp { | |||
MONGO_DISALLOW_COPYING(BatchWriteOp); | MONGO_DISALLOW_COPYING(BatchWriteOp); | |||
public: | public: | |||
BatchWriteOp() : | BatchWriteOp(); | |||
_clientRequest( NULL ), _writeOps( NULL ) { | ||||
} | ||||
~BatchWriteOp(); | ~BatchWriteOp(); | |||
/** | /** | |||
* Initializes the BatchWriteOp from a client batch request. | * Initializes the BatchWriteOp from a client batch request. | |||
*/ | */ | |||
void initClientRequest( const BatchedCommandRequest* clientRequest ); | void initClientRequest( const BatchedCommandRequest* clientRequest ); | |||
/** | /** | |||
* Targets one or more of the next write ops in this batch op using a NSTargeter. The | * Targets one or more of the next write ops in this batch op using a NSTargeter. The | |||
skipping to change at line 160 | skipping to change at line 159 | |||
// Array of ops being processed from the client request | // Array of ops being processed from the client request | |||
WriteOp* _writeOps; | WriteOp* _writeOps; | |||
// Current outstanding batch op write requests | // Current outstanding batch op write requests | |||
// Not owned here but tracked for reporting | // Not owned here but tracked for reporting | |||
std::set<const TargetedWriteBatch*> _targeted; | std::set<const TargetedWriteBatch*> _targeted; | |||
// Write concern responses from all write batches so far | // Write concern responses from all write batches so far | |||
OwnedPointerVector<ShardError> _wcErrors; | OwnedPointerVector<ShardError> _wcErrors; | |||
// Upserted ids for the whole write batch | ||||
OwnedPointerVector<BatchedUpsertDetail> _upsertedIds; | ||||
// Stats for the entire batch op | ||||
scoped_ptr<BatchWriteStats> _stats; | ||||
}; | ||||
struct BatchWriteStats { | ||||
BatchWriteStats(); | ||||
int numInserted; | ||||
int numUpserted; | ||||
int numUpdated; | ||||
int numDeleted; | ||||
}; | }; | |||
/** | /** | |||
* Data structure representing the information needed to make a batch request, along with | * Data structure representing the information needed to make a batch request, along with | |||
* pointers to where the resulting responses should be placed. | * pointers to where the resulting responses should be placed. | |||
* | * | |||
* Internal support for storage as a doubly-linked list, to allow the TargetedWriteBatch to | * Internal support for storage as a doubly-linked list, to allow the TargetedWriteBatch to | |||
* efficiently be registered for reporting. | * efficiently be registered for reporting. | |||
*/ | */ | |||
class TargetedWriteBatch { | class TargetedWriteBatch { | |||
End of changes. 5 change blocks. | ||||
7 lines changed or deleted | 23 lines changed or added | |||
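A sketch of the driving loop implied by the numbered lifecycle stages above. Only initClientRequest() is visible in this excerpt; the other member names (isFinished, targetBatch, noteBatchResponse, buildClientResponse) are assumptions used for illustration, not declarations taken from the listing.

    // Hypothetical driver for stages 0-5 of the BatchWriteOp lifecycle.
    void driveBatch( const BatchedCommandRequest& request,
                     const NSTargeter& targeter,
                     BatchedCommandResponse* response ) {
        BatchWriteOp batchOp;
        batchOp.initClientRequest( &request );                      // stage 0

        while ( !batchOp.isFinished() ) {                           // stage 4
            std::vector<TargetedWriteBatch*> childBatches;
            batchOp.targetBatch( targeter, false, &childBatches );  // stage 1
            // stages 2/3: dispatch each child batch to its shard endpoint and
            // feed the result back via batchOp.noteBatchResponse( ... );
        }

        batchOp.buildClientResponse( response );                    // stage 5
    }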
batched_command_request.h | batched_command_request.h | |||
---|---|---|---|---|
skipping to change at line 21 | skipping to change at line 21 | |||
* GNU Affero General Public License for more details. | * GNU Affero General Public License for more details. | |||
* | * | |||
* You should have received a copy of the GNU Affero General Public License | * You should have received a copy of the GNU Affero General Public License | |||
* along with this program. If not, see <http://www.gnu.org/licenses/>. | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/s/bson_serializable.h" | #include "mongo/s/bson_serializable.h" | |||
#include "mongo/s/batched_insert_request.h" | #include "mongo/s/write_ops/batched_insert_request.h" | |||
#include "mongo/s/batched_update_request.h" | #include "mongo/s/write_ops/batched_update_request.h" | |||
#include "mongo/s/batched_delete_request.h" | #include "mongo/s/write_ops/batched_delete_request.h" | |||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* This class wraps the different kinds of command requests into a generically usable write | * This class wraps the different kinds of command requests into a generically usable write | |||
* command request. | * command request. | |||
* | * | |||
* Designed to be a very thin wrapper that mimics the underlying requests exactly. Owns the | * Designed to be a very thin wrapper that mimics the underlying requests exactly. Owns the | |||
* wrapped request object once constructed. | * wrapped request object once constructed. | |||
*/ | */ | |||
skipping to change at line 74 | skipping to change at line 74 | |||
// bson serializable interface implementation | // bson serializable interface implementation | |||
// | // | |||
virtual bool isValid( std::string* errMsg ) const; | virtual bool isValid( std::string* errMsg ) const; | |||
virtual BSONObj toBSON() const; | virtual BSONObj toBSON() const; | |||
virtual bool parseBSON( const BSONObj& source, std::string* errMsg ); | virtual bool parseBSON( const BSONObj& source, std::string* errMsg ); | |||
virtual void clear(); | virtual void clear(); | |||
virtual std::string toString() const; | virtual std::string toString() const; | |||
// | // | |||
// individual field accessors | // Batch type accessors | |||
// | // | |||
BatchType getBatchType() const; | BatchType getBatchType() const; | |||
BatchedInsertRequest* getInsertRequest() const; | BatchedInsertRequest* getInsertRequest() const; | |||
BatchedUpdateRequest* getUpdateRequest() const; | BatchedUpdateRequest* getUpdateRequest() const; | |||
BatchedDeleteRequest* getDeleteRequest() const; | BatchedDeleteRequest* getDeleteRequest() const; | |||
// Index creation is also an insert, but a weird one. | ||||
bool isInsertIndexRequest() const; | ||||
bool isUniqueIndexRequest() const; | ||||
std::string getTargetingNS() const; | ||||
BSONObj getIndexKeyPattern() const; | ||||
// | ||||
// individual field accessors | ||||
// | ||||
bool isVerboseWC() const; | ||||
void setNS( const StringData& collName ); | void setNS( const StringData& collName ); | |||
void unsetNS(); | void unsetNS(); | |||
bool isNSSet() const; | bool isNSSet() const; | |||
const std::string& getNS() const; | const std::string& getNS() const; | |||
/** | /** | |||
* Write ops are BSONObjs, whose format depends on the type of request | * Write ops are BSONObjs, whose format depends on the type of request | |||
* TODO: Should be possible to further parse these ops generically if we come up with a | * TODO: Should be possible to further parse these ops generically if we come up with a | |||
* good scheme. | * good scheme. | |||
skipping to change at line 108 | skipping to change at line 119 | |||
void setWriteConcern( const BSONObj& writeConcern ); | void setWriteConcern( const BSONObj& writeConcern ); | |||
void unsetWriteConcern(); | void unsetWriteConcern(); | |||
bool isWriteConcernSet() const; | bool isWriteConcernSet() const; | |||
const BSONObj& getWriteConcern() const; | const BSONObj& getWriteConcern() const; | |||
void setOrdered( bool ordered ); | void setOrdered( bool ordered ); | |||
void unsetOrdered(); | void unsetOrdered(); | |||
bool isOrderedSet() const; | bool isOrderedSet() const; | |||
bool getOrdered() const; | bool getOrdered() const; | |||
void setShardName(const StringData& shardName); | ||||
void unsetShardName(); | ||||
bool isShardNameSet() const; | ||||
const std::string& getShardName() const; | ||||
void setShardVersion( const ChunkVersion& shardVersion ); | void setShardVersion( const ChunkVersion& shardVersion ); | |||
void unsetShardVersion(); | void unsetShardVersion(); | |||
bool isShardVersionSet() const; | bool isShardVersionSet() const; | |||
const ChunkVersion& getShardVersion() const; | const ChunkVersion& getShardVersion() const; | |||
void setSession( long long session ); | void setSession( long long session ); | |||
void unsetSession(); | void unsetSession(); | |||
bool isSessionSet() const; | bool isSessionSet() const; | |||
long long getSession() const; | long long getSession() const; | |||
// | ||||
// Helpers for auth pre-parsing | ||||
// | ||||
/** | ||||
* Helper to determine whether or not there are any upserts in the batch | ||||
*/ | ||||
static bool containsUpserts( const BSONObj& writeCmdObj ); | ||||
/** | ||||
* Helper to extract the namespace being indexed from a raw BSON write command. | ||||
* | ||||
* Returns false with errMsg if the index write command seems invalid. | ||||
* TODO: Remove when we have parsing hooked before authorization | ||||
*/ | ||||
static bool getIndexedNS( const BSONObj& writeCmdObj, | ||||
std::string* nsToIndex, | ||||
std::string* errMsg ); | ||||
private: | private: | |||
BatchType _batchType; | BatchType _batchType; | |||
scoped_ptr<BatchedInsertRequest> _insertReq; | scoped_ptr<BatchedInsertRequest> _insertReq; | |||
scoped_ptr<BatchedUpdateRequest> _updateReq; | scoped_ptr<BatchedUpdateRequest> _updateReq; | |||
scoped_ptr<BatchedDeleteRequest> _deleteReq; | scoped_ptr<BatchedDeleteRequest> _deleteReq; | |||
}; | }; | |||
/** | /** | |||
End of changes. 5 change blocks. | ||||
4 lines changed or deleted | 42 lines changed or added | |||
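A small inspection sketch based on the accessors declared above; the BatchType_* enumerator names are assumptions (the enum body is not shown in this excerpt).

    // Hypothetical helper describing the wrapped request.
    std::string describe( const BatchedCommandRequest& request ) {
        // Index builds travel as inserts into the indexes collection, so the
        // namespace actually being targeted can differ from getNS().
        if ( request.isInsertIndexRequest() ) {
            return "index build on " + request.getTargetingNS();
        }
        switch ( request.getBatchType() ) {
        case BatchedCommandRequest::BatchType_Insert: return "insert into " + request.getNS();
        case BatchedCommandRequest::BatchType_Update: return "update on " + request.getNS();
        case BatchedCommandRequest::BatchType_Delete: return "delete from " + request.getNS();
        }
        return "unknown batch type";
    }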
batched_command_response.h | batched_command_response.h | |||
---|---|---|---|---|
skipping to change at line 25 | skipping to change at line 25 | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/scoped_ptr.hpp> | #include <boost/scoped_ptr.hpp> | |||
#include <string> | #include <string> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/s/batched_error_detail.h" | ||||
#include "mongo/s/bson_serializable.h" | #include "mongo/s/bson_serializable.h" | |||
#include "mongo/s/write_ops/batched_error_detail.h" | ||||
#include "mongo/s/write_ops/batched_upsert_detail.h" | ||||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* This class represents the layout and content of an insert/update/delete runCommand, | * This class represents the layout and content of an insert/update/delete runCommand, | |||
* the response side. | * the response side. | |||
*/ | */ | |||
class BatchedCommandResponse : public BSONSerializable { | class BatchedCommandResponse : public BSONSerializable { | |||
MONGO_DISALLOW_COPYING(BatchedCommandResponse); | MONGO_DISALLOW_COPYING(BatchedCommandResponse); | |||
public: | public: | |||
// | // | |||
// schema declarations | // schema declarations | |||
// | // | |||
static const BSONField<bool> ok; | static const BSONField<int> ok; | |||
static const BSONField<int> errCode; | static const BSONField<int> errCode; | |||
static const BSONField<BSONObj> errInfo; | static const BSONField<BSONObj> errInfo; | |||
static const BSONField<string> errMessage; | static const BSONField<string> errMessage; | |||
static const BSONField<long long> n; | static const BSONField<long long> n; | |||
static const BSONField<long long> upserted; | static const BSONField<BSONObj> singleUpserted; // ID type | |||
static const BSONField<std::vector<BatchedUpsertDetail*> > upsertDetails; | ||||
static const BSONField<Date_t> lastOp; | static const BSONField<Date_t> lastOp; | |||
static const BSONField<std::vector<BatchedErrorDetail*> > errDetails; | static const BSONField<std::vector<BatchedErrorDetail*> > errDetails; | |||
// | // | |||
// construction / destruction | // construction / destruction | |||
// | // | |||
BatchedCommandResponse(); | BatchedCommandResponse(); | |||
virtual ~BatchedCommandResponse(); | virtual ~BatchedCommandResponse(); | |||
skipping to change at line 75 | skipping to change at line 77 | |||
virtual bool isValid(std::string* errMsg) const; | virtual bool isValid(std::string* errMsg) const; | |||
virtual BSONObj toBSON() const; | virtual BSONObj toBSON() const; | |||
virtual bool parseBSON(const BSONObj& source, std::string* errMsg); | virtual bool parseBSON(const BSONObj& source, std::string* errMsg); | |||
virtual void clear(); | virtual void clear(); | |||
virtual std::string toString() const; | virtual std::string toString() const; | |||
// | // | |||
// individual field accessors | // individual field accessors | |||
// | // | |||
void setOk(bool ok); | void setOk(int ok); | |||
void unsetOk(); | void unsetOk(); | |||
bool isOkSet() const; | bool isOkSet() const; | |||
bool getOk() const; | int getOk() const; | |||
void setErrCode(int errCode); | void setErrCode(int errCode); | |||
void unsetErrCode(); | void unsetErrCode(); | |||
bool isErrCodeSet() const; | bool isErrCodeSet() const; | |||
int getErrCode() const; | int getErrCode() const; | |||
void setErrInfo(const BSONObj& errInfo); | void setErrInfo(const BSONObj& errInfo); | |||
void unsetErrInfo(); | void unsetErrInfo(); | |||
bool isErrInfoSet() const; | bool isErrInfoSet() const; | |||
const BSONObj& getErrInfo() const; | const BSONObj& getErrInfo() const; | |||
skipping to change at line 100 | skipping to change at line 102 | |||
void setErrMessage(const StringData& errMessage); | void setErrMessage(const StringData& errMessage); | |||
void unsetErrMessage(); | void unsetErrMessage(); | |||
bool isErrMessageSet() const; | bool isErrMessageSet() const; | |||
const std::string& getErrMessage() const; | const std::string& getErrMessage() const; | |||
void setN(long long n); | void setN(long long n); | |||
void unsetN(); | void unsetN(); | |||
bool isNSet() const; | bool isNSet() const; | |||
long long getN() const; | long long getN() const; | |||
void setUpserted(long long upserted); | void setSingleUpserted(const BSONObj& singleUpserted); | |||
void unsetUpserted(); | void unsetSingleUpserted(); | |||
bool isUpsertedSet() const; | bool isSingleUpsertedSet() const; | |||
long long getUpserted() const; | const BSONObj& getSingleUpserted() const; | |||
void setUpsertDetails(const std::vector<BatchedUpsertDetail*>& upsertDetails); | ||||
void addToUpsertDetails(BatchedUpsertDetail* upsertDetails); | ||||
void unsetUpsertDetails(); | ||||
bool isUpsertDetailsSet() const; | ||||
std::size_t sizeUpsertDetails() const; | ||||
const std::vector<BatchedUpsertDetail*>& getUpsertDetails() const; | ||||
const BatchedUpsertDetail* getUpsertDetailsAt(std::size_t pos) const; | ||||
void setLastOp(Date_t lastOp); | void setLastOp(Date_t lastOp); | |||
void unsetLastOp(); | void unsetLastOp(); | |||
bool isLastOpSet() const; | bool isLastOpSet() const; | |||
Date_t getLastOp() const; | Date_t getLastOp() const; | |||
void setErrDetails(const std::vector<BatchedErrorDetail*>& errDetails); | void setErrDetails(const std::vector<BatchedErrorDetail*>& errDetails); | |||
void addToErrDetails(BatchedErrorDetail* errDetails); | void addToErrDetails(BatchedErrorDetail* errDetails); | |||
void unsetErrDetails(); | void unsetErrDetails(); | |||
bool isErrDetailsSet() const; | bool isErrDetailsSet() const; | |||
std::size_t sizeErrDetails() const; | std::size_t sizeErrDetails() const; | |||
const std::vector<BatchedErrorDetail*>& getErrDetails() const; | const std::vector<BatchedErrorDetail*>& getErrDetails() const; | |||
const BatchedErrorDetail* getErrDetailsAt(std::size_t pos) const; | const BatchedErrorDetail* getErrDetailsAt(std::size_t pos) const; | |||
private: | private: | |||
// Convention: (M)andatory, (O)ptional | // Convention: (M)andatory, (O)ptional | |||
// (M) false if batch didn't get to be applied for any reason | // (M) 0 if batch didn't get to be applied for any reason | |||
bool _ok; | int _ok; | |||
bool _isOkSet; | bool _isOkSet; | |||
// (O) whether all items in the batch applied correctly | // (O) whether all items in the batch applied correctly | |||
int _errCode; | int _errCode; | |||
bool _isErrCodeSet; | bool _isErrCodeSet; | |||
// (O) further details about the error | // (O) further details about the error | |||
BSONObj _errInfo; | BSONObj _errInfo; | |||
bool _isErrInfoSet; | bool _isErrInfoSet; | |||
// (O) whether all items in the batch applied correctly | // (O) whether all items in the batch applied correctly | |||
string _errMessage; | string _errMessage; | |||
bool _isErrMessageSet; | bool _isErrMessageSet; | |||
// (O) number of documents affected | // (O) number of documents affected | |||
long long _n; | long long _n; | |||
bool _isNSet; | bool _isNSet; | |||
        // (O)  in updates, number of ops that were upserts | // (O)  "promoted" _upserted, if the corresponding request contained only one batch item | |||
        long long _upserted; | // Should only be present if _upserted is not. | |||
        bool _isUpsertedSet; | BSONObj _singleUpserted; | |||
 | bool _isSingleUpsertedSet; | |||
 | // (O)  Array of upserted items' _id's | |||
 | // Should only be present if _singleUpserted is not. | |||
 | boost::scoped_ptr<std::vector<BatchedUpsertDetail*> >_upsertDetails; | |||
// (O) XXX What is lastop? | // (O) XXX What is lastop? | |||
Date_t _lastOp; | Date_t _lastOp; | |||
bool _isLastOpSet; | bool _isLastOpSet; | |||
// (O) Array of item-level error information | // (O) Array of item-level error information | |||
boost::scoped_ptr<std::vector<BatchedErrorDetail*> >_errDetails; | boost::scoped_ptr<std::vector<BatchedErrorDetail*> >_errDetails; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 9 change blocks. | ||||
14 lines changed or deleted | 34 lines changed or added | |||
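Under the new reporting scheme, upserted _ids arrive either in the promoted singleUpserted field (single-item batch) or in the upsertDetails array, never both. A counting sketch built only from the accessors declared above:

    long long countUpserts( const BatchedCommandResponse& response ) {
        if ( !response.getOk() )
            return 0;                      // ok is now an int; 0 means the batch was not applied
        if ( response.isSingleUpsertedSet() )
            return 1;                      // _id promoted because the request held one item
        if ( response.isUpsertDetailsSet() )
            return static_cast<long long>( response.sizeUpsertDetails() );
        return 0;
    }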
batched_delete_document.h | batched_delete_document.h | |||
---|---|---|---|---|
skipping to change at line 84 | skipping to change at line 84 | |||
bool isLimitSet() const; | bool isLimitSet() const; | |||
int getLimit() const; | int getLimit() const; | |||
private: | private: | |||
// Convention: (M)andatory, (O)ptional | // Convention: (M)andatory, (O)ptional | |||
// (M) query whose result the delete will remove | // (M) query whose result the delete will remove | |||
BSONObj _query; | BSONObj _query; | |||
bool _isQuerySet; | bool _isQuerySet; | |||
// (O) cap the number of documents to be deleted | // (M) the maximum number of documents to be deleted | |||
int _limit; | int _limit; | |||
bool _isLimitSet; | bool _isLimitSet; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 1 lines changed or added | |||
batched_delete_request.h | batched_delete_request.h | |||
---|---|---|---|---|
skipping to change at line 25 | skipping to change at line 25 | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/scoped_ptr.hpp> | #include <boost/scoped_ptr.hpp> | |||
#include <string> | #include <string> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/s/batched_delete_document.h" | ||||
#include "mongo/s/bson_serializable.h" | #include "mongo/s/bson_serializable.h" | |||
#include "mongo/s/chunk_version.h" | #include "mongo/s/chunk_version.h" | |||
#include "mongo/s/write_ops/batched_delete_document.h" | ||||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* This class represents the layout and content of a batched delete runCommand, | * This class represents the layout and content of a batched delete runCommand, | |||
* the request side. | * the request side. | |||
*/ | */ | |||
class BatchedDeleteRequest : public BSONSerializable { | class BatchedDeleteRequest : public BSONSerializable { | |||
MONGO_DISALLOW_COPYING(BatchedDeleteRequest); | MONGO_DISALLOW_COPYING(BatchedDeleteRequest); | |||
public: | public: | |||
skipping to change at line 51 | skipping to change at line 51 | |||
// | // | |||
// Name used for the batched delete invocation. | // Name used for the batched delete invocation. | |||
static const std::string BATCHED_DELETE_REQUEST; | static const std::string BATCHED_DELETE_REQUEST; | |||
// Field names and types in the batched delete command type. | // Field names and types in the batched delete command type. | |||
static const BSONField<std::string> collName; | static const BSONField<std::string> collName; | |||
static const BSONField<std::vector<BatchedDeleteDocument*> > deletes; | static const BSONField<std::vector<BatchedDeleteDocument*> > deletes; | |||
static const BSONField<BSONObj> writeConcern; | static const BSONField<BSONObj> writeConcern; | |||
static const BSONField<bool> ordered; | static const BSONField<bool> ordered; | |||
static const BSONField<string> shardName; | ||||
static const BSONField<ChunkVersion> shardVersion; | static const BSONField<ChunkVersion> shardVersion; | |||
static const BSONField<long long> session; | static const BSONField<long long> session; | |||
// | // | |||
// construction / destruction | // construction / destruction | |||
// | // | |||
BatchedDeleteRequest(); | BatchedDeleteRequest(); | |||
virtual ~BatchedDeleteRequest(); | virtual ~BatchedDeleteRequest(); | |||
skipping to change at line 101 | skipping to change at line 102 | |||
void setWriteConcern(const BSONObj& writeConcern); | void setWriteConcern(const BSONObj& writeConcern); | |||
void unsetWriteConcern(); | void unsetWriteConcern(); | |||
bool isWriteConcernSet() const; | bool isWriteConcernSet() const; | |||
const BSONObj& getWriteConcern() const; | const BSONObj& getWriteConcern() const; | |||
void setOrdered(bool ordered); | void setOrdered(bool ordered); | |||
void unsetOrdered(); | void unsetOrdered(); | |||
bool isOrderedSet() const; | bool isOrderedSet() const; | |||
bool getOrdered() const; | bool getOrdered() const; | |||
void setShardName(const StringData& shardName); | ||||
void unsetShardName(); | ||||
bool isShardNameSet() const; | ||||
const std::string& getShardName() const; | ||||
void setShardVersion(const ChunkVersion& shardVersion); | void setShardVersion(const ChunkVersion& shardVersion); | |||
void unsetShardVersion(); | void unsetShardVersion(); | |||
bool isShardVersionSet() const; | bool isShardVersionSet() const; | |||
const ChunkVersion& getShardVersion() const; | const ChunkVersion& getShardVersion() const; | |||
void setSession(long long session); | void setSession(long long session); | |||
void unsetSession(); | void unsetSession(); | |||
bool isSessionSet() const; | bool isSessionSet() const; | |||
long long getSession() const; | long long getSession() const; | |||
skipping to change at line 122 | skipping to change at line 128 | |||
// Convention: (M)andatory, (O)ptional | // Convention: (M)andatory, (O)ptional | |||
// (M) collection we're deleting from | // (M) collection we're deleting from | |||
std::string _collName; | std::string _collName; | |||
bool _isCollNameSet; | bool _isCollNameSet; | |||
// (M) array of individual deletes | // (M) array of individual deletes | |||
std::vector<BatchedDeleteDocument*> _deletes; | std::vector<BatchedDeleteDocument*> _deletes; | |||
bool _isDeletesSet; | bool _isDeletesSet; | |||
// (M) to be issued after the batch applied | // (O) to be issued after the batch applied | |||
BSONObj _writeConcern; | BSONObj _writeConcern; | |||
bool _isWriteConcernSet; | bool _isWriteConcernSet; | |||
// (M) whether batch is issued in parallel or not | // (O) whether batch is issued in parallel or not | |||
bool _ordered; | bool _ordered; | |||
bool _isOrderedSet; | bool _isOrderedSet; | |||
// (O) shard name we're sending this batch to | ||||
std::string _shardName; | ||||
bool _isShardNameSet; | ||||
// (O) version for this collection on a given shard | // (O) version for this collection on a given shard | |||
boost::scoped_ptr<ChunkVersion> _shardVersion; | boost::scoped_ptr<ChunkVersion> _shardVersion; | |||
// (O) session number the inserts belong to | // (O) session number the inserts belong to | |||
long long _session; | long long _session; | |||
bool _isSessionSet; | bool _isSessionSet; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 7 change blocks. | ||||
3 lines changed or deleted | 13 lines changed or added | |||
batched_insert_request.h | batched_insert_request.h | |||
---|---|---|---|---|
skipping to change at line 50 | skipping to change at line 50 | |||
// | // | |||
// Name used for the batched insert invocation. | // Name used for the batched insert invocation. | |||
static const std::string BATCHED_INSERT_REQUEST; | static const std::string BATCHED_INSERT_REQUEST; | |||
// Field names and types in the batched insert command type. | // Field names and types in the batched insert command type. | |||
static const BSONField<std::string> collName; | static const BSONField<std::string> collName; | |||
static const BSONField<std::vector<BSONObj> > documents; | static const BSONField<std::vector<BSONObj> > documents; | |||
static const BSONField<BSONObj> writeConcern; | static const BSONField<BSONObj> writeConcern; | |||
static const BSONField<bool> ordered; | static const BSONField<bool> ordered; | |||
static const BSONField<string> shardName; | ||||
static const BSONField<ChunkVersion> shardVersion; | static const BSONField<ChunkVersion> shardVersion; | |||
static const BSONField<long long> session; | static const BSONField<long long> session; | |||
// | // | |||
// construction / destruction | // construction / destruction | |||
// | // | |||
BatchedInsertRequest(); | BatchedInsertRequest(); | |||
virtual ~BatchedInsertRequest(); | virtual ~BatchedInsertRequest(); | |||
skipping to change at line 100 | skipping to change at line 101 | |||
void setWriteConcern(const BSONObj& writeConcern); | void setWriteConcern(const BSONObj& writeConcern); | |||
void unsetWriteConcern(); | void unsetWriteConcern(); | |||
bool isWriteConcernSet() const; | bool isWriteConcernSet() const; | |||
const BSONObj& getWriteConcern() const; | const BSONObj& getWriteConcern() const; | |||
void setOrdered(bool ordered); | void setOrdered(bool ordered); | |||
void unsetOrdered(); | void unsetOrdered(); | |||
bool isOrderedSet() const; | bool isOrderedSet() const; | |||
bool getOrdered() const; | bool getOrdered() const; | |||
void setShardName(const StringData& shardName); | ||||
void unsetShardName(); | ||||
bool isShardNameSet() const; | ||||
const std::string& getShardName() const; | ||||
void setShardVersion(const ChunkVersion& shardVersion); | void setShardVersion(const ChunkVersion& shardVersion); | |||
void unsetShardVersion(); | void unsetShardVersion(); | |||
bool isShardVersionSet() const; | bool isShardVersionSet() const; | |||
const ChunkVersion& getShardVersion() const; | const ChunkVersion& getShardVersion() const; | |||
void setSession(long long session); | void setSession(long long session); | |||
void unsetSession(); | void unsetSession(); | |||
bool isSessionSet() const; | bool isSessionSet() const; | |||
long long getSession() const; | long long getSession() const; | |||
skipping to change at line 121 | skipping to change at line 127 | |||
// Convention: (M)andatory, (O)ptional | // Convention: (M)andatory, (O)ptional | |||
// (M) collection we're inserting on | // (M) collection we're inserting on | |||
std::string _collName; | std::string _collName; | |||
bool _isCollNameSet; | bool _isCollNameSet; | |||
// (M) array of documents to be inserted | // (M) array of documents to be inserted | |||
std::vector<BSONObj> _documents; | std::vector<BSONObj> _documents; | |||
bool _isDocumentsSet; | bool _isDocumentsSet; | |||
// (M) to be issued after the batch applied | // (O) to be issued after the batch applied | |||
BSONObj _writeConcern; | BSONObj _writeConcern; | |||
bool _isWriteConcernSet; | bool _isWriteConcernSet; | |||
// (M) whether batch is issued in parallel or not | // (O) whether batch is issued in parallel or not | |||
bool _ordered; | bool _ordered; | |||
bool _isOrderedSet; | bool _isOrderedSet; | |||
// (O) shard name we're sending this batch to | ||||
std::string _shardName; | ||||
bool _isShardNameSet; | ||||
// (O) version for this collection on a given shard | // (O) version for this collection on a given shard | |||
boost::scoped_ptr<ChunkVersion> _shardVersion; | boost::scoped_ptr<ChunkVersion> _shardVersion; | |||
// (O) session number the inserts belong to | // (O) session number the inserts belong to | |||
long long _session; | long long _session; | |||
bool _isSessionSet; | bool _isSessionSet; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 5 change blocks. | ||||
2 lines changed or deleted | 12 lines changed or added | |||
batched_update_request.h | batched_update_request.h | |||
---|---|---|---|---|
skipping to change at line 25 | skipping to change at line 25 | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/scoped_ptr.hpp> | #include <boost/scoped_ptr.hpp> | |||
#include <string> | #include <string> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/s/batched_update_document.h" | ||||
#include "mongo/s/bson_serializable.h" | #include "mongo/s/bson_serializable.h" | |||
#include "mongo/s/chunk_version.h" | #include "mongo/s/chunk_version.h" | |||
#include "mongo/s/write_ops/batched_update_document.h" | ||||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* This class represents the layout and content of a batched update runCommand, | * This class represents the layout and content of a batched update runCommand, | |||
* the request side. | * the request side. | |||
*/ | */ | |||
class BatchedUpdateRequest : public BSONSerializable { | class BatchedUpdateRequest : public BSONSerializable { | |||
MONGO_DISALLOW_COPYING(BatchedUpdateRequest); | MONGO_DISALLOW_COPYING(BatchedUpdateRequest); | |||
public: | public: | |||
skipping to change at line 51 | skipping to change at line 51 | |||
// | // | |||
// Name used for the batched update invocation. | // Name used for the batched update invocation. | |||
static const std::string BATCHED_UPDATE_REQUEST; | static const std::string BATCHED_UPDATE_REQUEST; | |||
// Field names and types in the batched update command type. | // Field names and types in the batched update command type. | |||
static const BSONField<std::string> collName; | static const BSONField<std::string> collName; | |||
static const BSONField<std::vector<BatchedUpdateDocument*> > updates; | static const BSONField<std::vector<BatchedUpdateDocument*> > updates; | |||
static const BSONField<BSONObj> writeConcern; | static const BSONField<BSONObj> writeConcern; | |||
static const BSONField<bool> ordered; | static const BSONField<bool> ordered; | |||
static const BSONField<string> shardName; | ||||
static const BSONField<ChunkVersion> shardVersion; | static const BSONField<ChunkVersion> shardVersion; | |||
static const BSONField<long long> session; | static const BSONField<long long> session; | |||
// | // | |||
// construction / destruction | // construction / destruction | |||
// | // | |||
BatchedUpdateRequest(); | BatchedUpdateRequest(); | |||
virtual ~BatchedUpdateRequest(); | virtual ~BatchedUpdateRequest(); | |||
skipping to change at line 101 | skipping to change at line 102 | |||
void setWriteConcern(const BSONObj& writeConcern); | void setWriteConcern(const BSONObj& writeConcern); | |||
void unsetWriteConcern(); | void unsetWriteConcern(); | |||
bool isWriteConcernSet() const; | bool isWriteConcernSet() const; | |||
const BSONObj& getWriteConcern() const; | const BSONObj& getWriteConcern() const; | |||
void setOrdered(bool ordered); | void setOrdered(bool ordered); | |||
void unsetOrdered(); | void unsetOrdered(); | |||
bool isOrderedSet() const; | bool isOrderedSet() const; | |||
bool getOrdered() const; | bool getOrdered() const; | |||
void setShardName(const StringData& shardName); | ||||
void unsetShardName(); | ||||
bool isShardNameSet() const; | ||||
const std::string& getShardName() const; | ||||
void setShardVersion(const ChunkVersion& shardVersion); | void setShardVersion(const ChunkVersion& shardVersion); | |||
void unsetShardVersion(); | void unsetShardVersion(); | |||
bool isShardVersionSet() const; | bool isShardVersionSet() const; | |||
const ChunkVersion& getShardVersion() const; | const ChunkVersion& getShardVersion() const; | |||
void setSession(long long session); | void setSession(long long session); | |||
void unsetSession(); | void unsetSession(); | |||
bool isSessionSet() const; | bool isSessionSet() const; | |||
long long getSession() const; | long long getSession() const; | |||
skipping to change at line 122 | skipping to change at line 128 | |||
// Convention: (M)andatory, (O)ptional | // Convention: (M)andatory, (O)ptional | |||
// (M) collection we're updating from | // (M) collection we're updating from | |||
std::string _collName; | std::string _collName; | |||
bool _isCollNameSet; | bool _isCollNameSet; | |||
// (M) array of individual updates | // (M) array of individual updates | |||
std::vector<BatchedUpdateDocument*> _updates; | std::vector<BatchedUpdateDocument*> _updates; | |||
bool _isUpdatesSet; | bool _isUpdatesSet; | |||
// (M) to be issued after the batch applied | // (O) to be issued after the batch applied | |||
BSONObj _writeConcern; | BSONObj _writeConcern; | |||
bool _isWriteConcernSet; | bool _isWriteConcernSet; | |||
// (M) whether batch is issued in parallel or not | // (O) whether batch is issued in parallel or not | |||
bool _ordered; | bool _ordered; | |||
bool _isOrderedSet; | bool _isOrderedSet; | |||
// (O) shard name we're sending this batch to | ||||
std::string _shardName; | ||||
bool _isShardNameSet; | ||||
// (O) version for this collection on a given shard | // (O) version for this collection on a given shard | |||
boost::scoped_ptr<ChunkVersion> _shardVersion; | boost::scoped_ptr<ChunkVersion> _shardVersion; | |||
// (O) session number the inserts belong to | // (O) session number the inserts belong to | |||
long long _session; | long long _session; | |||
bool _isSessionSet; | bool _isSessionSet; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 7 change blocks. | ||||
3 lines changed or deleted | 13 lines changed or added | |||
bson_field.h | bson_field.h | |||
---|---|---|---|---|
skipping to change at line 96 | skipping to change at line 96 | |||
BSONFieldValue<T> operator()(const T& t) const { | BSONFieldValue<T> operator()(const T& t) const { | |||
return BSONFieldValue<T>(_name, t); | return BSONFieldValue<T>(_name, t); | |||
} | } | |||
const std::string& name() const { | const std::string& name() const { | |||
return _name; | return _name; | |||
} | } | |||
const T& getDefault() const { | const T& getDefault() const { | |||
dassert(_defaultSet); | ||||
return _default; | return _default; | |||
} | } | |||
const bool hasDefault() const { | const bool hasDefault() const { | |||
return _defaultSet; | return _defaultSet; | |||
} | } | |||
std::string operator()() const { | std::string operator()() const { | |||
return _name; | return _name; | |||
} | } | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 0 lines changed or added | |||
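With the dassert removed from getDefault(), callers are expected to guard with hasDefault() themselves. A sketch of typical use; the two-argument (name, default) constructor is an assumption, since only name(), hasDefault() and getDefault() appear in this excerpt.

    // Assumed: BSONField<T>( name, defaultValue ) constructor.
    static const BSONField<long long> nField( "n", 0 );

    long long extractN( const BSONObj& doc ) {
        BSONElement e = doc[ nField.name() ];
        if ( e.eoo() )
            return nField.hasDefault() ? nField.getDefault() : 0;  // caller checks hasDefault()
        return e.numberLong();
    }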
bsondump_options.h | bsondump_options.h | |||
---|---|---|---|---|
skipping to change at line 39 | skipping to change at line 39 | |||
std::string type; | std::string type; | |||
std::string file; | std::string file; | |||
}; | }; | |||
extern BSONDumpGlobalParams bsonDumpGlobalParams; | extern BSONDumpGlobalParams bsonDumpGlobalParams; | |||
Status addBSONDumpOptions(moe::OptionSection* options); | Status addBSONDumpOptions(moe::OptionSection* options); | |||
void printBSONDumpHelp(std::ostream* out); | void printBSONDumpHelp(std::ostream* out); | |||
    Status handlePreValidationBSONDumpOptions(const moe::Environment& params); | /** | |||
 | * Handle options that should come before validation, such as "help". | |||
 | * | |||
 | * Returns false if an option was found that implies we should prematurely exit with success. | |||
 | */ | |||
 | bool handlePreValidationBSONDumpOptions(const moe::Environment& params); | |||
Status storeBSONDumpOptions(const moe::Environment& params, | Status storeBSONDumpOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
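The switch from Status to bool encodes "a pre-validation option such as --help was handled; exit successfully without further validation". A sketch of the resulting calling pattern; the wrapper name and the shouldExit out-parameter are hypothetical, only the two declared functions come from this header.

    Status runBSONDumpOptionsPhase( const moe::Environment& params,
                                    const std::vector<std::string>& args,
                                    bool* shouldExit ) {
        if ( !handlePreValidationBSONDumpOptions( params ) ) {
            *shouldExit = true;          // e.g. help text was printed; terminate with success
            return Status::OK();
        }
        *shouldExit = false;
        return storeBSONDumpOptions( params, args );
    }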
bsonobj.h | bsonobj.h | |||
---|---|---|---|---|
skipping to change at line 283 | skipping to change at line 283 | |||
/** performs a cursory check on the object's size only. */ | /** performs a cursory check on the object's size only. */ | |||
bool isValid() const; | bool isValid() const; | |||
/** @return ok if it can be stored as a valid embedded doc. | /** @return ok if it can be stored as a valid embedded doc. | |||
* Not valid if any field name: | * Not valid if any field name: | |||
* - contains a "." | * - contains a "." | |||
* - starts with "$" | * - starts with "$" | |||
* -- unless it is a dbref ($ref/$id/[$db]/...) | * -- unless it is a dbref ($ref/$id/[$db]/...) | |||
*/ | */ | |||
inline bool okForStorage() const { | inline bool okForStorage() const { | |||
return _okForStorage(false).isOK(); | return _okForStorage(false, true).isOK(); | |||
} | } | |||
/** Same as above with the following extra restrictions | /** Same as above with the following extra restrictions | |||
* Not valid if: | * Not valid if: | |||
* - "_id" field is a | * - "_id" field is a | |||
* -- Regex | * -- Regex | |||
* -- Array | * -- Array | |||
*/ | */ | |||
inline bool okForStorageAsRoot() const { | inline bool okForStorageAsRoot() const { | |||
return _okForStorage(true).isOK(); | return _okForStorage(true, true).isOK(); | |||
} | } | |||
/** | /** | |||
* Validates that this can be stored as an embedded document | * Validates that this can be stored as an embedded document | |||
* See details above in okForStorage | * See details above in okForStorage | |||
* | * | |||
* If 'deep' is true then validation is done to children | ||||
* | ||||
* If not valid a user readable status message is returned. | * If not valid a user readable status message is returned. | |||
*/ | */ | |||
inline Status storageValidEmbedded() const { | inline Status storageValidEmbedded(const bool deep = true) const { | |||
return _okForStorage(false); | return _okForStorage(false, deep); | |||
} | } | |||
/** | /** | |||
* Validates that this can be stored as a document (in a collection ) | * Validates that this can be stored as a document (in a collection ) | |||
* See details above in okForStorageAsRoot | * See details above in okForStorageAsRoot | |||
* | * | |||
* If 'deep' is true then validation is done to children | ||||
* | ||||
* If not valid a user readable status message is returned. | * If not valid a user readable status message is returned. | |||
*/ | */ | |||
inline Status storageValid() const { | inline Status storageValid(const bool deep = true) const { | |||
return _okForStorage(true); | return _okForStorage(true, deep); | |||
} | } | |||
/** @return true if object is empty -- i.e., {} */ | /** @return true if object is empty -- i.e., {} */ | |||
bool isEmpty() const { return objsize() <= 5; } | bool isEmpty() const { return objsize() <= 5; } | |||
void dump() const; | void dump() const; | |||
/** Alternative output format */ | /** Alternative output format */ | |||
std::string hexDump() const; | std::string hexDump() const; | |||
skipping to change at line 566 | skipping to change at line 570 | |||
void init(Holder *holder) { | void init(Holder *holder) { | |||
_holder = holder; // holder is now managed by intrusive_ptr | _holder = holder; // holder is now managed by intrusive_ptr | |||
init(holder->data); | init(holder->data); | |||
} | } | |||
void init(const char *data) { | void init(const char *data) { | |||
_objdata = data; | _objdata = data; | |||
if ( !isValid() ) | if ( !isValid() ) | |||
_assertInvalid(); | _assertInvalid(); | |||
} | } | |||
        Status _okForStorage(bool root) const; | /** | |||
 | * Validate if the element is okay to be stored in a collection, maybe as the root element | |||
* | ||||
* If 'root' is true then checks against _id are made. | ||||
* If 'deep' is false then do not traverse through children | ||||
*/ | ||||
Status _okForStorage(bool root, bool deep) const; | ||||
}; | }; | |||
std::ostream& operator<<( std::ostream &s, const BSONObj &o ); | std::ostream& operator<<( std::ostream &s, const BSONObj &o ); | |||
std::ostream& operator<<( std::ostream &s, const BSONElement &e ); | std::ostream& operator<<( std::ostream &s, const BSONElement &e ); | |||
StringBuilder& operator<<( StringBuilder &s, const BSONObj &o ); | StringBuilder& operator<<( StringBuilder &s, const BSONObj &o ); | |||
StringBuilder& operator<<( StringBuilder &s, const BSONElement &e ); | StringBuilder& operator<<( StringBuilder &s, const BSONElement &e ); | |||
struct BSONArray : BSONObj { | struct BSONArray : BSONObj { | |||
// Don't add anything other than forwarding constructors!!! | // Don't add anything other than forwarding constructors!!! | |||
End of changes. 7 change blocks. | ||||
7 lines changed or deleted | 18 lines changed or added | |||
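The new 'deep' flag controls whether validation recurses into sub-documents. A sketch using only the methods declared above; "settings" is just an example field name.

    Status validateForInsert( const BSONObj& doc ) {
        // Root-level check with _id restrictions, recursing into all children.
        Status status = doc.storageValid( true );
        if ( !status.isOK() )
            return status;

        // Shallow re-check of an embedded object whose children were validated elsewhere.
        BSONObj embedded = doc.getObjectField( "settings" );
        return embedded.storageValidEmbedded( false );
    }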
bsonobjbuilder.h | bsonobjbuilder.h | |||
---|---|---|---|---|
skipping to change at line 199 | skipping to change at line 199 | |||
/** Append a NumberLong */ | /** Append a NumberLong */ | |||
BSONObjBuilder& append(const StringData& fieldName, long long n) { | BSONObjBuilder& append(const StringData& fieldName, long long n) { | |||
_b.appendNum((char) NumberLong); | _b.appendNum((char) NumberLong); | |||
_b.appendStr(fieldName); | _b.appendStr(fieldName); | |||
_b.appendNum(n); | _b.appendNum(n); | |||
return *this; | return *this; | |||
} | } | |||
/** appends a number. if n < max(int)/2 then uses int, otherwise l ong long */ | /** appends a number. if n < max(int)/2 then uses int, otherwise l ong long */ | |||
BSONObjBuilder& appendIntOrLL( const StringData& fieldName , long l ong n ) { | BSONObjBuilder& appendIntOrLL( const StringData& fieldName , long l ong n ) { | |||
long long x = n; | // extra () to avoid max macro on windows | |||
if ( x < 0 ) | static const long long maxInt = (std::numeric_limits<int>::max) | |||
x = x * -1; | () / 2; | |||
if ( x < ( (std::numeric_limits<int>::max)() / 2 ) ) // extra ( | static const long long minInt = -maxInt; | |||
) to avoid max macro on windows | if ( minInt < n && n < maxInt ) { | |||
append( fieldName , (int)n ); | append( fieldName , static_cast<int>( n ) ); | |||
else | } | |||
else { | ||||
append( fieldName , n ); | append( fieldName , n ); | |||
} | ||||
return *this; | return *this; | |||
} | } | |||
/** | /** | |||
* appendNumber is a series of method for appending the smallest se nsible type | * appendNumber is a series of method for appending the smallest se nsible type | |||
* mostly for JS | * mostly for JS | |||
*/ | */ | |||
BSONObjBuilder& appendNumber( const StringData& fieldName , int n ) { | BSONObjBuilder& appendNumber( const StringData& fieldName , int n ) { | |||
return append( fieldName , n ); | return append( fieldName , n ); | |||
} | } | |||
skipping to change at line 233 | skipping to change at line 235 | |||
if ( n < maxInt ) | if ( n < maxInt ) | |||
append( fieldName, static_cast<int>( n ) ); | append( fieldName, static_cast<int>( n ) ); | |||
else | else | |||
append( fieldName, static_cast<long long>( n ) ); | append( fieldName, static_cast<long long>( n ) ); | |||
return *this; | return *this; | |||
} | } | |||
BSONObjBuilder& appendNumber( const StringData& fieldName, long lon g llNumber ) { | BSONObjBuilder& appendNumber( const StringData& fieldName, long lon g llNumber ) { | |||
static const long long maxInt = ( 1LL << 30 ); | static const long long maxInt = ( 1LL << 30 ); | |||
static const long long minInt = -maxInt; | ||||
static const long long maxDouble = ( 1LL << 40 ); | static const long long maxDouble = ( 1LL << 40 ); | |||
static const long long minDouble = -maxDouble; | ||||
long long nonNegative = llNumber >= 0 ? llNumber : -llNumber; | if ( minInt < llNumber && llNumber < maxInt ) { | |||
if ( nonNegative < maxInt ) | ||||
append( fieldName, static_cast<int>( llNumber ) ); | append( fieldName, static_cast<int>( llNumber ) ); | |||
else if ( nonNegative < maxDouble ) | } | |||
else if ( minDouble < llNumber && llNumber < maxDouble ) { | ||||
append( fieldName, static_cast<double>( llNumber ) ); | append( fieldName, static_cast<double>( llNumber ) ); | |||
else | } | |||
else { | ||||
append( fieldName, llNumber ); | append( fieldName, llNumber ); | |||
} | ||||
return *this; | return *this; | |||
} | } | |||
/** Append a double element */ | /** Append a double element */ | |||
BSONObjBuilder& append(const StringData& fieldName, double n) { | BSONObjBuilder& append(const StringData& fieldName, double n) { | |||
_b.appendNum((char) NumberDouble); | _b.appendNum((char) NumberDouble); | |||
_b.appendStr(fieldName); | _b.appendStr(fieldName); | |||
_b.appendNum(n); | _b.appendNum(n); | |||
return *this; | return *this; | |||
} | } | |||
End of changes. 8 change blocks. | ||||
11 lines changed or deleted | 18 lines changed or added | |||
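The rewritten bounds checks treat negative values symmetrically. A small example of which BSON numeric type each call now produces, following directly from the thresholds in the code above (|n| < 2^30 gives int and |n| < 2^40 gives double in appendNumber; max(int)/2 is the cutoff in appendIntOrLL):

    BSONObj buildCounters() {
        BSONObjBuilder b;
        b.appendNumber( "small", 1000LL );           // |n| < 2^30  -> NumberInt
        b.appendNumber( "medium", 1LL << 35 );       // |n| < 2^40  -> NumberDouble
        b.appendNumber( "large", -( 1LL << 45 ) );   // otherwise   -> NumberLong (negatives now handled)
        b.appendIntOrLL( "mixed", 1LL << 40 );       // above max(int)/2 -> stays long long
        return b.obj();
    }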
btree_based_builder.h | btree_based_builder.h | |||
---|---|---|---|---|
skipping to change at line 45 | skipping to change at line 45 | |||
namespace IndexUpdateTests { | namespace IndexUpdateTests { | |||
class AddKeysToPhaseOne; | class AddKeysToPhaseOne; | |||
class InterruptAddKeysToPhaseOne; | class InterruptAddKeysToPhaseOne; | |||
class DoDropDups; | class DoDropDups; | |||
class InterruptDoDropDups; | class InterruptDoDropDups; | |||
} | } | |||
namespace mongo { | namespace mongo { | |||
class Collection; | ||||
class BSONObjExternalSorter; | class BSONObjExternalSorter; | |||
class ExternalSortComparison; | class ExternalSortComparison; | |||
class IndexDescriptor; | ||||
class IndexDetails; | class IndexDetails; | |||
class NamespaceDetails; | class NamespaceDetails; | |||
class ProgressMeter; | class ProgressMeter; | |||
class ProgressMeterHolder; | class ProgressMeterHolder; | |||
struct SortPhaseOne; | struct SortPhaseOne; | |||
class BtreeBasedBuilder { | class BtreeBasedBuilder { | |||
public: | public: | |||
/** | /** | |||
* Want to build an index? Call this. Throws DBException. | * Want to build an index? Call this. Throws DBException. | |||
*/ | */ | |||
        static uint64_t fastBuildIndex(const char* ns, NamespaceDetails* d, IndexDetails& idx, | static uint64_t fastBuildIndex(Collection* collection, IndexDescriptor* descriptor, | |||
                                       bool mayInterrupt, int idxNo); | bool mayInterrupt); | |||
static DiskLoc makeEmptyIndex(const IndexDetails& idx); | static DiskLoc makeEmptyIndex(const IndexDetails& idx); | |||
static ExternalSortComparison* getComparison(int version, const BSO NObj& keyPattern); | static ExternalSortComparison* getComparison(int version, const BSO NObj& keyPattern); | |||
private: | private: | |||
friend class IndexUpdateTests::AddKeysToPhaseOne; | friend class IndexUpdateTests::AddKeysToPhaseOne; | |||
friend class IndexUpdateTests::InterruptAddKeysToPhaseOne; | friend class IndexUpdateTests::InterruptAddKeysToPhaseOne; | |||
friend class IndexUpdateTests::DoDropDups; | friend class IndexUpdateTests::DoDropDups; | |||
friend class IndexUpdateTests::InterruptDoDropDups; | friend class IndexUpdateTests::InterruptDoDropDups; | |||
        static void addKeysToPhaseOne(NamespaceDetails* d, const char* ns, const IndexDetails& idx, | static void addKeysToPhaseOne(Collection* collection, IndexDescriptor* idx, | |||
                                      const BSONObj& order, SortPhaseOne* phaseOne, | const BSONObj& order, SortPhaseOne* phaseOne, | |||
                                      int64_t nrecords, ProgressMeter* progressMeter, | ProgressMeter* progressMeter, bool mayInterrupt ); | |||
                                      bool mayInterrupt, | ||||
                                      int idxNo); | ||||
static void doDropDups(const char* ns, NamespaceDetails* d, const set<DiskLoc>& dupsToDrop, | static void doDropDups(Collection* collection, const set<DiskLoc>& dupsToDrop, | |||
bool mayInterrupt ); | bool mayInterrupt ); | |||
}; | }; | |||
// Exposed for testing purposes. | // Exposed for testing purposes. | |||
template< class V > | template< class V > | |||
void buildBottomUpPhases2And3( bool dupsAllowed, | void buildBottomUpPhases2And3( bool dupsAllowed, | |||
IndexDetails& idx, | IndexDescriptor* idx, | |||
BSONObjExternalSorter& sorter, | BSONObjExternalSorter& sorter, | |||
bool dropDups, | bool dropDups, | |||
set<DiskLoc>& dupsToDrop, | set<DiskLoc>& dupsToDrop, | |||
CurOp* op, | CurOp* op, | |||
SortPhaseOne* phase1, | SortPhaseOne* phase1, | |||
ProgressMeterHolder& pm, | ProgressMeterHolder& pm, | |||
Timer& t, | Timer& t, | |||
bool mayInterrupt ); | bool mayInterrupt ); | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 7 change blocks. | ||||
10 lines changed or deleted | 10 lines changed or added | |||
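A sketch of calling the narrowed entry point; obtaining the Collection* and IndexDescriptor* happens outside this header, and the wrapper name here is hypothetical. Only fastBuildIndex() itself is taken from the declaration above, and its uint64_t return value is simply forwarded.

    uint64_t buildIndexBlocking( Collection* collection, IndexDescriptor* descriptor ) {
        const bool mayInterrupt = true;   // allow the build to be interrupted
        // Throws DBException on failure, per the comment above.
        return BtreeBasedBuilder::fastBuildIndex( collection, descriptor, mayInterrupt );
    }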
canonical_query.h | canonical_query.h | |||
---|---|---|---|---|
skipping to change at line 36 | skipping to change at line 36 | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/dbmessage.h" | #include "mongo/db/dbmessage.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/matcher/expression.h" | #include "mongo/db/matcher/expression.h" | |||
#include "mongo/db/query/lite_parsed_query.h" | #include "mongo/db/query/lite_parsed_query.h" | |||
#include "mongo/db/query/parsed_projection.h" | #include "mongo/db/query/lite_projection.h" | |||
namespace mongo { | namespace mongo { | |||
class CanonicalQuery { | class CanonicalQuery { | |||
public: | public: | |||
static Status canonicalize(const QueryMessage& qm, CanonicalQuery** out); | static Status canonicalize(const QueryMessage& qm, CanonicalQuery** out); | |||
// These are for testing, when we don't have a QueryMessage. | /** | |||
* For testing or for internal clients to use. | ||||
*/ | ||||
static Status canonicalize(const string& ns, const BSONObj& query, CanonicalQuery** out); | static Status canonicalize(const string& ns, const BSONObj& query, CanonicalQuery** out); | |||
static Status canonicalize(const string& ns, const BSONObj& query, | ||||
long long skip, | ||||
long long limit, CanonicalQuery** out); | ||||
static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort, | static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort, | |||
const BSONObj& proj, CanonicalQuery** out); | const BSONObj& proj, CanonicalQuery** out); | |||
// What namespace is this query over? | // What namespace is this query over? | |||
const string& ns() const { return _pq->ns(); } | const string& ns() const { return _pq->ns(); } | |||
// | // | |||
// Accessors for the query | // Accessors for the query | |||
// | // | |||
MatchExpression* root() const { return _root.get(); } | MatchExpression* root() const { return _root.get(); } | |||
BSONObj getQueryObj() const { return _pq->getFilter(); } | BSONObj getQueryObj() const { return _pq->getFilter(); } | |||
const LiteParsedQuery& getParsed() const { return *_pq; } | const LiteParsedQuery& getParsed() const { return *_pq; } | |||
ParsedProjection* getProj() const { return _proj.get(); } | LiteProjection* getLiteProj() const { return _liteProj.get(); } | |||
string toString() const; | string toString() const; | |||
private: | private: | |||
// You must go through canonicalize to create a CanonicalQuery. | // You must go through canonicalize to create a CanonicalQuery. | |||
CanonicalQuery() { } | CanonicalQuery() { } | |||
// Takes ownership of lpq | // Takes ownership of lpq | |||
Status init(LiteParsedQuery* lpq); | Status init(LiteParsedQuery* lpq); | |||
scoped_ptr<LiteParsedQuery> _pq; | scoped_ptr<LiteParsedQuery> _pq; | |||
scoped_ptr<ParsedProjection> _proj; | ||||
// _root points into _pq->getFilter() | // _root points into _pq->getFilter() | |||
scoped_ptr<MatchExpression> _root; | scoped_ptr<MatchExpression> _root; | |||
scoped_ptr<LiteProjection> _liteProj; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 6 change blocks. | ||||
5 lines changed or deleted | 12 lines changed or added | |||
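A usage sketch of the internal-client overload shown above, taking a filter, sort and projection; the namespace and BSON objects are illustrative values only.

    Status makeExampleQuery( CanonicalQuery** out ) {
        Status status = CanonicalQuery::canonicalize( "test.coll",
                                                      BSON( "a" << 1 ),    // filter
                                                      BSON( "b" << 1 ),    // sort
                                                      BSON( "_id" << 0 ),  // projection
                                                      out );
        if ( !status.isOK() )
            return status;
        // On success, *out owns the parsed query: root(), getParsed(), getLiteProj().
        return Status::OK();
    }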
catalog_hack.h | catalog_hack.h | |||
---|---|---|---|---|
skipping to change at line 109 | skipping to change at line 109 | |||
static string getAccessMethodName(const BSONObj& keyPattern) { | static string getAccessMethodName(const BSONObj& keyPattern) { | |||
if (shouldOverridePlugin(keyPattern)) { | if (shouldOverridePlugin(keyPattern)) { | |||
return ""; | return ""; | |||
} else { | } else { | |||
return IndexNames::findPluginName(keyPattern); | return IndexNames::findPluginName(keyPattern); | |||
} | } | |||
} | } | |||
static IndexDescriptor* getDescriptor(NamespaceDetails* nsd, int id xNo) { | static IndexDescriptor* getDescriptor(NamespaceDetails* nsd, int id xNo) { | |||
IndexDetails& id = nsd->idx(idxNo); | IndexDetails& id = nsd->idx(idxNo); | |||
            return new IndexDescriptor(nsd, idxNo, &id, id.info.obj()); | Collection* c = cc().database()->getCollection( id.parentNS() ); | |||
return new IndexDescriptor(c, idxNo, &id, id.info.obj()); | ||||
} | } | |||
static BtreeBasedAccessMethod* getBtreeBasedIndex(IndexDescriptor* desc) { | static BtreeBasedAccessMethod* getBtreeBasedIndex(IndexDescriptor* desc) { | |||
string type = getAccessMethodName(desc->keyPattern()); | string type = getAccessMethodName(desc->keyPattern()); | |||
if (IndexNames::HASHED == type) { | if (IndexNames::HASHED == type) { | |||
return new HashAccessMethod(desc); | return new HashAccessMethod(desc); | |||
} else if (IndexNames::GEO_2DSPHERE == type) { | } else if (IndexNames::GEO_2DSPHERE == type) { | |||
return new S2AccessMethod(desc); | return new S2AccessMethod(desc); | |||
} else if (IndexNames::TEXT == type || IndexNames::TEXT_INTERNA L == type) { | } else if (IndexNames::TEXT == type || IndexNames::TEXT_INTERNA L == type) { | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 3 lines changed or added | |||
chunk.h | chunk.h | |||
---|---|---|---|---|
skipping to change at line 33 | skipping to change at line 33 | |||
* for all of the code used other than as permitted herein. If you modify | * for all of the code used other than as permitted herein. If you modify | |||
* file(s) with this exception, you may extend this exception to your | * file(s) with this exception, you may extend this exception to your | |||
* version of the file(s), but you are not obligated to do so. If you do not | * version of the file(s), but you are not obligated to do so. If you do not | |||
* wish to do so, delete this exception statement from your version. If y ou | * wish to do so, delete this exception statement from your version. If y ou | |||
* delete this exception statement from all source files in the program, | * delete this exception statement from all source files in the program, | |||
* then also delete it in the license file. | * then also delete it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/string_data.h" | ||||
#include "mongo/bson/util/atomic_int.h" | #include "mongo/bson/util/atomic_int.h" | |||
#include "mongo/client/distlock.h" | #include "mongo/client/distlock.h" | |||
#include "mongo/s/chunk_version.h" | #include "mongo/s/chunk_version.h" | |||
#include "mongo/s/shard.h" | #include "mongo/s/shard.h" | |||
#include "mongo/s/shardkey.h" | #include "mongo/s/shardkey.h" | |||
#include "mongo/util/concurrency/ticketholder.h" | #include "mongo/util/concurrency/ticketholder.h" | |||
namespace mongo { | namespace mongo { | |||
class DBConfig; | class DBConfig; | |||
skipping to change at line 169 | skipping to change at line 170 | |||
// migration support | // migration support | |||
// | // | |||
/** | /** | |||
* Issues a migrate request for this chunk | * Issues a migrate request for this chunk | |||
* | * | |||
* @param to shard to move this chunk to | * @param to shard to move this chunk to | |||
         * @param chunkSize maximum number of bytes beyond which the migrate should not go through | * @param chunkSize maximum number of bytes beyond which the migrate should not go through
         * @param secondaryThrottle whether during migrate all writes should block for repl | * @param secondaryThrottle whether during migrate all writes should block for repl
* @param waitForDelete whether chunk move should wait for cleanup or return immediately | * @param waitForDelete whether chunk move should wait for cleanup or return immediately | |||
* @param maxTimeMS max time for the migrate request | ||||
         * @param res the object containing details about the migrate execution | * @param res the object containing details about the migrate execution
* @return true if move was successful | * @return true if move was successful | |||
*/ | */ | |||
bool moveAndCommit(const Shard& to, | bool moveAndCommit(const Shard& to, | |||
long long chunkSize, | long long chunkSize, | |||
bool secondaryThrottle, | bool secondaryThrottle, | |||
bool waitForDelete, | bool waitForDelete, | |||
int maxTimeMS, | ||||
BSONObj& res) const; | BSONObj& res) const; | |||
/** | /** | |||
* @return size of shard in bytes | * @return size of shard in bytes | |||
* talks to mongod to do this | * talks to mongod to do this | |||
*/ | */ | |||
long getPhysicalSize() const; | long getPhysicalSize() const; | |||
/** | /** | |||
* marks this chunk as a jumbo chunk | * marks this chunk as a jumbo chunk | |||
skipping to change at line 433 | skipping to change at line 436 | |||
* Returns true if, for this shard, the chunks are identical in bot h chunk managers | * Returns true if, for this shard, the chunks are identical in bot h chunk managers | |||
*/ | */ | |||
bool compatibleWith( const ChunkManager& other, const Shard& shard ) const; | bool compatibleWith( const ChunkManager& other, const Shard& shard ) const; | |||
        bool compatibleWith( ChunkManagerPtr other, const Shard& shard ) const { if( ! other ) return false; return compatibleWith( *other, shard ); } | bool compatibleWith( ChunkManagerPtr other, const Shard& shard ) const { if( ! other ) return false; return compatibleWith( *other, shard ); }
bool compatibleWith( const Chunk& other ) const; | bool compatibleWith( const Chunk& other ) const; | |||
bool compatibleWith( ChunkPtr other ) const { if( ! other ) return false; return compatibleWith( *other ); } | bool compatibleWith( ChunkPtr other ) const { if( ! other ) return false; return compatibleWith( *other ); } | |||
string toString() const; | string toString() const; | |||
ChunkVersion getVersion( const StringData& shardName ) const; | ||||
ChunkVersion getVersion( const Shard& shard ) const; | ChunkVersion getVersion( const Shard& shard ) const; | |||
ChunkVersion getVersion() const; | ChunkVersion getVersion() const; | |||
void getInfo( BSONObjBuilder& b ) const; | void getInfo( BSONObjBuilder& b ) const; | |||
/** | /** | |||
* @param me - so i don't get deleted before i'm done | * @param me - so i don't get deleted before i'm done | |||
*/ | */ | |||
void drop( ChunkManagerPtr me ) const; | void drop( ChunkManagerPtr me ) const; | |||
End of changes. 4 change blocks. | ||||
0 lines changed or deleted | 4 lines changed or added | |||
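An illustrative call site for the widened moveAndCommit() signature (a fragment, not taken from the source). The chunk and shard variables, the 64 MB chunk-size cap, and the reading of maxTimeMS = 0 as "no time limit" are assumptions.

    BSONObj res;
    bool ok = chunk->moveAndCommit(toShard,
                                   64 * 1024 * 1024,  // chunkSize: byte cap for the migrate
                                   false,             // secondaryThrottle: don't block writes on repl
                                   true,              // waitForDelete: wait for range cleanup
                                   0,                 // maxTimeMS: assumed to mean "no time limit"
                                   res);
    if (!ok) {
        // res describes why the migrate did not go through
    }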
chunk_diff.h | chunk_diff.h | |||
---|---|---|---|---|
skipping to change at line 176 | skipping to change at line 176 | |||
map<ShardType, ChunkVersion>* _maxShardVersions; | map<ShardType, ChunkVersion>* _maxShardVersions; | |||
// Store for later use | // Store for later use | |||
int _validDiffs; | int _validDiffs; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
// Include template definition | // Include template definition | |||
// TODO: Convert to normal .cpp file when normalized | #include "chunk_diff-inl.cpp" | |||
#include "chunk_diff.hpp" | ||||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 0 lines changed or added | |||
chunk_manager_targeter.h | chunk_manager_targeter.h | |||
---|---|---|---|---|
skipping to change at line 30 | skipping to change at line 30 | |||
#include "mongo/bson/bsonobj.h" | #include "mongo/bson/bsonobj.h" | |||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
#include "mongo/s/chunk.h" | #include "mongo/s/chunk.h" | |||
#include "mongo/s/shard.h" | #include "mongo/s/shard.h" | |||
#include "mongo/s/chunk_version.h" | #include "mongo/s/chunk_version.h" | |||
#include "mongo/s/ns_targeter.h" | #include "mongo/s/ns_targeter.h" | |||
namespace mongo { | namespace mongo { | |||
class TargeterStats; | ||||
/** | /** | |||
* NSTargeter based on a ChunkManager implementation. Wraps all except ion codepaths and | * NSTargeter based on a ChunkManager implementation. Wraps all except ion codepaths and | |||
* returns DatabaseNotFound statuses on applicable failures. | * returns DatabaseNotFound statuses on applicable failures. | |||
* | * | |||
* Must be initialized before use, and initialization may fail. | * Must be initialized before use, and initialization may fail. | |||
*/ | */ | |||
class ChunkManagerTargeter : public NSTargeter { | class ChunkManagerTargeter : public NSTargeter { | |||
public: | public: | |||
        ChunkManagerTargeter() : _needsTargetingRefresh( false ) { } | ChunkManagerTargeter();
/** | /** | |||
* Initializes the ChunkManagerTargeter with the latest targeting i nformation for the | * Initializes the ChunkManagerTargeter with the latest targeting i nformation for the | |||
* namespace. May need to block and load information from a remote config server. | * namespace. May need to block and load information from a remote config server. | |||
* | * | |||
* Returns !OK if the information could not be initialized. | * Returns !OK if the information could not be initialized. | |||
*/ | */ | |||
Status init( const NamespaceString& nss ); | Status init( const NamespaceString& nss ); | |||
const NamespaceString& getNS() const; | const NamespaceString& getNS() const; | |||
        Status targetDoc( const BSONObj& doc, ShardEndpoint** endpoint ) const; | // Returns ShardKeyNotFound if document does not have a full shard key.
        Status targetInsert( const BSONObj& doc, ShardEndpoint** endpoint ) const; |
        Status targetQuery( const BSONObj& query, std::vector<ShardEndpoint*>* endpoints ) const; | // Returns ShardKeyNotFound if the update can't be targeted without a shard key.
        Status targetUpdate( const BatchedUpdateDocument& updateDoc, std::vector<ShardEndpoint*>* endpoints ) const;
        // Returns ShardKeyNotFound if the delete can't be targeted without a shard key.
        Status targetDelete( const BatchedDeleteDocument& deleteDoc, std::vector<ShardEndpoint*>* endpoints ) const;
        Status targetAll( std::vector<ShardEndpoint*>* endpoints ) const;
void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ); | void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ); | |||
void noteCouldNotTarget(); | void noteCouldNotTarget(); | |||
/** | /** | |||
* Replaces the targeting information with the latest information f rom the cache. If this | * Replaces the targeting information with the latest information f rom the cache. If this | |||
* information is stale WRT the noted stale responses or a remote r efresh is needed due | * information is stale WRT the noted stale responses or a remote r efresh is needed due | |||
* to a targeting failure, will contact the config servers to reloa d the metadata. | * to a targeting failure, will contact the config servers to reloa d the metadata. | |||
* | * | |||
* Also see NSTargeter::refreshIfNeeded(). | * Also see NSTargeter::refreshIfNeeded(). | |||
*/ | */ | |||
Status refreshIfNeeded(); | Status refreshIfNeeded(); | |||
/** | ||||
         * Returns the stats. Note that the returned stats object is still owned by this targeter.
*/ | ||||
const TargeterStats* getStats() const; | ||||
private: | private: | |||
// Different ways we can refresh metadata | // Different ways we can refresh metadata | |||
// TODO: Improve these ways. | // TODO: Improve these ways. | |||
enum RefreshType { | enum RefreshType { | |||
// No refresh is needed | // No refresh is needed | |||
RefreshType_None, | RefreshType_None, | |||
// The version has gone up, but the collection hasn't been drop ped | // The version has gone up, but the collection hasn't been drop ped | |||
RefreshType_RefreshChunkManager, | RefreshType_RefreshChunkManager, | |||
// The collection may have been dropped, so we need to reload t he db | // The collection may have been dropped, so we need to reload t he db | |||
RefreshType_ReloadDatabase | RefreshType_ReloadDatabase | |||
}; | }; | |||
/** | /** | |||
* Performs an actual refresh from the config server. | * Performs an actual refresh from the config server. | |||
*/ | */ | |||
Status refreshNow( RefreshType refreshType ); | Status refreshNow( RefreshType refreshType ); | |||
/** | ||||
         * Returns a vector of ShardEndpoints for a potentially multi-shard query.
* | ||||
* Returns !OK with message if query could not be targeted. | ||||
*/ | ||||
        Status targetQuery( const BSONObj& query, std::vector<ShardEndpoint*>* endpoints ) const;
NamespaceString _nss; | NamespaceString _nss; | |||
// Zero or one of these are filled at all times | // Zero or one of these are filled at all times | |||
// If sharded, _manager, if unsharded, _primary, on error, neither | // If sharded, _manager, if unsharded, _primary, on error, neither | |||
ChunkManagerPtr _manager; | ChunkManagerPtr _manager; | |||
ShardPtr _primary; | ShardPtr _primary; | |||
// Map of shard->remote shard version reported from stale errors | // Map of shard->remote shard version reported from stale errors | |||
typedef std::map<std::string, ChunkVersion> ShardVersionMap; | typedef std::map<std::string, ChunkVersion> ShardVersionMap; | |||
ShardVersionMap _remoteShardVersions; | ShardVersionMap _remoteShardVersions; | |||
// Stores whether we need to check the remote server on refresh | // Stores whether we need to check the remote server on refresh | |||
bool _needsTargetingRefresh; | bool _needsTargetingRefresh; | |||
        // Represents only the view and not really part of the targeter state.
mutable boost::scoped_ptr<TargeterStats> _stats; | ||||
}; | ||||
struct TargeterStats { | ||||
        // Map of chunk shard minKey -> approximate delta. This is used for deciding
        // whether a chunk might need splitting or not.
std::map<BSONObj, int> chunkSizeDelta; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 7 change blocks. | ||||
7 lines changed or deleted | 46 lines changed or added | |||
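A minimal sketch of driving the targeter for a single insert; error handling is trimmed, and the endpoint ownership convention (caller deletes) plus its fields are assumptions.

    #include <boost/scoped_ptr.hpp>
    #include "mongo/s/chunk_manager_targeter.h"   // assumed header path

    using namespace mongo;

    Status targetOneInsert(const NamespaceString& nss, const BSONObj& doc) {
        ChunkManagerTargeter targeter;
        Status s = targeter.init(nss);              // may block on the config servers
        if (!s.isOK())
            return s;
        ShardEndpoint* endpoint = NULL;
        s = targeter.targetInsert(doc, &endpoint);  // ShardKeyNotFound if doc lacks the shard key
        if (!s.isOK())
            return s;
        boost::scoped_ptr<ShardEndpoint> guard(endpoint);
        // ... dispatch the write to the endpoint's shard with its shard version attached,
        // then call noteStaleResponse()/refreshIfNeeded() if the shard reports staleness ...
        return Status::OK();
    }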
client.h | client.h | |||
---|---|---|---|---|
skipping to change at line 116 | skipping to change at line 116 | |||
void setLastOp( OpTime op ) { _lastOp = op; } | void setLastOp( OpTime op ) { _lastOp = op; } | |||
OpTime getLastOp() const { return _lastOp; } | OpTime getLastOp() const { return _lastOp; } | |||
/** caution -- use Context class instead */ | /** caution -- use Context class instead */ | |||
void setContext(Context *c) { _context = c; } | void setContext(Context *c) { _context = c; } | |||
/* report what the last operation was. used by getlasterror */ | /* report what the last operation was. used by getlasterror */ | |||
void appendLastOp( BSONObjBuilder& b ) const; | void appendLastOp( BSONObjBuilder& b ) const; | |||
        bool isGod() const { return _god; } /* this is for map/reduce writes */ | bool isGod() const { return _god; } /* this is for map/reduce writes */
        bool setGod(bool newVal) { const bool prev = _god; _god = newVal; return prev; }
string toString() const; | string toString() const; | |||
void gotHandshake( const BSONObj& o ); | void gotHandshake( const BSONObj& o ); | |||
BSONObj getRemoteID() const { return _remoteId; } | BSONObj getRemoteID() const { return _remoteId; } | |||
BSONObj getHandshake() const { return _handshake; } | BSONObj getHandshake() const { return _handshake; } | |||
ConnectionId getConnectionId() const { return _connectionId; } | ConnectionId getConnectionId() const { return _connectionId; } | |||
        bool inPageFaultRetryableSection() const { return _pageFaultRetryableSection != 0; } | bool inPageFaultRetryableSection() const { return _pageFaultRetryableSection != 0; }
        PageFaultRetryableSection* getPageFaultRetryableSection() const { return _pageFaultRetryableSection; } | PageFaultRetryableSection* getPageFaultRetryableSection() const { return _pageFaultRetryableSection; }
bool hasWrittenThisPass() const { return _hasWrittenThisPass; } | bool hasWrittenThisPass() const { return _hasWrittenThisPass; } | |||
skipping to change at line 156 | skipping to change at line 157 | |||
bool _hasWrittenThisPass; | bool _hasWrittenThisPass; | |||
PageFaultRetryableSection *_pageFaultRetryableSection; | PageFaultRetryableSection *_pageFaultRetryableSection; | |||
LockState _ls; | LockState _ls; | |||
friend class PageFaultRetryableSection; // TEMP | friend class PageFaultRetryableSection; // TEMP | |||
friend class NoPageFaultsAllowed; // TEMP | friend class NoPageFaultsAllowed; // TEMP | |||
public: | public: | |||
/* set _god=true temporarily, safely */ | ||||
class GodScope { | ||||
bool _prev; | ||||
public: | ||||
GodScope(); | ||||
~GodScope(); | ||||
}; | ||||
/** "read lock, and set my context, all in one operation" | /** "read lock, and set my context, all in one operation" | |||
* This handles (if not recursively locked) opening an unopened da tabase. | * This handles (if not recursively locked) opening an unopened da tabase. | |||
*/ | */ | |||
class ReadContext : boost::noncopyable { | class ReadContext : boost::noncopyable { | |||
public: | public: | |||
            ReadContext(const std::string& ns, const std::string& path=storageGlobalParams.dbpath); | ReadContext(const std::string& ns, const std::string& path=storageGlobalParams.dbpath);
Context& ctx() { return *c.get(); } | Context& ctx() { return *c.get(); } | |||
private: | private: | |||
scoped_ptr<Lock::DBRead> lk; | scoped_ptr<Lock::DBRead> lk; | |||
scoped_ptr<Context> c; | scoped_ptr<Context> c; | |||
skipping to change at line 256 | skipping to change at line 249 | |||
}; // class Client | }; // class Client | |||
/** get the Client object for this thread. */ | /** get the Client object for this thread. */ | |||
inline Client& cc() { | inline Client& cc() { | |||
Client * c = currentClient.get(); | Client * c = currentClient.get(); | |||
verify( c ); | verify( c ); | |||
return *c; | return *c; | |||
} | } | |||
inline Client::GodScope::GodScope() { | ||||
_prev = cc()._god; | ||||
cc()._god = true; | ||||
} | ||||
inline Client::GodScope::~GodScope() { cc()._god = _prev; } | ||||
inline bool haveClient() { return currentClient.get() > 0; } | inline bool haveClient() { return currentClient.get() > 0; } | |||
}; | }; | |||
End of changes. 3 change blocks. | ||||
14 lines changed or deleted | 1 lines changed or added | |||
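With GodScope removed, callers now save and restore the flag through setGod() themselves; a hedged fragment showing the pattern (the internal write is a stand-in).

    Client& c = cc();
    const bool wasGod = c.setGod(true);   // returns the previous value
    try {
        // ... perform the internal (map/reduce style) write that must bypass normal checks ...
        c.setGod(wasGod);
    }
    catch (...) {
        c.setGod(wasGod);                 // restore even on failure
        throw;
    }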
clientcursor.h | clientcursor.h | |||
---|---|---|---|---|
skipping to change at line 66 | skipping to change at line 66 | |||
     * ClientCursor is a wrapper that represents a cursorid from our database application's | * ClientCursor is a wrapper that represents a cursorid from our database application's
* perspective. | * perspective. | |||
*/ | */ | |||
class ClientCursor : private boost::noncopyable { | class ClientCursor : private boost::noncopyable { | |||
public: | public: | |||
        ClientCursor(int qopts, const shared_ptr<Cursor>& c, const StringData& ns, | ClientCursor(int qopts, const shared_ptr<Cursor>& c, const StringData& ns,
                     BSONObj query = BSONObj()); | BSONObj query = BSONObj());
        ClientCursor(Runner* runner, int qopts = 0, const BSONObj query = BSONObj()); | ClientCursor(Runner* runner, int qopts = 0, const BSONObj query = BSONObj());
ClientCursor(const string& ns); | ||||
~ClientCursor(); | ~ClientCursor(); | |||
/** | /** | |||
* Assert that there are no open cursors. | * Assert that there are no open cursors. | |||
* Called from DatabaseHolder::closeAll. | * Called from DatabaseHolder::closeAll. | |||
*/ | */ | |||
static void assertNoCursors(); | static void assertNoCursors(); | |||
// | // | |||
// Basic accessors | // Basic accessors | |||
skipping to change at line 402 | skipping to change at line 404 | |||
* use this to assure we don't in the background time out cursor while it is under use. if you | * use this to assure we don't in the background time out cursor while it is under use. if you | |||
     * are using noTimeout() already, there is no risk anyway. Further, this mechanism guards | * are using noTimeout() already, there is no risk anyway. Further, this mechanism guards
     * against two getMore requests on the same cursor executing at the same time - which might be | * against two getMore requests on the same cursor executing at the same time - which might be
* bad. That should never happen, but if a client driver had a bug, it could (or perhaps some | * bad. That should never happen, but if a client driver had a bug, it could (or perhaps some | |||
* sort of attack situation). | * sort of attack situation). | |||
*/ | */ | |||
class ClientCursorPin : boost::noncopyable { | class ClientCursorPin : boost::noncopyable { | |||
public: | public: | |||
ClientCursorPin( long long cursorid ); | ClientCursorPin( long long cursorid ); | |||
~ClientCursorPin(); | ~ClientCursorPin(); | |||
// This just releases the pin, does not delete the underlying. | ||||
void release(); | void release(); | |||
// Call this to delete the underlying ClientCursor. | // Call this to delete the underlying ClientCursor. | |||
void free(); | void deleteUnderlying(); | |||
ClientCursor *c() const; | ClientCursor *c() const; | |||
private: | private: | |||
CursorId _cursorid; | CursorId _cursorid; | |||
}; | }; | |||
/** Assures safe and reliable cleanup of a ClientCursor. */ | /** Assures safe and reliable cleanup of a ClientCursor. */ | |||
class ClientCursorHolder : boost::noncopyable { | class ClientCursorHolder : boost::noncopyable { | |||
public: | public: | |||
ClientCursorHolder( ClientCursor *c = 0 ); | ClientCursorHolder( ClientCursor *c = 0 ); | |||
~ClientCursorHolder(); | ~ClientCursorHolder(); | |||
End of changes. 3 change blocks. | ||||
1 lines changed or deleted | 4 lines changed or added | |||
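A small sketch of the renamed pin API; the cursorid and doneWithCursor variables are illustrative only.

    ClientCursorPin pin(cursorid);
    ClientCursor* cursor = pin.c();
    if (NULL == cursor) {
        // the cursor was already timed out or killed
    }
    else if (doneWithCursor) {
        pin.deleteUnderlying();   // destroys the ClientCursor itself
    }
    else {
        pin.release();            // only unpins; the cursor stays cached for the next getMore
    }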
collection.h | collection.h | |||
---|---|---|---|---|
skipping to change at line 36 | skipping to change at line 36 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also de lete | * exception statement from all source files in the program, then also de lete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/db/catalog/index_catalog.h" | ||||
#include "mongo/db/diskloc.h" | #include "mongo/db/diskloc.h" | |||
#include "mongo/db/exec/collection_scan_common.h" | #include "mongo/db/exec/collection_scan_common.h" | |||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
#include "mongo/db/storage/record_store.h" | #include "mongo/db/storage/record_store.h" | |||
#include "mongo/db/structure/collection_info_cache.h" | #include "mongo/db/structure/collection_info_cache.h" | |||
#include "mongo/platform/cstdint.h" | #include "mongo/platform/cstdint.h" | |||
namespace mongo { | namespace mongo { | |||
class Database; | class Database; | |||
class ExtentManager; | class ExtentManager; | |||
class NamespaceDetails; | class NamespaceDetails; | |||
class IndexCatalog; | ||||
class CollectionIterator; | class CollectionIterator; | |||
class FlatIterator; | class FlatIterator; | |||
class CappedIterator; | class CappedIterator; | |||
class OpDebug; | ||||
/** | /** | |||
* this is NOT safe through a yield right now | * this is NOT safe through a yield right now | |||
* not sure if it will be, or what yet | * not sure if it will be, or what yet | |||
*/ | */ | |||
class Collection { | class Collection { | |||
public: | public: | |||
Collection( const StringData& fullNS, | Collection( const StringData& fullNS, | |||
NamespaceDetails* details, | NamespaceDetails* details, | |||
Database* database ); | Database* database ); | |||
skipping to change at line 75 | skipping to change at line 79 | |||
bool ok() const { return _magic == 1357924; } | bool ok() const { return _magic == 1357924; } | |||
NamespaceDetails* details() { return _details; } // TODO: remove | NamespaceDetails* details() { return _details; } // TODO: remove | |||
const NamespaceDetails* details() const { return _details; } | const NamespaceDetails* details() const { return _details; } | |||
CollectionInfoCache* infoCache() { return &_infoCache; } | CollectionInfoCache* infoCache() { return &_infoCache; } | |||
const CollectionInfoCache* infoCache() const { return &_infoCache; } | const CollectionInfoCache* infoCache() const { return &_infoCache; } | |||
const NamespaceString& ns() const { return _ns; } | const NamespaceString& ns() const { return _ns; } | |||
        const IndexCatalog* getIndexCatalog() const { return &_indexCatalog; }
IndexCatalog* getIndexCatalog() { return &_indexCatalog; } | ||||
bool requiresIdIndex() const; | ||||
BSONObj docFor( const DiskLoc& loc ); | BSONObj docFor( const DiskLoc& loc ); | |||
        // ---- things that should move to a CollectionAccessMethod like thing
        CollectionIterator* getIterator( const DiskLoc& start, bool tailable, | CollectionIterator* getIterator( const DiskLoc& start, bool tailable,
                                         const CollectionScanParams::Direction& dir) const; | const CollectionScanParams::Direction& dir) const;
void deleteDocument( const DiskLoc& loc, | void deleteDocument( const DiskLoc& loc, | |||
bool cappedOK = false, | bool cappedOK = false, | |||
bool noWarn = false, | bool noWarn = false, | |||
BSONObj* deletedId = 0 ); | BSONObj* deletedId = 0 ); | |||
/** | ||||
* this does NOT modify the doc before inserting | ||||
* i.e. will not add an _id field for documents that are missing it | ||||
*/ | ||||
        StatusWith<DiskLoc> insertDocument( const BSONObj& doc, bool enforceQuota );
/** | ||||
* updates the document @ oldLocation with newDoc | ||||
* if the document fits in the old space, it is put there | ||||
* if not, it is moved | ||||
         * @return the post update location of the doc (may or may not be the same as oldLocation)
*/ | ||||
StatusWith<DiskLoc> updateDocument( const DiskLoc& oldLocation, | ||||
const BSONObj& newDoc, | ||||
bool enforceQuota, | ||||
OpDebug* debug ); | ||||
        int64_t storageSize( int* numExtents = NULL, BSONArrayBuilder* extentInfo = NULL ) const;
// ----------- | ||||
// this is temporary, moving up from DB for now | // this is temporary, moving up from DB for now | |||
// this will add a new extent the collection | // this will add a new extent the collection | |||
// the new extent will be returned | // the new extent will be returned | |||
// it will have been added to the linked list already | // it will have been added to the linked list already | |||
Extent* increaseStorageSize( int size, bool enforceQuota ); | Extent* increaseStorageSize( int size, bool enforceQuota ); | |||
// | // | |||
// Stats | // Stats | |||
// | // | |||
uint64_t numRecords() const; | uint64_t numRecords() const; | |||
uint64_t dataSize() const; | ||||
int averageObjectSize() const { | ||||
uint64_t n = numRecords(); | ||||
if ( n == 0 ) | ||||
return 5; | ||||
return static_cast<int>( dataSize() / n ); | ||||
} | ||||
private: | private: | |||
// @return 0 for inf., otherwise a number of files | // @return 0 for inf., otherwise a number of files | |||
int largestFileNumberInQuota() const; | int largestFileNumberInQuota() const; | |||
ExtentManager* getExtentManager(); | ExtentManager* getExtentManager(); | |||
const ExtentManager* getExtentManager() const; | const ExtentManager* getExtentManager() const; | |||
int _magic; | int _magic; | |||
NamespaceString _ns; | NamespaceString _ns; | |||
NamespaceDetails* _details; | NamespaceDetails* _details; | |||
Database* _database; | Database* _database; | |||
RecordStore _recordStore; | RecordStore _recordStore; | |||
CollectionInfoCache _infoCache; | CollectionInfoCache _infoCache; | |||
IndexCatalog _indexCatalog; | ||||
friend class Database; | friend class Database; | |||
friend class FlatIterator; | friend class FlatIterator; | |||
friend class CappedIterator; | friend class CappedIterator; | |||
friend class IndexCatalog; | ||||
}; | }; | |||
} | } | |||
End of changes. 9 change blocks. | ||||
0 lines changed or deleted | 48 lines changed or added | |||
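A hedged fragment using the document-level calls that moved onto Collection; the namespace, documents, quota flag, and OpDebug instance are assumptions.

    Collection* coll = cc().database()->getCollection(ns);
    StatusWith<DiskLoc> inserted = coll->insertDocument(doc, /*enforceQuota=*/true);
    if (!inserted.isOK())
        return inserted.getStatus();

    // Updates in place when the new version fits; otherwise the record moves and
    // the returned DiskLoc differs from the old one.
    StatusWith<DiskLoc> updated = coll->updateDocument(inserted.getValue(),
                                                       newDoc,
                                                       /*enforceQuota=*/true,
                                                       &opDebug);
    if (!updated.isOK())
        return updated.getStatus();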
collection_metadata.h | collection_metadata.h | |||
---|---|---|---|---|
skipping to change at line 32 | skipping to change at line 32 | |||
* file(s) with this exception, you may extend this exception to your | * file(s) with this exception, you may extend this exception to your | |||
* version of the file(s), but you are not obligated to do so. If you do not | * version of the file(s), but you are not obligated to do so. If you do not | |||
* wish to do so, delete this exception statement from your version. If you | * wish to do so, delete this exception statement from your version. If you | |||
* delete this exception statement from all source files in the program, | * delete this exception statement from all source files in the program, | |||
* then also delete it in the license file. | * then also delete it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/base/owned_pointer_vector.h" | ||||
#include "mongo/db/field_ref_set.h" | ||||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/s/chunk_version.h" | #include "mongo/s/chunk_version.h" | |||
#include "mongo/s/range_arithmetic.h" | #include "mongo/s/range_arithmetic.h" | |||
#include "mongo/s/type_chunk.h" | #include "mongo/s/type_chunk.h" | |||
namespace mongo { | namespace mongo { | |||
class MetadataLoader; | class MetadataLoader; | |||
// For now, we handle lifecycle of CollectionManager via shared_ptrs | // For now, we handle lifecycle of CollectionManager via shared_ptrs | |||
skipping to change at line 192 | skipping to change at line 194 | |||
} | } | |||
ChunkVersion getShardVersion() const { | ChunkVersion getShardVersion() const { | |||
return _shardVersion; | return _shardVersion; | |||
} | } | |||
BSONObj getKeyPattern() const { | BSONObj getKeyPattern() const { | |||
return _keyPattern; | return _keyPattern; | |||
} | } | |||
const std::vector<FieldRef*>& getKeyPatternFields() const { | ||||
return _keyFields.vector(); | ||||
} | ||||
BSONObj getMinKey() const; | BSONObj getMinKey() const; | |||
BSONObj getMaxKey() const; | BSONObj getMaxKey() const; | |||
std::size_t getNumChunks() const { | std::size_t getNumChunks() const { | |||
return _chunksMap.size(); | return _chunksMap.size(); | |||
} | } | |||
std::size_t getNumPending() const { | std::size_t getNumPending() const { | |||
return _pendingMap.size(); | return _pendingMap.size(); | |||
skipping to change at line 274 | skipping to change at line 280 | |||
// | // | |||
// sharded state below, for when the collection gets sharded | // sharded state below, for when the collection gets sharded | |||
// | // | |||
        // highest ChunkVersion for which this metadata's information is accurate | // highest ChunkVersion for which this metadata's information is accurate
ChunkVersion _shardVersion; | ChunkVersion _shardVersion; | |||
// key pattern for chunks under this range | // key pattern for chunks under this range | |||
BSONObj _keyPattern; | BSONObj _keyPattern; | |||
        // A vector owning the FieldRefs parsed from the shard-key pattern of field names.
        OwnedPointerVector<FieldRef> _keyFields;
// | // | |||
// RangeMaps represent chunks by mapping the min key to the chunk's max key, allowing | // RangeMaps represent chunks by mapping the min key to the chunk's max key, allowing | |||
// efficient lookup and intersection. | // efficient lookup and intersection. | |||
// | // | |||
        // Map of ranges of chunks that are migrating but have not been confirmed added yet | // Map of ranges of chunks that are migrating but have not been confirmed added yet
RangeMap _pendingMap; | RangeMap _pendingMap; | |||
// Map of chunks tracked by this shard | // Map of chunks tracked by this shard | |||
RangeMap _chunksMap; | RangeMap _chunksMap; | |||
skipping to change at line 300 | skipping to change at line 309 | |||
/** | /** | |||
         * Returns true if this metadata was loaded with all necessary information. | * Returns true if this metadata was loaded with all necessary information.
*/ | */ | |||
bool isValid() const; | bool isValid() const; | |||
/** | /** | |||
* Try to find chunks that are adjacent and record these intervals in the _rangesMap | * Try to find chunks that are adjacent and record these intervals in the _rangesMap | |||
*/ | */ | |||
void fillRanges(); | void fillRanges(); | |||
/** | ||||
* Creates the _keyField* local data | ||||
*/ | ||||
void fillKeyPatternFields(); | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 4 change blocks. | ||||
0 lines changed or deleted | 14 lines changed or added | |||
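A short sketch of reading the pre-parsed shard-key paths; the metadata pointer and the FieldRef::dottedField() accessor are assumptions.

    const std::vector<FieldRef*>& keyFields = metadata->getKeyPatternFields();
    for (size_t i = 0; i < keyFields.size(); ++i) {
        // each FieldRef is a pre-split dotted path, e.g. "a.b" -> parts "a", "b", so update
        // code can test shard-key immutability without re-parsing the key pattern each time
        StringData path = keyFields[i]->dottedField();
        (void)path;
    }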
commands.h | commands.h | |||
---|---|---|---|---|
skipping to change at line 226 | skipping to change at line 226 | |||
static void execCommandClientBasic(Command* c, | static void execCommandClientBasic(Command* c, | |||
ClientBasic& client, | ClientBasic& client, | |||
int queryOptions, | int queryOptions, | |||
const char *ns, | const char *ns, | |||
BSONObj& cmdObj, | BSONObj& cmdObj, | |||
BSONObjBuilder& result, | BSONObjBuilder& result, | |||
bool fromRepl ); | bool fromRepl ); | |||
// Helper for setting errmsg and ok field in command result object. | // Helper for setting errmsg and ok field in command result object. | |||
        static void appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg); | static void appendCommandStatus(BSONObjBuilder& result, bool ok, const std::string& errmsg);
        static void appendCommandStatus(BSONObjBuilder& result, const Status& status); | // @return s.isOK()
        static bool appendCommandStatus(BSONObjBuilder& result, const Status& status); |
// Set by command line. Controls whether or not testing-only comma nds should be available. | // Set by command line. Controls whether or not testing-only comma nds should be available. | |||
static int testCommandsEnabled; | static int testCommandsEnabled; | |||
private: | private: | |||
/** | /** | |||
         * Checks to see if the client is authorized to run the given command with the given | * Checks to see if the client is authorized to run the given command with the given
* parameters on the given named database. | * parameters on the given named database. | |||
* | * | |||
         * fromRepl is true if this command is running as part of oplog application, which for | * fromRepl is true if this command is running as part of oplog application, which for
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 4 lines changed or added | |||
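Because the Status overload now returns s.isOK(), a command body can end in a single statement; a hedged sketch assuming the usual run() signature and a hypothetical doTheWork() helper.

    virtual bool run(const string& dbname, BSONObj& cmdObj, int options,
                     string& errmsg, BSONObjBuilder& result, bool fromRepl) {
        Status status = doTheWork(dbname, cmdObj, &result);  // hypothetical helper
        // Appends the ok/errmsg/code fields and returns status.isOK().
        return appendCommandStatus(result, status);
    }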
console.h | console.h | |||
---|---|---|---|---|
skipping to change at line 42 | skipping to change at line 42 | |||
* console faithfully. | * console faithfully. | |||
* | * | |||
     * TODO(schwerin): If no console is attached on Windows (services), should writes here go to the | * TODO(schwerin): If no console is attached on Windows (services), should writes here go to the
* event logger? | * event logger? | |||
*/ | */ | |||
class Console { | class Console { | |||
public: | public: | |||
Console(); | Console(); | |||
std::ostream& out(); | std::ostream& out(); | |||
std::istream& in(); | ||||
private: | private: | |||
boost::unique_lock<boost::mutex> _consoleLock; | boost::unique_lock<boost::mutex> _consoleLock; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 0 lines changed or added | |||
core.h | core.h | |||
---|---|---|---|---|
// core.h | ||||
/** | /** | |||
* Copyright (C) 2008-2012 10gen Inc. | * Copyright (C) 2013 MongoDB Inc. | |||
* | * | |||
* This program is free software: you can redistribute it and/or modify | * This program is free software: you can redistribute it and/or modify | |||
 * it under the terms of the GNU Affero General Public License, version 3, | * it under the terms of the GNU Affero General Public License, version 3,
 * as published by the Free Software Foundation. | * as published by the Free Software Foundation.
 * | *
 * This program is distributed in the hope that it will be useful, | * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of | * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details. | * GNU Affero General Public License for more details.
 * | *
 * You should have received a copy of the GNU Affero General Public License | * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>. | * along with this program. If not, see <http://www.gnu.org/licenses/>.
 * | *
 * As a special exception, the copyright holders give permission to link the | * As a special exception, the copyright holders give permission to link the
 * code of portions of this program with the OpenSSL library under certain | * code of portions of this program with the OpenSSL library under certain
 * conditions as described in each individual source file and distribute | * conditions as described in each individual source file and distribute
 * linked combinations including the program with the OpenSSL library. You | * linked combinations including the program with the OpenSSL library. You
 * must comply with the GNU Affero General Public License in all respects for | * must comply with the GNU Affero General Public License in all respects for
 * all of the code used other than as permitted herein. If you modify file(s) | * all of the code used other than as permitted herein. If you modify file(s)
 * with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the
 * file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so,
 * delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this
 * exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/pch.h" | ||||
#include "mongo/db/jsobj.h" | ||||
#include "mongo/util/mongoutils/str.h" | ||||
#include <cmath> | #include <cmath> | |||
#ifndef M_PI | #ifndef M_PI | |||
# define M_PI 3.14159265358979323846 | # define M_PI 3.14159265358979323846 | |||
#endif | #endif | |||
#if 0 | namespace mongo {
# define CDEBUG -1 |
#else |
# define CDEBUG 10 |
#endif |
#if 0 |
# define GEODEBUGGING |
# define GEODEBUG(x) cout << x << endl; |
# define GEODEBUGPRINT(x) PRINT(x) |
inline void PREFIXDEBUG(GeoHash prefix, const GeoConvert* g) { |
    if (!prefix.constrains()) { |
        cout << "\t empty prefix" << endl; |
        return ; |
    } |
    Point ll (g, prefix); // lower left |
    prefix.move(1,1); |
    Point tr (g, prefix); // top right |
    Point center ((ll._x+tr._x)/2, (ll._y+tr._y)/2); | inline double deg2rad(const double deg) { return deg * (M_PI / 180.0); }
    double radius = fabs(ll._x - tr._x) / 2; | inline double rad2deg(const double rad) { return rad * (180.0 / M_PI); }
    cout << "\t ll: " << ll.toString() << " tr: " << tr.toString() |
         << " center: " << center.toString() << " radius: " << radius << endl; |
 | inline double computeXScanDistance(double y, double maxDistDegrees) {
 |     // TODO: this overestimates for large maxDistDegrees far from the equator
 |     return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegrees))),
 |                                 cos(deg2rad(max(-89.0, y - maxDistDegrees))));
} | }
#else |
# define GEODEBUG(x) |
# define GEODEBUGPRINT(x) |
# define PREFIXDEBUG(x, y) |
#endif |
// Used by haystack.cpp. XXX: change to something else/only have one of these geo things/nuke em all? | inline bool twoDWontWrap(double x, double y, double radius) {
#define GEOQUADDEBUG(x) |     // XXX XXX XXX SERVER-11387
//#define GEOQUADDEBUG(x) cout << x << endl |     // The 0.001 for error is totally bogus and must depend on the bits used.
 |     double yscandist = rad2deg(radius) + 0.001;
 |     double xscandist = computeXScanDistance(y, yscandist);
 |     bool ret = x + xscandist < 180
 |             && x - xscandist > -180
 |             && y + yscandist < 90
 |             && y - yscandist > -90;
 |     return ret;
 | }
// XXX: move elsewhere? |
namespace mongo { |
    inline double deg2rad(const double deg) { return deg * (M_PI / 180.0); } |
    inline double rad2deg(const double rad) { return rad * (180.0 / M_PI); } |
} | }
End of changes. 10 change blocks. | ||||
82 lines changed or deleted | 60 lines changed or added | |||
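To show the arithmetic of the new helpers, here is a self-contained sketch (not part of the header) that reproduces them; the sample point and radius are illustrative only.

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    static double deg2rad(double deg) { return deg * (M_PI / 180.0); }
    static double rad2deg(double rad) { return rad * (180.0 / M_PI); }

    static double computeXScanDistance(double y, double maxDistDegrees) {
        // Widen the longitude scan by a latitude-dependent factor, clamped near the poles.
        return maxDistDegrees / std::min(std::cos(deg2rad(std::min(+89.0, y + maxDistDegrees))),
                                         std::cos(deg2rad(std::max(-89.0, y - maxDistDegrees))));
    }

    static bool twoDWontWrap(double x, double y, double radius) {
        double yscandist = rad2deg(radius) + 0.001;        // radius arrives in radians
        double xscandist = computeXScanDistance(y, yscandist);
        return x + xscandist < 180 && x - xscandist > -180 &&
               y + yscandist < 90 && y - yscandist > -90;
    }

    int main() {
        // A ~0.0175 rad (~1 degree) search around (0, 45) stays inside the map (prints 1),
        // while the same search at x = 179.9 would cross the antimeridian (prints 0).
        std::printf("%d %d\n", twoDWontWrap(0.0, 45.0, 0.0175), twoDWontWrap(179.9, 45.0, 0.0175));
        return 0;
    }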
curop.h | curop.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <vector> | #include <vector> | |||
#include "mongo/bson/util/atomic_int.h" | #include "mongo/bson/util/atomic_int.h" | |||
#include "mongo/db/client.h" | #include "mongo/db/client.h" | |||
#include "mongo/db/storage/namespace.h" | #include "mongo/db/catalog/ondisk/namespace.h" | |||
#include "mongo/util/concurrency/spin_lock.h" | #include "mongo/util/concurrency/spin_lock.h" | |||
#include "mongo/util/net/hostandport.h" | #include "mongo/util/net/hostandport.h" | |||
#include "mongo/util/progress_meter.h" | #include "mongo/util/progress_meter.h" | |||
#include "mongo/util/time_support.h" | #include "mongo/util/time_support.h" | |||
namespace mongo { | namespace mongo { | |||
class CurOp; | class CurOp; | |||
    /* lifespan is different than CurOp because of recursives with DBDirectClient */ | /* lifespan is different than CurOp because of recursives with DBDirectClient */
skipping to change at line 304 | skipping to change at line 304 | |||
void setExpectedLatencyMs( long long latency ) { _expectedLatencyMs = latency; } | void setExpectedLatencyMs( long long latency ) { _expectedLatencyMs = latency; } | |||
void recordGlobalTime( long long micros ) const; | void recordGlobalTime( long long micros ) const; | |||
const LockStat& lockStat() const { return _lockStat; } | const LockStat& lockStat() const { return _lockStat; } | |||
LockStat& lockStat() { return _lockStat; } | LockStat& lockStat() { return _lockStat; } | |||
void setKillWaiterFlags(); | void setKillWaiterFlags(); | |||
/** | /** | |||
* this should be used very sparingly | ||||
* generally the Context should set this up | ||||
* but sometimes you want to do it ahead of time | ||||
*/ | ||||
void setNS( const StringData& ns ); | ||||
/** | ||||
* Find a currently running operation matching the given criteria. This assumes that you're | * Find a currently running operation matching the given criteria. This assumes that you're | |||
* going to kill the operation, so it must be called multiple times to get multiple matching | * going to kill the operation, so it must be called multiple times to get multiple matching | |||
* operations. | * operations. | |||
* @param criteria the search to do against the infoNoauth() BSONOb j | * @param criteria the search to do against the infoNoauth() BSONOb j | |||
* @return a pointer to a matching op or NULL if no ops match | * @return a pointer to a matching op or NULL if no ops match | |||
*/ | */ | |||
static CurOp* getOp(const BSONObj& criteria); | static CurOp* getOp(const BSONObj& criteria); | |||
private: | private: | |||
friend class Client; | friend class Client; | |||
void _reset(); | void _reset(); | |||
skipping to change at line 343 | skipping to change at line 350 | |||
int _numYields; | int _numYields; | |||
LockStat _lockStat; | LockStat _lockStat; | |||
// _notifyList is protected by the global killCurrentOp's mtx. | // _notifyList is protected by the global killCurrentOp's mtx. | |||
std::vector<bool*> _notifyList; | std::vector<bool*> _notifyList; | |||
// this is how much "extra" time a query might take | // this is how much "extra" time a query might take | |||
// a writebacklisten for example will block for 30s | // a writebacklisten for example will block for 30s | |||
// so this should be 30000 in that case | // so this should be 30000 in that case | |||
long long _expectedLatencyMs; | long long _expectedLatencyMs; | |||
        /** Nested class that implements a time limit ($maxTimeMS) for a CurOp object. */ | // Time limit for this operation. 0 if the operation has no time limit.
        uint64_t _maxTimeMicros;
        /** Nested class that implements tracking of a time limit for a CurOp object. */ |
class MaxTimeTracker { | class MaxTimeTracker { | |||
MONGO_DISALLOW_COPYING(MaxTimeTracker); | MONGO_DISALLOW_COPYING(MaxTimeTracker); | |||
public: | public: | |||
/** Newly-constructed MaxTimeTracker objects have the time limi t disabled. */ | /** Newly-constructed MaxTimeTracker objects have the time limi t disabled. */ | |||
MaxTimeTracker(); | MaxTimeTracker(); | |||
/** Disables the time limit. */ | /** Disables the time tracker. */ | |||
void reset(); | void reset(); | |||
/** Returns whether or not the time limit is enabled. */ | /** Returns whether or not time tracking is enabled. */ | |||
bool isEnabled() { return _enabled; } | bool isEnabled() const { return _enabled; } | |||
/** | /** | |||
             * Enables the time limit to be "durationMicros" microseconds from "startEpochMicros" | * Enables time tracking. The time limit is set to be "durationMicros" microseconds
             * (units of microseconds since the epoch). | * from "startEpochMicros" (units of microseconds since the epoch).
* | * | |||
* "durationMicros" must be nonzero. | * "durationMicros" must be nonzero. | |||
*/ | */ | |||
void setTimeLimit(uint64_t startEpochMicros, uint64_t durationM icros); | void setTimeLimit(uint64_t startEpochMicros, uint64_t durationM icros); | |||
/** | /** | |||
             * Checks whether the time limit has been hit. Returns false if not, or if the time | * Checks whether the time limit has been hit. Returns false if not, or if time
             * limit is disabled. | * tracking is disabled.
*/ | */ | |||
bool checkTimeLimit(); | bool checkTimeLimit(); | |||
/** | /** | |||
* Returns the number of microseconds remaining for the time li mit, or the special | * Returns the number of microseconds remaining for the time li mit, or the special | |||
* value 0 if the time limit is disabled. | * value 0 if time tracking is disabled. | |||
* | * | |||
             * Calling this method is more expensive than calling its sibling "checkInterval()", | * Calling this method is more expensive than calling its sibling "checkInterval()",
             * since an accurate measure of remaining time needs to be calculated. | * since an accurate measure of remaining time needs to be calculated.
*/ | */ | |||
uint64_t getRemainingMicros() const; | uint64_t getRemainingMicros() const; | |||
private: | private: | |||
// Whether or not this operation is subject to a time limit. | // Whether or not time tracking is enabled for this operation. | |||
bool _enabled; | bool _enabled; | |||
            // Point in time at which the time limit is hit. Units of microseconds since the | // Point in time at which the time limit is hit. Units of microseconds since the
// epoch. | // epoch. | |||
uint64_t _targetEpochMicros; | uint64_t _targetEpochMicros; | |||
// Approximate point in time at which the time limit is hit. Units of milliseconds | // Approximate point in time at which the time limit is hit. Units of milliseconds | |||
// since the server process was started. | // since the server process was started. | |||
int64_t _approxTargetServerMillis; | int64_t _approxTargetServerMillis; | |||
} _maxTimeTracker; | } _maxTimeTracker; | |||
End of changes. 9 change blocks. | ||||
14 lines changed or deleted | 26 lines changed or added | |||
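Purely illustrative of the MaxTimeTracker contract declared above (the class is a private member of CurOp, so real callers go through CurOp itself); haveMoreWork() and doSomeWork() are hypothetical stand-ins.

    MaxTimeTracker tracker;
    tracker.setTimeLimit(curTimeMicros64(), 5 * 1000 * 1000);  // 5 second budget
    while (haveMoreWork()) {
        if (tracker.checkTimeLimit()) {
            // the operation exceeded its maxTimeMS budget; abort it
            break;
        }
        // the remaining budget can be forwarded to sub-operations (e.g. a shard getMore)
        uint64_t remaining = tracker.getRemainingMicros();
        (void)remaining;
        doSomeWork();
    }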
cursors.h | cursors.h | |||
---|---|---|---|---|
skipping to change at line 109 | skipping to change at line 109 | |||
}; | }; | |||
typedef boost::shared_ptr<ShardedClientCursor> ShardedClientCursorPtr; | typedef boost::shared_ptr<ShardedClientCursor> ShardedClientCursorPtr; | |||
class CursorCache { | class CursorCache { | |||
public: | public: | |||
static long long TIMEOUT; | static long long TIMEOUT; | |||
typedef map<long long,ShardedClientCursorPtr> MapSharded; | typedef map<long long,ShardedClientCursorPtr> MapSharded; | |||
typedef map<long long,int> MapShardedInt; | ||||
typedef map<long long,string> MapNormal; | typedef map<long long,string> MapNormal; | |||
CursorCache(); | CursorCache(); | |||
~CursorCache(); | ~CursorCache(); | |||
ShardedClientCursorPtr get( long long id ) const; | ShardedClientCursorPtr get( long long id ) const; | |||
void store( ShardedClientCursorPtr cursor ); | int getMaxTimeMS( long long id ) const; | |||
void store( ShardedClientCursorPtr cursor, int maxTimeMS ); | ||||
void updateMaxTimeMS( long long id, int maxTimeMS ); | ||||
void remove( long long id ); | void remove( long long id ); | |||
void storeRef(const std::string& server, long long id, const std::s tring& ns); | void storeRef(const std::string& server, long long id, const std::s tring& ns); | |||
void removeRef( long long id ); | void removeRef( long long id ); | |||
/** @return the server for id or "" */ | /** @return the server for id or "" */ | |||
string getRef( long long id ) const ; | string getRef( long long id ) const ; | |||
/** @return the ns for id or "" */ | /** @return the ns for id or "" */ | |||
std::string getRefNS(long long id) const ; | std::string getRefNS(long long id) const ; | |||
skipping to change at line 139 | skipping to change at line 142 | |||
long long genId(); | long long genId(); | |||
void doTimeouts(); | void doTimeouts(); | |||
void startTimeoutThread(); | void startTimeoutThread(); | |||
private: | private: | |||
mutable mongo::mutex _mutex; | mutable mongo::mutex _mutex; | |||
PseudoRandom _random; | PseudoRandom _random; | |||
// Maps sharded cursor ID to ShardedClientCursorPtr. | ||||
MapSharded _cursors; | MapSharded _cursors; | |||
MapNormal _refs; // Maps cursor ID to shard name | ||||
        MapNormal _refsNS; // Maps cursor ID to namespace | // Maps sharded cursor ID to remaining max time. Value can be any of:
// - the constant "kMaxTimeCursorNoTimeLimit", or | ||||
// - the constant "kMaxTimeCursorTimeLimitExpired", or | ||||
// - a positive integer representing milliseconds of remaining time | ||||
MapShardedInt _cursorsMaxTimeMS; | ||||
// Maps passthrough cursor ID to shard name. | ||||
MapNormal _refs; | ||||
// Maps passthrough cursor ID to namespace. | ||||
MapNormal _refsNS; | ||||
long long _shardedTotal; | long long _shardedTotal; | |||
static const int _myLogLevel; | static const int _myLogLevel; | |||
}; | }; | |||
extern CursorCache cursorCache; | extern CursorCache cursorCache; | |||
} | } | |||
End of changes. 4 change blocks. | ||||
3 lines changed or deleted | 18 lines changed or added | |||
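A hedged fragment showing how mongos might thread the remaining maxTimeMS budget through the cache; the cursor->getId() accessor and the concrete budget values are assumptions.

    // When the cursor is first cached, record how much of the user's budget is left.
    cursorCache.store(cursor, 30 * 1000 /* ms remaining for later getMores */);

    // On a later getMore for the same cursor id:
    int remaining = cursorCache.getMaxTimeMS(cursor->getId());
    // ... run the getMore against the shards with that budget, then write back what is left ...
    cursorCache.updateMaxTimeMS(cursor->getId(), remaining);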
d_logic.h | d_logic.h | |||
---|---|---|---|---|
skipping to change at line 63 | skipping to change at line 63 | |||
bool enabled() const { return _enabled; } | bool enabled() const { return _enabled; } | |||
const string& getConfigServer() const { return _configServer; } | const string& getConfigServer() const { return _configServer; } | |||
void enable( const string& server ); | void enable( const string& server ); | |||
// Initialize sharding state and begin authenticating outgoing conn ections and handling | // Initialize sharding state and begin authenticating outgoing conn ections and handling | |||
// shard versions. If this is not run before sharded operations oc cur auth will not work | // shard versions. If this is not run before sharded operations oc cur auth will not work | |||
// and versions will not be tracked. | // and versions will not be tracked. | |||
static void initialize(const string& server); | static void initialize(const string& server); | |||
void gotShardName( const string& name ); | void gotShardName( const string& name ); | |||
void gotShardHost( string host ); | bool setShardName( const string& name ); // Same as above, does not throw | |||
        string getShardName() { return _shardName; } | string getShardName() { scoped_lock lk(_mutex); return _shardName; }
        string getShardHost() { return _shardHost; } |
/** Reverts back to a state where this mongod is not sharded. */ | /** Reverts back to a state where this mongod is not sharded. */ | |||
void resetShardingState(); | void resetShardingState(); | |||
// versioning support | // versioning support | |||
bool hasVersion( const string& ns ); | bool hasVersion( const string& ns ); | |||
bool hasVersion( const string& ns , ChunkVersion& version ); | bool hasVersion( const string& ns , ChunkVersion& version ); | |||
const ChunkVersion getVersion( const string& ns ) const; | const ChunkVersion getVersion( const string& ns ) const; | |||
skipping to change at line 263 | skipping to change at line 262 | |||
Status doRefreshMetadata( const string& ns, | Status doRefreshMetadata( const string& ns, | |||
const ChunkVersion& reqShardVersion, | const ChunkVersion& reqShardVersion, | |||
bool useRequestedVersion, | bool useRequestedVersion, | |||
ChunkVersion* latestShardVersion ); | ChunkVersion* latestShardVersion ); | |||
bool _enabled; | bool _enabled; | |||
string _configServer; | string _configServer; | |||
string _shardName; | string _shardName; | |||
string _shardHost; | ||||
// protects state below | // protects state below | |||
mutable mongo::mutex _mutex; | mutable mongo::mutex _mutex; | |||
// protects accessing the config server | // protects accessing the config server | |||
// Using a ticket holder so we can have multiple redundant tries at any given time | // Using a ticket holder so we can have multiple redundant tries at any given time | |||
mutable TicketHolder _configServerTickets; | mutable TicketHolder _configServerTickets; | |||
// Map from a namespace into the metadata we need for each collecti on on this shard | // Map from a namespace into the metadata we need for each collecti on on this shard | |||
typedef map<string,CollectionMetadataPtr> CollectionMetadataMap; | typedef map<string,CollectionMetadataPtr> CollectionMetadataMap; | |||
CollectionMetadataMap _collMetadata; | CollectionMetadataMap _collMetadata; | |||
End of changes. 3 change blocks. | ||||
4 lines changed or deleted | 3 lines changed or added | |||
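A short sketch of the non-throwing name initialization; the shardingState global and the incoming name variable are assumptions here.

    if (!shardingState.setShardName(nameFromSetShardVersion)) {
        // a different shard name was already set by another thread; treat as a fatal mismatch
    }
    string name = shardingState.getShardName();   // now reads under the state mutex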
database.h | database.h | |||
---|---|---|---|---|
skipping to change at line 44 | skipping to change at line 44 | |||
#include "mongo/db/namespace_details.h" | #include "mongo/db/namespace_details.h" | |||
#include "mongo/db/storage/extent_manager.h" | #include "mongo/db/storage/extent_manager.h" | |||
#include "mongo/db/storage/record.h" | #include "mongo/db/storage/record.h" | |||
#include "mongo/db/storage_options.h" | #include "mongo/db/storage_options.h" | |||
namespace mongo { | namespace mongo { | |||
class Collection; | class Collection; | |||
class Extent; | class Extent; | |||
class DataFile; | class DataFile; | |||
class IndexCatalog; | ||||
class IndexDetails; | class IndexDetails; | |||
/** | /** | |||
     * Database represents a database | * Database represents a database
* Each database database has its own set of files -- dbname.ns, dbname .0, dbname.1, ... | * Each database database has its own set of files -- dbname.ns, dbname .0, dbname.1, ... | |||
* NOT memory mapped | * NOT memory mapped | |||
*/ | */ | |||
class Database { | class Database { | |||
public: | public: | |||
// you probably need to be in dbHolderMutex when constructing this | // you probably need to be in dbHolderMutex when constructing this | |||
skipping to change at line 134 | skipping to change at line 135 | |||
        const NamespaceIndex& namespaceIndex() const { return _namespaceIndex; } | const NamespaceIndex& namespaceIndex() const { return _namespaceIndex; }
NamespaceIndex& namespaceIndex() { return _namespaceIndex; } | NamespaceIndex& namespaceIndex() { return _namespaceIndex; } | |||
// TODO: do not think this method should exist, so should try and e ncapsulate better | // TODO: do not think this method should exist, so should try and e ncapsulate better | |||
ExtentManager& getExtentManager() { return _extentManager; } | ExtentManager& getExtentManager() { return _extentManager; } | |||
        const ExtentManager& getExtentManager() const { return _extentManager; } | const ExtentManager& getExtentManager() const { return _extentManager; }
Status dropCollection( const StringData& fullns ); | Status dropCollection( const StringData& fullns ); | |||
Collection* createCollection( const StringData& ns, bool capped, const BSONObj* options ); | Collection* createCollection( const StringData& ns, | |||
 | bool capped = false, | |||
 | const BSONObj* options = NULL, | |||
 | bool allocateDefaultSpace = true ); | |||
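With the new default arguments, callers that want an ordinary, uncapped collection can now pass just the namespace. A hedged sketch of the two call styles, assuming a valid Database* named db inside the server (the contents of the options object are illustrative only):

    // Common case: plain collection, defaults for capped/options/allocation.
    Collection* people = db->createCollection( "test.people" );

    // Explicit form, roughly equivalent to the old three-argument call:
    BSONObj options = BSON( "size" << 1024 * 1024 );       // illustrative options
    Collection* log = db->createCollection( "test.log",
                                             true,          // capped
                                             &options,      // options
                                             true );         // allocateDefaultSpace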
/** | /** | |||
* @param ns - this is fully qualified, which is maybe not ideal ??? | * @param ns - this is fully qualified, which is maybe not ideal ??? | |||
*/ | */ | |||
Collection* getCollection( const StringData& ns ); | Collection* getCollection( const StringData& ns ); | |||
Status renameCollection( const StringData& fromNS, const StringData & toNS, bool stayTemp ); | Status renameCollection( const StringData& fromNS, const StringData & toNS, bool stayTemp ); | |||
/** | /** | |||
* @return name of an existing database with same text name but dif ferent | * @return name of an existing database with same text name but dif ferent | |||
* casing, if one exists. Otherwise the empty string is returned. If | * casing, if one exists. Otherwise the empty string is returned. If | |||
* 'duplicates' is specified, it is filled with all duplicate names. | * 'duplicates' is specified, it is filled with all duplicate names. | |||
*/ | */ | |||
static string duplicateUncasedName( bool inholderlockalready, const string &name, const string &path, set< string > *duplicates = 0 ); | static string duplicateUncasedName( bool inholderlockalready, const string &name, const string &path, set< string > *duplicates = 0 ); | |||
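As the comment above explains, the helper reports an existing database whose name differs only by case. A sketch of the intended use, refusing an ambiguous create (locking requirements and the enclosing function are omitted; the error code choice is an assumption for illustration):

    // Inside a function returning Status, with the holder lock already held:
    string clash = Database::duplicateUncasedName( true, "SalesDB", "/data/db" );
    if ( !clash.empty() ) {
        // e.g. "salesdb" already exists, so creating "SalesDB" would be ambiguous
        return Status( ErrorCodes::NamespaceExists,
                       "db already exists with different case: " + clash );
    }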
static Status validateDBName( const StringData& dbname ); | static Status validateDBName( const StringData& dbname ); | |||
const string& getSystemIndexesName() const { return _indexesName; } | ||||
private: | private: | |||
void _clearCollectionCache( const StringData& fullns ); | void _clearCollectionCache( const StringData& fullns ); | |||
void _clearCollectionCache_inlock( const StringData& fullns ); | void _clearCollectionCache_inlock( const StringData& fullns ); | |||
~Database(); // closes files and other cleanup see below. | ~Database(); // closes files and other cleanup see below. | |||
void _addNamespaceToCatalog( const StringData& ns, const BSONObj* options ); | void _addNamespaceToCatalog( const StringData& ns, const BSONObj* options ); | |||
skipping to change at line 202 | skipping to change at line 207 | |||
bool stayTemp ); | bool stayTemp ); | |||
const string _name; // "alleyinsider" | const string _name; // "alleyinsider" | |||
const string _path; // "/data/db" | const string _path; // "/data/db" | |||
NamespaceIndex _namespaceIndex; | NamespaceIndex _namespaceIndex; | |||
ExtentManager _extentManager; | ExtentManager _extentManager; | |||
const string _profileName; // "alleyinsider.system.profile" | const string _profileName; // "alleyinsider.system.profile" | |||
const string _namespacesName; // "alleyinsider.system.namespaces" | const string _namespacesName; // "alleyinsider.system.namespaces" | |||
const string _indexesName; // "alleyinsider.system.indexes" | ||||
const string _extentFreelistName; | const string _extentFreelistName; | |||
CCByLoc _ccByLoc; // use by ClientCursor | CCByLoc _ccByLoc; // use by ClientCursor | |||
RecordStats _recordStats; | RecordStats _recordStats; | |||
int _profile; // 0=off. | int _profile; // 0=off. | |||
int _magic; // used for making sure the object is still loaded in memory | int _magic; // used for making sure the object is still loaded in memory | |||
// TODO: probably shouldn't be a std::map | // TODO: probably shouldn't be a std::map | |||
// TODO: make sure deletes go through | // TODO: make sure deletes go through | |||
// this in some ways is a dupe of _namespaceIndex | // this in some ways is a dupe of _namespaceIndex | |||
// but it points to a much more useful data structure | // but it points to a much more useful data structure | |||
typedef std::map< std::string, Collection* > CollectionMap; | typedef std::map< std::string, Collection* > CollectionMap; | |||
CollectionMap _collections; | CollectionMap _collections; | |||
mutex _collectionLock; | mutex _collectionLock; | |||
friend class Collection; | ||||
friend class NamespaceDetails; | friend class NamespaceDetails; | |||
friend class IndexDetails; | friend class IndexDetails; | |||
friend class IndexCatalog; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 6 change blocks. | ||||
2 lines changed or deleted | 9 lines changed or added | |||
dbclient.h | dbclient.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
#include "mongo/client/redef_macros.h" | #include "mongo/client/redef_macros.h" | |||
#include "mongo/pch.h" | #include "mongo/pch.h" | |||
#include "mongo/client/connpool.h" | #include "mongo/client/connpool.h" | |||
#include "mongo/client/dbclient_rs.h" | #include "mongo/client/dbclient_rs.h" | |||
#include "mongo/client/dbclientcursor.h" | #include "mongo/client/dbclientcursor.h" | |||
#include "mongo/client/dbclientinterface.h" | #include "mongo/client/dbclientinterface.h" | |||
#include "mongo/client/gridfs.h" | #include "mongo/client/gridfs.h" | |||
#include "mongo/client/init.h" | ||||
#include "mongo/client/sasl_client_authenticate.h" | #include "mongo/client/sasl_client_authenticate.h" | |||
#include "mongo/client/syncclusterconnection.h" | #include "mongo/client/syncclusterconnection.h" | |||
#include "mongo/util/net/ssl_options.h" | #include "mongo/util/net/ssl_options.h" | |||
#include "mongo/client/undef_macros.h" | #include "mongo/client/undef_macros.h" | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 1 lines changed or added | |||
dbclient_rs.h | dbclient_rs.h | |||
---|---|---|---|---|
skipping to change at line 225 | skipping to change at line 225 | |||
/** | /** | |||
* this is called whenever the config of any replica set changes | * this is called whenever the config of any replica set changes | |||
* currently only 1 globally | * currently only 1 globally | |||
* asserts if one already exists | * asserts if one already exists | |||
* ownership passes to ReplicaSetMonitor and the hook will actually never be deleted | * ownership passes to ReplicaSetMonitor and the hook will actually never be deleted | |||
*/ | */ | |||
static void setConfigChangeHook( ConfigChangeHook hook ); | static void setConfigChangeHook( ConfigChangeHook hook ); | |||
/** | /** | |||
* Stops all monitoring on replica sets and clears all cached information as well. | * Permanently stops all monitoring on replica sets and clears all cached information | |||
* Note that this does not prevent new monitors from being created afterwards or even | * as well. As a consequence, NEVER call this if you have other threads that have a | |||
* while this is being executed. As a consequence, NEVER call this if you have other | * DBClientReplicaSet instance. | |||
* threads that has a DBClientReplicaSet instance or will create one before this | ||||
* fully terminates as it will cause a deadlock. This is intended for performing cleanups | ||||
* in unit tests. | ||||
* | ||||
* Warning: Make sure that the monitor thread is running, otherwise this can hang | ||||
* indefinitely. | ||||
*/ | */ | |||
static void cleanup(); | static void cleanup(); | |||
~ReplicaSetMonitor(); | ~ReplicaSetMonitor(); | |||
/** @return HostAndPort or throws an exception */ | /** @return HostAndPort or throws an exception */ | |||
HostAndPort getMaster(); | HostAndPort getMaster(); | |||
/** | /** | |||
* notify the monitor that the server has failed | * notify the monitor that the server has failed | |||
End of changes. 1 change blocks. | ||||
15 lines changed or deleted | 5 lines changed or added | |||
dbclientinterface.h | dbclientinterface.h | |||
---|---|---|---|---|
skipping to change at line 620 | skipping to change at line 620 | |||
int options=0); | int options=0); | |||
/** | /** | |||
* Authenticate a user. | * Authenticate a user. | |||
* | * | |||
* The "params" BSONObj should be initialized with some of the fiel ds below. Which fields | * The "params" BSONObj should be initialized with some of the fiel ds below. Which fields | |||
* are required depends on the mechanism, which is mandatory. | * are required depends on the mechanism, which is mandatory. | |||
* | * | |||
* "mechanism": The string name of the sasl mechanism to use. Mandatory. | * "mechanism": The string name of the sasl mechanism to use. Mandatory. | |||
* "user": The string name of the user to authenticate. Mandat ory. | * "user": The string name of the user to authenticate. Mandat ory. | |||
* "userSource": The database target of the auth command, which identifies the location | * "db": The database target of the auth command, which identif ies the location | |||
* of the credential information for the user. May be "$ex ternal" if | * of the credential information for the user. May be "$ex ternal" if | |||
* credential information is stored outside of the mongo cl uster. Mandatory. | * credential information is stored outside of the mongo cl uster. Mandatory. | |||
* "pwd": The password data. | * "pwd": The password data. | |||
* "digestPassword": Boolean, set to true if the "pwd" is undig ested (default). | * "digestPassword": Boolean, set to true if the "pwd" is undig ested (default). | |||
* "serviceName": The GSSAPI service name to use. Defaults to "mongodb". | * "serviceName": The GSSAPI service name to use. Defaults to "mongodb". | |||
* "serviceHostname": The GSSAPI hostname to use. Defaults to the name of the remote | * "serviceHostname": The GSSAPI hostname to use. Defaults to the name of the remote | |||
* host. | * host. | |||
* | * | |||
* Other fields in "params" are silently ignored. | * Other fields in "params" are silently ignored. | |||
* | * | |||
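Given the field list above (note the rename of "userSource" to "db"), an authentication parameter object for MONGODB-CR could be assembled as in the following sketch; conn is an assumed, already-connected DBClientConnection, and auth() throws on failure:

    BSONObj params = BSON( "mechanism"      << "MONGODB-CR"
                        << "user"           << "appUser"
                        << "db"             << "admin"     // was "userSource" before this change
                        << "pwd"            << "secret"
                        << "digestPassword" << true );
    conn.auth( params );   // throws a DBException if authentication fails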
skipping to change at line 984 | skipping to change at line 984 | |||
virtual void _auth(const BSONObj& params); | virtual void _auth(const BSONObj& params); | |||
/** | /** | |||
* Use the MONGODB-CR protocol to authenticate as "username" against the database "dbname", | * Use the MONGODB-CR protocol to authenticate as "username" against the database "dbname", | |||
* with the given password. If digestPassword is false, the password is assumed to be | * with the given password. If digestPassword is false, the password is assumed to be | |||
* pre-digested. Returns false on failure, and sets "errmsg". | * pre-digested. Returns false on failure, and sets "errmsg". | |||
*/ | */ | |||
bool _authMongoCR(const string &dbname, | bool _authMongoCR(const string &dbname, | |||
const string &username, | const string &username, | |||
const string &pwd, | const string &pwd, | |||
string& errmsg, | BSONObj *info, | |||
bool digestPassword); | bool digestPassword); | |||
/** | /** | |||
* Use the MONGODB-X509 protocol to authenticate as "username". The certificate details | * Use the MONGODB-X509 protocol to authenticate as "username". The certificate details | |||
* have already been communicated automatically as part of the connect call. | * have already been communicated automatically as part of the connect call. | |||
* Returns false on failure and set "errmsg". | * Returns false on failure and set "errmsg". | |||
*/ | */ | |||
bool _authX509(const string&dbname, | bool _authX509(const string&dbname, | |||
const string &username, | const string &username, | |||
string& errmsg); | BSONObj *info); | |||
private: | private: | |||
enum QueryOptions _cachedAvailableOptions; | enum QueryOptions _cachedAvailableOptions; | |||
bool _haveCachedAvailableOptions; | bool _haveCachedAvailableOptions; | |||
}; | }; | |||
/** | /** | |||
abstract class that implements the core db operations | abstract class that implements the core db operations | |||
*/ | */ | |||
class DBClientBase : public DBClientWithCommands, public DBConnector { | class DBClientBase : public DBClientWithCommands, public DBConnector { | |||
protected: | protected: | |||
static AtomicInt64 ConnectionIdSequence; | static AtomicInt64 ConnectionIdSequence; | |||
long long _connectionId; // unique connection id for this connection | long long _connectionId; // unique connection id for this connection | |||
WriteConcern _writeConcern; | WriteConcern _writeConcern; | |||
int _minWireVersion; | ||||
int _maxWireVersion; | ||||
public: | public: | |||
static const uint64_t INVALID_SOCK_CREATION_TIME; | static const uint64_t INVALID_SOCK_CREATION_TIME; | |||
DBClientBase() { | DBClientBase() { | |||
_writeConcern = W_NORMAL; | _writeConcern = W_NORMAL; | |||
_connectionId = ConnectionIdSequence.fetchAndAdd(1); | _connectionId = ConnectionIdSequence.fetchAndAdd(1); | |||
_minWireVersion = _maxWireVersion = 0; | ||||
} | } | |||
long long getConnectionId() const { return _connectionId; } | long long getConnectionId() const { return _connectionId; } | |||
WriteConcern getWriteConcern() const { return _writeConcern; } | WriteConcern getWriteConcern() const { return _writeConcern; } | |||
void setWriteConcern( WriteConcern w ) { _writeConcern = w; } | void setWriteConcern( WriteConcern w ) { _writeConcern = w; } | |||
void setWireVersions( int minWireVersion, int maxWireVersion ){ | ||||
_minWireVersion = minWireVersion; | ||||
_maxWireVersion = maxWireVersion; | ||||
} | ||||
int getMinWireVersion() { return _minWireVersion; } | ||||
int getMaxWireVersion() { return _maxWireVersion; } | ||||
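The new wire-version members are plain cached values on the connection. A short sketch of recording them from an isMaster reply and gating a feature on the advertised range (the reply field names are taken from the server's isMaster response and are assumptions here):

    // Cache the wire-version range the server reported during the handshake.
    void recordWireVersions( DBClientBase& conn, const BSONObj& isMasterReply ) {
        conn.setWireVersions( isMasterReply["minWireVersion"].numberInt(),
                              isMasterReply["maxWireVersion"].numberInt() );
    }

    // Later: only use a protocol feature the server actually speaks.
    bool supportsVersion( DBClientBase& conn, int required ) {
        return conn.getMinWireVersion() <= required
            && required <= conn.getMaxWireVersion();
    }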
/** send a query to the database. | /** send a query to the database. | |||
@param ns namespace to query, format is <dbname>.<collectname>[.<collectname>]* | @param ns namespace to query, format is <dbname>.<collectname>[.<collectname>]* | |||
@param query query to perform on the collection. this is a BSONObj (binary JSON) | @param query query to perform on the collection. this is a BSONObj (binary JSON) | |||
You may format as | You may format as | |||
{ query: { ... }, orderby: { ... } } | { query: { ... }, orderby: { ... } } | |||
to specify a sort order. | to specify a sort order. | |||
@param nToReturn n to return (i.e., limit). 0 = unlimited | @param nToReturn n to return (i.e., limit). 0 = unlimited | |||
@param nToSkip start with the nth item | @param nToSkip start with the nth item | |||
@param fieldsToReturn optional template of which fields to select. if unspecified, returns all fields | @param fieldsToReturn optional template of which fields to select. if unspecified, returns all fields | |||
@param queryOptions see options enum at top of this file | @param queryOptions see options enum at top of this file | |||
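Putting those parameters together, a typical call that filters, sorts, skips and limits might look like the following sketch (conn is an assumed, connected DBClientConnection):

    // Find people with age > 30, sorted by name, skipping 10 and returning at most 20.
    BSONObj q = BSON( "query"   << BSON( "age" << BSON( "$gt" << 30 ) )
                   << "orderby" << BSON( "name" << 1 ) );
    auto_ptr<DBClientCursor> cursor = conn.query( "test.people", q,
                                                  20,    // nToReturn (limit)
                                                  10 );  // nToSkip
    while ( cursor->more() ) {
        BSONObj doc = cursor->next();
        // process doc ...
    }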
End of changes. 6 change blocks. | ||||
4 lines changed or deleted | 15 lines changed or added | |||
dbhelpers.h | dbhelpers.h | |||
---|---|---|---|---|
skipping to change at line 64 | skipping to change at line 64 | |||
class RemoveSaver; | class RemoveSaver; | |||
/* ensure the specified index exists. | /* ensure the specified index exists. | |||
@param keyPattern key pattern, e.g., { ts : 1 } | @param keyPattern key pattern, e.g., { ts : 1 } | |||
@param name index name, e.g., "name_1" | @param name index name, e.g., "name_1" | |||
This method can be a little (not much) cpu-slow, so you may wish to use | This method can be a little (not much) cpu-slow, so you may wish to use | |||
OCCASIONALLY ensureIndex(...); | OCCASIONALLY ensureIndex(...); | |||
Note: use ensureHaveIdIndex() for the _id index: it is faster. | ||||
Note: does nothing if collection does not yet exist. | Note: does nothing if collection does not yet exist. | |||
*/ | */ | |||
static void ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name); | static void ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name); | |||
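For illustration, a call matching the documented parameters (key pattern, uniqueness, index name) might look like this sketch; it must run in the usual database context and lock, which are not shown:

    // Ensure a unique ascending index on "email" for test.users.
    Helpers::ensureIndex( "test.users",
                          BSON( "email" << 1 ),   // keyPattern
                          true,                   // unique
                          "email_1" );            // index name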
/* fetch a single object from collection ns that matches query. | /* fetch a single object from collection ns that matches query. | |||
set your db SavedContext first. | set your db SavedContext first. | |||
@param query - the query to perform. note this is the low level portion of query so "orderby : ..." | @param query - the query to perform. note this is the low level portion of query so "orderby : ..." | |||
won't work. | won't work. | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 0 lines changed or added | |||
document_source.h | document_source.h | |||
---|---|---|---|---|
skipping to change at line 63 | skipping to change at line 63 | |||
class Document; | class Document; | |||
class Expression; | class Expression; | |||
class ExpressionFieldPath; | class ExpressionFieldPath; | |||
class ExpressionObject; | class ExpressionObject; | |||
class DocumentSourceLimit; | class DocumentSourceLimit; | |||
class DocumentSource : public IntrusiveCounterUnsigned { | class DocumentSource : public IntrusiveCounterUnsigned { | |||
public: | public: | |||
virtual ~DocumentSource() {} | virtual ~DocumentSource() {} | |||
/** | ||||
Set the step for a user-specified pipeline step. | ||||
The step is used for diagnostics. | ||||
@param step step number 0 to n. | ||||
*/ | ||||
void setPipelineStep(int step); | ||||
/** | ||||
Get the user-specified pipeline step. | ||||
@returns the step number, or -1 if it has never been set | ||||
*/ | ||||
int getPipelineStep() const; | ||||
/** Returns the next Document if there is one or boost::none if at EOF. | /** Returns the next Document if there is one or boost::none if at EOF. | |||
* Subclasses must call pExpCtx->checkForInterupt(). | * Subclasses must call pExpCtx->checkForInterupt(). | |||
*/ | */ | |||
virtual boost::optional<Document> getNext() = 0; | virtual boost::optional<Document> getNext() = 0; | |||
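Because getNext() returns boost::none at end-of-stream, consumers drain a source with a simple loop; a minimal sketch:

    // Drain a DocumentSource until EOF, then let it release its resources.
    void drain( const intrusive_ptr<DocumentSource>& source ) {
        while ( boost::optional<Document> doc = source->getNext() ) {
            // use *doc ...
        }
        source->dispose();
    }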
/** | /** | |||
* Inform the source that it is no longer needed and may release its resources. After | * Inform the source that it is no longer needed and may release its resources. After | |||
* dispose() is called the source must still be able to handle iteration requests, but may | * dispose() is called the source must still be able to handle iteration requests, but may | |||
* become eof(). | * become eof(). | |||
* NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will | * NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will | |||
skipping to change at line 202 | skipping to change at line 186 | |||
/* | /* | |||
Most DocumentSources have an underlying source they get their data | Most DocumentSources have an underlying source they get their data | |||
from. This is a convenience for them. | from. This is a convenience for them. | |||
The default implementation of setSource() sets this; if you don't | The default implementation of setSource() sets this; if you don't | |||
need a source, override that to verify(). The default is to | need a source, override that to verify(). The default is to | |||
verify() if this has already been set. | verify() if this has already been set. | |||
*/ | */ | |||
DocumentSource *pSource; | DocumentSource *pSource; | |||
/* | ||||
The zero-based user-specified pipeline step. Used for diagnostics. | ||||
Will be set to -1 for artificial pipeline steps that were not part | ||||
of the original user specification. | ||||
*/ | ||||
int step; | ||||
intrusive_ptr<ExpressionContext> pExpCtx; | intrusive_ptr<ExpressionContext> pExpCtx; | |||
/* | ||||
for explain: # of rows returned by this source | ||||
This is *not* unsigned so it can be passed to BSONObjBuilder.append(). | ||||
*/ | ||||
long long nRowsOut; | ||||
private: | private: | |||
/** | /** | |||
* Create a Value that represents the document source. | * Create a Value that represents the document source. | |||
* | * | |||
* This is used by the default implementation of serializeToArray() to add this object | * This is used by the default implementation of serializeToArray() to add this object | |||
* to a pipeline being serialized. Returning a missing() Value results in no entry | * to a pipeline being serialized. Returning a missing() Value results in no entry | |||
* being added to the array for this stage (DocumentSource). | * being added to the array for this stage (DocumentSource). | |||
*/ | */ | |||
virtual Value serialize(bool explain = false) const = 0; | virtual Value serialize(bool explain = false) const = 0; | |||
}; | }; | |||
/** This class marks DocumentSources that should be split between the router and the shards | /** This class marks DocumentSources that should be split between the merger and the shards. | |||
* See Pipeline::splitForSharded() for details | * See Pipeline::Optimizations::Sharded::findSplitPoint() for details. | |||
* | ||||
* TODO inheriting from DocumentSource here was a mistake. It should be separate. | ||||
*/ | */ | |||
class SplittableDocumentSource : public DocumentSource { | class SplittableDocumentSource { | |||
public: | public: | |||
/** returns a source to be run on the shards. | /** returns a source to be run on the shards. | |||
* if NULL, don't run on shards | * if NULL, don't run on shards | |||
*/ | */ | |||
virtual intrusive_ptr<DocumentSource> getShardSource() = 0; | virtual intrusive_ptr<DocumentSource> getShardSource() = 0; | |||
/** returns a source that combines results from shards. | /** returns a source that combines results from shards. | |||
* if NULL, don't run on router | * if NULL, don't run on merger | |||
*/ | */ | |||
virtual intrusive_ptr<DocumentSource> getRouterSource() = 0; | virtual intrusive_ptr<DocumentSource> getMergeSource() = 0; | |||
protected: | protected: | |||
SplittableDocumentSource(intrusive_ptr<ExpressionContext> ctx) :DocumentSource(ctx) {} | // It is invalid to delete through a SplittableDocumentSource-typed pointer. | |||
 | virtual ~SplittableDocumentSource() {} | |||
}; | }; | |||
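A stage that opts into splitting implements both halves; the merger-side half replaces the old "router" naming. A hedged sketch of what an implementing stage's overrides convey (the class name is hypothetical and the remaining DocumentSource virtuals are omitted; this mirrors the behaviour $limit documents later in this header):

    // Hypothetical stage that runs on every shard and again on the merger.
    class DocumentSourceExampleSplit : public DocumentSource,
                                       public SplittableDocumentSource {
    public:
        // A copy of this stage runs on each shard to reduce data early...
        virtual intrusive_ptr<DocumentSource> getShardSource() { return this; }
        // ...and the same stage runs on the node merging the shard streams.
        virtual intrusive_ptr<DocumentSource> getMergeSource() { return this; }
        // (getNext(), serialize(), etc. omitted from this sketch)
    };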
/** This class marks DocumentSources which need mongod-specific functionality. | /** This class marks DocumentSources which need mongod-specific functionality. | |||
* It causes a MongodInterface to be injected when in a mongod and prevents mongos from | * It causes a MongodInterface to be injected when in a mongod and prevents mongos from | |||
* merging pipelines containing this stage. | * merging pipelines containing this stage. | |||
*/ | */ | |||
class DocumentSourceNeedsMongod { | class DocumentSourceNeedsMongod { | |||
public: | public: | |||
// Wraps mongod-specific functions to allow linking into mongos. | // Wraps mongod-specific functions to allow linking into mongos. | |||
class MongodInterface { | class MongodInterface { | |||
skipping to change at line 332 | skipping to change at line 301 | |||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual boost::optional<Document> getNext(); | virtual boost::optional<Document> getNext(); | |||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
virtual void setSource(DocumentSource *pSource); | virtual void setSource(DocumentSource *pSource); | |||
virtual bool isValidInitialSource() const { return true; } | virtual bool isValidInitialSource() const { return true; } | |||
/* convenient shorthand for a commonly used type */ | /* convenient shorthand for a commonly used type */ | |||
typedef vector<Strategy::CommandResult> ShardOutput; | typedef vector<Strategy::CommandResult> ShardOutput; | |||
/** Returns the result arrays from shards using the 2.4 protocol. | ||||
* Call this instead of getNext() if you want access to the raw streams. | ||||
* This method should only be called at most once. | ||||
*/ | ||||
vector<BSONArray> getArrays(); | ||||
/** | /** | |||
Create a DocumentSource that wraps the output of many shards | Create a DocumentSource that wraps the output of many shards | |||
@param shardOutput output from the individual shards | @param shardOutput output from the individual shards | |||
@param pExpCtx the expression context for the pipeline | @param pExpCtx the expression context for the pipeline | |||
@returns the newly created DocumentSource | @returns the newly created DocumentSource | |||
*/ | */ | |||
static intrusive_ptr<DocumentSourceCommandShards> create( | static intrusive_ptr<DocumentSourceCommandShards> create( | |||
const ShardOutput& shardOutput, | const ShardOutput& shardOutput, | |||
const intrusive_ptr<ExpressionContext>& pExpCtx); | const intrusive_ptr<ExpressionContext>& pExpCtx); | |||
skipping to change at line 479 | skipping to change at line 454 | |||
Yield the cursor sometimes. | Yield the cursor sometimes. | |||
If the state of the world changed during the yield such that we | If the state of the world changed during the yield such that we | |||
are unable to continue execution of the query, this will release the | are unable to continue execution of the query, this will release the | |||
client cursor, and throw an error. NOTE This differs from the | client cursor, and throw an error. NOTE This differs from the | |||
behavior of most other operations, see SERVER-2454. | behavior of most other operations, see SERVER-2454. | |||
*/ | */ | |||
void yieldSometimes(ClientCursor* cursor); | void yieldSometimes(ClientCursor* cursor); | |||
}; | }; | |||
/* | class DocumentSourceGroup : public DocumentSource | |||
This contains all the basic mechanics for filtering a stream of | , public SplittableDocumentSource { | |||
Documents, except for the actual predicate evaluation itself. This was | ||||
factored out so we could create DocumentSources that use both Matcher | ||||
style predicates as well as full Expressions. | ||||
*/ | ||||
class DocumentSourceFilterBase : | ||||
public DocumentSource { | ||||
public: | ||||
// virtuals from DocumentSource | ||||
virtual boost::optional<Document> getNext(); | ||||
/** | ||||
Create a BSONObj suitable for Matcher construction. | ||||
This is used after filter analysis has moved as many filters to | ||||
as early a point as possible in the document processing pipeline. | ||||
See db/Matcher.h and the associated documentation for the format. | ||||
This conversion is used to move back to the low-level find() | ||||
Cursor mechanism. | ||||
@param pBuilder the builder to write to | ||||
*/ | ||||
virtual void toMatcherBson(BSONObjBuilder *pBuilder) const = 0; | ||||
protected: | ||||
DocumentSourceFilterBase( | ||||
const intrusive_ptr<ExpressionContext> &pExpCtx); | ||||
/** | ||||
Test the given document against the predicate and report if it | ||||
should be accepted or not. | ||||
@param pDocument the document to test | ||||
@returns true if the document matches the filter, false otherwise | ||||
*/ | ||||
virtual bool accept(const Document& pDocument) const = 0; | ||||
private: | ||||
bool unstarted; | ||||
}; | ||||
class DocumentSourceGroup : | ||||
public SplittableDocumentSource { | ||||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual boost::optional<Document> getNext(); | virtual boost::optional<Document> getNext(); | |||
virtual const char *getSourceName() const; | virtual const char *getSourceName() const; | |||
virtual void optimize(); | ||||
virtual GetDepsReturn getDependencies(set<string>& deps) const; | virtual GetDepsReturn getDependencies(set<string>& deps) const; | |||
virtual void dispose(); | virtual void dispose(); | |||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
/** | /** | |||
Create a new grouping DocumentSource. | Create a new grouping DocumentSource. | |||
@param pExpCtx the expression context for the pipeline | @param pExpCtx the expression context for the pipeline | |||
@returns the DocumentSource | @returns the DocumentSource | |||
*/ | */ | |||
skipping to change at line 587 | skipping to change at line 521 | |||
@param pBsonElement the BSONElement that defines the group | @param pBsonElement the BSONElement that defines the group | |||
@param pExpCtx the expression context | @param pExpCtx the expression context | |||
@returns the grouping DocumentSource | @returns the grouping DocumentSource | |||
*/ | */ | |||
static intrusive_ptr<DocumentSource> createFromBson( | static intrusive_ptr<DocumentSource> createFromBson( | |||
BSONElement elem, | BSONElement elem, | |||
const intrusive_ptr<ExpressionContext> &pExpCtx); | const intrusive_ptr<ExpressionContext> &pExpCtx); | |||
// Virtuals for SplittableDocumentSource | // Virtuals for SplittableDocumentSource | |||
virtual intrusive_ptr<DocumentSource> getShardSource(); | virtual intrusive_ptr<DocumentSource> getShardSource(); | |||
virtual intrusive_ptr<DocumentSource> getRouterSource(); | virtual intrusive_ptr<DocumentSource> getMergeSource(); | |||
static const char groupName[]; | static const char groupName[]; | |||
private: | private: | |||
DocumentSourceGroup(const intrusive_ptr<ExpressionContext> &pExpCtx ); | DocumentSourceGroup(const intrusive_ptr<ExpressionContext> &pExpCtx ); | |||
/// Spill groups map to disk and returns an iterator to the file. | /// Spill groups map to disk and returns an iterator to the file. | |||
shared_ptr<Sorter<Value, Value>::Iterator> spill(); | shared_ptr<Sorter<Value, Value>::Iterator> spill(); | |||
// Only used by spill. Would be function-local if that were legal in C++03. | // Only used by spill. Would be function-local if that were legal in C++03. | |||
skipping to change at line 637 | skipping to change at line 571 | |||
vector<string> vFieldName; | vector<string> vFieldName; | |||
vector<intrusive_ptr<Accumulator> (*)()> vpAccumulatorFactory; | vector<intrusive_ptr<Accumulator> (*)()> vpAccumulatorFactory; | |||
vector<intrusive_ptr<Expression> > vpExpression; | vector<intrusive_ptr<Expression> > vpExpression; | |||
Document makeDocument(const Value& id, const Accumulators& accums, bool mergeableOutput); | Document makeDocument(const Value& id, const Accumulators& accums, bool mergeableOutput); | |||
bool _doingMerge; | bool _doingMerge; | |||
bool _spilled; | bool _spilled; | |||
const bool _extSortAllowed; | const bool _extSortAllowed; | |||
const int _maxMemoryUsageBytes; | const int _maxMemoryUsageBytes; | |||
boost::scoped_ptr<Variables> _variables; | ||||
// only used when !_spilled | // only used when !_spilled | |||
GroupsMap::iterator groupsIterator; | GroupsMap::iterator groupsIterator; | |||
// only used when _spilled | // only used when _spilled | |||
scoped_ptr<Sorter<Value, Value>::Iterator> _sorterIterator; | scoped_ptr<Sorter<Value, Value>::Iterator> _sorterIterator; | |||
pair<Value, Value> _firstPartOfNextGroup; | pair<Value, Value> _firstPartOfNextGroup; | |||
Value _currentId; | Value _currentId; | |||
Accumulators _currentAccumulators; | Accumulators _currentAccumulators; | |||
}; | }; | |||
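For reference, the BSONElement handed to DocumentSourceGroup::createFromBson is the value of a user's $group stage. A sketch of building such a specification with the BSON macro (pExpCtx is an assumed, already-constructed ExpressionContext; the field names follow the aggregation $group syntax):

    // The stage {$group: {_id: "$cust_id", total: {$sum: "$amount"}}}.
    BSONObj spec = BSON( "$group" << BSON( "_id"   << "$cust_id"
                                        << "total" << BSON( "$sum" << "$amount" ) ) );
    intrusive_ptr<DocumentSource> group =
        DocumentSourceGroup::createFromBson( spec.firstElement(), pExpCtx );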
class DocumentSourceMatch : | class DocumentSourceMatch : public DocumentSource { | |||
public DocumentSourceFilterBase { | ||||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual boost::optional<Document> getNext(); | ||||
virtual const char *getSourceName() const; | virtual const char *getSourceName() const; | |||
virtual bool coalesce(const intrusive_ptr<DocumentSource>& nextSource); | ||||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
/** | /** | |||
Create a filter. | Create a filter. | |||
@param pBsonElement the raw BSON specification for the filter | @param pBsonElement the raw BSON specification for the filter | |||
@returns the filter | @returns the filter | |||
*/ | */ | |||
static intrusive_ptr<DocumentSource> createFromBson( | static intrusive_ptr<DocumentSource> createFromBson( | |||
BSONElement elem, | BSONElement elem, | |||
const intrusive_ptr<ExpressionContext> &pCtx); | const intrusive_ptr<ExpressionContext> &pCtx); | |||
/** | /// Returns the query in Matcher syntax. | |||
Create a BSONObj suitable for Matcher construction. | BSONObj getQuery() const; | |||
This is used after filter analysis has moved as many filters to | ||||
as early a point as possible in the document processing pipeline. | ||||
See db/Matcher.h and the associated documentation for the format. | ||||
This conversion is used to move back to the low-level find() | ||||
Cursor mechanism. | ||||
@param pBuilder the builder to write to | ||||
*/ | ||||
void toMatcherBson(BSONObjBuilder *pBuilder) const; | ||||
static const char matchName[]; | static const char matchName[]; | |||
// virtuals from DocumentSourceFilterBase | ||||
virtual bool accept(const Document& pDocument) const; | ||||
/** Returns the portion of the match that can safely be promoted to before a $redact. | /** Returns the portion of the match that can safely be promoted to before a $redact. | |||
* If this returns an empty BSONObj, no part of this match may safely be promoted. | * If this returns an empty BSONObj, no part of this match may safely be promoted. | |||
* | * | |||
* To be safe to promote, removing a field from a document to be matched must not cause | * To be safe to promote, removing a field from a document to be matched must not cause | |||
* that document to be accepted when it would otherwise be rejected. As an example, | * that document to be accepted when it would otherwise be rejected. As an example, | |||
* {name: {$ne: "bob smith"}} accepts documents without a name field, which means that | * {name: {$ne: "bob smith"}} accepts documents without a name field, which means that | |||
* running this filter before a redact that would remove the name field would leak | * running this filter before a redact that would remove the name field would leak | |||
* information. On the other hand, {age: {$gt:5}} is ok because it doesn't accept documents | * information. On the other hand, {age: {$gt:5}} is ok because it doesn't accept documents | |||
* that have had their age field removed. | * that have had their age field removed. | |||
*/ | */ | |||
BSONObj redactSafePortion() const; | BSONObj redactSafePortion() const; | |||
private: | private: | |||
DocumentSourceMatch(const BSONObj &query, | DocumentSourceMatch(const BSONObj &query, | |||
const intrusive_ptr<ExpressionContext> &pExpCtx); | const intrusive_ptr<ExpressionContext> &pExpCtx); | |||
Matcher matcher; | scoped_ptr<Matcher> matcher; | |||
}; | }; | |||
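The redact-safe reasoning above is easiest to see with a concrete predicate. A hedged sketch using the createFromBson interface shown above (pExpCtx is assumed to exist; the comments state the expected outcome, following the documentation rather than a verified run):

    // {age: {$gt: 5}} may be promoted before a $redact; {name: {$ne: "bob smith"}} may not,
    // because the $ne clause matches documents that lack the field entirely.
    BSONObj spec = BSON( "$match" << BSON( "age"  << BSON( "$gt" << 5 )
                                        << "name" << BSON( "$ne" << "bob smith" ) ) );
    intrusive_ptr<DocumentSource> stage =
        DocumentSourceMatch::createFromBson( spec.firstElement(), pExpCtx );
    BSONObj safe =
        static_cast<DocumentSourceMatch*>( stage.get() )->redactSafePortion();
    // 'safe' would be expected to keep only the age clause.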
class DocumentSourceMergeCursors : | class DocumentSourceMergeCursors : | |||
public DocumentSource { | public DocumentSource { | |||
public: | public: | |||
typedef vector<pair<ConnectionString, CursorId> > CursorIds; | typedef vector<pair<ConnectionString, CursorId> > CursorIds; | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
boost::optional<Document> getNext(); | boost::optional<Document> getNext(); | |||
virtual void setSource(DocumentSource *pSource); | virtual void setSource(DocumentSource *pSource); | |||
skipping to change at line 725 | skipping to change at line 648 | |||
static intrusive_ptr<DocumentSource> createFromBson( | static intrusive_ptr<DocumentSource> createFromBson( | |||
BSONElement elem, | BSONElement elem, | |||
const intrusive_ptr<ExpressionContext> &pExpCtx); | const intrusive_ptr<ExpressionContext> &pExpCtx); | |||
static intrusive_ptr<DocumentSource> create( | static intrusive_ptr<DocumentSource> create( | |||
const CursorIds& cursorIds, | const CursorIds& cursorIds, | |||
const intrusive_ptr<ExpressionContext> &pExpCtx); | const intrusive_ptr<ExpressionContext> &pExpCtx); | |||
static const char name[]; | static const char name[]; | |||
/** Returns non-owning pointers to cursors managed by this stage. | ||||
* Call this instead of getNext() if you want access to the raw st | ||||
reams. | ||||
* This method should only be called at most once. | ||||
*/ | ||||
vector<DBClientCursor*> getCursors(); | ||||
private: | private: | |||
struct CursorAndConnection { | struct CursorAndConnection { | |||
CursorAndConnection(ConnectionString host, NamespaceString ns, CursorId id); | CursorAndConnection(ConnectionString host, NamespaceString ns, CursorId id); | |||
ScopedDbConnection connection; | ScopedDbConnection connection; | |||
DBClientCursor cursor; | DBClientCursor cursor; | |||
}; | }; | |||
// using list to enable removing arbitrary elements | // using list to enable removing arbitrary elements | |||
typedef list<boost::shared_ptr<CursorAndConnection> > Cursors; | typedef list<boost::shared_ptr<CursorAndConnection> > Cursors; | |||
DocumentSourceMergeCursors( | DocumentSourceMergeCursors( | |||
const CursorIds& cursorIds, | const CursorIds& cursorIds, | |||
const intrusive_ptr<ExpressionContext> &pExpCtx); | const intrusive_ptr<ExpressionContext> &pExpCtx); | |||
// Converts _cursorIds into active _cursors. | ||||
void start(); | ||||
// This is the description of cursors to merge. | // This is the description of cursors to merge. | |||
const CursorIds _cursorIds; | const CursorIds _cursorIds; | |||
// These are the actual cursors we are merging. Created lazily. | // These are the actual cursors we are merging. Created lazily. | |||
Cursors _cursors; | Cursors _cursors; | |||
Cursors::iterator _currentCursor; | Cursors::iterator _currentCursor; | |||
bool _unstarted; | bool _unstarted; | |||
}; | }; | |||
class DocumentSourceOut : public SplittableDocumentSource | class DocumentSourceOut : public DocumentSource | |||
, public SplittableDocumentSource | ||||
, public DocumentSourceNeedsMongod { | , public DocumentSourceNeedsMongod { | |||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual ~DocumentSourceOut(); | virtual ~DocumentSourceOut(); | |||
virtual boost::optional<Document> getNext(); | virtual boost::optional<Document> getNext(); | |||
virtual const char *getSourceName() const; | virtual const char *getSourceName() const; | |||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
// Virtuals for SplittableDocumentSource | // Virtuals for SplittableDocumentSource | |||
virtual intrusive_ptr<DocumentSource> getShardSource() { return NULL; } | virtual intrusive_ptr<DocumentSource> getShardSource() { return NULL; } | |||
virtual intrusive_ptr<DocumentSource> getRouterSource() { return this; } | virtual intrusive_ptr<DocumentSource> getMergeSource() { return this; } | |||
const NamespaceString& getOutputNs() const { return _outputNs; } | const NamespaceString& getOutputNs() const { return _outputNs; } | |||
/** | /** | |||
Create a document source for output and pass-through. | Create a document source for output and pass-through. | |||
This can be put anywhere in a pipeline and will store content as | This can be put anywhere in a pipeline and will store content as | |||
well as pass it on. | well as pass it on. | |||
@param pBsonElement the raw BSON specification for the source | @param pBsonElement the raw BSON specification for the source | |||
skipping to change at line 831 | skipping to change at line 764 | |||
static const char projectName[]; | static const char projectName[]; | |||
/** projection as specified by the user */ | /** projection as specified by the user */ | |||
BSONObj getRaw() const { return _raw; } | BSONObj getRaw() const { return _raw; } | |||
private: | private: | |||
DocumentSourceProject(const intrusive_ptr<ExpressionContext>& pExpCtx, | DocumentSourceProject(const intrusive_ptr<ExpressionContext>& pExpCtx, | |||
const intrusive_ptr<ExpressionObject>& exprObj); | const intrusive_ptr<ExpressionObject>& exprObj); | |||
// configuration state | // configuration state | |||
boost::scoped_ptr<Variables> _variables; | ||||
intrusive_ptr<ExpressionObject> pEO; | intrusive_ptr<ExpressionObject> pEO; | |||
BSONObj _raw; | BSONObj _raw; | |||
#if defined(_DEBUG) | #if defined(_DEBUG) | |||
// this is used in DEBUG builds to ensure we are compatible | // this is used in DEBUG builds to ensure we are compatible | |||
Projection _simpleProjection; | Projection _simpleProjection; | |||
#endif | #endif | |||
}; | }; | |||
class DocumentSourceRedact : | class DocumentSourceRedact : | |||
skipping to change at line 858 | skipping to change at line 792 | |||
static intrusive_ptr<DocumentSource> createFromBson( | static intrusive_ptr<DocumentSource> createFromBson( | |||
BSONElement elem, | BSONElement elem, | |||
const intrusive_ptr<ExpressionContext>& expCtx); | const intrusive_ptr<ExpressionContext>& expCtx); | |||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
private: | private: | |||
DocumentSourceRedact(const intrusive_ptr<ExpressionContext>& expCtx , | DocumentSourceRedact(const intrusive_ptr<ExpressionContext>& expCtx , | |||
const intrusive_ptr<Expression>& previsit); | const intrusive_ptr<Expression>& previsit); | |||
boost::optional<Document> redactObject(const Variables& in); | ||||
Value redactValue(const Variables& vars, const Value& in); | ||||
// These both work over _variables | ||||
boost::optional<Document> redactObject(); // redacts CURRENT | ||||
Value redactValue(const Value& in); | ||||
Variables::Id _currentId; | ||||
boost::scoped_ptr<Variables> _variables; | ||||
intrusive_ptr<Expression> _expression; | intrusive_ptr<Expression> _expression; | |||
}; | }; | |||
class DocumentSourceSort : | class DocumentSourceSort : public DocumentSource | |||
public SplittableDocumentSource { | , public SplittableDocumentSource { | |||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual boost::optional<Document> getNext(); | virtual boost::optional<Document> getNext(); | |||
virtual const char *getSourceName() const; | virtual const char *getSourceName() const; | |||
virtual void serializeToArray(vector<Value>& array, bool explain = false) const; | virtual void serializeToArray(vector<Value>& array, bool explain = false) const; | |||
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | |||
virtual void dispose(); | virtual void dispose(); | |||
virtual GetDepsReturn getDependencies(set<string>& deps) const; | virtual GetDepsReturn getDependencies(set<string>& deps) const; | |||
// Virtuals for SplittableDocumentSource | virtual intrusive_ptr<DocumentSource> getShardSource(); | |||
// All work for sort is done in router currently if there is no limit. | virtual intrusive_ptr<DocumentSource> getMergeSource(); | |||
// If there is a limit, the $sort/$limit combination is performed on the | ||||
// shards, then the results are resorted and limited on mongos | ||||
virtual intrusive_ptr<DocumentSource> getShardSource() { return limitSrc ? this : NULL; } | ||||
virtual intrusive_ptr<DocumentSource> getRouterSource() { return this; } | ||||
/** | /** | |||
Add sort key field. | Add sort key field. | |||
Adds a sort key field to the key being built up. A concatenated | Adds a sort key field to the key being built up. A concatenated | |||
key is built up by calling this repeatedly. | key is built up by calling this repeatedly. | |||
@param fieldPath the field path to the key component | @param fieldPath the field path to the key component | |||
@param ascending if true, use the key for an ascending sort, | @param ascending if true, use the key for an ascending sort, | |||
otherwise, use it for descending | otherwise, use it for descending | |||
skipping to change at line 942 | skipping to change at line 876 | |||
/* | /* | |||
Before returning anything, this source must fetch everything from | Before returning anything, this source must fetch everything from | |||
the underlying source and group it. populate() is used to do tha t | the underlying source and group it. populate() is used to do tha t | |||
on the first call to any method on this source. The populated | on the first call to any method on this source. The populated | |||
boolean indicates that this has been done. | boolean indicates that this has been done. | |||
*/ | */ | |||
void populate(); | void populate(); | |||
bool populated; | bool populated; | |||
SortOptions makeSortOptions() const; | ||||
// These are used to merge pre-sorted results from a DocumentSourceMergeCursors or a | ||||
// DocumentSourceCommandShards depending on whether we have finished upgrading to 2.6 or not. | ||||
class IteratorFromCursor; | ||||
class IteratorFromBsonArray; | ||||
void populateFromCursors(const vector<DBClientCursor*>& cursors); | ||||
void populateFromBsonArrays(const vector<BSONArray>& arrays); | ||||
/* these two parallel each other */ | /* these two parallel each other */ | |||
typedef vector<intrusive_ptr<ExpressionFieldPath> > SortPaths; | typedef vector<intrusive_ptr<ExpressionFieldPath> > SortPaths; | |||
SortPaths vSortKey; | SortPaths vSortKey; | |||
vector<char> vAscending; // used like vector<bool> but without spec ialization | vector<char> vAscending; // used like vector<bool> but without spec ialization | |||
/// Extracts the fields in vSortKey from the Document; | /// Extracts the fields in vSortKey from the Document; | |||
Value extractKey(const Document& d) const; | Value extractKey(const Document& d) const; | |||
/// Compare two Values according to the specified sort key. | /// Compare two Values according to the specified sort key. | |||
int compare(const Value& lhs, const Value& rhs) const; | int compare(const Value& lhs, const Value& rhs) const; | |||
skipping to change at line 969 | skipping to change at line 913 | |||
int operator()(const MySorter::Data& lhs, const MySorter::Data& rhs) const { | int operator()(const MySorter::Data& lhs, const MySorter::Data& rhs) const { | |||
return _source.compare(lhs.first, rhs.first); | return _source.compare(lhs.first, rhs.first); | |||
} | } | |||
private: | private: | |||
const DocumentSourceSort& _source; | const DocumentSourceSort& _source; | |||
}; | }; | |||
intrusive_ptr<DocumentSourceLimit> limitSrc; | intrusive_ptr<DocumentSourceLimit> limitSrc; | |||
bool _done; | bool _done; | |||
bool _mergingPresorted; | ||||
scoped_ptr<MySorter::Iterator> _output; | scoped_ptr<MySorter::Iterator> _output; | |||
}; | }; | |||
class DocumentSourceLimit : | class DocumentSourceLimit : public DocumentSource | |||
public SplittableDocumentSource { | , public SplittableDocumentSource { | |||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual boost::optional<Document> getNext(); | virtual boost::optional<Document> getNext(); | |||
virtual const char *getSourceName() const; | virtual const char *getSourceName() const; | |||
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | |||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
virtual GetDepsReturn getDependencies(set<string>& deps) const { | virtual GetDepsReturn getDependencies(set<string>& deps) const { | |||
return SEE_NEXT; // This doesn't affect needed fields | return SEE_NEXT; // This doesn't affect needed fields | |||
} | } | |||
skipping to change at line 998 | skipping to change at line 943 | |||
@param pExpCtx the expression context for the pipeline | @param pExpCtx the expression context for the pipeline | |||
@returns the DocumentSource | @returns the DocumentSource | |||
*/ | */ | |||
static intrusive_ptr<DocumentSourceLimit> create( | static intrusive_ptr<DocumentSourceLimit> create( | |||
const intrusive_ptr<ExpressionContext> &pExpCtx, | const intrusive_ptr<ExpressionContext> &pExpCtx, | |||
long long limit); | long long limit); | |||
// Virtuals for SplittableDocumentSource | // Virtuals for SplittableDocumentSource | |||
// Need to run on router. Running on shard as well is an optimization. | // Need to run on router. Running on shard as well is an optimization. | |||
virtual intrusive_ptr<DocumentSource> getShardSource() { return this; } | virtual intrusive_ptr<DocumentSource> getShardSource() { return this; } | |||
virtual intrusive_ptr<DocumentSource> getRouterSource() { return this; } | virtual intrusive_ptr<DocumentSource> getMergeSource() { return this; } | |||
long long getLimit() const { return limit; } | long long getLimit() const { return limit; } | |||
void setLimit(long long newLimit) { limit = newLimit; } | void setLimit(long long newLimit) { limit = newLimit; } | |||
/** | /** | |||
Create a limiting DocumentSource from BSON. | Create a limiting DocumentSource from BSON. | |||
This is a convenience method that uses the above, and operates on | This is a convenience method that uses the above, and operates on | |||
a BSONElement that has been deteremined to be an Object with an | a BSONElement that has been deteremined to be an Object with an | |||
element named $limit. | element named $limit. | |||
skipping to change at line 1028 | skipping to change at line 973 | |||
static const char limitName[]; | static const char limitName[]; | |||
private: | private: | |||
DocumentSourceLimit(const intrusive_ptr<ExpressionContext> &pExpCtx , | DocumentSourceLimit(const intrusive_ptr<ExpressionContext> &pExpCtx , | |||
long long limit); | long long limit); | |||
long long limit; | long long limit; | |||
long long count; | long long count; | |||
}; | }; | |||
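Because getShardSource() and getMergeSource() both return the stage itself, a $limit of N is applied twice when a pipeline is split: each shard returns at most N documents, and the merger caps the combined stream at N again. A small sketch (pExpCtx assumed available):

    intrusive_ptr<DocumentSourceLimit> limit = DocumentSourceLimit::create( pExpCtx, 100 );
    intrusive_ptr<DocumentSource> onShards = limit->getShardSource();  // caps each shard at 100
    intrusive_ptr<DocumentSource> onMerger = limit->getMergeSource();  // caps the merged stream at 100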
class DocumentSourceSkip : | class DocumentSourceSkip : public DocumentSource | |||
public SplittableDocumentSource { | , public SplittableDocumentSource { | |||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual boost::optional<Document> getNext(); | virtual boost::optional<Document> getNext(); | |||
virtual const char *getSourceName() const; | virtual const char *getSourceName() const; | |||
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | |||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
virtual GetDepsReturn getDependencies(set<string>& deps) const { | virtual GetDepsReturn getDependencies(set<string>& deps) const { | |||
return SEE_NEXT; // This doesn't affect needed fields | return SEE_NEXT; // This doesn't affect needed fields | |||
} | } | |||
skipping to change at line 1053 | skipping to change at line 998 | |||
@param pExpCtx the expression context | @param pExpCtx the expression context | |||
@returns the DocumentSource | @returns the DocumentSource | |||
*/ | */ | |||
static intrusive_ptr<DocumentSourceSkip> create( | static intrusive_ptr<DocumentSourceSkip> create( | |||
const intrusive_ptr<ExpressionContext> &pExpCtx); | const intrusive_ptr<ExpressionContext> &pExpCtx); | |||
// Virtuals for SplittableDocumentSource | // Virtuals for SplittableDocumentSource | |||
// Need to run on router. Can't run on shards. | // Need to run on router. Can't run on shards. | |||
virtual intrusive_ptr<DocumentSource> getShardSource() { return NULL; } | virtual intrusive_ptr<DocumentSource> getShardSource() { return NULL; } | |||
virtual intrusive_ptr<DocumentSource> getRouterSource() { return this; } | virtual intrusive_ptr<DocumentSource> getMergeSource() { return this; } | |||
long long getSkip() const { return _skip; } | long long getSkip() const { return _skip; } | |||
void setSkip(long long newSkip) { _skip = newSkip; } | void setSkip(long long newSkip) { _skip = newSkip; } | |||
/** | /** | |||
Create a skipping DocumentSource from BSON. | Create a skipping DocumentSource from BSON. | |||
This is a convenience method that uses the above, and operates on | This is a convenience method that uses the above, and operates on | |||
a BSONElement that has been determined to be an Object with an | a BSONElement that has been determined to be an Object with an | |||
element named $skip. | element named $skip. | |||
skipping to change at line 1122 | skipping to change at line 1067 | |||
void unwindPath(const FieldPath &fieldPath); | void unwindPath(const FieldPath &fieldPath); | |||
// Configuration state. | // Configuration state. | |||
scoped_ptr<FieldPath> _unwindPath; | scoped_ptr<FieldPath> _unwindPath; | |||
// Iteration state. | // Iteration state. | |||
class Unwinder; | class Unwinder; | |||
scoped_ptr<Unwinder> _unwinder; | scoped_ptr<Unwinder> _unwinder; | |||
}; | }; | |||
class DocumentSourceGeoNear : public SplittableDocumentSource | class DocumentSourceGeoNear : public DocumentSource | |||
, public SplittableDocumentSource | ||||
, public DocumentSourceNeedsMongod { | , public DocumentSourceNeedsMongod { | |||
public: | public: | |||
// virtuals from DocumentSource | // virtuals from DocumentSource | |||
virtual boost::optional<Document> getNext(); | virtual boost::optional<Document> getNext(); | |||
virtual const char *getSourceName() const; | virtual const char *getSourceName() const; | |||
virtual void setSource(DocumentSource *pSource); // errors out since this must be first | virtual void setSource(DocumentSource *pSource); // errors out since this must be first | |||
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); | |||
virtual bool isValidInitialSource() const { return true; } | virtual bool isValidInitialSource() const { return true; } | |||
virtual Value serialize(bool explain = false) const; | virtual Value serialize(bool explain = false) const; | |||
// Virtuals for SplittableDocumentSource | // Virtuals for SplittableDocumentSource | |||
virtual intrusive_ptr<DocumentSource> getShardSource(); | virtual intrusive_ptr<DocumentSource> getShardSource(); | |||
virtual intrusive_ptr<DocumentSource> getRouterSource(); | virtual intrusive_ptr<DocumentSource> getMergeSource(); | |||
static intrusive_ptr<DocumentSource> createFromBson( | static intrusive_ptr<DocumentSource> createFromBson( | |||
BSONElement elem, | BSONElement elem, | |||
const intrusive_ptr<ExpressionContext> &pCtx); | const intrusive_ptr<ExpressionContext> &pCtx); | |||
static char geoNearName[]; | static char geoNearName[]; | |||
long long getLimit() { return limit; } | long long getLimit() { return limit; } | |||
// this should only be used for testing | // this should only be used for testing | |||
skipping to change at line 1178 | skipping to change at line 1124 | |||
// these fields are used while processing the results | // these fields are used while processing the results | |||
BSONObj cmdOutput; | BSONObj cmdOutput; | |||
boost::scoped_ptr<BSONObjIterator> resultsIterator; // iterator over cmdOutput["results"] | boost::scoped_ptr<BSONObjIterator> resultsIterator; // iterator over cmdOutput["results"] | |||
}; | }; | |||
} | } | |||
/* ======================= INLINED IMPLEMENTATIONS ========================== */ | /* ======================= INLINED IMPLEMENTATIONS ========================== */ | |||
namespace mongo { | namespace mongo { | |||
inline void DocumentSource::setPipelineStep(int s) { | ||||
step = s; | ||||
} | ||||
inline int DocumentSource::getPipelineStep() const { | ||||
return step; | ||||
} | ||||
inline void DocumentSourceGroup::setIdExpression( | inline void DocumentSourceGroup::setIdExpression( | |||
const intrusive_ptr<Expression> &pExpression) { | const intrusive_ptr<Expression> &pExpression) { | |||
pIdExpression = pExpression; | pIdExpression = pExpression; | |||
} | } | |||
} | } | |||
End of changes. 37 change blocks. | ||||
141 lines changed or deleted | 73 lines changed or added | |||
environment.h | environment.h | |||
---|---|---|---|---|
skipping to change at line 110 | skipping to change at line 110 | |||
Environment() : valid(false) { } | Environment() : valid(false) { } | |||
~Environment() { } | ~Environment() { } | |||
/** These functions are to add Constraints and KeyConstraints which will be run against | /** These functions are to add Constraints and KeyConstraints which will be run against | |||
* this environment in the following situations: | * this environment in the following situations: | |||
* 1. in the "validate" function | * 1. in the "validate" function | |||
* 2. in the "set" function after validate has been called suc cessfully | * 2. in the "set" function after validate has been called suc cessfully | |||
* | * | |||
* It is an error to call these functions after "validate" has been called | * It is an error to call these functions after "validate" has been called | |||
* | * | |||
* WARNING: These take ownership of the pointer passed in | * NOTE: These DO NOT take ownership of the pointer passed in | |||
*/ | */ | |||
Status addKeyConstraint(KeyConstraint* keyConstraint); | Status addKeyConstraint(KeyConstraint* keyConstraint); | |||
Status addConstraint(Constraint* constraint); | Status addConstraint(Constraint* constraint); | |||
/** Add the Value to this Environment with the given Key. If "validate" has already | /** Add the Value to this Environment with the given Key. If "validate" has already | |||
* been called on this Environment, runs all Constraints on the new Environment. If | * been called on this Environment, runs all Constraints on the new Environment. If | |||
* any of the Constraints fail, reverts to the old Environment and returns an error | * any of the Constraints fail, reverts to the old Environment and returns an error | |||
*/ | */ | |||
Status set(const Key& key, const Value& value); | Status set(const Key& key, const Value& value); | |||
skipping to change at line 200 | skipping to change at line 200 | |||
* | * | |||
* Note that the BSON representation only includes fields that were explicitly set using | * Note that the BSON representation only includes fields that were explicitly set using | |||
* setAll or set, and not defaults that were specified using setDefault. | * setAll or set, and not defaults that were specified using setDefault.
*/ | */ | |||
BSONObj toBSON() const; | BSONObj toBSON() const; | |||
/* Debugging */ | /* Debugging */ | |||
void dump(); | void dump(); | |||
protected: | protected: | |||
std::vector<boost::shared_ptr<Constraint> > constraints; | std::vector<Constraint*> constraints; | |||
std::vector<boost::shared_ptr<KeyConstraint> > keyConstraints; | std::vector<KeyConstraint*> keyConstraints; | |||
std::map <Key, Value> values; | std::map <Key, Value> values; | |||
std::map <Key, Value> default_values; | std::map <Key, Value> default_values; | |||
bool valid; | bool valid; | |||
}; | }; | |||
template <typename T> | template <typename T> | |||
Status Environment::get(const Key& get_key, T* get_value) const { | Status Environment::get(const Key& get_key, T* get_value) const { | |||
Value value; | Value value; | |||
Status ret = get(get_key, &value); | Status ret = get(get_key, &value); | |||
if (!ret.isOK()) { | if (!ret.isOK()) { | |||
End of changes. 2 change blocks. | ||||
3 lines changed or deleted | 3 lines changed or added | |||
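The environment.h comments above describe a register-then-validate flow: constraints are added first (with the new code no longer taking ownership of them), validate() runs them all once, and every later set() re-runs them and reverts on failure. Below is a minimal, standalone sketch of that pattern with simplified stand-in types (MiniEnvironment, string keys/values, std::function constraints); it is not the real moe::Environment API.

```cpp
#include <cstddef>
#include <functional>
#include <map>
#include <string>
#include <vector>

// Simplified stand-ins for Key/Value/Constraint; the real classes live in
// mongo/util/options_parser and are richer than this sketch.
typedef std::string Key;
typedef std::string Value;
typedef std::function<bool(const std::map<Key, Value>&)> Constraint;

class MiniEnvironment {
public:
    MiniEnvironment() : _validated(false) {}

    // The caller keeps ownership/lifetime of the constraint (mirroring the
    // "DO NOT take ownership" note in the diff); here we just copy a std::function.
    void addConstraint(const Constraint& c) { _constraints.push_back(c); }

    // Run all constraints once; after this, every set() re-checks them.
    bool validate() {
        for (size_t i = 0; i < _constraints.size(); ++i)
            if (!_constraints[i](_values)) return false;
        _validated = true;
        return true;
    }

    // Apply the new value, re-run constraints, and revert on failure -- the
    // "reverts to the old Environment and returns an error" behavior from the comment.
    bool set(const Key& k, const Value& v) {
        std::map<Key, Value> backup = _values;
        _values[k] = v;
        if (_validated) {
            for (size_t i = 0; i < _constraints.size(); ++i) {
                if (!_constraints[i](_values)) {
                    _values = backup;  // revert to the old state
                    return false;
                }
            }
        }
        return true;
    }

private:
    std::vector<Constraint> _constraints;
    std::map<Key, Value> _values;
    bool _validated;
};
```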
error_codes.h | error_codes.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
* | * | |||
* Do not update this file directly. Update src/mongo/base/error_codes. err instead. | * Do not update this file directly. Update src/mongo/base/error_codes. err instead. | |||
*/ | */ | |||
class ErrorCodes { | class ErrorCodes { | |||
public: | public: | |||
enum Error { | enum Error { | |||
OK = 0, | OK = 0, | |||
InternalError = 1, | InternalError = 1, | |||
BadValue = 2, | BadValue = 2, | |||
DuplicateKey = 3, | OBSOLETE_DuplicateKey = 3, | |||
NoSuchKey = 4, | NoSuchKey = 4, | |||
GraphContainsCycle = 5, | GraphContainsCycle = 5, | |||
HostUnreachable = 6, | HostUnreachable = 6, | |||
HostNotFound = 7, | HostNotFound = 7, | |||
UnknownError = 8, | UnknownError = 8, | |||
FailedToParse = 9, | FailedToParse = 9, | |||
CannotMutateObject = 10, | CannotMutateObject = 10, | |||
UserNotFound = 11, | UserNotFound = 11, | |||
UnsupportedFormat = 12, | UnsupportedFormat = 12, | |||
Unauthorized = 13, | Unauthorized = 13, | |||
skipping to change at line 87 | skipping to change at line 87 | |||
CursorNotFound = 43, | CursorNotFound = 43, | |||
UserDataInconsistent = 45, | UserDataInconsistent = 45, | |||
LockBusy = 46, | LockBusy = 46, | |||
NoMatchingDocument = 47, | NoMatchingDocument = 47, | |||
NamespaceExists = 48, | NamespaceExists = 48, | |||
InvalidRoleModification = 49, | InvalidRoleModification = 49, | |||
ExceededTimeLimit = 50, | ExceededTimeLimit = 50, | |||
ManualInterventionRequired = 51, | ManualInterventionRequired = 51, | |||
DollarPrefixedFieldName = 52, | DollarPrefixedFieldName = 52, | |||
InvalidIdField = 53, | InvalidIdField = 53, | |||
ImmutableIdField = 54, | NotSingleValueField = 54, | |||
InvalidDBRef = 55, | InvalidDBRef = 55, | |||
EmptyFieldName = 56, | EmptyFieldName = 56, | |||
DottedFieldName = 57, | DottedFieldName = 57, | |||
RoleModificationFailed = 58, | RoleModificationFailed = 58, | |||
CommandNotFound = 59, | CommandNotFound = 59, | |||
DatabaseNotFound = 60, | DatabaseNotFound = 60, | |||
ShardKeyNotFound = 61, | ShardKeyNotFound = 61, | |||
OplogOperationUnsupported = 62, | OplogOperationUnsupported = 62, | |||
StaleShardVersion = 63, | StaleShardVersion = 63, | |||
WriteConcernFailed = 64, | WriteConcernFailed = 64, | |||
MultipleErrorsOccurred = 65, | MultipleErrorsOccurred = 65, | |||
ImmutableShardKeyField = 66, | ImmutableField = 66, | |||
CannotCreateIndex = 67, | ||||
IndexAlreadyExists = 68, | ||||
AuthSchemaIncompatible = 69, | ||||
ShardNotFound = 70, | ||||
ReplicaSetNotFound = 71, | ||||
InvalidOptions = 72, | ||||
DuplicateKey = 11000, | ||||
MaxError | MaxError | |||
}; | }; | |||
static const char* errorString(Error err); | static const char* errorString(Error err); | |||
/** | /** | |||
* Parse an Error from its "name". Returns UnknownError if "name" is unrecognized. | * Parse an Error from its "name". Returns UnknownError if "name" is unrecognized. | |||
* | * | |||
* NOTE: Also returns UnknownError for the string "UnknownError". | * NOTE: Also returns UnknownError for the string "UnknownError". | |||
*/ | */ | |||
End of changes. 3 change blocks. | ||||
3 lines changed or deleted | 10 lines changed or added | |||
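error_codes.h documents that parsing an Error from its name falls back to UnknownError for unrecognized strings (including the literal string "UnknownError"). A hedged sketch of that lookup-with-fallback, using a tiny hand-written table instead of the table generated from error_codes.err:

```cpp
#include <cstddef>
#include <cstring>

enum Error { OK = 0, InternalError = 1, BadValue = 2, UnknownError = 8 };

struct NameCodePair { const char* name; Error code; };

// A tiny, illustrative table; the real one is generated from error_codes.err.
static const NameCodePair kTable[] = {
    { "OK", OK },
    { "InternalError", InternalError },
    { "BadValue", BadValue },
};

// Mirrors the documented behavior: unrecognized names map to UnknownError.
Error fromStringSketch(const char* name) {
    for (size_t i = 0; i < sizeof(kTable) / sizeof(kTable[0]); ++i)
        if (std::strcmp(kTable[i].name, name) == 0) return kTable[i].code;
    return UnknownError;
}

// Usage: fromStringSketch("BadValue") == BadValue; fromStringSketch("Bogus") == UnknownError.
```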
expression_context.h | expression_context.h | |||
---|---|---|---|---|
skipping to change at line 45 | skipping to change at line 45 | |||
#include "mongo/util/intrusive_counter.h" | #include "mongo/util/intrusive_counter.h" | |||
namespace mongo { | namespace mongo { | |||
struct ExpressionContext : public IntrusiveCounterUnsigned { | struct ExpressionContext : public IntrusiveCounterUnsigned { | |||
public: | public: | |||
ExpressionContext(const InterruptStatus& status, const NamespaceString& ns) | ExpressionContext(const InterruptStatus& status, const NamespaceString& ns)
: inShard(false) | : inShard(false) | |||
, inRouter(false) | , inRouter(false) | |||
, extSortAllowed(false) | , extSortAllowed(false) | |||
, interruptStatus(status) | ||||
, ns(ns) | , ns(ns) | |||
, interruptStatus(status) | ||||
, interruptCounter(interruptCheckPeriod) | ||||
{} | {} | |||
/** Used by a pipeline to check for interrupts so that killOp() works. | /** Used by a pipeline to check for interrupts so that killOp() works.
* @throws if the operation has been interrupted | * @throws if the operation has been interrupted | |||
*/ | */ | |||
void checkForInterrupt() { | void checkForInterrupt() { | |||
// The check could be expensive, at least in relative terms. | if (--interruptCounter == 0) {
RARELY interruptStatus.checkForInterrupt(); | // The checkForInterrupt could be expensive, at least in relative terms.
 | interruptStatus.checkForInterrupt();
 | interruptCounter = interruptCheckPeriod;
 | }
} | } | |||
bool inShard; | bool inShard; | |||
bool inRouter; | bool inRouter; | |||
bool extSortAllowed; | bool extSortAllowed; | |||
const InterruptStatus& interruptStatus; | ||||
NamespaceString ns; | NamespaceString ns; | |||
std::string tempDir; // Defaults to empty to prevent external sorti ng in mongos. | std::string tempDir; // Defaults to empty to prevent external sorti ng in mongos. | |||
const InterruptStatus& interruptStatus; | ||||
static const int interruptCheckPeriod = 128; | ||||
int interruptCounter; // when 0, check interruptStatus | ||||
}; | }; | |||
} | } | |||
End of changes. 6 change blocks. | ||||
4 lines changed or deleted | 13 lines changed or added | |||
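The new checkForInterrupt() above amortizes the relatively expensive interrupt check by counting down interruptCounter and only calling through every interruptCheckPeriod (128) invocations. A standalone sketch of that decrement-and-reset pattern, with a plain function standing in for interruptStatus.checkForInterrupt():

```cpp
#include <iostream>

// Stand-in for the real interrupt check, which may be comparatively expensive
// (in the real code it throws if the operation was killed).
void expensiveInterruptCheck() {
    std::cout << "interrupt check ran\n";
}

class PeriodicChecker {
public:
    static const int kCheckPeriod = 128;  // mirrors interruptCheckPeriod

    PeriodicChecker() : _counter(kCheckPeriod) {}

    // Called once per document/unit of work; only every kCheckPeriod-th call
    // pays for the real check.
    void checkForInterrupt() {
        if (--_counter == 0) {
            expensiveInterruptCheck();
            _counter = kCheckPeriod;
        }
    }

private:
    int _counter;
};

int main() {
    PeriodicChecker checker;
    for (int i = 0; i < 1000; ++i) checker.checkForInterrupt();  // prints 7 times
    return 0;
}
```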
expression_text.h | expression_text.h | |||
---|---|---|---|---|
skipping to change at line 46 | skipping to change at line 46 | |||
namespace mongo { | namespace mongo { | |||
class TextMatchExpression : public LeafMatchExpression { | class TextMatchExpression : public LeafMatchExpression { | |||
public: | public: | |||
TextMatchExpression() : LeafMatchExpression( TEXT ) {} | TextMatchExpression() : LeafMatchExpression( TEXT ) {} | |||
virtual ~TextMatchExpression() {} | virtual ~TextMatchExpression() {} | |||
Status init( const std::string& query, const std::string& language ); | Status init( const std::string& query, const std::string& language ); | |||
// This shouldn't be called and as such will crash. GeoNear always requires an index. | ||||
virtual bool matchesSingleElement( const BSONElement& e ) const; | virtual bool matchesSingleElement( const BSONElement& e ) const; | |||
virtual void debugString( StringBuilder& debug, int level = 0 ) con st; | virtual void debugString( StringBuilder& debug, int level = 0 ) con st; | |||
virtual bool equivalent( const MatchExpression* other ) const; | virtual bool equivalent( const MatchExpression* other ) const; | |||
virtual LeafMatchExpression* shallowClone() const; | virtual LeafMatchExpression* shallowClone() const; | |||
const string& getQuery() const { return _query; } | const string& getQuery() const { return _query; } | |||
const string& getLanguage() const { return _language; } | const string& getLanguage() const { return _language; } | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 0 lines changed or added | |||
extent.h | extent.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/db/diskloc.h" | #include "mongo/db/diskloc.h" | |||
#include "mongo/db/storage/namespace.h" | #include "mongo/db/catalog/ondisk/namespace.h" | |||
namespace mongo { | namespace mongo { | |||
/* extents are datafile regions where all the records within the region | /* extents are datafile regions where all the records within the region | |||
belong to the same namespace. | belong to the same namespace. | |||
(11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord | (11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
(11:12:55 AM) dm10gen: and that is placed on the free list | (11:12:55 AM) dm10gen: and that is placed on the free list | |||
*/ | */ | |||
#pragma pack(1) | #pragma pack(1) | |||
skipping to change at line 90 | skipping to change at line 90 | |||
void assertOk() const { verify(isOk()); } | void assertOk() const { verify(isOk()); } | |||
Record* getRecord(DiskLoc dl) { | Record* getRecord(DiskLoc dl) { | |||
verify( !dl.isNull() ); | verify( !dl.isNull() ); | |||
verify( dl.sameFile(myLoc) ); | verify( dl.sameFile(myLoc) ); | |||
int x = dl.getOfs() - myLoc.getOfs(); | int x = dl.getOfs() - myLoc.getOfs(); | |||
verify( x > 0 ); | verify( x > 0 ); | |||
return (Record *) (((char *) this) + x); | return (Record *) (((char *) this) + x); | |||
} | } | |||
Extent* getNextExtent(); | ||||
Extent* getPrevExtent(); | ||||
static int maxSize(); | static int maxSize(); | |||
static int minSize() { return 0x1000; } | static int minSize() { return 0x1000; } | |||
/** | /** | |||
* @param len length of record we need | * @param len length of record we need
* @param lastExtentLen size of last extent which is a factor in next extent size | * @param lastExtentLen size of last extent which is a factor in next extent size
*/ | */ | |||
static int followupSize(int len, int lastExtentLen); | static int followupSize(int len, int lastExtentLen); | |||
/** get a suggested size for the first extent in a namespace | /** get a suggested size for the first extent in a namespace | |||
* @param len length of record we need to insert | * @param len length of record we need to insert | |||
End of changes. 2 change blocks. | ||||
4 lines changed or deleted | 1 lines changed or added | |||
extent_manager.h | extent_manager.h | |||
---|---|---|---|---|
skipping to change at line 113 | skipping to change at line 113 | |||
@param capped - true if capped collection | @param capped - true if capped collection | |||
*/ | */ | |||
DiskLoc createExtent( int approxSize, int maxFileNoForQuota ); | DiskLoc createExtent( int approxSize, int maxFileNoForQuota ); | |||
/** | /** | |||
* will return NULL if nothing suitable in free list | * will return NULL if nothing suitable in free list | |||
*/ | */ | |||
DiskLoc allocFromFreeList( int approxSize, bool capped ); | DiskLoc allocFromFreeList( int approxSize, bool capped ); | |||
/** | /** | |||
* TODO: this isn't quite in the right spot | ||||
* really need the concept of a NamespaceStructure in the current paradigm
*/ | ||||
Extent* increaseStorageSize( const string& ns, | ||||
NamespaceDetails* details, | ||||
int size, | ||||
int quotaMax ); | ||||
/** | ||||
* firstExt has to be == lastExt or a chain | * firstExt has to be == lastExt or a chain | |||
*/ | */ | |||
void freeExtents( DiskLoc firstExt, DiskLoc lastExt ); | void freeExtents( DiskLoc firstExt, DiskLoc lastExt ); | |||
void printFreeList() const; | void printFreeList() const; | |||
bool hasFreeList() const { return _freeListDetails != NULL; } | bool hasFreeList() const { return _freeListDetails != NULL; } | |||
/** | /** | |||
* @param loc - has to be for a specific Record | * @param loc - has to be for a specific Record | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 10 lines changed or added | |||
extsort.h | extsort.h | |||
---|---|---|---|---|
skipping to change at line 35 | skipping to change at line 35 | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/pch.h" | #include "mongo/pch.h" | |||
#include "mongo/db/index.h" | #include "mongo/db/storage/index_details.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/curop-inl.h" | #include "mongo/db/curop-inl.h" | |||
#include "mongo/util/array.h" | #include "mongo/util/array.h" | |||
#define MONGO_USE_NEW_SORTER 1 | #define MONGO_USE_NEW_SORTER 1 | |||
#if MONGO_USE_NEW_SORTER | #if MONGO_USE_NEW_SORTER | |||
# include "mongo/db/sorter/sorter.h" | # include "mongo/db/sorter/sorter.h" | |||
#endif | #endif | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 1 lines changed or added | |||
field_checker.h | field_checker.h | |||
---|---|---|---|---|
skipping to change at line 42 | skipping to change at line 42 | |||
namespace mongo { | namespace mongo { | |||
class FieldRef; | class FieldRef; | |||
namespace fieldchecker { | namespace fieldchecker { | |||
/** | /** | |||
* Returns OK if all the below conditions on 'field' are valid: | * Returns OK if all the below conditions on 'field' are valid: | |||
* + Non-empty | * + Non-empty | |||
* + Not the _id field (or a subfield of the _id field, such as _id.x.y)
* + Does not start or end with a '.' | * + Does not start or end with a '.' | |||
* + Does not start with a $ | ||||
* Otherwise returns a code indicating cause of failure. | * Otherwise returns a code indicating cause of failure. | |||
*/ | */ | |||
Status isUpdatable(const FieldRef& field); | Status isUpdatable(const FieldRef& field); | |||
/** | /** | |||
* Same behavior of isUpdatable but allowing update fields to start with '$'. This
* supports $unset on legacy fields. | ||||
*/ | ||||
Status isUpdatableLegacy(const FieldRef& field); | ||||
/** | ||||
* Returns true, the position 'pos' of the first $-sign if present in 'fieldRef', and | * Returns true, the position 'pos' of the first $-sign if present in 'fieldRef', and | |||
* how many other $-signs were found in 'count'. Otherwise return f alse. | * how many other $-signs were found in 'count'. Otherwise return f alse. | |||
* | * | |||
* Note: | * Note: | |||
* isPositional assumes that the field is updatable. Call isUpdatable() above to | * isPositional assumes that the field is updatable. Call isUpdatable() above to
* verify. | * verify. | |||
*/ | */ | |||
bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count = NULL); | bool isPositional(const FieldRef& fieldRef, size_t* pos, size_t* count = NULL);
} // namespace fieldchecker | } // namespace fieldchecker | |||
End of changes. 3 change blocks. | ||||
9 lines changed or deleted | 0 lines changed or added | |||
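field_checker.h's isPositional() reports whether an update path contains a positional "$" component, where the first one sits, and how many more follow. A standalone sketch of that scan, operating on an already-split dotted path (a vector of parts stands in for FieldRef; the helper name is illustrative):

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Returns true if any part of the (already split) dotted path is "$".
// On success, *pos gets the index of the first "$" and *count the number of
// additional "$" parts after it -- the same outputs described for isPositional.
bool isPositionalSketch(const std::vector<std::string>& parts,
                        size_t* pos, size_t* count) {
    bool found = false;
    *pos = 0;
    *count = 0;
    for (size_t i = 0; i < parts.size(); ++i) {
        if (parts[i] != "$") continue;
        if (!found) {
            found = true;
            *pos = i;   // first positional component
        } else {
            ++(*count); // any further "$" components
        }
    }
    return found;
}

// Usage: for {"a", "$", "b"}, returns true with *pos == 1 and *count == 0.
```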
field_parser.h | field_parser.h | |||
---|---|---|---|---|
skipping to change at line 125 | skipping to change at line 125 | |||
const BSONField<int>& field, | const BSONField<int>& field, | |||
int* out, | int* out, | |||
string* errMsg = NULL); | string* errMsg = NULL); | |||
static FieldState extractNumber(BSONObj doc, | static FieldState extractNumber(BSONObj doc, | |||
const BSONField<long long>& field, | const BSONField<long long>& field, | |||
long long* out, | long long* out, | |||
string* errMsg = NULL); | string* errMsg = NULL); | |||
/** | /** | |||
* Extracts a document id from a particular field name, which may be of any type but Array.
* Wraps the extracted id value in a BSONObj with one element and empty field name.
*/ | ||||
static FieldState extractID( BSONObj doc, | ||||
const BSONField<BSONObj>& field, | ||||
BSONObj* out, | ||||
string* errMsg = NULL ); | ||||
/** | ||||
* Extracts a mandatory BSONSerializable structure 'field' from the object 'doc'. Write | * Extracts a mandatory BSONSerializable structure 'field' from the object 'doc'. Write | |||
* the extracted contents to '*out' if successful or fills '*errMsg', if existing, | * the extracted contents to '*out' if successful or fills '*errMsg', if existing,
* otherwise. This variant relies on T having a parseBSON, which all | * otherwise. This variant relies on T having a parseBSON, which all
* BSONSerializable's have. | * BSONSerializable's have. | |||
* | * | |||
* TODO: Tighten for BSONSerializable's only | * TODO: Tighten for BSONSerializable's only | |||
*/ | */ | |||
template<typename T> | template<typename T> | |||
static FieldState extract(BSONObj doc, | static FieldState extract(BSONObj doc, | |||
const BSONField<T>& field, | const BSONField<T>& field, | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 11 lines changed or added | |||
field_ref_set.h | field_ref_set.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <set> | #include <set> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/base/owned_pointer_vector.h" | ||||
#include "mongo/base/status.h" | ||||
#include "mongo/db/field_ref.h" | #include "mongo/db/field_ref.h" | |||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* A FieldRefSet holds a set of FieldRefs that do not conflict with one another, that is, | * A FieldRefSet holds a set of FieldRefs that do not conflict with one another, that is,
* they target different subtrees of a given document. Two FieldRefs would conflict if they | * they target different subtrees of a given document. Two FieldRefs would conflict if they
* are equal or one is prefix of the other. | * are equal or one is prefix of the other. | |||
*/ | */ | |||
class FieldRefSet { | class FieldRefSet { | |||
skipping to change at line 73 | skipping to change at line 75 | |||
inline const_iterator begin() const { | inline const_iterator begin() const { | |||
return _fieldSet.begin(); | return _fieldSet.begin(); | |||
} | } | |||
inline const_iterator end() const { | inline const_iterator end() const { | |||
return _fieldSet.end(); | return _fieldSet.end(); | |||
} | } | |||
/** | /** | |||
* Returns true if the field 'toInsert' can be added in the set without | * Returns true if the field 'toInsert' can be added in the set without
* conflicts. Otwerwise returns false and fill in '*conflict' with the field 'toInsert' | * conflicts. Otherwise returns false and fill in '*conflict' with the field 'toInsert' | |||
* clashed with. | * clashed with. | |||
* | * | |||
* There is no ownership transfer of 'toInsert'. The caller is responsible for | * There is no ownership transfer of 'toInsert'. The caller is responsible for
* maintaining it alive for as long as the FieldRefSet is so. By the same token | * maintaining it alive for as long as the FieldRefSet is so. By the same token
* 'conflict' can only be referred to while the FieldRefSet can. | * 'conflict' can only be referred to while the FieldRefSet can. | |||
*/ | */ | |||
bool insert(const FieldRef* toInsert, const FieldRef** conflict); | bool insert(const FieldRef* toInsert, const FieldRef** conflict); | |||
/** | /** | |||
* Fills the set with the supplied FieldRef*s | ||||
*/ | ||||
void fillFrom(const std::vector<FieldRef*>& fields); | ||||
/** | ||||
* Replace any existing conflicting FieldRef with the shortest (closest to root) one
*/ | ||||
void keepShortest(const FieldRef* toInsert); | ||||
/** | ||||
* Find all inserted fields which conflict with the FieldRef 'toCheck' by the semantics | * Find all inserted fields which conflict with the FieldRef 'toCheck' by the semantics
* of 'insert', and add those fields to the 'conflicts' set. | * of 'insert', and add those fields to the 'conflicts' set. | |||
*/ | */ | |||
void getConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const; | void getConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const; | |||
void clear() { | void clear() { | |||
_fieldSet.clear(); | _fieldSet.clear(); | |||
} | } | |||
/** | ||||
* A debug/log-able string | ||||
*/ | ||||
const std::string toString() const; | ||||
private: | private: | |||
// A set of field_ref pointers, none of which is owned here. | // A set of field_ref pointers, none of which is owned here. | |||
FieldSet _fieldSet; | FieldSet _fieldSet; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 4 change blocks. | ||||
1 lines changed or deleted | 19 lines changed or added | |||
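field_ref_set.h defines conflicts as paths that are equal or where one is a prefix of the other, and insert() reports the clashing field instead of storing the new one. A standalone sketch of that rule over dotted-string paths (the real class stores FieldRef pointers it does not own; names here are illustrative):

```cpp
#include <set>
#include <string>

// Two dotted paths conflict when they are equal or one is a path-prefix of the
// other (e.g. "a.b" conflicts with "a.b" and "a.b.c", but not with "a.bc").
bool pathsConflict(const std::string& a, const std::string& b) {
    if (a == b) return true;
    const std::string& shorter = a.size() < b.size() ? a : b;
    const std::string& longer  = a.size() < b.size() ? b : a;
    return longer.compare(0, shorter.size(), shorter) == 0 &&
           longer.size() > shorter.size() &&
           longer[shorter.size()] == '.';
}

class MiniFieldRefSet {
public:
    // Mirrors FieldRefSet::insert: refuse the new path and report what it
    // clashed with instead of storing it.
    bool insert(const std::string& path, std::string* conflict) {
        for (std::set<std::string>::const_iterator it = _paths.begin();
             it != _paths.end(); ++it) {
            if (pathsConflict(path, *it)) {
                *conflict = *it;
                return false;
            }
        }
        _paths.insert(path);
        return true;
    }

private:
    std::set<std::string> _paths;
};
```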
framework_options.h | framework_options.h | |||
---|---|---|---|---|
skipping to change at line 48 | skipping to change at line 48 | |||
std::vector<std::string> suites; | std::vector<std::string> suites; | |||
std::string filter; | std::string filter; | |||
}; | }; | |||
extern FrameworkGlobalParams frameworkGlobalParams; | extern FrameworkGlobalParams frameworkGlobalParams; | |||
Status addTestFrameworkOptions(moe::OptionSection* options); | Status addTestFrameworkOptions(moe::OptionSection* options); | |||
std::string getTestFrameworkHelp(const StringData& name, const moe::OptionSection& options); | std::string getTestFrameworkHelp(const StringData& name, const moe::OptionSection& options);
Status preValidationTestFrameworkOptions(const moe::Environment& params, | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationTestFrameworkOptions(const moe::Environment& params,
const std::vector<std::string> & args); | const std::vector<std::string> & args); | |||
Status storeTestFrameworkOptions(const moe::Environment& params, | Status storeTestFrameworkOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
fts_query.h | fts_query.h | |||
---|---|---|---|---|
skipping to change at line 72 | skipping to change at line 72 | |||
* @return true if any negations or phrase + or - | * @return true if any negations or phrase + or - | |||
*/ | */ | |||
bool hasNonTermPieces() const { | bool hasNonTermPieces() const { | |||
return | return | |||
_negatedTerms.size() > 0 || | _negatedTerms.size() > 0 || | |||
_phrases.size() > 0 || | _phrases.size() > 0 || | |||
_negatedPhrases.size() > 0; | _negatedPhrases.size() > 0; | |||
} | } | |||
string getSearch() const { return _search; } | string getSearch() const { return _search; } | |||
string getLanguage() const { return _language; } | const FTSLanguage getLanguage() const { return _language; } | |||
string toString() const; | string toString() const; | |||
string debugString() const; | string debugString() const; | |||
protected: | protected: | |||
string _search; | string _search; | |||
string _language; | FTSLanguage _language; | |||
vector<string> _terms; | vector<string> _terms; | |||
unordered_set<string> _negatedTerms; | unordered_set<string> _negatedTerms; | |||
vector<string> _phrases; | vector<string> _phrases; | |||
vector<string> _negatedPhrases; | vector<string> _negatedPhrases; | |||
private: | private: | |||
void _addTerm( const StopWords* sw, Stemmer& stemmer, const string& term, bool negated ); | void _addTerm( const StopWords* sw, Stemmer& stemmer, const string& term, bool negated );
}; | }; | |||
} | } | |||
End of changes. 2 change blocks. | ||||
2 lines changed or deleted | 2 lines changed or added | |||
fts_spec.h | fts_spec.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <map> | #include <map> | |||
#include <vector> | #include <vector> | |||
#include <string> | #include <string> | |||
#include "mongo/db/fts/fts_language.h" | ||||
#include "mongo/db/fts/fts_util.h" | #include "mongo/db/fts/fts_util.h" | |||
#include "mongo/db/fts/stemmer.h" | #include "mongo/db/fts/stemmer.h" | |||
#include "mongo/db/fts/stop_words.h" | #include "mongo/db/fts/stop_words.h" | |||
#include "mongo/db/fts/tokenizer.h" | #include "mongo/db/fts/tokenizer.h" | |||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
namespace mongo { | namespace mongo { | |||
namespace fts { | namespace fts { | |||
extern const double MAX_WEIGHT; | extern const double MAX_WEIGHT; | |||
typedef std::map<string,double> Weights; // TODO cool map | typedef std::map<string,double> Weights; // TODO cool map | |||
typedef unordered_map<string,double> TermFrequencyMap; | typedef unordered_map<string,double> TermFrequencyMap; | |||
class FTSSpec { | class FTSSpec { | |||
struct Tools { | struct Tools { | |||
Tools( string _language, | Tools( const FTSLanguage _language, | |||
const Stemmer* _stemmer, | const Stemmer* _stemmer, | |||
const StopWords* _stopwords ) | const StopWords* _stopwords ) | |||
: language( _language ) | : language( _language ) | |||
, stemmer( _stemmer ) | , stemmer( _stemmer ) | |||
, stopwords( _stopwords ) {} | , stopwords( _stopwords ) {} | |||
const std::string& language; | const FTSLanguage language; | |||
const Stemmer* stemmer; | const Stemmer* stemmer; | |||
const StopWords* stopwords; | const StopWords* stopwords; | |||
}; | }; | |||
public: | public: | |||
FTSSpec( const BSONObj& indexInfo ); | FTSSpec( const BSONObj& indexInfo ); | |||
bool wildcard() const { return _wildcard; } | bool wildcard() const { return _wildcard; } | |||
const string& defaultLanguage() const { return _defaultLanguage; } | const FTSLanguage defaultLanguage() const { return _defaultLanguage; }
const string& languageOverrideField() const { return _languageOverrideField; } | const string& languageOverrideField() const { return _languageOverrideField; }
size_t numExtraBefore() const { return _extraBefore.size(); } | size_t numExtraBefore() const { return _extraBefore.size(); } | |||
const std::string& extraBefore( unsigned i ) const { return _extraBefore[i]; } | const std::string& extraBefore( unsigned i ) const { return _extraBefore[i]; }
size_t numExtraAfter() const { return _extraAfter.size(); } | size_t numExtraAfter() const { return _extraAfter.size(); }
const std::string& extraAfter( unsigned i ) const { return _extraAfter[i]; } | const std::string& extraAfter( unsigned i ) const { return _extraAfter[i]; }
/** | /** | |||
* Find a "language" field, if any, in a given BSON doc. If th | ||||
e language is not on the | ||||
* list of valid languages, return current. | ||||
*/ | ||||
string getLanguageToUse( const BSONObj& userDoc, | ||||
const std::string& currentLanguage ) c | ||||
onst; | ||||
/** | ||||
* Calculates term/score pairs for a BSONObj as applied to this spec. | * Calculates term/score pairs for a BSONObj as applied to this spec. | |||
* - "obj": the BSONObj to traverse; can be a subdocument or ar ray | * - "obj": the BSONObj to traverse; can be a subdocument or ar ray | |||
* - "parentLanguage": nearest enclosing document "language" sp ec for obj | * - "parentLanguage": nearest enclosing document "language" sp ec for obj | |||
* - "parentPath": obj's dotted path in containing document | * - "parentPath": obj's dotted path in containing document | |||
* - "isArray": true if obj is an array | * - "isArray": true if obj is an array | |||
* - "term_freqs": out-parameter to store results | * - "term_freqs": out-parameter to store results | |||
*/ | */ | |||
void scoreDocument( const BSONObj& obj, | void scoreDocument( const BSONObj& obj, | |||
const string& parentLanguage, | const FTSLanguage parentLanguage, | |||
const string& parentPath, | const string& parentPath, | |||
bool isArray, | bool isArray, | |||
TermFrequencyMap* term_freqs ) const; | TermFrequencyMap* term_freqs ) const; | |||
/** | /** | |||
* given a query, pulls out the pieces (in order) that go in th e index first | * given a query, pulls out the pieces (in order) that go in th e index first | |||
*/ | */ | |||
Status getIndexPrefix( const BSONObj& filter, BSONObj* out ) const; | Status getIndexPrefix( const BSONObj& filter, BSONObj* out ) const;
const Weights& weights() const { return _weights; } | const Weights& weights() const { return _weights; } | |||
static BSONObj fixSpec( const BSONObj& spec ); | static BSONObj fixSpec( const BSONObj& spec ); | |||
private: | private: | |||
/**
* Get the language override for the given BSON doc. If no language override is
* specified, returns currentLanguage.
*/
const FTSLanguage getLanguageToUse( const BSONObj& userDoc,
const FTSLanguage currentLanguage ) const;
void _scoreString( const Tools& tools, | void _scoreString( const Tools& tools, | |||
const StringData& raw, | const StringData& raw, | |||
TermFrequencyMap* term_freqs, | TermFrequencyMap* term_freqs, | |||
double weight ) const; | double weight ) const; | |||
string _defaultLanguage; | FTSLanguage _defaultLanguage; | |||
string _languageOverrideField; | string _languageOverrideField; | |||
bool _wildcard; | bool _wildcard; | |||
// _weights stores a mapping between the fields and the value as a double | // _weights stores a mapping between the fields and the value as a double
// basically, how much should an occurrence of (query term) in (field) be worth | // basically, how much should an occurrence of (query term) in (field) be worth
Weights _weights; | Weights _weights; | |||
// other fields to index | // other fields to index | |||
std::vector<string> _extraBefore; | std::vector<string> _extraBefore; | |||
std::vector<string> _extraAfter; | std::vector<string> _extraAfter; | |||
End of changes. 8 change blocks. | ||||
14 lines changed or deleted | 15 lines changed or added | |||
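fts_spec.h's getLanguageToUse() resolves a per-document language override: if the override field is present (and, in the older wording, names a supported language) it wins, otherwise the language inherited from the enclosing document is kept. A standalone sketch of that resolution over a plain key/value map instead of a BSONObj; the field name and supported set below are illustrative assumptions:

```cpp
#include <map>
#include <set>
#include <string>

class MiniLanguageResolver {
public:
    MiniLanguageResolver(const std::string& overrideField,
                         const std::set<std::string>& supported)
        : _overrideField(overrideField), _supported(supported) {}

    // If the document carries a supported override (e.g. {"language": "french"}),
    // use it; otherwise keep the language inherited from the enclosing document.
    std::string languageToUse(const std::map<std::string, std::string>& doc,
                              const std::string& currentLanguage) const {
        std::map<std::string, std::string>::const_iterator it =
            doc.find(_overrideField);
        if (it == doc.end()) return currentLanguage;          // no override
        if (_supported.count(it->second) == 0) return currentLanguage;  // unsupported value
        return it->second;
    }

private:
    std::string _overrideField;
    std::set<std::string> _supported;
};
```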
geoparser.h | geoparser.h | |||
---|---|---|---|---|
skipping to change at line 74 | skipping to change at line 74 | |||
static bool isMultiLine(const BSONObj &obj); | static bool isMultiLine(const BSONObj &obj); | |||
static bool parseMultiLine(const BSONObj &obj, MultiLineWithCRS *out); | static bool parseMultiLine(const BSONObj &obj, MultiLineWithCRS *out);
static bool isMultiPolygon(const BSONObj &obj); | static bool isMultiPolygon(const BSONObj &obj);
static bool parseMultiPolygon(const BSONObj &obj, MultiPolygonWithCRS *out); | static bool parseMultiPolygon(const BSONObj &obj, MultiPolygonWithCRS *out);
static bool isGeometryCollection(const BSONObj &obj); | static bool isGeometryCollection(const BSONObj &obj);
static bool parseGeometryCollection(const BSONObj &obj, GeometryCollection *out); | static bool parseGeometryCollection(const BSONObj &obj, GeometryCollection *out);
static bool parsePointWithMaxDistance(const BSONObj& obj, PointWithCRS* out, double* maxOut);
// Return true if the CRS field is 1. missing, or 2. is well-formed and | // Return true if the CRS field is 1. missing, or 2. is well-formed and | |||
// has a datum we accept. Otherwise, return false. | // has a datum we accept. Otherwise, return false. | |||
// NOTE(hk): If this is ever used anywhere but internally, consider | // NOTE(hk): If this is ever used anywhere but internally, consider | |||
// returning states: missing, invalid, unknown, ok, etc. -- whatever | // returning states: missing, invalid, unknown, ok, etc. -- whatever
// needed. | // needed. | |||
static bool crsIsOK(const BSONObj& obj); | static bool crsIsOK(const BSONObj& obj); | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 3 lines changed or added | |||
geoquery.h | geoquery.h | |||
---|---|---|---|---|
skipping to change at line 68 | skipping to change at line 68 | |||
* Only polygons (and aggregate types thereof) support contains. | * Only polygons (and aggregate types thereof) support contains. | |||
*/ | */ | |||
bool supportsContains() const; | bool supportsContains() const; | |||
bool hasS2Region() const; | bool hasS2Region() const; | |||
bool hasFlatRegion() const; | bool hasFlatRegion() const; | |||
// Used by s2cursor only to generate a covering of the query object. | // Used by s2cursor only to generate a covering of the query object.
// One region is not NULL and this returns it. | // One region is not NULL and this returns it. | |||
const S2Region& getRegion() const; | const S2Region& getRegion() const; | |||
private: | // XXX FIXME | |||
// private: | ||||
// Does 'this' intersect with the provided type? | // Does 'this' intersect with the provided type? | |||
bool intersects(const S2Cell& otherPoint) const; | bool intersects(const S2Cell& otherPoint) const; | |||
bool intersects(const S2Polyline& otherLine) const; | bool intersects(const S2Polyline& otherLine) const; | |||
bool intersects(const S2Polygon& otherPolygon) const; | bool intersects(const S2Polygon& otherPolygon) const; | |||
// These three just iterate over the geometries and call the 3 methods above. | // These three just iterate over the geometries and call the 3 methods above.
bool intersects(const MultiPointWithCRS& otherMultiPoint) const; | bool intersects(const MultiPointWithCRS& otherMultiPoint) const; | |||
bool intersects(const MultiLineWithCRS& otherMultiLine) const; | bool intersects(const MultiLineWithCRS& otherMultiLine) const; | |||
bool intersects(const MultiPolygonWithCRS& otherMultiPolygon) const ; | bool intersects(const MultiPolygonWithCRS& otherMultiPolygon) const ; | |||
// Used when 'this' has a polygon somewhere, either in _polygon or _multiPolygon or | // Used when 'this' has a polygon somewhere, either in _polygon or _multiPolygon or | |||
skipping to change at line 137 | skipping to change at line 138 | |||
// If centroid.crs == SPHERE these are meters. | // If centroid.crs == SPHERE these are meters. | |||
double minDistance; | double minDistance; | |||
double maxDistance; | double maxDistance; | |||
// It's either $near or $nearSphere. | // It's either $near or $nearSphere. | |||
bool isNearSphere; | bool isNearSphere; | |||
string toString() const { | string toString() const { | |||
stringstream ss; | stringstream ss; | |||
ss << " field=" << field; | ss << " field=" << field; | |||
ss << " maxdist=" << maxDistance; | ||||
ss << " isNearSphere=" << isNearSphere; | ||||
return ss.str(); | return ss.str(); | |||
} | } | |||
private: | private: | |||
bool parseLegacyQuery(const BSONObj &obj); | bool parseLegacyQuery(const BSONObj &obj); | |||
bool parseNewQuery(const BSONObj &obj); | bool parseNewQuery(const BSONObj &obj); | |||
}; | }; | |||
// This represents either a $within or a $geoIntersects. | // This represents either a $within or a $geoIntersects. | |||
class GeoQuery { | class GeoQuery { | |||
public: | public: | |||
GeoQuery() : field(""), predicate(INVALID) {} | GeoQuery() : field(""), predicate(INVALID), _uniqueDocs(true) {} | |||
GeoQuery(const string& f) : field(f), predicate(INVALID) {} | GeoQuery(const string& f) : field(f), predicate(INVALID), _uniqueDocs(true) {}
enum Predicate { | enum Predicate { | |||
WITHIN, | WITHIN, | |||
INTERSECT, | INTERSECT, | |||
INVALID | INVALID | |||
}; | }; | |||
bool parseFrom(const BSONObj &obj); | bool parseFrom(const BSONObj &obj); | |||
bool satisfiesPredicate(const GeometryContainer &otherContainer) const; | bool satisfiesPredicate(const GeometryContainer &otherContainer) const;
bool hasS2Region() const; | bool hasS2Region() const; | |||
const S2Region& getRegion() const; | const S2Region& getRegion() const; | |||
string getField() const { return field; } | string getField() const { return field; } | |||
Predicate getPred() const { return predicate; } | Predicate getPred() const { return predicate; } | |||
const GeometryContainer& getGeometry() const { return geoContainer; } | const GeometryContainer& getGeometry() const { return geoContainer; } | |||
bool uniqueDocs() const { return _uniqueDocs; } | ||||
private: | private: | |||
// Try to parse the provided object into the right place. | // Try to parse the provided object into the right place. | |||
bool parseLegacyQuery(const BSONObj &obj); | bool parseLegacyQuery(const BSONObj &obj); | |||
bool parseNewQuery(const BSONObj &obj); | bool parseNewQuery(const BSONObj &obj); | |||
// Name of the field in the query. | // Name of the field in the query. | |||
string field; | string field; | |||
GeometryContainer geoContainer; | GeometryContainer geoContainer; | |||
Predicate predicate; | Predicate predicate; | |||
bool _uniqueDocs; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 5 change blocks. | ||||
3 lines changed or deleted | 10 lines changed or added | |||
index_bounds.h | index_bounds.h | |||
---|---|---|---|---|
skipping to change at line 125 | skipping to change at line 125 | |||
/** | /** | |||
* The states of a key from an index scan. See checkKey below. | * The states of a key from an index scan. See checkKey below. | |||
*/ | */ | |||
enum KeyState { | enum KeyState { | |||
VALID, | VALID, | |||
MUST_ADVANCE, | MUST_ADVANCE, | |||
DONE, | DONE, | |||
}; | }; | |||
/** | /** | |||
* Is 'key' a valid key? Note that this differs from checkKey, which assumes that it
* receives keys in sorted order. | ||||
*/ | ||||
bool isValidKey(const BSONObj& key); | ||||
/** | ||||
* This function checks if the key is within the bounds we're iterating over and updates any | * This function checks if the key is within the bounds we're iterating over and updates any
* internal state required to efficiently determine if the key is w ithin our bounds. | * internal state required to efficiently determine if the key is w ithin our bounds. | |||
* | * | |||
* Possible outcomes: | * Possible outcomes: | |||
* | * | |||
* 1. The key is in our bounds. Returns VALID. Caller can use the data associated with the | * 1. The key is in our bounds. Returns VALID. Caller can use the data associated with the | |||
* key. | * key. | |||
* | * | |||
* 2. The key is not in our bounds but has not exceeded the maximum value in our bounds. | * 2. The key is not in our bounds but has not exceeded the maximum value in our bounds. | |||
* Returns MUST_ADVANCE. Caller must advance to the key provided in the out parameters and | * Returns MUST_ADVANCE. Caller must advance to the key provided in the out parameters and
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 7 lines changed or added | |||
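index_bounds.h's checkKey() contract yields one of three outcomes per key: VALID (consume it), MUST_ADVANCE (seek forward), or DONE (stop). A standalone sketch of a scan loop driven by that state machine, with integers standing in for index keys and a toy bounds predicate:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

enum KeyState { VALID, MUST_ADVANCE, DONE };

// Toy bounds: keys in [10, 20) or [40, 50); anything at or past 50 ends the scan.
KeyState checkKey(int key) {
    if (key >= 50) return DONE;
    if ((key >= 10 && key < 20) || (key >= 40 && key < 50)) return VALID;
    return MUST_ADVANCE;
}

int main() {
    std::vector<int> sortedKeys;
    for (int k = 0; k < 60; k += 3) sortedKeys.push_back(k);

    // The caller consumes VALID keys, skips MUST_ADVANCE ones (a real scan
    // would seek forward rather than step one-by-one), and stops on DONE.
    for (size_t i = 0; i < sortedKeys.size(); ++i) {
        KeyState state = checkKey(sortedKeys[i]);
        if (state == DONE) break;
        if (state == MUST_ADVANCE) continue;
        std::cout << "use key " << sortedKeys[i] << "\n";
    }
    return 0;
}
```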
index_bounds_builder.h | index_bounds_builder.h | |||
---|---|---|---|---|
skipping to change at line 75 | skipping to change at line 75 | |||
static void translateAndIntersect(const MatchExpression* expr, const BSONElement& elt, | static void translateAndIntersect(const MatchExpression* expr, const BSONElement& elt,
OrderedIntervalList* oilOut, bool* exactOut); | OrderedIntervalList* oilOut, bool* exactOut);
/** | /** | |||
* Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds | * Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds | |||
* with the bounds in oilOut, which is an in/out parameter. | * with the bounds in oilOut, which is an in/out parameter. | |||
*/ | */ | |||
static void translateAndUnion(const MatchExpression* expr, const BSONElement& elt, | static void translateAndUnion(const MatchExpression* expr, const BSONElement& elt,
OrderedIntervalList* oilOut, bool* exactOut); | OrderedIntervalList* oilOut, bool* exactOut);
private: | ||||
friend class ExpressionMapping; | ||||
/** | /** | |||
* Make a range interval from the provided object. | * Make a range interval from the provided object. | |||
* The object must have exactly two fields. The first field is the start, the second the | * The object must have exactly two fields. The first field is the start, the second the | |||
* end. | * end. | |||
* The two inclusive flags indicate whether or not the start/end fields are included in the | * The two inclusive flags indicate whether or not the start/end fields are included in the
* interval (closed interval if included, open if not). | * interval (closed interval if included, open if not). | |||
*/ | */ | |||
static Interval makeRangeInterval(const BSONObj& obj, bool startInclusive, | static Interval makeRangeInterval(const BSONObj& obj, bool startInclusive,
bool endInclusive); | bool endInclusive); | |||
static Interval makeRangeInterval(const string& start, const string & end, | static Interval makeRangeInterval(const string& start, const string & end, | |||
End of changes. 1 change blocks. | ||||
3 lines changed or deleted | 0 lines changed or added | |||
index_descriptor.h | index_descriptor.h | |||
---|---|---|---|---|
skipping to change at line 33 | skipping to change at line 33 | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also de lete | * exception statement from all source files in the program, then also de lete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/db/index.h" // For IndexDetails. | #include "mongo/db/storage/index_details.h" // For IndexDetails. | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/namespace_details.h" // For NamespaceDetails. | #include "mongo/db/namespace_details.h" // For NamespaceDetails. | |||
#include "mongo/db/structure/collection.h" | ||||
#include "mongo/util/stacktrace.h" | ||||
namespace mongo { | namespace mongo { | |||
class IndexCatalog; | ||||
/** | /** | |||
* OnDiskIndexData (aka IndexDetails) is memory-mapped on-disk index data. | * OnDiskIndexData (aka IndexDetails) is memory-mapped on-disk index data.
* It contains two DiskLocs: | * It contains two DiskLocs: | |||
* The first points to the head of the index. This is currently turned into a Btree node. | * The first points to the head of the index. This is currently turned into a Btree node. | |||
* The second points to a BSONObj which describes the index. | * The second points to a BSONObj which describes the index. | |||
*/ | */ | |||
typedef IndexDetails OnDiskIndexData; | typedef IndexDetails OnDiskIndexData; | |||
/** | /** | |||
* A cache of information computed from the memory-mapped per-index data (OnDiskIndexData). | * A cache of information computed from the memory-mapped per-index data (OnDiskIndexData).
skipping to change at line 60 | skipping to change at line 65 | |||
* mutable "head" pointer which is index-specific. | * mutable "head" pointer which is index-specific. | |||
* | * | |||
* All synchronization is the responsibility of the caller. | * All synchronization is the responsibility of the caller. | |||
*/ | */ | |||
class IndexDescriptor { | class IndexDescriptor { | |||
public: | public: | |||
/** | /** | |||
* OnDiskIndexData is a pointer to the memory mapped per-index data. | * OnDiskIndexData is a pointer to the memory mapped per-index data.
* infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData. | * infoObj is a copy of the index-describing BSONObj contained in the OnDiskIndexData.
*/ | */ | |||
IndexDescriptor(NamespaceDetails* namespaceDetails, int indexNumber, OnDiskIndexData* data, | IndexDescriptor(Collection* collection, int indexNumber, OnDiskIndexData* data,
BSONObj infoObj) | BSONObj infoObj)
: _namespaceDetails(namespaceDetails), _indexNumber(indexNumber), _onDiskData(data), | : _magic(123987),
_infoObj(infoObj), _numFields(infoObj.getObjectField("key").nFields()) { } | _collection(collection), _indexNumber(indexNumber), _onDiskData(data),
 | _infoObj(infoObj.getOwned()),
_numFields(infoObj.getObjectField("key").nFields()), | ||||
_keyPattern(infoObj.getObjectField("key").getOwned()), | ||||
_indexName(infoObj.getStringField("name")), | ||||
_parentNS(infoObj.getStringField("ns")), | ||||
_isIdIndex(IndexDetails::isIdIndexPattern( _keyPattern )), | ||||
_sparse(infoObj["sparse"].trueValue()), | ||||
_dropDups(infoObj["dropDups"].trueValue()), | ||||
_unique( _isIdIndex || infoObj["unique"].trueValue() ) | ||||
{ | ||||
_indexNamespace = _parentNS + ".$" + _indexName;
_version = 0; | ||||
BSONElement e = _infoObj["v"]; | ||||
if ( e.isNumber() ) { | ||||
_version = e.numberInt(); | ||||
} | ||||
} | ||||
~IndexDescriptor() { | ||||
_magic = 555; | ||||
} | ||||
// XXX this is terrible | ||||
IndexDescriptor* clone() const { | ||||
return new IndexDescriptor(_collection, _indexNumber, _onDiskData, _infoObj);
} | ||||
// | // | |||
// Information about the key pattern. | // Information about the key pattern. | |||
// | // | |||
/** | /** | |||
* Return the user-provided index key pattern. | * Return the user-provided index key pattern. | |||
* Example: {geo: "2dsphere", nonGeo: 1} | * Example: {geo: "2dsphere", nonGeo: 1} | |||
* Example: {foo: 1, bar: -1} | * Example: {foo: 1, bar: -1} | |||
*/ | */ | |||
BSONObj keyPattern() const { return _infoObj.getObjectField("key"); } | const BSONObj& keyPattern() const { _checkOk(); return _keyPattern; } | |||
// How many fields do we index / are in the key pattern? | // How many fields do we index / are in the key pattern? | |||
int getNumFields() const { return _numFields; } | int getNumFields() const { _checkOk(); return _numFields; } | |||
// | // | |||
// Information about the index's namespace / collection. | // Information about the index's namespace / collection. | |||
// | // | |||
// Return the name of the index. | // Return the name of the index. | |||
string indexName() const { return _infoObj.getStringField("name"); } | const string& indexName() const { _checkOk(); return _indexName; } | |||
// Return the name of the indexed collection. | // Return the name of the indexed collection. | |||
string parentNS() const { return _infoObj.getStringField("ns"); } | const string& parentNS() const { return _parentNS; } | |||
// Return the name of this index's storage area (database.table.$in dex) | // Return the name of this index's storage area (database.table.$in dex) | |||
string indexNamespace() const { | const string& indexNamespace() const { return _indexNamespace; } | |||
string s = parentNS(); | ||||
verify(!s.empty()); | ||||
s += ".$"; | ||||
s += indexName(); | ||||
return s; | ||||
} | ||||
// | // | |||
// Properties every index has | // Properties every index has | |||
// | // | |||
// Return what version of index this is. | // Return what version of index this is. | |||
int version() const { | int version() const { return _version; } | |||
BSONElement e = _infoObj["v"]; | ||||
if (NumberInt == e.type()) { | ||||
return e.Int(); | ||||
} else { | ||||
return 0; | ||||
} | ||||
} | ||||
// May each key only occur once? | // May each key only occur once? | |||
bool unique() const { return _infoObj["unique"].trueValue(); } | bool unique() const { return _unique; } | |||
// Is dropDups set on this index? | // Is dropDups set on this index? | |||
bool dropDups() const { return _infoObj.getBoolField("dropDups"); } | bool dropDups() const { return _dropDups; } | |||
// Is this index sparse? | // Is this index sparse? | |||
bool isSparse() const { return _infoObj["sparse"].trueValue(); } | bool isSparse() const { return _sparse; } | |||
// Is this index multikey? | // Is this index multikey? | |||
bool isMultikey() const { return _namespaceDetails->isMultikey(_indexNumber); } | bool isMultikey() const { _checkOk(); return _collection->details()->isMultikey(_indexNumber); }
bool isIdIndex() const { _checkOk(); return _isIdIndex; } | ||||
// | // | |||
// Properties that are Index-specific. | // Properties that are Index-specific. | |||
// | // | |||
// Allow access to arbitrary fields in the per-index info object. Some indices stash | // Allow access to arbitrary fields in the per-index info object. Some indices stash | |||
// index-specific data there. | // index-specific data there. | |||
BSONElement getInfoElement(const string& name) { return _infoObj[na me]; } | BSONElement getInfoElement(const string& name) { return _infoObj[na me]; } | |||
// | // | |||
// "Internals" of accessing the index, used by IndexAccessMethod(s) . | // "Internals" of accessing the index, used by IndexAccessMethod(s) . | |||
// | // | |||
// Return the memory-mapped index data block. | // Return the memory-mapped index data block. | |||
OnDiskIndexData& getOnDisk() { return* _onDiskData; } | OnDiskIndexData& getOnDisk() { _checkOk(); return *_onDiskData; } | |||
// Return the mutable head of the index. | // Return the mutable head of the index. | |||
DiskLoc& getHead() { return _onDiskData->head; } | const DiskLoc& getHead() const { _checkOk(); return _onDiskData->he ad; } | |||
// Return a (rather compact) string representation. | // Return a (rather compact) string representation. | |||
string toString() { return _infoObj.toString(); } | string toString() const { _checkOk(); return _infoObj.toString(); } | |||
// Return the info object. | // Return the info object. | |||
BSONObj infoObj() { return _infoObj; } | const BSONObj& infoObj() const { _checkOk(); return _infoObj; } | |||
// Set multikey attribute. We never unset it. | // Set multikey attribute. We never unset it. | |||
void setMultikey() { | void setMultikey() { | |||
_namespaceDetails->setIndexIsMultikey(parentNS().c_str(), _indexNumber); | _collection->getIndexCatalog()->markMultikey( this );
} | } | |||
// Is this index being created in the background? | // Is this index being created in the background? | |||
bool isBackgroundIndex() { | bool isBackgroundIndex() const { | |||
return _indexNumber >= _namespaceDetails->getCompletedIndexCount(); | return _indexNumber >= _collection->details()->getCompletedIndexCount();
} | } | |||
// this is the collection over which the index is over | ||||
Collection* getIndexedCollection() const { return _collection; } | ||||
// this is the owner of this IndexDescriptor | ||||
IndexCatalog* getIndexCatalog() const { return _collection->getIndexCatalog(); }
private: | private: | |||
// Related catalog information. | ||||
NamespaceDetails* _namespaceDetails; | void _checkOk() const { | |||
if ( _magic == 123987 ) | ||||
return; | ||||
log() << "uh oh: " << (void*)(this) << " " << _magic; | ||||
verify(0); | ||||
} | ||||
int getIndexNumber() const { return _indexNumber; } | ||||
int _magic; | ||||
// Related catalog information of the parent collection | ||||
Collection* _collection; | ||||
// What # index are we in the catalog represented by _namespaceDetails? Needed for setting | // What # index are we in the catalog represented by _namespaceDetails? Needed for setting
// and getting multikey. | // and getting multikey. | |||
int _indexNumber; | int _indexNumber; | |||
OnDiskIndexData* _onDiskData; | OnDiskIndexData* _onDiskData; | |||
// The BSONObj describing the index. Accessed through the various members above. | // The BSONObj describing the index. Accessed through the various members above. | |||
const BSONObj _infoObj; | const BSONObj _infoObj; | |||
// How many fields are indexed? | // --- cached data from _infoObj | |||
int64_t _numFields; | ||||
int64_t _numFields; // How many fields are indexed? | ||||
BSONObj _keyPattern; | ||||
string _indexName; | ||||
string _parentNS; | ||||
string _indexNamespace; | ||||
bool _isIdIndex; | ||||
bool _sparse; | ||||
bool _dropDups; | ||||
bool _unique; | ||||
int _version; | ||||
friend class IndexCatalog; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 24 change blocks. | ||||
42 lines changed or deleted | 94 lines changed or added | |||
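The rewritten IndexDescriptor above parses the index info object once in its constructor and serves keyPattern(), indexName(), unique() and friends from cached members instead of re-reading the BSONObj on every call. A standalone sketch of that parse-once/cache pattern over a plain map standing in for the info document (names and fields here are illustrative, not the real BSON API):

```cpp
#include <map>
#include <string>

// Stand-in for the index info document ({"name": ..., "ns": ..., "unique": ...}).
typedef std::map<std::string, std::string> InfoDoc;

class MiniDescriptor {
public:
    explicit MiniDescriptor(const InfoDoc& info)
        : _indexName(lookup(info, "name")),
          _parentNS(lookup(info, "ns")),
          _unique(lookup(info, "unique") == "true") {
        // Derived value computed once, like _indexNamespace in the diff above.
        _indexNamespace = _parentNS + ".$" + _indexName;
    }

    // Accessors return cached members by const reference; no re-parsing per call.
    const std::string& indexName() const { return _indexName; }
    const std::string& parentNS() const { return _parentNS; }
    const std::string& indexNamespace() const { return _indexNamespace; }
    bool unique() const { return _unique; }

private:
    static std::string lookup(const InfoDoc& info, const std::string& key) {
        InfoDoc::const_iterator it = info.find(key);
        return it == info.end() ? std::string() : it->second;
    }

    std::string _indexName;
    std::string _parentNS;
    std::string _indexNamespace;
    bool _unique;
};
```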
index_legacy.h | index_legacy.h | |||
---|---|---|---|---|
skipping to change at line 36 | skipping to change at line 36 | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
namespace mongo { | namespace mongo { | |||
class IndexDetails; | class Collection; | |||
class NamespaceDetails; | class IndexDescriptor; | |||
/** | /** | |||
* There has been some behavior concerning indexed access patterns -- both pre and post-index | * There has been some behavior concerning indexed access patterns -- both pre and post-index
* construction -- that does not quite fit in the access pattern model implemented in | * construction -- that does not quite fit in the access pattern model implemented in | |||
* index/index_access_pattern.h. Such behavior can't be changed in the current implementation of | * index/index_access_pattern.h. Such behavior can't be changed in the current implementation of | |||
* the code. | * the code. | |||
* | * | |||
* We grouped such exception/legacy behavior here. | * We grouped such exception/legacy behavior here. | |||
*/ | */ | |||
class IndexLegacy { | class IndexLegacy { | |||
skipping to change at line 68 | skipping to change at line 68 | |||
* Returns the BSONObj that is inserted into an index when the object is missing the keys | * Returns the BSONObj that is inserted into an index when the object is missing the keys
* the index is over. | * the index is over. | |||
* | * | |||
* For every index *except hash*, this is the BSON equivalent of jstNULL. | * For every index *except hash*, this is the BSON equivalent of jstNULL.
* For the hash index, it's the hash of BSON("" << BSONNULL). | * For the hash index, it's the hash of BSON("" << BSONNULL). | |||
* | * | |||
* s/d_split.cpp needs to know this. | * s/d_split.cpp needs to know this. | |||
* | * | |||
* This is a significant leak of index functionality out of the index layer. | * This is a significant leak of index functionality out of the index layer.
*/ | */ | |||
static BSONObj getMissingField(const BSONObj& infoObj); | static BSONObj getMissingField(Collection* collection, const BSONObj& infoObj);
/** | /** | |||
* Perform any post-build steps for this index. | * Perform any post-build steps for this index. | |||
* | * | |||
* This is a no-op unless the index is a FTS index. In that case, we set the flag for using | * This is a no-op unless the index is a FTS index. In that case, we set the flag for using | |||
* power of 2 sizes for space allocation. | * power of 2 sizes for space allocation. | |||
*/ | */ | |||
static void postBuildHook(NamespaceDetails* tableToIndex, const IndexDetails& idx); | static void postBuildHook(Collection* collection, const BSONObj& keyPattern );
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 3 change blocks. | ||||
4 lines changed or deleted | 4 lines changed or added | |||
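The getMissingField() change above is easiest to read from the caller's side. Below is a minimal sketch, not code from the tree: the wrapper name, and the assumption that 'collection' and 'infoObj' are the open Collection* and the index spec object, are illustrative only.

    // Hypothetical caller sketch for the new signature (names here are assumptions).
    #include "mongo/db/index_legacy.h"   // include path assumed from this report's file name

    mongo::BSONObj missingKeyEntryFor(mongo::Collection* collection, const mongo::BSONObj& infoObj) {
        // Per the comment above: for every index except a hashed one this compares equal
        // to BSON("" << BSONNULL); for a hashed index it is the hash of that null element.
        return mongo::IndexLegacy::getMissingField(collection, infoObj);
    }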
index_rebuilder.h | index_rebuilder.h | |||
---|---|---|---|---|
skipping to change at line 47 | skipping to change at line 47 | |||
// finishes rebuilding them. After they complete rebuilding, the thread terminates. | // finishes rebuilding them. After they complete rebuilding, the thread terminates. | |||
class IndexRebuilder : public BackgroundJob { | class IndexRebuilder : public BackgroundJob { | |||
public: | public: | |||
IndexRebuilder(); | IndexRebuilder(); | |||
std::string name() const; | std::string name() const; | |||
void run(); | void run(); | |||
private: | private: | |||
/** | /** | |||
* Check each collection in the passed in vector to see if it has any in-progress index | * Check each collection in the passed in list to see if it has any in-progress index
* builds that need to be retried. If so, calls retryIndexBuild. | * builds that need to be retried. If so, calls retryIndexBuild. | |||
*/ | */ | |||
void checkNS(const std::vector<std::string>& nsToCheck); | void checkNS(const std::list<std::string>& nsToCheck); | |||
/**
* Actually retry an index build on a given namespace.
* @param dbName the name of the database for accessing db.system.indexes
* @param nsd the namespace details of the namespace building the index
* @param index the offset into nsd's index array of the partially-built index
*/
void retryIndexBuild(const std::string& dbName,
                     NamespaceDetails* nsd );
}; | }; | |||
extern IndexRebuilder indexRebuilder; | extern IndexRebuilder indexRebuilder; | |||
} | } | |||
End of changes. 2 change blocks. | ||||
14 lines changed or deleted | 2 lines changed or added | |||
index_scan.h | index_scan.h | |||
---|---|---|---|---|
skipping to change at line 49 | skipping to change at line 49 | |||
namespace mongo { | namespace mongo { | |||
class IndexAccessMethod; | class IndexAccessMethod; | |||
class IndexCursor; | class IndexCursor; | |||
class IndexDescriptor; | class IndexDescriptor; | |||
class WorkingSet; | class WorkingSet; | |||
struct IndexScanParams { | struct IndexScanParams { | |||
IndexScanParams() : descriptor(NULL), direction(1), limit(0), | IndexScanParams() : descriptor(NULL), direction(1), limit(0), | |||
forceBtreeAccessMethod(false) { } | forceBtreeAccessMethod(false), doNotDedup(false) { }
IndexDescriptor* descriptor; | IndexDescriptor* descriptor; | |||
IndexBounds bounds; | IndexBounds bounds; | |||
int direction; | int direction; | |||
// This only matters for 2d indices and will be ignored by every other index. | // This only matters for 2d indices and will be ignored by every other index.
int limit; | int limit; | |||
// Special indices internally open an IndexCursor over themselves but as a straight Btree. | // Special indices internally open an IndexCursor over themselves but as a straight Btree.
bool forceBtreeAccessMethod; | bool forceBtreeAccessMethod; | |||
bool doNotDedup; | ||||
}; | }; | |||
/** | /** | |||
* Stage scans over an index from startKey to endKey, returning results that pass the provided | * Stage scans over an index from startKey to endKey, returning results that pass the provided | |||
* filter. Internally dedups on DiskLoc. | * filter. Internally dedups on DiskLoc. | |||
* | * | |||
* Sub-stage preconditions: None. Is a leaf and consumes no stage data. | * Sub-stage preconditions: None. Is a leaf and consumes no stage data.
*/ | */ | |||
class IndexScan : public PlanStage { | class IndexScan : public PlanStage { | |||
public: | public: | |||
skipping to change at line 93 | skipping to change at line 95 | |||
virtual PlanStageStats* getStats(); | virtual PlanStageStats* getStats(); | |||
private: | private: | |||
/** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */ | /** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */ | |||
void checkEnd(); | void checkEnd(); | |||
// The WorkingSet we annotate with results. Not owned by us. | // The WorkingSet we annotate with results. Not owned by us. | |||
WorkingSet* _workingSet; | WorkingSet* _workingSet; | |||
// Index access. | // Index access. | |||
scoped_ptr<IndexAccessMethod> _iam; | IndexAccessMethod* _iam; // owned by Collection -> IndexCatalog | |||
scoped_ptr<IndexCursor> _indexCursor; | scoped_ptr<IndexCursor> _indexCursor; | |||
scoped_ptr<IndexDescriptor> _descriptor; | IndexDescriptor* _descriptor; // owned by Collection -> IndexCatalog
// Have we hit the end of the index scan? | // Have we hit the end of the index scan? | |||
bool _hitEnd; | bool _hitEnd; | |||
// Contains expressions only over fields in the index key. We assume this is built | // Contains expressions only over fields in the index key. We assume this is built
// correctly by whomever creates this class. | // correctly by whomever creates this class. | |||
// The filter is not owned by us. | // The filter is not owned by us. | |||
const MatchExpression* _filter; | const MatchExpression* _filter; | |||
// Could our index have duplicates? If so, we use _returned to dedup. | // Could our index have duplicates? If so, we use _returned to dedup.
End of changes. 4 change blocks. | ||||
3 lines changed or deleted | 5 lines changed or added | |||
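To make the IndexScanParams fields above concrete, here is a small sketch of filling them in for a plain forward range scan. It assumes an IndexDescriptor*, a WorkingSet*, and a filter are already in scope, and that the stage is constructed from (params, workingSet, filter), which is declared in the part of the header not shown here.

    // Sketch only; 'descriptor', 'ws', and 'filter' are assumptions, not part of this header.
    IndexScanParams params;
    params.descriptor = descriptor;          // not owned by the stage
    params.direction = 1;                    // forward scan
    params.bounds.isSimpleRange = true;
    params.bounds.startKey = BSON("" << 0);  // index keys use empty field names
    params.bounds.endKey = BSON("" << 100);
    params.bounds.endKeyInclusive = false;
    params.forceBtreeAccessMethod = false;   // let special (2d, hashed, ...) indices behave normally
    params.doNotDedup = false;               // keep the default dedup-on-DiskLoc behaviour

    IndexScan* scan = new IndexScan(params, ws, filter);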
internal_plans.h | internal_plans.h | |||
---|---|---|---|---|
skipping to change at line 89 | skipping to change at line 89 | |||
} | } | |||
WorkingSet* ws = new WorkingSet(); | WorkingSet* ws = new WorkingSet(); | |||
CollectionScan* cs = new CollectionScan(params, ws, NULL); | CollectionScan* cs = new CollectionScan(params, ws, NULL); | |||
return new InternalRunner(ns.toString(), cs, ws); | return new InternalRunner(ns.toString(), cs, ws); | |||
} | } | |||
/** | /** | |||
* Return an index scan. Caller owns returned pointer. | * Return an index scan. Caller owns returned pointer. | |||
*/ | */ | |||
static Runner* indexScan(const StringData& ns, NamespaceDetails* nsd, int idxNo, | static Runner* indexScan(IndexDescriptor* descriptor,
const BSONObj& startKey, const BSONObj& endKey, | const BSONObj& startKey, const BSONObj& endKey,
bool endKeyInclusive, Direction direction = FORWARD, | bool endKeyInclusive, Direction direction = FORWARD,
int options = 0) { | int options = 0) {
verify(NULL != nsd); | verify(descriptor);
 | const NamespaceString& ns = descriptor->getIndexedCollection()->ns();
IndexScanParams params; | IndexScanParams params; | |||
params.descriptor = CatalogHack::getDescriptor(nsd, idxNo); | params.descriptor = descriptor; | |||
verify(NULL != params.descriptor); | ||||
params.direction = direction; | params.direction = direction; | |||
params.bounds.isSimpleRange = true; | params.bounds.isSimpleRange = true; | |||
params.bounds.startKey = startKey; | params.bounds.startKey = startKey; | |||
params.bounds.endKey = endKey; | params.bounds.endKey = endKey; | |||
params.bounds.endKeyInclusive = endKeyInclusive; | params.bounds.endKeyInclusive = endKeyInclusive; | |||
// This is always 'true' as this is the new btreecursor. Even if the underlying index | // This is always 'true' as this is the new btreecursor. Even if the underlying index
// is 'special' (ie, expression) we treat it like a Btree. | // is 'special' (ie, expression) we treat it like a Btree. | |||
params.forceBtreeAccessMethod = true; | params.forceBtreeAccessMethod = true; | |||
WorkingSet* ws = new WorkingSet(); | WorkingSet* ws = new WorkingSet(); | |||
End of changes. 3 change blocks. | ||||
4 lines changed or deleted | 6 lines changed or added | |||
interval.h | interval.h | |||
---|---|---|---|---|
skipping to change at line 79 | skipping to change at line 79 | |||
* 'start'/'endIncluded' are true or not. | * 'start'/'endIncluded' are true or not. | |||
*/ | */ | |||
Interval(BSONObj base, bool startIncluded, bool endInclued); | Interval(BSONObj base, bool startIncluded, bool endInclued); | |||
/** Sets the current interval to the given values (see constructor) */ | /** Sets the current interval to the given values (see constructor) */ | |||
void init(BSONObj base, bool startIncluded, bool endIncluded); | void init(BSONObj base, bool startIncluded, bool endIncluded); | |||
/** Returns true if an empty-constructed interval hasn't been init()-ialized yet */ | /** Returns true if an empty-constructed interval hasn't been init()-ialized yet */
bool isEmpty() const; | bool isEmpty() const; | |||
bool isPoint() const {
    return startInclusive && endInclusive && 0 == start.woCompare(end, false);
}
/** | /** | |||
* Swap start and end points of interval. | * Swap start and end points of interval. | |||
*/ | */ | |||
void reverse(); | void reverse(); | |||
/** Returns how 'this' compares to 'other' */ | /** Returns how 'this' compares to 'other' */ | |||
enum IntervalComparison { | enum IntervalComparison { | |||
// | // | |||
// There is some intersection. | // There is some intersection. | |||
// | // | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 5 lines changed or added | |||
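The new isPoint() helper is simplest to see with a concrete interval. The sketch below assumes the 'base' object carries the start element followed by the end element, which is how the constructor comment above reads; that layout is an assumption, not something this excerpt states explicitly.

    // Sketch only, under the assumed (start, end) layout of 'base'.
    mongo::Interval point(BSON("" << 5 << "" << 5), true, true);
    bool a = point.isPoint();                 // true: both bounds inclusive and start == end

    mongo::Interval range(BSON("" << 5 << "" << 10), true, false);
    bool b = range.isPoint();                 // false: the endpoints differ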
keypattern.h | keypattern.h | |||
---|---|---|---|---|
skipping to change at line 87 | skipping to change at line 87 | |||
bool hasField( const StringData& fieldname ) const { | bool hasField( const StringData& fieldname ) const { | |||
return _prefixes.find( fieldname ) != _prefixes.end(); | return _prefixes.find( fieldname ) != _prefixes.end(); | |||
} | } | |||
/* | /* | |||
* Gets the element of this pattern corresponding to the given fieldname. | * Gets the element of this pattern corresponding to the given fieldname.
* Returns eoo if none exists. | * Returns eoo if none exists. | |||
*/ | */ | |||
BSONElement getField( const char* fieldname ) const { return _pattern[ fieldname ]; } | BSONElement getField( const char* fieldname ) const { return _pattern[ fieldname ]; }
/*
* Returns true if the key described by this KeyPattern is a prefix of
* the (potentially) compound key described by 'other'
*/
bool isPrefixOf( const KeyPattern& other ) const {
    return _pattern.isPrefixOf( other.toBSON() );
}
/** | /** | |||
* Is the provided key pattern the index over the ID field? | * Is the provided key pattern the index over the ID field? | |||
* The always required ID index is always {_id: 1} or {_id: -1}. | * The always required ID index is always {_id: 1} or {_id: -1}. | |||
*/ | */ | |||
static bool isIdKeyPattern(const BSONObj& pattern); | static bool isIdKeyPattern(const BSONObj& pattern); | |||
/* Takes a BSONObj whose field names are a prefix of the fields in this keyPattern, and | /* Takes a BSONObj whose field names are a prefix of the fields in this keyPattern, and | |||
* outputs a new bound with MinKey values appended to match the fields in this keyPattern | * outputs a new bound with MinKey values appended to match the fields in this keyPattern
* (or MaxKey values for descending -1 fields). This is useful in sharding for | * (or MaxKey values for descending -1 fields). This is useful in sharding for
* calculating chunk boundaries when tag ranges are specified on a prefix of the actual | * calculating chunk boundaries when tag ranges are specified on a prefix of the actual | |||
End of changes. 1 change blocks. | ||||
9 lines changed or deleted | 0 lines changed or added | |||
lite_parsed_query.h | lite_parsed_query.h | |||
---|---|---|---|---|
skipping to change at line 53 | skipping to change at line 53 | |||
static Status make(const string& ns, | static Status make(const string& ns, | |||
int ntoskip, | int ntoskip, | |||
int ntoreturn, | int ntoreturn, | |||
int queryoptions, | int queryoptions, | |||
const BSONObj& query, | const BSONObj& query, | |||
const BSONObj& proj, | const BSONObj& proj, | |||
const BSONObj& sort, | const BSONObj& sort, | |||
LiteParsedQuery** out); | LiteParsedQuery** out); | |||
/** | /** | |||
* Helper function to parse a "maxTimeMS" BSONElement. Returns the contained value, or an | * Helper functions to parse maxTimeMS from a command object. Returns the contained value,
* error on parsing fail. When passed an EOO-type element, returns 0 (special value for | * or an error on parsing fail. When passed an EOO-type element, returns 0 (special value
* "allow to run indefinitely"). | * for "allow to run indefinitely").
*/ | */
static StatusWith<int> parseMaxTimeMS(const BSONElement& maxTimeMSElt); | static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj);
 | /**
 | * Same as parseMaxTimeMSCommand, but for a query object.
 | */
 | static StatusWith<int> parseMaxTimeMSQuery(const BSONObj& queryObj);
 | // Name of the maxTimeMS command option.
 | static const string cmdOptionMaxTimeMS;
 | // Name of the maxTimeMS query option.
 | static const string queryOptionMaxTimeMS;
const string& ns() const { return _ns; } | const string& ns() const { return _ns; } | |||
bool isLocalDB() const { return _ns.compare(0, 6, "local.") == 0; } | bool isLocalDB() const { return _ns.compare(0, 6, "local.") == 0; } | |||
const BSONObj& getFilter() const { return _filter; } | const BSONObj& getFilter() const { return _filter; } | |||
const BSONObj& getProj() const { return _proj; } | const BSONObj& getProj() const { return _proj; } | |||
const BSONObj& getSort() const { return _sort; } | const BSONObj& getSort() const { return _sort; } | |||
const BSONObj& getHint() const { return _hint; } | const BSONObj& getHint() const { return _hint; } | |||
int getSkip() const { return _ntoskip; } | int getSkip() const { return _ntoskip; } | |||
skipping to change at line 92 | skipping to change at line 103 | |||
int getMaxTimeMS() const { return _maxTimeMS; } | int getMaxTimeMS() const { return _maxTimeMS; } | |||
private: | private: | |||
LiteParsedQuery(); | LiteParsedQuery(); | |||
Status init(const string& ns, int ntoskip, int ntoreturn, int query Options, | Status init(const string& ns, int ntoskip, int ntoreturn, int query Options, | |||
const BSONObj& queryObj, const BSONObj& proj, bool from QueryMessage); | const BSONObj& queryObj, const BSONObj& proj, bool from QueryMessage); | |||
Status initFullQuery(const BSONObj& top); | Status initFullQuery(const BSONObj& top); | |||
static StatusWith<int> parseMaxTimeMS(const BSONElement& maxTimeMSElt);
string _ns; | string _ns; | |||
int _ntoskip; | int _ntoskip; | |||
int _ntoreturn; | int _ntoreturn; | |||
BSONObj _filter; | BSONObj _filter; | |||
BSONObj _sort; | BSONObj _sort; | |||
BSONObj _proj; | BSONObj _proj; | |||
int _options; | int _options; | |||
bool _wantMore; | bool _wantMore; | |||
bool _explain; | bool _explain; | |||
bool _snapshot; | bool _snapshot; | |||
End of changes. 3 change blocks. | ||||
7 lines changed or deleted | 22 lines changed or added | |||
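The split of parseMaxTimeMS into command and query variants reads most naturally from a caller. A plausible sketch for the command side follows; the wrapper function and its name are illustrative, not code from the tree.

    // Hypothetical helper built on the new public API.
    mongo::Status extractMaxTimeMS(const mongo::BSONObj& cmdObj, int* maxTimeMSOut) {
        mongo::StatusWith<int> maxTime = mongo::LiteParsedQuery::parseMaxTimeMSCommand(cmdObj);
        if (!maxTime.isOK())
            return maxTime.getStatus();       // malformed maxTimeMS (wrong type, bad range, ...)
        *maxTimeMSOut = maxTime.getValue();   // 0 means "allow to run indefinitely"
        return mongo::Status::OK();
    }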
master_slave.h | master_slave.h | |||
---|---|---|---|---|
skipping to change at line 47 | skipping to change at line 47 | |||
at the master: | at the master: | |||
local.oplog.$<source> | local.oplog.$<source> | |||
*/ | */ | |||
namespace mongo { | namespace mongo { | |||
// Main entry point for master/slave at startup time. | // Main entry point for master/slave at startup time. | |||
void startMasterSlave(); | void startMasterSlave(); | |||
// externed for use with resync.cpp | ||||
extern volatile int relinquishSyncingSome; | ||||
extern volatile int syncing; | ||||
// Global variable that contains a string telling why master/slave halted | // Global variable that contains a string telling why master/slave halted
extern const char *replAllDead; | extern const char *replAllDead; | |||
/* A replication exception */ | /* A replication exception */ | |||
class SyncException : public DBException { | class SyncException : public DBException { | |||
public: | public: | |||
SyncException() : DBException( "sync exception" , 10001 ) {} | SyncException() : DBException( "sync exception" , 10001 ) {} | |||
}; | }; | |||
namespace threadpool { | namespace threadpool { | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 4 lines changed or added | |||
message_port.h | message_port.h | |||
---|---|---|---|---|
skipping to change at line 128 | skipping to change at line 128 | |||
psock->send( data, context ); | psock->send( data, context ); | |||
} | } | |||
bool connect(SockAddr& farEnd) { | bool connect(SockAddr& farEnd) { | |||
return psock->connect( farEnd ); | return psock->connect( farEnd ); | |||
} | } | |||
#ifdef MONGO_SSL | #ifdef MONGO_SSL | |||
/** | /** | |||
* Initiates the TLS/SSL handshake on this MessagingPort. | * Initiates the TLS/SSL handshake on this MessagingPort. | |||
* When this function returns, further communication on this | * When this function returns, further communication on this | |||
* MessagingPort will be encrypted. | * MessagingPort will be encrypted. | |||
* ssl - Pointer to the global SSLManager. | ||||
* remoteHost - The hostname of the remote server. | ||||
*/ | */ | |||
bool secure( SSLManagerInterface* ssl ) { | bool secure( SSLManagerInterface* ssl, const std::string& remoteHost ) {
return psock->secure( ssl ); | return psock->secure( ssl, remoteHost );
} | }
#endif | #endif | |||
bool isStillConnected() { | bool isStillConnected() { | |||
return psock->isStillConnected(); | return psock->isStillConnected(); | |||
} | } | |||
uint64_t getSockCreationMicroSec() const { | uint64_t getSockCreationMicroSec() const { | |||
return psock->getSockCreationMicroSec(); | return psock->getSockCreationMicroSec(); | |||
} | } | |||
End of changes. 2 change blocks. | ||||
2 lines changed or deleted | 5 lines changed or added | |||
mmap.h | mmap.h | |||
---|---|---|---|---|
skipping to change at line 19 | skipping to change at line 19 | |||
* http://www.apache.org/licenses/LICENSE-2.0 | * http://www.apache.org/licenses/LICENSE-2.0 | |||
* | * | |||
* Unless required by applicable law or agreed to in writing, software | * Unless required by applicable law or agreed to in writing, software | |||
* distributed under the License is distributed on an "AS IS" BASIS, | * distributed under the License is distributed on an "AS IS" BASIS, | |||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and | * See the License for the specific language governing permissions and | |||
* limitations under the License. | * limitations under the License. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <set> | ||||
#include <sstream> | ||||
#include <vector> | ||||
#include <boost/thread/xtime.hpp> | #include <boost/thread/xtime.hpp> | |||
#include "concurrency/rwlock.h" | ||||
#include "mongo/util/concurrency/rwlock.h" | ||||
#include "mongo/util/goodies.h" | ||||
namespace mongo { | namespace mongo { | |||
extern const size_t g_minOSPageSizeBytes; | extern const size_t g_minOSPageSizeBytes; | |||
void minOSPageSizeBytesTest(size_t minOSPageSizeBytes); // lame-o | void minOSPageSizeBytesTest(size_t minOSPageSizeBytes); // lame-o | |||
class MAdvise { | class MAdvise { | |||
void *_p; | void *_p; | |||
unsigned _len; | unsigned _len; | |||
public: | public: | |||
skipping to change at line 90 | skipping to change at line 97 | |||
}; | }; | |||
/** @param fun is called for each MongoFile. | /** @param fun is called for each MongoFile. | |||
called from within a mutex that MongoFile uses. so be careful not to deadlock. | called from within a mutex that MongoFile uses. so be careful not to deadlock.
*/ | */ | |||
template < class F > | template < class F > | |||
static void forEach( F fun ); | static void forEach( F fun ); | |||
/** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically. | /** note: you need to be in mmmutex when using this. forEach (above) handles that for you automatically.
*/ | */ | |||
static set<MongoFile*>& getAllFiles(); | static std::set<MongoFile*>& getAllFiles(); | |||
// callbacks if you need them | // callbacks if you need them | |||
static void (*notifyPreFlush)(); | static void (*notifyPreFlush)(); | |||
static void (*notifyPostFlush)(); | static void (*notifyPostFlush)(); | |||
static int flushAll( bool sync ); // returns n flushed | static int flushAll( bool sync ); // returns n flushed | |||
static long long totalMappedLength(); | static long long totalMappedLength(); | |||
static void closeAllFiles( stringstream &message ); | static void closeAllFiles( std::stringstream &message ); | |||
virtual bool isDurableMappedFile() { return false; } | virtual bool isDurableMappedFile() { return false; } | |||
string filename() const { return _filename; } | string filename() const { return _filename; } | |||
void setFilename(const std::string& fn); | void setFilename(const std::string& fn); | |||
private: | private: | |||
string _filename; | string _filename; | |||
static int _flushAll( bool sync ); // returns n flushed | static int _flushAll( bool sync ); // returns n flushed | |||
protected: | protected: | |||
skipping to change at line 207 | skipping to change at line 214 | |||
; | ; | |||
#else | #else | |||
{ } | { } | |||
#endif | #endif | |||
private: | private: | |||
static void updateLength( const char *filename, unsigned long long &length ); | static void updateLength( const char *filename, unsigned long long &length ); | |||
HANDLE fd; | HANDLE fd; | |||
HANDLE maphandle; | HANDLE maphandle; | |||
vector<void *> views; | std::vector<void *> views; | |||
unsigned long long len; | unsigned long long len; | |||
#ifdef _WIN32 | #ifdef _WIN32 | |||
boost::shared_ptr<mutex> _flushMutex; | boost::shared_ptr<mutex> _flushMutex; | |||
void clearWritableBits(void *privateView); | void clearWritableBits(void *privateView); | |||
public: | public: | |||
static const unsigned ChunkSize = 64 * 1024 * 1024; | static const unsigned ChunkSize = 64 * 1024 * 1024; | |||
static const unsigned NChunks = 1024 * 1024; | static const unsigned NChunks = 1024 * 1024; | |||
#else | #else | |||
void clearWritableBits(void *privateView) { } | void clearWritableBits(void *privateView) { } | |||
skipping to change at line 230 | skipping to change at line 237 | |||
protected: | protected: | |||
/** close the current private view and open a new replacement */ | /** close the current private view and open a new replacement */ | |||
void* remapPrivateView(void *oldPrivateAddr); | void* remapPrivateView(void *oldPrivateAddr); | |||
}; | }; | |||
/** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */ | /** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */ | |||
template < class F > | template < class F > | |||
inline void MongoFile::forEach( F p ) { | inline void MongoFile::forEach( F p ) { | |||
LockMongoFilesShared lklk; | LockMongoFilesShared lklk; | |||
const set<MongoFile*>& mmfiles = MongoFile::getAllFiles(); | const std::set<MongoFile*>& mmfiles = MongoFile::getAllFiles(); | |||
for ( set<MongoFile*>::const_iterator i = mmfiles.begin(); i != mmf | for ( std::set<MongoFile*>::const_iterator i = mmfiles.begin(); i ! | |||
iles.end(); i++ ) | = mmfiles.end(); i++ ) | |||
p(*i); | p(*i); | |||
} | } | |||
#if defined(_WIN32) | #if defined(_WIN32) | |||
class ourbitset { | class ourbitset { | |||
volatile unsigned bits[MemoryMappedFile::NChunks]; // volatile as we are doing double check locking | volatile unsigned bits[MemoryMappedFile::NChunks]; // volatile as we are doing double check locking
public: | public: | |||
ourbitset() { | ourbitset() { | |||
memset((void*) bits, 0, sizeof(bits)); | memset((void*) bits, 0, sizeof(bits)); | |||
} | } | |||
End of changes. 6 change blocks. | ||||
7 lines changed or deleted | 14 lines changed or added | |||
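MongoFile::forEach, shown above, copies the functor it is given and invokes it under MongoFile's own mutex, so the callback must not take other locks. A small sketch of collecting the mapped file names through a pointer (so the results survive the copy):

    // Sketch only: gather the filenames of all currently mapped files.
    struct CollectFilenames {
        std::vector<std::string>* names;
        explicit CollectFilenames(std::vector<std::string>* n) : names(n) {}
        // Runs inside MongoFile's mutex: do not lock or call back into MongoFile here.
        void operator()(mongo::MongoFile* mf) { names->push_back(mf->filename()); }
    };

    std::vector<std::string> names;
    mongo::MongoFile::forEach(CollectFilenames(&names));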
mock_multi_command.h | mock_multi_command.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <deque> | #include <deque> | |||
#include "mongo/base/owned_pointer_vector.h" | #include "mongo/base/owned_pointer_vector.h" | |||
#include "mongo/s/batched_command_response.h" | ||||
#include "mongo/s/multi_command_dispatch.h" | #include "mongo/s/multi_command_dispatch.h" | |||
#include "mongo/s/write_ops/batched_command_response.h" | ||||
#include "mongo/unittest/unittest.h" | #include "mongo/unittest/unittest.h" | |||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* A ConnectionString endpoint registered with some kind of error, to simulate returning when | * A ConnectionString endpoint registered with some kind of error, to simulate returning when
* the endpoint is used. | * the endpoint is used. | |||
*/ | */ | |||
struct MockEndpoint { | struct MockEndpoint { | |||
skipping to change at line 100 | skipping to change at line 100 | |||
/** | /** | |||
* Returns an error response if the next pending endpoint returned has a corresponding | * Returns an error response if the next pending endpoint returned has a corresponding | |||
* MockEndpoint. | * MockEndpoint. | |||
*/ | */ | |||
Status recvAny( ConnectionString* endpoint, BSONSerializable* response ) { | Status recvAny( ConnectionString* endpoint, BSONSerializable* response ) {
BatchedCommandResponse* batchResponse = // | BatchedCommandResponse* batchResponse = // | |||
static_cast<BatchedCommandResponse*>( response ); | static_cast<BatchedCommandResponse*>( response ); | |||
*endpoint = _pending.front(); | ||||
MockEndpoint* mockEndpoint = releaseByHost( _pending.front() ); | MockEndpoint* mockEndpoint = releaseByHost( _pending.front() ); | |||
_pending.pop_front(); | _pending.pop_front(); | |||
if ( NULL == mockEndpoint ) { | if ( NULL == mockEndpoint ) { | |||
batchResponse->setOk( true ); | batchResponse->setOk( true ); | |||
batchResponse->setN( 0 ); // TODO: Make this accurate | ||||
} | } | |||
else { | else { | |||
batchResponse->setOk( false ); | batchResponse->setOk( false ); | |||
batchResponse->setN( 0 ); | ||||
batchResponse->setErrCode( mockEndpoint->error.getErrCode() ); | batchResponse->setErrCode( mockEndpoint->error.getErrCode() ); | |||
if ( mockEndpoint->error.isErrInfoSet() ) batchResponse->setErrInfo( mockEndpoint | if ( mockEndpoint->error.isErrInfoSet() ) batchResponse->setErrInfo( mockEndpoint
->error.getErrInfo() ); | ->error.getErrInfo() );
batchResponse->setErrMessage( mockEndpoint->error.getErrMessage() ); | batchResponse->setErrMessage( mockEndpoint->error.getErrMessage() );
delete mockEndpoint; | delete mockEndpoint; | |||
} | } | |||
string errMsg; | ||||
ASSERT( batchResponse->isValid( &errMsg ) ); | ||||
return Status::OK(); | return Status::OK(); | |||
} | } | |||
const std::vector<MockEndpoint*>& getEndpoints() const { | const std::vector<MockEndpoint*>& getEndpoints() const { | |||
return _mockEndpoints.vector(); | return _mockEndpoints.vector(); | |||
} | } | |||
private: | private: | |||
// Find a MockEndpoint* by host, and release it so we don't see it again | // Find a MockEndpoint* by host, and release it so we don't see it again | |||
End of changes. 6 change blocks. | ||||
1 lines changed or deleted | 6 lines changed or added | |||
mock_ns_targeter.h | mock_ns_targeter.h | |||
---|---|---|---|---|
skipping to change at line 94 | skipping to change at line 94 | |||
_nss = NamespaceString( _mockRanges.vector().front()->range.ns ); | _nss = NamespaceString( _mockRanges.vector().front()->range.ns ); | |||
} | } | |||
const NamespaceString& getNS() const { | const NamespaceString& getNS() const { | |||
return _nss; | return _nss; | |||
} | } | |||
/** | /** | |||
* Returns a ShardEndpoint for the doc from the mock ranges | * Returns a ShardEndpoint for the doc from the mock ranges | |||
*/ | */ | |||
Status targetDoc( const BSONObj& doc, ShardEndpoint** endpoint ) const { | Status targetInsert( const BSONObj& doc, ShardEndpoint** endpoint ) const {
const std::vector<MockRange*>& ranges = getRanges(); | const std::vector<MockRange*>& ranges = getRanges();
for ( std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end(); | for ( std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end();
++it ) { | ++it ) { | |||
const MockRange* range = *it; | const MockRange* range = *it; | |||
if ( rangeContains( range->range.minKey, range->range.maxKe y, doc ) ) { | if ( rangeContains( range->range.minKey, range->range.maxKe y, doc ) ) { | |||
*endpoint = new ShardEndpoint( range->endpoint ); | *endpoint = new ShardEndpoint( range->endpoint ); | |||
return Status::OK(); | return Status::OK(); | |||
} | } | |||
} | } | |||
return Status( ErrorCodes::UnknownError, "no mock range found f or document" ); | return Status( ErrorCodes::UnknownError, "no mock range found f or document" ); | |||
} | } | |||
/** | /** | |||
* Returns the first ShardEndpoint for the query from the mock ranges. Only can handle | * Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }. | * queries of the form { field : { $gte : <value>, $lt : <value> } }. | |||
*/ | */ | |||
Status targetQuery( const BSONObj& query, std::vector<ShardEndpoint*>* endpoints ) const { | Status targetUpdate( const BatchedUpdateDocument& updateDoc,
 | std::vector<ShardEndpoint*>* endpoints ) const {
KeyRange queryRange = parseRange( query ); | return targetQuery( updateDoc.getQuery(), endpoints );
 | }
const std::vector<MockRange*>& ranges = getRanges(); |
for ( std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end(); |
++it ) { |
const MockRange* range = *it; |
if ( rangeOverlaps( queryRange.minKey, | /**
queryRange.maxKey, | * Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
range->range.minKey, | * queries of the form { field : { $gte : <value>, $lt : <value> } }.
range->range.maxKey ) ) { | */
endpoints->push_back( new ShardEndpoint( range->endpoint ) ); | Status targetDelete( const BatchedDeleteDocument& deleteDoc,
} | std::vector<ShardEndpoint*>* endpoints ) const {
} | return targetQuery( deleteDoc.getQuery(), endpoints );
 | }
if ( endpoints->empty() ) return Status( ErrorCodes::UnknownError, | Status targetAll( std::vector<ShardEndpoint*>* endpoints ) const {
"no mock ranges found for query" ); | // TODO: XXX
 | // No-op
return Status::OK(); | return Status::OK();
} | }
void noteCouldNotTarget() { | void noteCouldNotTarget() { | |||
// No-op | // No-op | |||
} | } | |||
void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ) { | void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ) { | |||
// No-op | // No-op | |||
} | } | |||
skipping to change at line 176 | skipping to change at line 173 | |||
ASSERT( !queryRange[LT.l_].eoo() ); | ASSERT( !queryRange[LT.l_].eoo() ); | |||
BSONObjBuilder minKeyB; | BSONObjBuilder minKeyB; | |||
minKeyB.appendAs( queryRange[GTE.l_], fieldName ); | minKeyB.appendAs( queryRange[GTE.l_], fieldName ); | |||
BSONObjBuilder maxKeyB; | BSONObjBuilder maxKeyB; | |||
maxKeyB.appendAs( queryRange[LT.l_], fieldName ); | maxKeyB.appendAs( queryRange[LT.l_], fieldName ); | |||
return KeyRange( "", minKeyB.obj(), maxKeyB.obj(), BSON( fieldName << 1 ) ); | return KeyRange( "", minKeyB.obj(), maxKeyB.obj(), BSON( fieldName << 1 ) );
} | } | |||
/**
* Returns the first ShardEndpoint for the query from the mock ranges. Only can handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/
Status targetQuery( const BSONObj& query, std::vector<ShardEndpoint*>* endpoints ) const {
    KeyRange queryRange = parseRange( query );
    const std::vector<MockRange*>& ranges = getRanges();
    for ( std::vector<MockRange*>::const_iterator it = ranges.begin(); it != ranges.end();
          ++it ) {
        const MockRange* range = *it;
        if ( rangeOverlaps( queryRange.minKey,
                            queryRange.maxKey,
                            range->range.minKey,
                            range->range.maxKey ) ) {
            endpoints->push_back( new ShardEndpoint( range->endpoint ) );
        }
    }
    if ( endpoints->empty() ) return Status( ErrorCodes::UnknownError,
                                             "no mock ranges found for query" );
    return Status::OK();
}
NamespaceString _nss; | NamespaceString _nss; | |||
// Manually-stored ranges | // Manually-stored ranges | |||
OwnedPointerVector<MockRange> _mockRanges; | OwnedPointerVector<MockRange> _mockRanges; | |||
}; | }; | |||
inline void assertEndpointsEqual( const ShardEndpoint& endpointA, | inline void assertEndpointsEqual( const ShardEndpoint& endpointA, | |||
const ShardEndpoint& endpointB ) { | const ShardEndpoint& endpointB ) { | |||
ASSERT_EQUALS( endpointA.shardName, endpointB.shardName ); | ASSERT_EQUALS( endpointA.shardName, endpointB.shardName ); | |||
ASSERT_EQUALS( endpointA.shardVersion.toLong(), endpointB.shardVersion.toLong() ); | ASSERT_EQUALS( endpointA.shardVersion.toLong(), endpointB.shardVersion.toLong() );
ASSERT_EQUALS( endpointA.shardVersion.epoch(), endpointB.shardVersion.epoch() ); | ASSERT_EQUALS( endpointA.shardVersion.epoch(), endpointB.shardVersion.epoch() );
ASSERT_EQUALS( endpointA.shardHost.toString(), endpointB.shardHost.toString() );
} | } | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 6 change blocks. | ||||
25 lines changed or deleted | 54 lines changed or added | |||
mongobridge_options.h | mongobridge_options.h | |||
---|---|---|---|---|
skipping to change at line 48 | skipping to change at line 48 | |||
MongoBridgeGlobalParams() : port(0), delay(0) { } | MongoBridgeGlobalParams() : port(0), delay(0) { } | |||
}; | }; | |||
extern MongoBridgeGlobalParams mongoBridgeGlobalParams; | extern MongoBridgeGlobalParams mongoBridgeGlobalParams; | |||
Status addMongoBridgeOptions(moe::OptionSection* options); | Status addMongoBridgeOptions(moe::OptionSection* options); | |||
void printMongoBridgeHelp(std::ostream* out); | void printMongoBridgeHelp(std::ostream* out); | |||
Status handlePreValidationMongoBridgeOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoBridgeOptions(const moe::Environment& params);
Status storeMongoBridgeOptions(const moe::Environment& params, | Status storeMongoBridgeOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
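The switch from a Status-returning to a bool-returning pre-validation hook (repeated for every tool below) implies a particular call pattern at startup. The following is a plausible sketch only; 'environment' and 'args' are assumed to come from the options parser, and the exit calls are illustrative rather than the tool's actual startup code.

    // Hypothetical startup wiring around the new bool-returning hook.
    if (!handlePreValidationMongoBridgeOptions(environment)) {
        // An option such as --help was already handled; exit successfully before validation.
        ::_exit(EXIT_SUCCESS);
    }
    Status ret = storeMongoBridgeOptions(environment, args);
    if (!ret.isOK()) {
        std::cerr << ret.toString() << std::endl;
        ::_exit(EXIT_FAILURE);
    }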
mongod_options.h | mongod_options.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/server_options.h" | #include "mongo/db/server_options.h" | |||
#include "mongo/db/storage_options.h" | #include "mongo/db/storage_options.h" | |||
#include "mongo/util/options_parser/environment.h" | ||||
#include "mongo/util/options_parser/option_section.h" | ||||
namespace mongo { | namespace mongo { | |||
namespace optionenvironment { | namespace optionenvironment { | |||
class OptionSection; | class OptionSection; | |||
class Environment; | ||||
} // namespace optionenvironment | } // namespace optionenvironment | |||
namespace moe = mongo::optionenvironment; | namespace moe = mongo::optionenvironment; | |||
struct MongodGlobalParams { | struct MongodGlobalParams { | |||
bool upgrade; | bool upgrade; | |||
bool repair; | bool repair; | |||
bool scriptingEnabled; // --noscripting | bool scriptingEnabled; // --noscripting | |||
MongodGlobalParams() : | MongodGlobalParams() : | |||
skipping to change at line 61 | skipping to change at line 64 | |||
scriptingEnabled(true) | scriptingEnabled(true) | |||
{ } | { } | |||
}; | }; | |||
extern MongodGlobalParams mongodGlobalParams; | extern MongodGlobalParams mongodGlobalParams; | |||
Status addMongodOptions(moe::OptionSection* options); | Status addMongodOptions(moe::OptionSection* options); | |||
void printMongodHelp(const moe::OptionSection& options); | void printMongodHelp(const moe::OptionSection& options); | |||
Status handlePreValidationMongodOptions(const moe::Environment& params, | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongodOptions(const moe::Environment& params,
const std::vector<std::string>& args); | const std::vector<std::string>& args);
Status storeMongodOptions(const moe::Environment& params, const std::vector<std::string>& args); | Status storeMongodOptions(const moe::Environment& params, const std::vector<std::string>& args);
} | } | |||
End of changes. 3 change blocks. | ||||
1 lines changed or deleted | 10 lines changed or added | |||
mongodump_options.h | mongodump_options.h | |||
---|---|---|---|---|
skipping to change at line 42 | skipping to change at line 42 | |||
bool repair; | bool repair; | |||
bool snapShotQuery; | bool snapShotQuery; | |||
}; | }; | |||
extern MongoDumpGlobalParams mongoDumpGlobalParams; | extern MongoDumpGlobalParams mongoDumpGlobalParams; | |||
Status addMongoDumpOptions(moe::OptionSection* options); | Status addMongoDumpOptions(moe::OptionSection* options); | |||
void printMongoDumpHelp(std::ostream* out); | void printMongoDumpHelp(std::ostream* out); | |||
Status handlePreValidationMongoDumpOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoDumpOptions(const moe::Environment& params);
Status storeMongoDumpOptions(const moe::Environment& params, | Status storeMongoDumpOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
mongoexport_options.h | mongoexport_options.h | |||
---|---|---|---|---|
skipping to change at line 46 | skipping to change at line 46 | |||
unsigned int skip; | unsigned int skip; | |||
unsigned int limit; | unsigned int limit; | |||
}; | }; | |||
extern MongoExportGlobalParams mongoExportGlobalParams; | extern MongoExportGlobalParams mongoExportGlobalParams; | |||
Status addMongoExportOptions(moe::OptionSection* options); | Status addMongoExportOptions(moe::OptionSection* options); | |||
void printMongoExportHelp(std::ostream* out); | void printMongoExportHelp(std::ostream* out); | |||
Status handlePreValidationMongoExportOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoExportOptions(const moe::Environment& params);
Status storeMongoExportOptions(const moe::Environment& params, | Status storeMongoExportOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
mongofiles_options.h | mongofiles_options.h | |||
---|---|---|---|---|
skipping to change at line 42 | skipping to change at line 42 | |||
std::string command; | std::string command; | |||
std::string gridFSFilename; | std::string gridFSFilename; | |||
}; | }; | |||
extern MongoFilesGlobalParams mongoFilesGlobalParams; | extern MongoFilesGlobalParams mongoFilesGlobalParams; | |||
Status addMongoFilesOptions(moe::OptionSection* options); | Status addMongoFilesOptions(moe::OptionSection* options); | |||
void printMongoFilesHelp(std::ostream* out); | void printMongoFilesHelp(std::ostream* out); | |||
Status handlePreValidationMongoFilesOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoFilesOptions(const moe::Environment& params);
Status storeMongoFilesOptions(const moe::Environment& params, | Status storeMongoFilesOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
mongoimport_options.h | mongoimport_options.h | |||
---|---|---|---|---|
skipping to change at line 47 | skipping to change at line 47 | |||
bool jsonArray; | bool jsonArray; | |||
bool doimport; | bool doimport; | |||
}; | }; | |||
extern MongoImportGlobalParams mongoImportGlobalParams; | extern MongoImportGlobalParams mongoImportGlobalParams; | |||
Status addMongoImportOptions(moe::OptionSection* options); | Status addMongoImportOptions(moe::OptionSection* options); | |||
void printMongoImportHelp(std::ostream* out); | void printMongoImportHelp(std::ostream* out); | |||
Status handlePreValidationMongoImportOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoImportOptions(const moe::Environment& params);
Status storeMongoImportOptions(const moe::Environment& params, | Status storeMongoImportOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
mongooplog_options.h | mongooplog_options.h | |||
---|---|---|---|---|
skipping to change at line 40 | skipping to change at line 40 | |||
std::string from; | std::string from; | |||
std::string ns; | std::string ns; | |||
}; | }; | |||
extern MongoOplogGlobalParams mongoOplogGlobalParams; | extern MongoOplogGlobalParams mongoOplogGlobalParams; | |||
Status addMongoOplogOptions(moe::OptionSection* options); | Status addMongoOplogOptions(moe::OptionSection* options); | |||
void printMongoOplogHelp(std::ostream* out); | void printMongoOplogHelp(std::ostream* out); | |||
Status handlePreValidationMongoOplogOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoOplogOptions(const moe::Environment& params);
Status storeMongoOplogOptions(const moe::Environment& params, | Status storeMongoOplogOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
mongorestore_options.h | mongorestore_options.h | |||
---|---|---|---|---|
skipping to change at line 45 | skipping to change at line 45 | |||
int w; | int w; | |||
std::string restoreDirectory; | std::string restoreDirectory; | |||
}; | }; | |||
extern MongoRestoreGlobalParams mongoRestoreGlobalParams; | extern MongoRestoreGlobalParams mongoRestoreGlobalParams; | |||
Status addMongoRestoreOptions(moe::OptionSection* options); | Status addMongoRestoreOptions(moe::OptionSection* options); | |||
void printMongoRestoreHelp(std::ostream* out); | void printMongoRestoreHelp(std::ostream* out); | |||
Status handlePreValidationMongoRestoreOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoRestoreOptions(const moe::Environment& params);
Status storeMongoRestoreOptions(const moe::Environment& params, | Status storeMongoRestoreOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
mongos_options.h | mongos_options.h | |||
---|---|---|---|---|
skipping to change at line 21 | skipping to change at line 21 | |||
* GNU Affero General Public License for more details. | * GNU Affero General Public License for more details. | |||
* | * | |||
* You should have received a copy of the GNU Affero General Public Lice nse | * You should have received a copy of the GNU Affero General Public Lice nse | |||
* along with this program. If not, see <http://www.gnu.org/licenses/>. | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/server_options.h" | #include "mongo/db/server_options.h" | |||
#include "mongo/util/options_parser/environment.h" | ||||
#include "mongo/util/options_parser/option_section.h" | ||||
namespace mongo { | namespace mongo { | |||
namespace optionenvironment { | namespace optionenvironment { | |||
class OptionSection; | class OptionSection; | |||
class Environment; | ||||
} // namespace optionenvironment | } // namespace optionenvironment | |||
namespace moe = mongo::optionenvironment; | namespace moe = mongo::optionenvironment; | |||
struct MongosGlobalParams { | struct MongosGlobalParams { | |||
std::vector<std::string> configdbs; | std::vector<std::string> configdbs; | |||
bool upgrade; | bool upgrade; | |||
MongosGlobalParams() : | MongosGlobalParams() : | |||
upgrade(false) | upgrade(false) | |||
{ } | { } | |||
}; | }; | |||
extern MongosGlobalParams mongosGlobalParams; | extern MongosGlobalParams mongosGlobalParams; | |||
Status addMongosOptions(moe::OptionSection* options); | Status addMongosOptions(moe::OptionSection* options); | |||
void printMongosHelp(const moe::OptionSection& options); | void printMongosHelp(const moe::OptionSection& options); | |||
Status handlePreValidationMongosOptions(const moe::Environment& params, | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongosOptions(const moe::Environment& params,
const std::vector<std::string>& args); | const std::vector<std::string>& args);
Status storeMongosOptions(const moe::Environment& params, const std::vector<std::string>& args); | Status storeMongosOptions(const moe::Environment& params, const std::vector<std::string>& args);
bool isMongos(); | bool isMongos(); | |||
} | } | |||
End of changes. 3 change blocks. | ||||
1 lines changed or deleted | 10 lines changed or added | |||
mongostat_options.h | mongostat_options.h | |||
---|---|---|---|---|
skipping to change at line 45 | skipping to change at line 45 | |||
int sleep; | int sleep; | |||
std::string url; | std::string url; | |||
}; | }; | |||
extern MongoStatGlobalParams mongoStatGlobalParams; | extern MongoStatGlobalParams mongoStatGlobalParams; | |||
Status addMongoStatOptions(moe::OptionSection* options); | Status addMongoStatOptions(moe::OptionSection* options); | |||
void printMongoStatHelp(std::ostream* out); | void printMongoStatHelp(std::ostream* out); | |||
Status handlePreValidationMongoStatOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoStatOptions(const moe::Environment& params);
Status storeMongoStatOptions(const moe::Environment& params, | Status storeMongoStatOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
mongotop_options.h | mongotop_options.h | |||
---|---|---|---|---|
skipping to change at line 39 | skipping to change at line 39 | |||
bool useLocks; | bool useLocks; | |||
int sleep; | int sleep; | |||
}; | }; | |||
extern MongoTopGlobalParams mongoTopGlobalParams; | extern MongoTopGlobalParams mongoTopGlobalParams; | |||
Status addMongoTopOptions(moe::OptionSection* options); | Status addMongoTopOptions(moe::OptionSection* options); | |||
void printMongoTopHelp(std::ostream* out); | void printMongoTopHelp(std::ostream* out); | |||
Status handlePreValidationMongoTopOptions(const moe::Environment& params); | /**
 | * Handle options that should come before validation, such as "help".
 | *
 | * Returns false if an option was found that implies we should prematurely exit with success.
 | */
 | bool handlePreValidationMongoTopOptions(const moe::Environment& params);
Status storeMongoTopOptions(const moe::Environment& params, | Status storeMongoTopOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
msg.h | msg.h | |||
---|---|---|---|---|
skipping to change at line 38 | skipping to change at line 38 | |||
* then also delete it in the license file. | * then also delete it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <deque> | #include <deque> | |||
#include <boost/thread/condition.hpp> | #include <boost/thread/condition.hpp> | |||
#include <boost/function.hpp> | #include <boost/function.hpp> | |||
#include "mongo/util/concurrency/mutex.h" | ||||
#include "task.h" | #include "task.h" | |||
namespace mongo { | namespace mongo { | |||
namespace task { | namespace task { | |||
typedef boost::function<void()> lam; | typedef boost::function<void()> lam; | |||
/** typical usage is: task::fork( new Server("threadname") ); */ | /** typical usage is: task::fork( new Server("threadname") ); */ | |||
class Server : public Task { | class Server : public Task { | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 1 lines changed or added | |||
multi_plan_runner.h | multi_plan_runner.h | |||
---|---|---|---|---|
skipping to change at line 32 | skipping to change at line 32 | |||
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/scoped_ptr.hpp> | #include <boost/scoped_ptr.hpp> | |||
#include <deque> | #include <list> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/exec/working_set.h" | #include "mongo/db/exec/working_set.h" | |||
#include "mongo/db/query/plan_ranker.h" // for CandidatePlan | #include "mongo/db/query/plan_ranker.h" // for CandidatePlan | |||
#include "mongo/db/query/runner.h" | #include "mongo/db/query/runner.h" | |||
#include "mongo/db/query/runner_yield_policy.h" | #include "mongo/db/query/runner_yield_policy.h" | |||
namespace mongo { | namespace mongo { | |||
skipping to change at line 136 | skipping to change at line 136 | |||
size_t _failureCount; | size_t _failureCount; | |||
// We need to cache this so that when we switch from running our candidates to using a | // We need to cache this so that when we switch from running our candidates to using a
// PlanExecutor, we can set the right yielding policy on it. | // PlanExecutor, we can set the right yielding policy on it. | |||
Runner::YieldPolicy _policy; | Runner::YieldPolicy _policy; | |||
// The winner of the plan competition... | // The winner of the plan competition... | |||
boost::scoped_ptr<PlanExecutor> _bestPlan; | boost::scoped_ptr<PlanExecutor> _bestPlan; | |||
// ...and any results it produced while working toward winning. | // ...and any results it produced while working toward winning. | |||
std::deque<WorkingSetID> _alreadyProduced; | std::list<WorkingSetID> _alreadyProduced; | |||
// ...and the solution, for caching. | // ...and the solution, for caching. | |||
boost::scoped_ptr<QuerySolution> _bestSolution; | boost::scoped_ptr<QuerySolution> _bestSolution; | |||
// Candidate plans. | // Candidate plans. | |||
std::vector<CandidatePlan> _candidates; | std::vector<CandidatePlan> _candidates; | |||
// Candidate plans' stats. Owned here. | // Candidate plans' stats. Owned here. | |||
std::vector<PlanStageStats*> _candidateStats; | std::vector<PlanStageStats*> _candidateStats; | |||
// Yielding policy we use when we're running candidates. | // Yielding policy we use when we're running candidates. | |||
boost::scoped_ptr<RunnerYieldPolicy> _yieldPolicy; | boost::scoped_ptr<RunnerYieldPolicy> _yieldPolicy; | |||
// The query that we're trying to figure out the best solution to. | // The query that we're trying to figure out the best solution to. | |||
boost::scoped_ptr<CanonicalQuery> _query; | boost::scoped_ptr<CanonicalQuery> _query; | |||
// | ||||
// Backup plan for sort | ||||
// | ||||
QuerySolution* _backupSolution; | ||||
PlanExecutor* _backupPlan; | ||||
std::list<WorkingSetID> _backupAlreadyProduced; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 3 change blocks. | ||||
2 lines changed or deleted | 10 lines changed or added | |||
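The members listed in the diff above suggest how MultiPlanRunner hands out results: whatever the winning candidate produced during the plan competition sits in _alreadyProduced and is drained before the best plan is asked for more. The following standalone sketch only illustrates that drain-then-delegate pattern; all types are stand-ins (WorkingSetID is replaced by int) and this is not the server's actual implementation.

    #include <cstddef>
    #include <iostream>
    #include <list>
    #include <vector>

    // Stand-in for the winning PlanExecutor: yields precomputed results.
    struct FakeBestPlan {
        std::vector<int> results;
        std::size_t pos;
        FakeBestPlan() : pos(0) {}
        bool getNext(int* out) {
            if (pos >= results.size()) return false;
            *out = results[pos++];
            return true;
        }
    };

    // Results cached while the plan competition ran are returned first,
    // then we fall through to the best plan itself.
    struct FakeMultiPlanRunner {
        std::list<int> alreadyProduced;   // analogous to _alreadyProduced
        FakeBestPlan bestPlan;            // analogous to _bestPlan
        bool getNext(int* out) {
            if (!alreadyProduced.empty()) {
                *out = alreadyProduced.front();
                alreadyProduced.pop_front();
                return true;
            }
            return bestPlan.getNext(out);
        }
    };

    int main() {
        FakeMultiPlanRunner runner;
        runner.alreadyProduced.push_back(1);   // produced during the competition
        runner.bestPlan.results.push_back(2);  // produced after a winner was chosen
        int id;
        while (runner.getNext(&id)) std::cout << id << "\n";  // prints 1, then 2
        return 0;
    }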
namespace.h | namespace.h | |||
---|---|---|---|---|
skipping to change at line 81 | skipping to change at line 81 | |||
bool isExtra() const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */ | bool isExtra() const; /* ends with $extr... -- when true an extra block not a normal NamespaceDetails block */
enum MaxNsLenValue { MaxNsLen = 128 }; | enum MaxNsLenValue { MaxNsLen = 128 }; | |||
private: | private: | |||
char buf[MaxNsLen]; | char buf[MaxNsLen]; | |||
}; | }; | |||
#pragma pack() | #pragma pack() | |||
} // namespace mongo | } // namespace mongo | |||
#include "mongo/db/storage/namespace-inl.h" | #include "mongo/db/catalog/ondisk/namespace-inl.h" | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 0 lines changed or added | |||
namespace_details.h | namespace_details.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/pch.h" | #include "mongo/pch.h" | |||
#include "mongo/db/d_concurrency.h" | #include "mongo/db/d_concurrency.h" | |||
#include "mongo/db/diskloc.h" | #include "mongo/db/diskloc.h" | |||
#include "mongo/db/index.h" | #include "mongo/db/storage/index_details.h" | |||
#include "mongo/db/index_names.h" | #include "mongo/db/index_names.h" | |||
#include "mongo/db/index_set.h" | #include "mongo/db/index_set.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/storage/durable_mapped_file.h" | #include "mongo/db/storage/durable_mapped_file.h" | |||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
#include "mongo/db/querypattern.h" | #include "mongo/db/querypattern.h" | |||
#include "mongo/db/storage/namespace.h" | #include "mongo/db/catalog/ondisk/namespace.h" | |||
#include "mongo/db/storage/namespace_index.h" | #include "mongo/db/catalog/ondisk/namespace_index.h" | |||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
namespace mongo { | namespace mongo { | |||
class Database; | class Database; | |||
class IndexCatalog; | ||||
/** @return true if a client can modify this namespace even though it is under ".system." | /** @return true if a client can modify this namespace even though it is under ".system."
For example <dbname>.system.users is ok for regular clients to update. | For example <dbname>.system.users is ok for regular clients to update.
@param write used when .system.js | @param write used when .system.js | |||
*/ | */ | |||
bool legalClientSystemNS( const StringData& ns , bool write ); | bool legalClientSystemNS( const StringData& ns , bool write ); | |||
/* deleted lists -- linked lists of deleted records -- are placed in 'buckets' of various sizes | /* deleted lists -- linked lists of deleted records -- are placed in 'buckets' of various sizes
so you can look for a deleterecord about the right size. | so you can look for a deleterecord about the right size. | |||
*/ | */ | |||
skipping to change at line 104 | skipping to change at line 105 | |||
int _nIndexes; | int _nIndexes; | |||
// ofs 192 | // ofs 192 | |||
IndexDetails _indexes[NIndexesBase]; | IndexDetails _indexes[NIndexesBase]; | |||
// ofs 352 (16 byte aligned) | // ofs 352 (16 byte aligned) | |||
int _isCapped; // there is wasted space here if I'm right (ERH) | int _isCapped; // there is wasted space here if I'm right (ERH)
int _maxDocsInCapped; // max # of objects for a capped table, -1 for inf. | int _maxDocsInCapped; // max # of objects for a capped table, -1 for inf.
double _paddingFactor; // 1.0 = no padding. | double _paddingFactor; // 1.0 = no padding. | |||
// ofs 386 (16) | // ofs 368 (16) | |||
int _systemFlags; // things that the system sets/cares about | int _systemFlags; // things that the system sets/cares about | |||
DiskLoc _capExtent; // the "current" extent we're writing too for a capped collection | DiskLoc _capExtent; // the "current" extent we're writing too for a capped collection | |||
DiskLoc _capFirstNewRecord; | DiskLoc _capFirstNewRecord; | |||
unsigned short _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h | unsigned short _dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h | |||
unsigned short _indexFileVersion; | unsigned short _indexFileVersion; | |||
unsigned long long _multiKeyIndexBits; | unsigned long long _multiKeyIndexBits; | |||
// ofs 400 (16) | // ofs 400 (16) | |||
skipping to change at line 174 | skipping to change at line 175 | |||
public: | public: | |||
const DiskLoc& capExtent() const { return _capExtent; } | const DiskLoc& capExtent() const { return _capExtent; } | |||
const DiskLoc capFirstNewRecord() const { return _capFirstNewRecord ; } | const DiskLoc capFirstNewRecord() const { return _capFirstNewRecord ; } | |||
DiskLoc& capExtent() { return _capExtent; } | DiskLoc& capExtent() { return _capExtent; } | |||
DiskLoc& capFirstNewRecord() { return _capFirstNewRecord; } | DiskLoc& capFirstNewRecord() { return _capFirstNewRecord; } | |||
private: | private: | |||
Extent *theCapExtent() const { return _capExtent.ext(); } | Extent *theCapExtent() const { return _capExtent.ext(); } | |||
void advanceCapExtent( const char *ns ); | void advanceCapExtent( const StringData& ns ); | |||
DiskLoc __capAlloc(int len); | DiskLoc __capAlloc(int len); | |||
DiskLoc cappedAlloc(const char *ns, int len); | DiskLoc cappedAlloc(const StringData& ns, int len); | |||
DiskLoc &cappedFirstDeletedInCurExtent(); | DiskLoc &cappedFirstDeletedInCurExtent(); | |||
bool nextIsInCapExtent( const DiskLoc &dl ) const; | bool nextIsInCapExtent( const DiskLoc &dl ) const; | |||
public: | public: | |||
const DiskLoc& firstExtent() const { return _firstExtent; } | const DiskLoc& firstExtent() const { return _firstExtent; } | |||
const DiskLoc& lastExtent() const { return _lastExtent; } | const DiskLoc& lastExtent() const { return _lastExtent; } | |||
DiskLoc& firstExtent() { return _firstExtent; } | DiskLoc& firstExtent() { return _firstExtent; } | |||
DiskLoc& lastExtent() { return _lastExtent; } | DiskLoc& lastExtent() { return _lastExtent; } | |||
skipping to change at line 283 | skipping to change at line 284 | |||
} | } | |||
/* hackish - find our index # in the indexes array */ | /* hackish - find our index # in the indexes array */ | |||
int idxNo(const IndexDetails& idx); | int idxNo(const IndexDetails& idx); | |||
/* multikey indexes are indexes where there are more than one key in the index | /* multikey indexes are indexes where there are more than one key in the index
for a single document. see multikey in docs. | for a single document. see multikey in docs. | |||
for these, we have to do some dedup work on queries. | for these, we have to do some dedup work on queries. | |||
*/ | */ | |||
bool isMultikey(int i) const { return (_multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; } | bool isMultikey(int i) const { return (_multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
void setIndexIsMultikey(const char *thisns, int i, bool multikey = true); |
/** | /**
* This fetches the IndexDetails for the next empty index slot. The caller must populate | * @return - if any state was changed
* returned object. This handles allocating extra index space, if necessary. |
*/ | */
IndexDetails& getNextIndexDetails(const char* thisns); | bool setIndexIsMultikey(int i, bool multikey = true);
/** | /**
* increments _nIndexes | * This fetches the IndexDetails for the next empty index slot. The caller must populate
*/ | * returned object. This handles allocating extra index space, if necessary.
void addIndex(); | */
void aboutToDeleteAnIndex() { | IndexDetails& getNextIndexDetails(const char* thisns);
clearSystemFlag( Flag_HaveIdIndex ); |
} |
/* returns index of the first index in which the field is present. -1 if not present. */ | /* returns index of the first index in which the field is present. -1 if not present. */ | |||
int fieldIsIndexed(const char *fieldName); | int fieldIsIndexed(const char *fieldName); | |||
/** | /** | |||
* @return the actual size to create | * @return the actual size to create | |||
* will be >= oldRecordSize | * will be >= oldRecordSize | |||
* based on padding and any other flags | * based on padding and any other flags | |||
*/ | */ | |||
int getRecordAllocationSize( int minRecordSize ); | int getRecordAllocationSize( int minRecordSize ); | |||
skipping to change at line 372 | skipping to change at line 368 | |||
} | } | |||
} | } | |||
/* Returns the index entry for the first index whose prefix contains | /* Returns the index entry for the first index whose prefix contains
* 'keyPattern'. If 'requireSingleKey' is true, skip indices that contain | * 'keyPattern'. If 'requireSingleKey' is true, skip indices that contain
* array attributes. Otherwise, returns NULL. | * array attributes. Otherwise, returns NULL. | |||
*/ | */ | |||
const IndexDetails* findIndexByPrefix( const BSONObj &keyPattern , | const IndexDetails* findIndexByPrefix( const BSONObj &keyPattern , | |||
bool requireSingleKey ); | bool requireSingleKey ); | |||
void removeIndex( int idx ); | ||||
/** | ||||
* removes things between getCompletedIndexCount() and getTotalIndexCount() |
* this should only be used for crash recovery | ||||
*/ | ||||
void blowAwayInProgressIndexEntries(); | ||||
/** | ||||
* @return the info for the index to retry | ||||
*/ | ||||
BSONObj prepOneUnfinishedIndex(); | ||||
/** | ||||
* swaps all meta data for 2 indexes | ||||
* a and b are 2 index ids, whose contents will be swapped | ||||
* must have a lock on the entire collection to do this | ||||
*/ | ||||
void swapIndex( const char* ns, int a, int b ); | ||||
/* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs. | /* Updates the expireAfterSeconds field of the given index to the value in newExpireSecs.
* The specified index must already contain an expireAfterSeconds field, and the value in | * The specified index must already contain an expireAfterSeconds field, and the value in
* that field and newExpireSecs must both be numeric. | * that field and newExpireSecs must both be numeric. | |||
*/ | */ | |||
void updateTTLIndex( int idxNo , const BSONElement& newExpireSecs ) ; | void updateTTLIndex( int idxNo , const BSONElement& newExpireSecs ) ; | |||
const int systemFlags() const { return _systemFlags; } | const int systemFlags() const { return _systemFlags; } | |||
bool isSystemFlagSet( int flag ) const { return _systemFlags & flag ; } | bool isSystemFlagSet( int flag ) const { return _systemFlags & flag ; } | |||
void setSystemFlag( int flag ); | void setSystemFlag( int flag ); | |||
void clearSystemFlag( int flag ); | void clearSystemFlag( int flag ); | |||
skipping to change at line 469 | skipping to change at line 445 | |||
/* predetermine location of the next alloc without actually doing it. | /* predetermine location of the next alloc without actually doing it.
if cannot predetermine returns null (so still call alloc() then) | if cannot predetermine returns null (so still call alloc() then) | |||
*/ | */ | |||
DiskLoc allocWillBeAt(const char *ns, int lenToAlloc); | DiskLoc allocWillBeAt(const char *ns, int lenToAlloc); | |||
/** allocate space for a new record from deleted lists. | /** allocate space for a new record from deleted lists. | |||
@param lenToAlloc is WITH header | @param lenToAlloc is WITH header | |||
@return null diskloc if no room - allocate a new extent then | @return null diskloc if no room - allocate a new extent then | |||
*/ | */ | |||
DiskLoc alloc(const char* ns, int lenToAlloc); | DiskLoc alloc(const StringData& ns, int lenToAlloc); | |||
/* add a given record to the deleted chains for this NS */ | /* add a given record to the deleted chains for this NS */ | |||
void addDeletedRec(DeletedRecord *d, DiskLoc dloc); | void addDeletedRec(DeletedRecord *d, DiskLoc dloc); | |||
void dumpDeleted(set<DiskLoc> *extents = 0); | void dumpDeleted(set<DiskLoc> *extents = 0); | |||
// Start from firstExtent by default. | // Start from firstExtent by default. | |||
DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const ; | DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const ; | |||
// Start from lastExtent by default. | // Start from lastExtent by default. | |||
DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const; | DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const; | |||
long long storageSize( int * numExtents = 0 , BSONArrayBuilder * extentInfo = 0 ) const; |
int averageObjectSize() { | ||||
if ( _stats.nrecords == 0 ) | ||||
return 5; | ||||
return (int) (_stats.datasize / _stats.nrecords); | ||||
} | ||||
NamespaceDetails *writingWithoutExtra() { | NamespaceDetails *writingWithoutExtra() { | |||
return ( NamespaceDetails* ) getDur().writingPtr( this, sizeof( NamespaceDetails ) ); | return ( NamespaceDetails* ) getDur().writingPtr( this, sizeof( NamespaceDetails ) ); | |||
} | } | |||
/** Make all linked Extra objects writeable as well */ | /** Make all linked Extra objects writeable as well */ | |||
NamespaceDetails *writingWithExtra(); | NamespaceDetails *writingWithExtra(); | |||
class IndexBuildBlock { | ||||
public: | ||||
IndexBuildBlock( const string& ns, const string& indexName ); | ||||
~IndexBuildBlock(); | ||||
private: | ||||
string _ns; | ||||
string _indexName; | ||||
}; | ||||
private: | private: | |||
void _removeIndexFromMe( int idx ); | ||||
void _removeIndex( int idx ); | /** | |||
* swaps all meta data for 2 indexes | ||||
* a and b are 2 index ids, whose contents will be swapped | ||||
* must have a lock on the entire collection to do this | ||||
*/ | ||||
void swapIndex( int a, int b ); | ||||
DiskLoc _alloc(const char *ns, int len); | DiskLoc _alloc(const StringData& ns, int len); | |||
void maybeComplain( const char *ns, int len ) const; | void maybeComplain( const StringData& ns, int len ) const; | |||
DiskLoc __stdAlloc(int len, bool willBeAt); | DiskLoc __stdAlloc(int len, bool willBeAt); | |||
void compact(); // combine adjacent deleted records | void compact(); // combine adjacent deleted records | |||
friend class NamespaceIndex; | friend class NamespaceIndex; | |||
friend class IndexCatalog; | ||||
struct ExtraOld { | struct ExtraOld { | |||
// note we could use this field for more chaining later, so don't waste it: | // note we could use this field for more chaining later, so don't waste it:
unsigned long long reserved1; | unsigned long long reserved1; | |||
IndexDetails details[NIndexesExtra]; | IndexDetails details[NIndexesExtra]; | |||
unsigned reserved2; | unsigned reserved2; | |||
unsigned reserved3; | unsigned reserved3; | |||
}; | }; | |||
/** Update cappedLastDelRecLastExtent() after capExtent changed in cappedTruncateAfter() */ | /** Update cappedLastDelRecLastExtent() after capExtent changed in cappedTruncateAfter() */ | |||
void cappedTruncateLastDelUpdate(); | void cappedTruncateLastDelUpdate(); | |||
BOOST_STATIC_ASSERT( NIndexesMax <= NIndexesBase + NIndexesExtra*2 ); | BOOST_STATIC_ASSERT( NIndexesMax <= NIndexesBase + NIndexesExtra*2 ); | |||
End of changes. 20 change blocks. | ||||
61 lines changed or deleted | 27 lines changed or added | |||
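The comment next to isMultikey()/setIndexIsMultikey() in the diff above explains that a multikey index stores more than one key for a single document, which is why scans over such an index must dedup the documents they return. A minimal standalone sketch of that idea (plain standard-library containers and a hypothetical "tags" field, not the server's key-generation code):

    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
        // One "document" whose indexed field is an array: { _id: 7, tags: ["a","b","c"] }
        const int docId = 7;
        std::vector<std::string> tags;
        tags.push_back("a"); tags.push_back("b"); tags.push_back("c");

        // A multikey index stores one key per array element, each pointing at the same document.
        std::multimap<std::string, int> index;
        for (std::size_t i = 0; i < tags.size(); ++i)
            index.insert(std::make_pair(tags[i], docId));

        // A query matching both "a" and "b" hits two index entries; without dedup
        // the same document would be returned twice.
        std::set<int> seen;
        const char* wanted[] = { "a", "b" };
        for (int w = 0; w < 2; ++w) {
            std::pair<std::multimap<std::string, int>::iterator,
                      std::multimap<std::string, int>::iterator> range =
                index.equal_range(wanted[w]);
            for (; range.first != range.second; ++range.first) {
                if (seen.insert(range.first->second).second)
                    std::cout << "doc " << range.first->second << "\n";  // printed once
            }
        }
        return 0;
    }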
namespace_index.h | namespace_index.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <list> | #include <list> | |||
#include <string> | #include <string> | |||
#include "mongo/db/diskloc.h" | #include "mongo/db/diskloc.h" | |||
#include "mongo/db/storage/namespace.h" | #include "mongo/db/catalog/ondisk/namespace.h" | |||
#include "mongo/util/hashtab.h" | #include "mongo/util/hashtab.h" | |||
namespace mongo { | namespace mongo { | |||
class NamespaceDetails; | class NamespaceDetails; | |||
/* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog" | /* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog" | |||
if you will: at least the core parts. (Additional info in system.* collections.) | if you will: at least the core parts. (Additional info in system.* collections.) | |||
*/ | */ | |||
class NamespaceIndex { | class NamespaceIndex { | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 1 lines changed or added | |||
namespace_string-inl.h | namespace_string-inl.h | |||
---|---|---|---|---|
skipping to change at line 115 | skipping to change at line 115 | |||
inline NamespaceString::NamespaceString() : _ns(), _dotIndex(0) {} | inline NamespaceString::NamespaceString() : _ns(), _dotIndex(0) {} | |||
inline NamespaceString::NamespaceString( const StringData& nsIn ) { | inline NamespaceString::NamespaceString( const StringData& nsIn ) { | |||
_ns = nsIn.toString(); // copy to our buffer | _ns = nsIn.toString(); // copy to our buffer | |||
_dotIndex = _ns.find( '.' ); | _dotIndex = _ns.find( '.' ); | |||
} | } | |||
inline NamespaceString::NamespaceString( const StringData& dbName, | inline NamespaceString::NamespaceString( const StringData& dbName, | |||
const StringData& collectionNa me ) | const StringData& collectionNa me ) | |||
: _ns(dbName.size() + collectionName.size() + 1, '\0') { | : _ns(dbName.size() + collectionName.size() + 1, '\0') { | |||
dassert(dbName.find('.') == std::string::npos); | uassert(17235, | |||
dassert(collectionName.empty() || collectionName[0] != '.'); | "'.' is an invalid character in a database name", | |||
dbName.find('.') == std::string::npos); | ||||
uassert(17246, | ||||
"Collection names cannot start with '.'", | ||||
collectionName.empty() || collectionName[0] != '.'); | ||||
std::string::iterator it = std::copy(dbName.begin(), dbName.end(), _ns.begin()); | std::string::iterator it = std::copy(dbName.begin(), dbName.end(), _ns.begin()); | |||
*it = '.'; | *it = '.'; | |||
++it; | ++it; | |||
it = std::copy(collectionName.begin(), collectionName.end(), it); | it = std::copy(collectionName.begin(), collectionName.end(), it); | |||
_dotIndex = dbName.size(); | _dotIndex = dbName.size(); | |||
dassert(it == _ns.end()); | dassert(it == _ns.end()); | |||
dassert(_ns[_dotIndex] == '.'); | dassert(_ns[_dotIndex] == '.'); | |||
dassert(_ns.find('\0') == std::string::npos); | dassert(_ns.find('\0') == std::string::npos); | |||
} | } | |||
skipping to change at line 180 | skipping to change at line 184 | |||
/* future : this doesn't need to be an inline. */ | /* future : this doesn't need to be an inline. */ | |||
inline std::string NamespaceString::getSisterNS( const StringData& local ) const { | inline std::string NamespaceString::getSisterNS( const StringData& local ) const {
verify( local.size() && local[0] != '.' ); | verify( local.size() && local[0] != '.' ); | |||
return db().toString() + "." + local.toString(); | return db().toString() + "." + local.toString(); | |||
} | } | |||
inline std::string NamespaceString::getSystemIndexesCollection() const { | inline std::string NamespaceString::getSystemIndexesCollection() const { | |||
return db().toString() + ".system.indexes"; | return db().toString() + ".system.indexes"; | |||
} | } | |||
inline std::string NamespaceString::getCommandNS() const { | ||||
return db().toString() + ".$cmd"; | ||||
} | ||||
} | } | |||
End of changes. 2 change blocks. | ||||
2 lines changed or deleted | 10 lines changed or added | |||
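The two-argument NamespaceString constructor shown above concatenates the database and collection names around a single '.', records the dot position, and in the new version rejects a '.' in the database name (uassert 17235) and a leading '.' in the collection name (uassert 17246). A standalone sketch of the same construction and checks, using std::string in place of StringData and an exception in place of uassert:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Mirrors the shape of NamespaceString(dbName, collectionName): "<db>.<collection>",
    // with dotIndex marking the first '.', i.e. the end of the db name.
    struct SimpleNamespace {
        std::string ns;
        std::string::size_type dotIndex;

        SimpleNamespace(const std::string& db, const std::string& coll) {
            if (db.find('.') != std::string::npos)
                throw std::invalid_argument("'.' is an invalid character in a database name");
            if (!coll.empty() && coll[0] == '.')
                throw std::invalid_argument("Collection names cannot start with '.'");
            ns = db + "." + coll;
            dotIndex = db.size();
        }

        std::string db() const { return ns.substr(0, dotIndex); }
        std::string coll() const { return ns.substr(dotIndex + 1); }
    };

    int main() {
        SimpleNamespace n("test", "system.indexes");
        std::cout << n.db() << " / " << n.coll() << "\n";   // test / system.indexes
        std::cout << n.db() + ".$cmd" << "\n";              // like getCommandNS(): test.$cmd
        return 0;
    }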
namespace_string.h | namespace_string.h | |||
---|---|---|---|---|
skipping to change at line 103 | skipping to change at line 103 | |||
bool operator<( const NamespaceString& rhs ) const { return _ns < rhs._ns; } | bool operator<( const NamespaceString& rhs ) const { return _ns < rhs._ns; }
/** ( foo.bar ).getSisterNS( "blah" ) == foo.blah | /** ( foo.bar ).getSisterNS( "blah" ) == foo.blah | |||
*/ | */ | |||
std::string getSisterNS( const StringData& local ) const; | std::string getSisterNS( const StringData& local ) const; | |||
// @return db() + ".system.indexes" | // @return db() + ".system.indexes" | |||
std::string getSystemIndexesCollection() const; | std::string getSystemIndexesCollection() const; | |||
// @return db() + ".$cmd" | ||||
std::string getCommandNS() const; | ||||
/** | /** | |||
* @return true if ns is 'normal'. A "$" is used for namespaces holding index data, | * @return true if ns is 'normal'. A "$" is used for namespaces holding index data,
* which do not contain BSON objects in their records. ("oplog.$main" is the exception) | * which do not contain BSON objects in their records. ("oplog.$main" is the exception)
*/ | */ | |||
static bool normal(const StringData& ns); | static bool normal(const StringData& ns); | |||
/** | /** | |||
* @return true if the ns is an oplog one, otherwise false. | * @return true if the ns is an oplog one, otherwise false. | |||
*/ | */ | |||
static bool oplog(const StringData& ns); | static bool oplog(const StringData& ns); | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 3 lines changed or added | |||
new_find.h | new_find.h | |||
---|---|---|---|---|
skipping to change at line 33 | skipping to change at line 33 | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/db/clientcursor.h" | ||||
#include "mongo/db/curop.h" | #include "mongo/db/curop.h" | |||
#include "mongo/db/dbmessage.h" | #include "mongo/db/dbmessage.h" | |||
#include "mongo/db/query/canonical_query.h" | #include "mongo/db/query/canonical_query.h" | |||
#include "mongo/db/query/runner.h" | ||||
#include "mongo/util/net/message.h" | #include "mongo/util/net/message.h" | |||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* Get a runner for a query. Takes ownership of rawCanonicalQuery. | ||||
* | ||||
* If the query is valid and a runner could be created, returns Status::OK() |
* and populates *out with the Runner. |
* |
* If the query cannot be executed, returns a Status indicating why. Deletes |
* rawCanonicalQuery. |
*/ |
Status getRunner(CanonicalQuery* rawCanonicalQuery, Runner** out, size_t plannerOptions = 0); |
/** | ||||
* A switch to choose between old Cursor-based code and new Runner-based code. | * A switch to choose between old Cursor-based code and new Runner-based code.
*/ | */ | |||
bool isNewQueryFrameworkEnabled(); | bool isNewQueryFrameworkEnabled(); | |||
/** | /** | |||
* Use the new query framework. Called from the dbtest initialization. | * Use the new query framework. Called from the dbtest initialization. | |||
*/ | */ | |||
void enableNewQueryFramework(); | void enableNewQueryFramework(); | |||
/** | /** | |||
skipping to change at line 71 | skipping to change at line 84 | |||
std::string newRunQuery(CanonicalQuery* cq, CurOp& curop, Message &result); | std::string newRunQuery(CanonicalQuery* cq, CurOp& curop, Message &result);
/** | /** | |||
* Can the new system handle the provided query? | * Can the new system handle the provided query? | |||
* | * | |||
* Returns false if not. cqOut is not modified. | * Returns false if not. cqOut is not modified. | |||
* Returns true if so. Caller owns *cqOut. | * Returns true if so. Caller owns *cqOut. | |||
*/ | */ | |||
bool canUseNewSystem(const QueryMessage& qm, CanonicalQuery** cqOut); | bool canUseNewSystem(const QueryMessage& qm, CanonicalQuery** cqOut); | |||
/** | ||||
* RAII approach to ensuring that runners are deregistered in newRunQuery. |
* |
* While retrieving the first batch of results, newRunQuery manually registers the runner with |
* ClientCursor. Certain query execution paths, namely $where, can throw an exception. If we |
* fail to deregister the runner, we will call invalidate/kill on the |
* still-registered-yet-deleted runner. |
* |
* For any subsequent calls to getMore, the runner is already registered with ClientCursor |
* by virtue of being cached, so this exception-proofing is not required. |
*/ |
struct DeregisterEvenIfUnderlyingCodeThrows { |
    DeregisterEvenIfUnderlyingCodeThrows(Runner* runner) : _runner(runner) { } |
~DeregisterEvenIfUnderlyingCodeThrows() { | ||||
ClientCursor::deregisterRunner(_runner); | ||||
} | ||||
Runner* _runner; | ||||
}; | ||||
} // namespace mongo | } // namespace mongo | |||
End of changes. 4 change blocks. | ||||
0 lines changed or deleted | 41 lines changed or added | |||
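The new getRunner() contract documented above is mostly about ownership: the factory always consumes rawCanonicalQuery, and only on success does the caller receive a Runner (which then owns the query). The standalone sketch below mirrors that ownership pattern with simplified stand-in types; it is not the MongoDB API itself, just the shape of the contract.

    #include <iostream>
    #include <string>

    struct FakeQuery { std::string text; };

    // The runner takes ownership of the query it was built from.
    struct FakeRunner {
        FakeQuery* query;
        explicit FakeRunner(FakeQuery* q) : query(q) {}
        ~FakeRunner() { delete query; }
    };

    // Mirrors the documented getRunner() ownership rules: on success *out is populated
    // (and owns the query); on failure the query is deleted here, so the caller never
    // frees the raw query itself in either case.
    bool getFakeRunner(FakeQuery* rawQuery, FakeRunner** out) {
        if (rawQuery->text.empty()) {
            delete rawQuery;   // failure path still consumes the query
            return false;
        }
        *out = new FakeRunner(rawQuery);
        return true;
    }

    int main() {
        FakeRunner* runner = 0;
        FakeQuery* q = new FakeQuery();
        q->text = "find listings";
        if (getFakeRunner(q, &runner)) {
            std::cout << "runner built for: " << runner->query->text << "\n";
            delete runner;     // also frees the query
        }
        return 0;
    }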
ns_targeter.h | ns_targeter.h | |||
---|---|---|---|---|
skipping to change at line 38 | skipping to change at line 38 | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/bson/bsonobj.h" | #include "mongo/bson/bsonobj.h" | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/client/dbclientinterface.h" | #include "mongo/client/dbclientinterface.h" | |||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
#include "mongo/s/chunk_version.h" | #include "mongo/s/chunk_version.h" | |||
#include "mongo/s/write_ops/batched_update_document.h" | ||||
#include "mongo/s/write_ops/batched_delete_document.h" | ||||
namespace mongo { | namespace mongo { | |||
struct ShardEndpoint; | struct ShardEndpoint; | |||
/** | /** | |||
* The NSTargeter interface is used by a WriteOp to generate and target child write operations | * The NSTargeter interface is used by a WriteOp to generate and target child write operations | |||
* to a particular collection. | * to a particular collection. | |||
* | * | |||
* The lifecycle of an NSTargeter is: | * The lifecycle of an NSTargeter is:
skipping to change at line 82 | skipping to change at line 84 | |||
} | } | |||
/** | /** | |||
* Returns the namespace targeted. | * Returns the namespace targeted. | |||
*/ | */ | |||
virtual const NamespaceString& getNS() const = 0; | virtual const NamespaceString& getNS() const = 0; | |||
/** | /** | |||
* Returns a ShardEndpoint for a single document write. | * Returns a ShardEndpoint for a single document write. | |||
* | * | |||
* Returns ShardKeyNotFound if document does not have a full shard key. | ||||
* Returns !OK with message if document could not be targeted for o ther reasons. | * Returns !OK with message if document could not be targeted for o ther reasons. | |||
*/ | */ | |||
virtual Status targetDoc( const BSONObj& doc, ShardEndpoint** endpoint ) const = 0; | virtual Status targetInsert( const BSONObj& doc, ShardEndpoint** endpoint ) const = 0;
/** | /** | |||
* Returns a vector of ShardEndpoints for a potentially multi-shard query. | * Returns a vector of ShardEndpoints for a potentially multi-shard update. | |||
* | * | |||
* Returns !OK with message if query could not be targeted. | * Returns OK and fills the endpoints; returns a status describing the error otherwise. | |||
*/ | */ | |||
virtual Status targetQuery( const BSONObj& query, | virtual Status targetUpdate( const BatchedUpdateDocument& updateDoc,
                            std::vector<ShardEndpoint*>* endpoints ) const = 0; | std::vector<ShardEndpoint*>* endpoints ) const = 0;
/** | ||||
* Returns a vector of ShardEndpoints for a potentially multi-shard delete. |
* |
* Returns OK and fills the endpoints; returns a status describing the error otherwise. |
*/ |
virtual Status targetDelete( const BatchedDeleteDocument& deleteDoc, |
                             std::vector<ShardEndpoint*>* endpoints ) const = 0; |
/** |
* Returns a vector of ShardEndpoints for the entire collection. |
* |
* Returns !OK with message if the full collection could not be targeted. |
*/ |
virtual Status targetAll( std::vector<ShardEndpoint*>* endpoints ) const = 0; |
/** | /** | |||
* Informs the targeter that a targeting failure occurred during one of the last targeting | * Informs the targeter that a targeting failure occurred during one of the last targeting
* operations. If this is noted, we cannot note stale responses. | * operations. If this is noted, we cannot note stale responses. | |||
*/ | */ | |||
virtual void noteCouldNotTarget() = 0; | virtual void noteCouldNotTarget() = 0; | |||
/** | /** | |||
* Informs the targeter of stale config responses for this namespace from an endpoint, with | * Informs the targeter of stale config responses for this namespace from an endpoint, with
* further information available in the returned staleInfo. | * further information available in the returned staleInfo. | |||
skipping to change at line 137 | skipping to change at line 153 | |||
/** | /** | |||
* A ShardEndpoint represents a destination for a targeted query or document. It contains both | * A ShardEndpoint represents a destination for a targeted query or document. It contains both
* the logical target (shard name/version/broadcast) and the physical target (host name). | * the logical target (shard name/version/broadcast) and the physical target (host name).
*/ | */ | |||
struct ShardEndpoint { | struct ShardEndpoint { | |||
ShardEndpoint() { | ShardEndpoint() { | |||
} | } | |||
ShardEndpoint( const ShardEndpoint& other ) : | ShardEndpoint( const ShardEndpoint& other ) : | |||
shardName( other.shardName ), | shardName( other.shardName ), shardVersion( other.shardVersion | |||
shardVersion( other.shardVersion ), | ) { | |||
shardHost( other.shardHost ) { | ||||
} | } | |||
ShardEndpoint( const string& shardName, | ShardEndpoint( const string& shardName, | |||
const ChunkVersion& shardVersion, | const ChunkVersion& shardVersion ) : | |||
const ConnectionString& shardHost ) : | shardName( shardName ), shardVersion( shardVersion ) { | |||
shardName( shardName ), shardVersion( shardVersion ), shardHost | ||||
( shardHost ) { | ||||
} | } | |||
const std::string shardName; | const std::string shardName; | |||
const ChunkVersion shardVersion; | const ChunkVersion shardVersion; | |||
const ConnectionString shardHost; | ||||
// | // | |||
// For testing *only* - do not use as part of API | // For testing *only* - do not use as part of API | |||
// | // | |||
BSONObj toBSON() const { | BSONObj toBSON() const { | |||
BSONObjBuilder b; | BSONObjBuilder b; | |||
appendBSON( &b ); | appendBSON( &b ); | |||
return b.obj(); | return b.obj(); | |||
} | } | |||
void appendBSON( BSONObjBuilder* builder ) const { | void appendBSON( BSONObjBuilder* builder ) const { | |||
builder->append( "shardName", shardName ); | builder->append( "shardName", shardName ); | |||
shardVersion.addToBSON( *builder, "shardVersion" ); | shardVersion.addToBSON( *builder, "shardVersion" ); | |||
builder->append( "shardHost", shardHost.toString() ); | ||||
} | } | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 10 change blocks. | ||||
16 lines changed or deleted | 34 lines changed or added | |||
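The NSTargeter interface above splits targeting into per-write-type hooks (targetInsert/targetUpdate/targetDelete) plus staleness callbacks. As a rough illustration of the shape of an implementation, here is a standalone sketch of a targeter that sends everything to a single shard; the types (SimpleEndpoint, string documents, bool status) are simplified stand-ins and the real interface also covers stale-config bookkeeping and refresh.

    #include <iostream>
    #include <string>
    #include <vector>

    // Simplified stand-in; the real interface uses Status, BSONObj and ShardEndpoint.
    struct SimpleEndpoint { std::string shardName; int version; };

    // The degenerate targeter: every insert, update, or delete goes to one shard.
    class SingleShardTargeter {
    public:
        explicit SingleShardTargeter(const std::string& shard) : _shard(shard) {}

        bool targetInsert(const std::string& /*doc*/, SimpleEndpoint* endpoint) const {
            endpoint->shardName = _shard;
            endpoint->version = 1;
            return true;
        }

        // Updates and deletes may target several shards; here there is only ever one.
        bool targetUpdate(const std::string& /*updateDoc*/,
                          std::vector<SimpleEndpoint>* endpoints) const {
            SimpleEndpoint e;
            e.shardName = _shard;
            e.version = 1;
            endpoints->push_back(e);
            return true;
        }

    private:
        std::string _shard;
    };

    int main() {
        SingleShardTargeter targeter("shard0000");
        SimpleEndpoint single;
        targeter.targetInsert("{ _id: 1 }", &single);
        std::cout << "insert -> " << single.shardName << "\n";

        std::vector<SimpleEndpoint> many;
        targeter.targetUpdate("{ q: {}, u: { $set: { x: 1 } } }", &many);
        std::cout << "update -> " << many.size() << " endpoint(s)\n";
        return 0;
    }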
option_description.h | option_description.h | |||
---|---|---|---|---|
skipping to change at line 18 | skipping to change at line 18 | |||
* | * | |||
* Unless required by applicable law or agreed to in writing, software | * Unless required by applicable law or agreed to in writing, software | |||
* distributed under the License is distributed on an "AS IS" BASIS, | * distributed under the License is distributed on an "AS IS" BASIS, | |||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
* See the License for the specific language governing permissions and | * See the License for the specific language governing permissions and | |||
* limitations under the License. | * limitations under the License. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/shared_ptr.hpp> | ||||
#include <iostream> | #include <iostream> | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/util/options_parser/constraints.h" | ||||
#include "mongo/util/options_parser/value.h" | #include "mongo/util/options_parser/value.h" | |||
namespace mongo { | namespace mongo { | |||
namespace optionenvironment { | namespace optionenvironment { | |||
/** | /** | |||
* An OptionType is an enum of all the types we support in the OptionsParser | * An OptionType is an enum of all the types we support in the OptionsParser
*/ | */ | |||
enum OptionType { | enum OptionType { | |||
StringVector, // po::value< std::vector<std::string> > | StringVector, // po::value< std::vector<std::string> > | |||
skipping to change at line 42 | skipping to change at line 44 | |||
Double, // po::value<double> | Double, // po::value<double> | |||
Int, // po::value<int> | Int, // po::value<int> | |||
Long, // po::value<long> | Long, // po::value<long> | |||
String, // po::value<std::string> | String, // po::value<std::string> | |||
UnsignedLongLong, // po::value<unsigned long long> | UnsignedLongLong, // po::value<unsigned long long> | |||
Unsigned, // po::value<unsigned> | Unsigned, // po::value<unsigned> | |||
Switch // po::bool_switch | Switch // po::bool_switch | |||
}; | }; | |||
/** | /** | |||
* The OptionDescription and PositionalOptionDescription classes are containers for information | * An OptionSources is an enum representing where an option can come from
* about the options we are expecting either on the command line or in config files. These | */
* should be registered in an OptionSection instance and passed to an OptionsParser. | enum OptionSources {
*/ | SourceCommandLine = 1,
SourceINIConfig = 2, |
SourceYAMLConfig = 4, | ||||
SourceAllConfig = SourceINIConfig | SourceYAMLConfig, | ||||
SourceAllLegacy = SourceINIConfig | SourceCommandLine, | ||||
SourceAll = SourceCommandLine | SourceINIConfig | SourceYAMLConfig | ||||
}; | ||||
/** | ||||
* The OptionDescription class is a container for information about the options we are expecting |
* either on the command line or in config files. These should be registered in an |
* OptionSection instance and passed to an OptionsParser. | ||||
*/ | */ | |||
class OptionDescription { | class OptionDescription { | |||
public: | public: | |||
OptionDescription(const std::string& dottedName, | OptionDescription(const std::string& dottedName, | |||
const std::string& singleName, | const std::string& singleName, | |||
const OptionType type, | const OptionType type, | |||
const std::string& description, | const std::string& description) | |||
const bool isVisible = true, | ||||
const Value defaultValue = Value(), | ||||
const Value implicitValue = Value(), | ||||
const bool isComposing = false) | ||||
: _dottedName(dottedName), | : _dottedName(dottedName), | |||
_singleName(singleName), | _singleName(singleName), | |||
_type(type), | _type(type), | |||
_description(description), | _description(description), | |||
_isVisible(isVisible), | _isVisible(true), | |||
_default(defaultValue), | _default(Value()), | |||
_implicit(implicitValue), | _implicit(Value()), | |||
_isComposing(isComposing) { } | _isComposing(false), | |||
_sources(SourceAll), | ||||
_positionalStart(-1), | ||||
_positionalEnd(-1) { } | ||||
/* | ||||
* The following functions are part of the chaining interface for o | ||||
ption registration. See | ||||
* comments below for what each of these attributes mean, and the O | ||||
ptionSection class for | ||||
* more details on the chaining interface. | ||||
*/ | ||||
/** | ||||
* Parsing Attributes. | ||||
* | ||||
* The functions below specify various attributes of our option tha | ||||
t are relevant for | ||||
* parsing. | ||||
*/ | ||||
/* | ||||
* Make this option hidden so it does not appear in command line he | ||||
lp | ||||
*/ | ||||
OptionDescription& hidden(); | ||||
/* | ||||
* Add a default value for this option if it is not specified | ||||
* | ||||
* throws DBException on errors, such as trying to set a default th | ||||
at does not have the same | ||||
* type as the option, or trying to set a default for a composing o | ||||
ption. | ||||
*/ | ||||
OptionDescription& setDefault(Value defaultValue); | ||||
/* | ||||
* Add an implicit value for this option if it is specified with no | ||||
argument | ||||
* | ||||
* throws DBException on errors, such as trying to set an implicit | ||||
value that does not have | ||||
* the same type as the option, or trying to set an implicit value | ||||
for a composing option. | ||||
*/ | ||||
OptionDescription& setImplicit(Value implicitValue); | ||||
/* | ||||
* Make this option composing so that the different sources add the | ||||
ir values instead of | ||||
* overriding (eg. setParameter values in the config file and on th | ||||
e command line all get | ||||
* aggregated together) | ||||
* | ||||
* throws DBException on errors, such as trying to make an option t | ||||
hat is not a vector type | ||||
* composing, or or trying to set an implicit or default value for | ||||
a composing option. | ||||
*/ | ||||
OptionDescription& composing(); | ||||
/* | ||||
* Specify the allowed sources for this option, such as CommandLine | ||||
, JSONConfig, or | ||||
* INIConfig. The default is SourceAll which means the option can | ||||
be present in any source | ||||
*/ | ||||
OptionDescription& setSources(OptionSources sources); | ||||
/* | ||||
* Specify that this is a positional option. "start" should be the | ||||
first position the | ||||
* option can be found in, and "end" is the last position, inclusiv | ||||
e. The positions start | ||||
* at index 1 (after the executable name). If "start" is greater t | ||||
han "end", then the | ||||
* option must be able to support multiple values. Specifying -1 f | ||||
or the "end" means that | ||||
* the option can repeat forever. Any "holes" in the positional ra | ||||
nges will result in an | ||||
* error during parsing. | ||||
* | ||||
* Examples: | ||||
* | ||||
* .positional(1,1) // Single positional argument at position 1 | ||||
* ... | ||||
* .positional(2,3) // More positional arguments at position 2 and | ||||
3 (multivalued option) | ||||
* ... | ||||
* .positional(4,-1) // Can repeat this positional option forever a | ||||
fter position 4 | ||||
* | ||||
* | ||||
* (sverch) TODO: When we can support it (i.e. when we can get rid | ||||
of boost) add a | ||||
* "positionalOnly" attribute that specifies that it is not also a | ||||
command line flag. In | ||||
* boost program options, the only way to have a positional argumen | ||||
t is to register a flag | ||||
* and mark it as also being positional. | ||||
*/ | ||||
OptionDescription& positional(int start, int end); | ||||
/** | ||||
* Validation Constraints. | ||||
* | ||||
* The functions below specify constraints that must be met in orde | ||||
r for this option to be | ||||
* valid. These do not get checked during parsing, but will be add | ||||
ed to the result | ||||
* Environment so that they will get checked when the Environment i | ||||
s validated. | ||||
*/ | ||||
/** | ||||
* Specifies the range allowed for this option. Only allowed for o | ||||
ptions with numeric type. | ||||
*/ | ||||
OptionDescription& validRange(long min, long max); | ||||
/** | ||||
* Adds a constraint for this option. During parsing, this Constra | ||||
int will be added to the | ||||
* result Environment, ensuring that it will get checked when the e | ||||
nvironment is validated. | ||||
* See the documentation on the Constraint and Environment classes | ||||
for more details. | ||||
* | ||||
* WARNING: This function takes ownership of the Constraint pointer | ||||
that is passed in. | ||||
*/ | ||||
OptionDescription& addConstraint(Constraint* c); | ||||
std::string _dottedName; // Used for JSON config and in Environment | std::string _dottedName; // Used for JSON config and in Environment | |||
std::string _singleName; // Used for boost command line and INI | std::string _singleName; // Used for boost command line and INI | |||
OptionType _type; // Storage type of the argument value, or switch type (bool) | OptionType _type; // Storage type of the argument value, or switch type (bool) | |||
// (required by boost) | // (required by boost) | |||
std::string _description; // Description of option printed in help output | std::string _description; // Description of option printed in help output | |||
bool _isVisible; // Visible in help output | bool _isVisible; // Visible in help output | |||
Value _default; // Value if option is not specified | Value _default; // Value if option is not specified | |||
Value _implicit; // Value if option is specified with no argument | Value _implicit; // Value if option is specified with no argument | |||
bool _isComposing; // Aggregate values from different sources instead of overriding | bool _isComposing; // Aggregate values from different sources instead of overriding
}; | OptionSources _sources; // Places where an option can be specified (current sources are
 | // command line, json config, and ini config)
class PositionalOptionDescription { | int _positionalStart; // The starting position if this is a positional option. -1 otherwise.
public: | int _positionalEnd; // The ending position if this is a positional option. -1 if unlimited.
PositionalOptionDescription(const std::string& name, | // TODO(sverch): We have to use pointers to keep track of the Constraints because we rely on
const OptionType type, | // inheritance to make Constraints work. We have to use shared_ptrs because the
int count = 1) | // OptionDescription is sometimes copied and because it is stored in a std::list in the
: _name(name), | // OptionSection. We should think about a better solution for the ownership semantics of
_type(type), | // these classes. Note that the Environment (the storage for results of option parsing) has
_count(count) { } | // to know about the constraints for all the options, which is another factor to consider
std::string _name; // Name used to access the value of this option after parsing | // when thinking about ownership.
OptionType _type; // Storage type of the positional argument (required by boost) | std::vector<boost::shared_ptr<Constraint> > _constraints; // Constraints that must be met
int _count; // Max number of times this option can be specified. -1 = unlimited | // for this option to be valid
}; | }; | |||
} // namespace optionenvironment | } // namespace optionenvironment | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 6 change blocks. | ||||
32 lines changed or deleted | 183 lines changed or added | |||
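The new OptionSources enum above is a set of bit flags, so the composite values (SourceAllConfig, SourceAllLegacy, SourceAll) are bitwise ORs and a "may this option come from here?" test is a bitwise AND against the _sources value set via setSources(). A small standalone sketch of that flag arithmetic (a plain enum mirroring the values in the diff, not the mongo header itself):

    #include <iostream>

    enum Sources {
        SourceCommandLine = 1,
        SourceINIConfig   = 2,
        SourceYAMLConfig  = 4,
        SourceAllConfig   = SourceINIConfig | SourceYAMLConfig,
        SourceAll         = SourceCommandLine | SourceINIConfig | SourceYAMLConfig
    };

    // True if an option restricted to 'allowed' may be set from 'from'.
    static bool allowedFrom(int allowed, Sources from) {
        return (allowed & from) != 0;
    }

    int main() {
        int cfgOnly = SourceAllConfig;  // e.g. an option registered with setSources(SourceAllConfig)
        std::cout << allowedFrom(cfgOnly, SourceYAMLConfig) << "\n";   // 1: YAML config is allowed
        std::cout << allowedFrom(cfgOnly, SourceCommandLine) << "\n";  // 0: command line is rejected
        return 0;
    }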
option_section.h | option_section.h | |||
---|---|---|---|---|
skipping to change at line 20 | skipping to change at line 20 | |||
* distributed under the License is distributed on an "AS IS" BASIS, | * distributed under the License is distributed on an "AS IS" BASIS, | |||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||
* See the License for the specific language governing permissions and | * See the License for the specific language governing permissions and | |||
* limitations under the License. | * limitations under the License. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/util/options_parser/option_description.h" | #include "mongo/util/options_parser/option_description.h" | |||
#include <boost/program_options.hpp> | #include <boost/program_options.hpp> | |||
#include <boost/shared_ptr.hpp> | ||||
#include <iostream> | #include <iostream> | |||
#include <list> | ||||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
namespace mongo { | namespace mongo { | |||
namespace optionenvironment { | namespace optionenvironment { | |||
namespace po = boost::program_options; | namespace po = boost::program_options; | |||
/** A container for OptionDescription instances and PositionalOptionDescription instances as | /**
* well as other OptionSection instances. Provides a description of all options that are | * A container for OptionDescription instances as well as other OptionSection instances.
* supported to be passed in to an OptionsParser. Has utility functions to support the various | * Provides a description of all options that are supported to be passed in to an
* formats needed by the parsing process | * OptionsParser. Has utility functions to support the various formats needed by the parsing
* process |
* | * | |||
* The sections and section names only matter in the help string. For sections in a JSON | * The sections and section names only matter in the help string. For sections in a JSON | |||
* config, look at the dots in the dottedName of the relevant OptionDe scription | * config, look at the dots in the dottedName of the relevant OptionDe scription | |||
* | * | |||
* Usage: | * Usage: | |||
* | * | |||
* namespace moe = mongo::optionenvironment; | * namespace moe = mongo::optionenvironment; | |||
* | * | |||
* moe::OptionsParser parser; | * moe::OptionsParser parser; | |||
* moe::Environment environment; | * moe::Environment environment; | |||
* moe::OptionSection options; | * moe::OptionSection options; | |||
* moe::OptionSection subSection("Section Name"); | * moe::OptionSection subSection("Section Name"); | |||
* | * | |||
* // Register our allowed option flags with our OptionSection | * // Register our allowed option flags with our OptionSection | |||
* options.addOption(moe::OptionDescription("help", "help", moe::Switc h, "Display Help")); | * options.addOptionChaining("help", "help", moe::Switch, "Display Hel p"); | |||
* | * | |||
* // Register our positional options with our OptionSection | * // Register our positional options with our OptionSection | |||
* options.addPositionalOption(moe::PositionalOptionDescription("command", moe::String)); | * options.addOptionChaining("command", "command", moe::String, "Command").positional(1, 1);
* | * | |||
* // Add a subsection | * // Add a subsection | |||
* subSection.addOption(moe::OptionDescription("port", "port", moe::In t, "Port")); | * subSection.addOptionChaining("port", "port", moe::Int, "Port"); | |||
* options.addSection(subSection); | * options.addSection(subSection); | |||
* | * | |||
* // Run the parser | * // Run the parser | |||
* Status ret = parser.run(options, argc, argv, envp, &environment); | * Status ret = parser.run(options, argc, argv, envp, &environment); | |||
* if (!ret.isOK()) { | * if (!ret.isOK()) { | |||
* cerr << options.helpString() << endl; | * cerr << options.helpString() << endl; | |||
* exit(EXIT_FAILURE); | * exit(EXIT_FAILURE); | |||
* } | * } | |||
*/ | */ | |||
skipping to change at line 76 | skipping to change at line 79 | |||
OptionSection(const std::string& name) : _name(name) { } | OptionSection(const std::string& name) : _name(name) { } | |||
OptionSection() { } | OptionSection() { } | |||
// Construction interface | // Construction interface | |||
/** | /** | |||
* Add a sub section to this section. Used mainly to keep track of section headers for when | * Add a sub section to this section. Used mainly to keep track of section headers for when | |||
* we need generate the help string for the command line | * we need generate the help string for the command line | |||
*/ | */ | |||
Status addSection(const OptionSection& subSection); | Status addSection(const OptionSection& subSection); | |||
/** | /** | |||
* Add an option to this section | * Add an option to this section, and returns a reference to an Opt | |||
*/ | ionDescription to allow | |||
Status addOption(const OptionDescription& option); | * for chaining. | |||
/** | * | |||
* Add a positional option to this section. Also adds a normal hid | * Examples: | |||
den option with the same | * | |||
* name as the PositionalOptionDescription because that is the mech | * options.addOptionChaining("option", "option", moe::String, "Chai | |||
anism boost program | ning Registration") | |||
* options uses. Unfortunately this means that positional options | * .hidden().setDefault(moe::Value("defaul | |||
can also be accessed by | t")) | |||
* name in the config files and via command line flags | * .setImplicit(moe::Value("implicit")); | |||
* | ||||
* This creates a hidden option that has default and implicit value | ||||
s. | ||||
* | ||||
* options.addOptionChaining("name", "name", moe::String, "Composin | ||||
g Option") | ||||
* .composing().sources(SourceAllConfig); | ||||
* | ||||
* This creates an option that is composing and can be specified on | ||||
ly in config files. | ||||
* | ||||
* See the OptionDescription class for details on the supported att | ||||
ributes. | ||||
* | ||||
* throws DBException on errors, such as attempting to register an | ||||
option with the same name | ||||
* as another option. These represent programming errors that shou | ||||
ld not happen during | ||||
* normal operation. | ||||
*/ | */ | |||
Status addPositionalOption(const PositionalOptionDescription& posit | OptionDescription& addOptionChaining(const std::string& dottedName, | |||
ionalOption); | const std::string& singleName, | |||
const OptionType type, | ||||
const std::string& description | ||||
); | ||||
// These functions are used by the OptionsParser to make calls into boost::program_options | // These functions are used by the OptionsParser to make calls into boost::program_options | |||
Status getBoostOptions(po::options_description* boostOptions, | Status getBoostOptions(po::options_description* boostOptions, | |||
bool visibleOnly = false, | bool visibleOnly = false, | |||
bool includeDefaults = false) const; | bool includeDefaults = false, | |||
OptionSources = SourceAll) const; | ||||
Status getBoostPositionalOptions( | Status getBoostPositionalOptions( | |||
po::positional_options_description* boostPositionalOptions) const; | po::positional_options_description* boostPositionalOptions) const; | |||
// This is needed so that the parser can iterate over all registere d options to get the | // This is needed so that the parser can iterate over all registere d options to get the | |||
// correct names when populating the Environment, as well as check that a parameter that was | // correct names when populating the Environment, as well as check that a parameter that was | |||
// found has been registered and has the correct type | // found has been registered and has the correct type | |||
Status getAllOptions(std::vector<OptionDescription>* options) const ; | Status getAllOptions(std::vector<OptionDescription>* options) const ; | |||
/** | /** | |||
* Populates the given map with all the default values for any opti ons in this option | * Populates the given map with all the default values for any opti ons in this option | |||
* section and all sub sections. | * section and all sub sections. | |||
*/ | */ | |||
Status getDefaults(std::map<Key, Value>* values) const; | Status getDefaults(std::map<Key, Value>* values) const; | |||
/** | ||||
* Populates the given vector with all the constraints for all options in this section and |
* sub sections. | ||||
*/ | ||||
Status getConstraints(std::vector<boost::shared_ptr<Constraint > >* constraints) const; |
std::string positionalHelpString(const std::string& execName) const ; | std::string positionalHelpString(const std::string& execName) const ; | |||
std::string helpString() const; | std::string helpString() const; | |||
// Debugging | // Debugging | |||
void dump() const; | void dump() const; | |||
private: | private: | |||
std::string _name; | std::string _name; | |||
std::vector<OptionSection> _subSections; | std::list<OptionSection> _subSections; | |||
std::vector<OptionDescription> _options; | std::list<OptionDescription> _options; | |||
std::vector<PositionalOptionDescription> _positionalOptions; | ||||
}; | }; | |||
} // namespace optionenvironment | } // namespace optionenvironment | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 12 change blocks. | ||||
27 lines changed or deleted | 61 lines changed or added | |||
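The new addOptionChaining() above returns a reference to the stored OptionDescription so calls like .hidden().setDefault(...) can be chained, and the backing containers switch from std::vector to std::list in the same change. One plausible reason, sketched below with simplified hypothetical types rather than the mongo classes, is that list insertion never relocates existing elements, so references handed out for chaining stay valid as more options are registered (vector::push_back can reallocate and invalidate them).

    #include <iostream>
    #include <list>
    #include <string>

    struct Option {
        std::string name;
        bool hiddenFlag;
        std::string defaultValue;
        Option(const std::string& n) : name(n), hiddenFlag(false) {}
        Option& hidden() { hiddenFlag = true; return *this; }              // chainable setters
        Option& setDefault(const std::string& v) { defaultValue = v; return *this; }
    };

    struct Section {
        // std::list: push_back never invalidates references to already-stored options,
        // which is what makes returning Option& for chaining safe here.
        std::list<Option> options;
        Option& addOptionChaining(const std::string& name) {
            options.push_back(Option(name));
            return options.back();
        }
    };

    int main() {
        Section section;
        section.addOptionChaining("verbose").hidden().setDefault("false");
        section.addOptionChaining("port").setDefault("27017");
        for (std::list<Option>::iterator it = section.options.begin();
             it != section.options.end(); ++it)
            std::cout << it->name << " default=" << it->defaultValue
                      << " hidden=" << it->hiddenFlag << "\n";
        return 0;
    }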
options_parser.h | options_parser.h | |||
---|---|---|---|---|
skipping to change at line 31 | skipping to change at line 31 | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
namespace mongo { | namespace mongo { | |||
namespace optionenvironment { | namespace optionenvironment { | |||
class Environment; | class Environment; | |||
class OptionSection; | class OptionSection; | |||
class Value; | class Value; | |||
/** Handles parsing of the command line as well as JSON and INI config files. Takes an | /** Handles parsing of the command line as well as YAML and INI config files. Takes an | |||
* OptionSection instance that describes the allowed options, parses a rgv (env not yet | * OptionSection instance that describes the allowed options, parses a rgv (env not yet | |||
* supported), and populates an Environment with the results. | * supported), and populates an Environment with the results. | |||
* | * | |||
* Usage: | * Usage: | |||
* | * | |||
* namespace moe = mongo::optionenvironment; | * namespace moe = mongo::optionenvironment; | |||
* | * | |||
* moe::OptionsParser parser; | * moe::OptionsParser parser; | |||
* moe::Environment environment; | * moe::Environment environment; | |||
* moe::OptionSection options; | * moe::OptionSection options; | |||
* | * | |||
* // Register our allowed options with our OptionSection | * // Register our allowed options with our OptionSection | |||
* options.addOption(moe::OptionDescription("help", "help", moe::Switc | * options.addOptionChaining("help", "help", moe::Switch, "Display Hel | |||
h, "Display Help")); | p"); | |||
* options.addOption(moe::OptionDescription("port", "port", moe::Int, | * options.addOptionChaining("port", "port", moe::Int, "Port"); | |||
"Port")); | ||||
* | * | |||
* // Run the parser | * // Run the parser | |||
* Status ret = parser.run(options, argv, env, &environment); | * Status ret = parser.run(options, argv, env, &environment); | |||
* if (!ret.isOK()) { | * if (!ret.isOK()) { | |||
* cerr << options.helpString() << endl; | * cerr << options.helpString() << endl; | |||
* exit(EXIT_FAILURE); | * exit(EXIT_FAILURE); | |||
* } | * } | |||
* | * | |||
* bool displayHelp; | * bool displayHelp; | |||
* ret = environment.get(moe::Key("help"), &displayHelp); | * ret = environment.get(moe::Key("help"), &displayHelp); | |||
skipping to change at line 78 | skipping to change at line 78 | |||
* ret = environment.get(moe::Key("port"), &port); | * ret = environment.get(moe::Key("port"), &port); | |||
* if (ret.isOK()) { | * if (ret.isOK()) { | |||
* // We have overridden port here, otherwise it stays as the defa ult. | * // We have overridden port here, otherwise it stays as the defa ult. | |||
* } | * } | |||
*/ | */ | |||
class OptionsParser { | class OptionsParser { | |||
public: | public: | |||
OptionsParser() { } | OptionsParser() { } | |||
virtual ~OptionsParser() { } | virtual ~OptionsParser() { } | |||
/** Handles parsing of the command line as well as JSON and INI config files. The | /** Handles parsing of the command line as well as YAML and INI config files. The
* OptionSection must be a description of the allowed options. This function populates the | * OptionSection must be a description of the allowed options. This function populates the
* given Environment with the results of parsing the command line and/or config files but | * given Environment with the results of parsing the command line and/or config files but
* does not call validate on the Environment. | * does not call validate on the Environment.
* | *
* The only special option is the "config" option. This function will check if the | * The only special option is the "config" option. This function will check if the
* "config" option was set on the command line and if so attempt to read the given config | * "config" option was set on the command line and if so attempt to read the given config
* file. For binaries that do not support config files, the "config" option should not be | * file. For binaries that do not support config files, the "config" option should not be
* registered in the OptionSection. | * registered in the OptionSection. | |||
*/ | */ | |||
Status run(const OptionSection&, | Status run(const OptionSection&, | |||
skipping to change at line 101 | skipping to change at line 101 | |||
Environment*); | Environment*); | |||
private: | private: | |||
/** Handles parsing of the command line and adds the results to the given Environment */ | /** Handles parsing of the command line and adds the results to the given Environment */ | |||
Status parseCommandLine(const OptionSection&, | Status parseCommandLine(const OptionSection&, | |||
const std::vector<std::string>& argv, Envir onment*); | const std::vector<std::string>& argv, Envir onment*); | |||
/** Handles parsing of an INI config string and adds the results to the given Environment */ | /** Handles parsing of an INI config string and adds the results to the given Environment */ | |||
Status parseINIConfigFile(const OptionSection&, const std::string& config, Environment*); | Status parseINIConfigFile(const OptionSection&, const std::string& config, Environment*); | |||
/** Handles parsing of a JSON config string and adds the results to the given Environment */
Status parseJSONConfigFile(const OptionSection&, const std::string& config, Environment*);
/** Gets defaults from the OptionSection and adds them to the given Environment */ | /** Gets defaults from the OptionSection and adds them to the given Environment */ | |||
Status addDefaultValues(const OptionSection&, Environment*); | Status addDefaultValues(const OptionSection&, Environment*); | |||
/** Detects whether the given string represents a JSON config file or an INI config file */
bool isJSONConfig(const std::string& config);
/** Reads the given config file into the output string. This function is virtual for | /** Reads the given config file into the output string. This function is virtual for
* testing purposes only. */ | * testing purposes only. */
virtual Status readConfigFile(const std::string& filename, std::string*); | virtual Status readConfigFile(const std::string& filename, std::string*);
}; | }; | |||
} // namespace optionenvironment | } // namespace optionenvironment | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 5 change blocks. | ||||
15 lines changed or deleted | 5 lines changed or added | |||
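The class comment above already sketches the intended call sequence; here is a hedged, self-contained version of it (header paths and the std::map type of the 'env' argument are assumptions, everything else mirrors the doc comment).

    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    #include "mongo/util/options_parser/environment.h"        // assumed locations
    #include "mongo/util/options_parser/option_section.h"
    #include "mongo/util/options_parser/options_parser.h"

    namespace moe = mongo::optionenvironment;

    int parsePort(const std::vector<std::string>& argv) {
        moe::OptionsParser parser;
        moe::Environment environment;
        moe::OptionSection options;

        // Register the allowed options, as in the usage comment above.
        options.addOptionChaining("help", "help", moe::Switch, "Display Help");
        options.addOptionChaining("port", "port", moe::Int, "Port");

        std::map<std::string, std::string> env;               // env parsing not yet supported
        mongo::Status ret = parser.run(options, argv, env, &environment);
        if (!ret.isOK()) {
            std::cerr << options.helpString() << std::endl;
            return -1;
        }

        int port = 27017;                                      // keep a default if "port" wasn't set
        environment.get(moe::Key("port"), &port);              // leaves 'port' untouched on error
        return port;
    }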
pdfile.h | pdfile.h | |||
---|---|---|---|---|
skipping to change at line 76 | skipping to change at line 76 | |||
bool repairDatabase(string db, string &errmsg, bool preserveClonedFilesOnFailure = false, bool backupOriginalFiles = false); | bool repairDatabase(string db, string &errmsg, bool preserveClonedFilesOnFailure = false, bool backupOriginalFiles = false);
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0); | bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0);
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc=DiskLoc()); | shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc=DiskLoc()); | |||
bool isValidNS( const StringData& ns ); | bool isValidNS( const StringData& ns ); | |||
/*--------------------------------------------------------------------- */ | /*--------------------------------------------------------------------- */ | |||
class DataFileMgr { | class DataFileMgr { | |||
friend class BasicCursor; | ||||
public: | public: | |||
DataFileMgr(); | DataFileMgr(); | |||
void init(const string& path ); | ||||
/** @return DiskLoc where item ends up */ | ||||
// changedId should be initialized to false | ||||
const DiskLoc updateRecord( | ||||
const char *ns, | ||||
Collection* collection, | ||||
Record *toupdate, const DiskLoc& dl, | ||||
const char *buf, int len, OpDebug& debug, bool god=false); | ||||
// The object o may be updated if modified on insert. | // The object o may be updated if modified on insert. | |||
void insertAndLog( const char *ns, const BSONObj &o, bool god = false, bool fromMigrate = false ); | void insertAndLog( const char *ns, const BSONObj &o, bool god = false, bool fromMigrate = false );
/** | /**
* insert() will add an _id to the object if not present. If you would like to see the | * insert() will add an _id to the object if not present. If you would like to see the
* final object after such an addition, use this method. | * final object after such an addition, use this method. | |||
* note: does NOT put on oplog | * note: does NOT put on oplog | |||
* @param o both and in and out param | * @param o both and in and out param | |||
* @param mayInterrupt When true, killop may interrupt the function call. | * @param mayInterrupt When true, killop may interrupt the function call. | |||
skipping to change at line 134 | skipping to change at line 124 | |||
/* special version of insert for transaction logging -- streamlined a bit. | /* special version of insert for transaction logging -- streamlined a bit. | |||
assumes ns is capped and no indexes | assumes ns is capped and no indexes | |||
no _id field check | no _id field check | |||
*/ | */ | |||
Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int len); | Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int len); | |||
static Extent* getExtent(const DiskLoc& dl); | static Extent* getExtent(const DiskLoc& dl); | |||
static Record* getRecord(const DiskLoc& dl); | static Record* getRecord(const DiskLoc& dl); | |||
static DeletedRecord* getDeletedRecord(const DiskLoc& dl); | static DeletedRecord* getDeletedRecord(const DiskLoc& dl); | |||
void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false, bool logOp=false); | void deleteRecord(NamespaceDetails* d, const StringData& ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false, bool logOp=false);
void deleteRecord(NamespaceDetails* d, const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false, bool logOp=false);
/* does not clean up indexes, etc. : just deletes the record in the pdfile. use deleteRecord() to unindex */ | /* does not clean up indexes, etc. : just deletes the record in the pdfile. use deleteRecord() to unindex */ | |||
void _deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl); | void _deleteRecord(NamespaceDetails *d, const StringData& ns, Record *todelete, const DiskLoc& dl);
/**
* accessor/mutator for the 'precalced' keys (that is, sorted index keys)
*
* NB: 'precalced' is accessed from fastBuildIndex(), which is called from insert-related
* methods like insertWithObjMod(). It is mutated from various callers of the insert
* methods, which assume 'precalced' will not change while in the insert method. This
* should likely be refactored so theDataFileMgr takes full responsibility.
*/
SortPhaseOne* getPrecalced() const;
void setPrecalced(SortPhaseOne* precalced);
mongo::mutex _precalcedMutex;
private:
vector<DataFile *> files;
SortPhaseOne* _precalced;
}; | }; | |||
extern DataFileMgr theDataFileMgr; | extern DataFileMgr theDataFileMgr; | |||
#pragma pack(1) | #pragma pack(1) | |||
class DeletedRecord { | class DeletedRecord { | |||
public: | public: | |||
int lengthWithHeaders() const { _accessing(); return _lengthWithHeaders; } | int lengthWithHeaders() const { _accessing(); return _lengthWithHeaders; }
skipping to change at line 299 | skipping to change at line 271 | |||
char _data[4]; | char _data[4]; | |||
public: | public: | |||
static bool MemoryTrackingEnabled; | static bool MemoryTrackingEnabled; | |||
}; | }; | |||
#pragma pack() | #pragma pack() | |||
// XXX-ERH | // XXX-ERH | |||
inline Extent* Extent::getNextExtent() { | ||||
return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext); | ||||
} | ||||
inline Extent* Extent::getPrevExtent() { | ||||
return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev); | ||||
} | ||||
inline DiskLoc Record::getNext(const DiskLoc& myLoc) { | inline DiskLoc Record::getNext(const DiskLoc& myLoc) { | |||
_accessing(); | _accessing(); | |||
if ( _nextOfs != DiskLoc::NullOfs ) { | if ( _nextOfs != DiskLoc::NullOfs ) { | |||
/* defensive */ | /* defensive */ | |||
if ( _nextOfs >= 0 && _nextOfs < 10 ) { | if ( _nextOfs >= 0 && _nextOfs < 10 ) { | |||
logContext("Assertion failure - Record::getNext() referenci ng a deleted record?"); | logContext("Assertion failure - Record::getNext() referenci ng a deleted record?"); | |||
return DiskLoc(); | return DiskLoc(); | |||
} | } | |||
return DiskLoc(myLoc.a(), _nextOfs); | return DiskLoc(myLoc.a(), _nextOfs); | |||
skipping to change at line 435 | skipping to change at line 399 | |||
return BSONObj( r->data() ); | return BSONObj( r->data() ); | |||
} | } | |||
DiskLoc allocateSpaceForANewRecord(const char* ns, | DiskLoc allocateSpaceForANewRecord(const char* ns, | |||
NamespaceDetails* d, | NamespaceDetails* d, | |||
int32_t lenWHdr, | int32_t lenWHdr, | |||
bool god); | bool god); | |||
void addRecordToRecListInExtent(Record* r, DiskLoc loc); | void addRecordToRecListInExtent(Record* r, DiskLoc loc); | |||
/**
* Static helpers to manipulate the list of unfinished index builds.
*/
class IndexBuildsInProgress {
public:
    /**
     * Find an unfinished index build by name. Does not search finished index builds.
     */
    static int get(const char* ns, const std::string& indexName);
    /**
     * Remove an unfinished index build from the list of index builds and move every subsequent
     * unfinished index build back one. E.g., if x, y, z, and w are building and someone kills
     * y, this method would rearrange the list to be x, z, w, (empty), etc.
     */
    static void remove(const char* ns, int offset);
};
} // namespace mongo | } // namespace mongo | |||
End of changes. 6 change blocks. | ||||
69 lines changed or deleted | 5 lines changed or added | |||
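A hedged sketch (not from the tree) of turning a DiskLoc into the BSON document it points at, using the static DataFileMgr accessors declared above; obtaining a valid DiskLoc (from a cursor, an index entry, etc.) is assumed to happen elsewhere.

    #include "mongo/db/diskloc.h"   // assumed locations
    #include "mongo/db/jsobj.h"
    #include "mongo/db/pdfile.h"

    namespace mongo {

        BSONObj objAt(const DiskLoc& loc) {
            // getRecord() maps the DiskLoc to its in-memory Record; data() is the start of the BSON.
            Record* r = DataFileMgr::getRecord(loc);
            return BSONObj(r->data());
        }

    } // namespace mongo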
pdfile_version.h | pdfile_version.h | |||
---|---|---|---|---|
skipping to change at line 41 | skipping to change at line 41 | |||
namespace mongo { | namespace mongo { | |||
// pdfile versions | // pdfile versions | |||
const int PDFILE_VERSION = 4; | const int PDFILE_VERSION = 4; | |||
const int PDFILE_VERSION_MINOR_22_AND_OLDER = 5; | const int PDFILE_VERSION_MINOR_22_AND_OLDER = 5; | |||
const int PDFILE_VERSION_MINOR_24_AND_NEWER = 6; | const int PDFILE_VERSION_MINOR_24_AND_NEWER = 6; | |||
// For backward compatibility with versions before 2.4.0 all new DBs start | // For backward compatibility with versions before 2.4.0 all new DBs start
// with PDFILE_VERSION_MINOR_22_AND_OLDER and are converted when the first | // with PDFILE_VERSION_MINOR_22_AND_OLDER and are converted when the first
// index using a new plugin is created. See the logic in | // index using a new plugin is created. See the logic in | |||
// prepareToBuildIndex() and upgradeMinorVersionOrAssert() for details | // IndexCatalog::_upgradeDatabaseMinorVersionIfNeeded for details | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 1 lines changed or added | |||
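Purely as an illustration of the upgrade gate described in that comment (this is not the server's actual check), a tiny self-contained sketch using copies of the constants above:

    #include <iostream>

    namespace {
        const int PDFILE_VERSION_MINOR_22_AND_OLDER = 5;   // values copied from pdfile_version.h
        const int PDFILE_VERSION_MINOR_24_AND_NEWER = 6;
    }

    // New DBs start at the older minor version and are converted when the first index using a
    // new plugin is created, so only the newer value permits the 2.4+ index plugins.
    bool allowsNewIndexPlugins(int minorVersion) {
        return minorVersion >= PDFILE_VERSION_MINOR_24_AND_NEWER;
    }

    int main() {
        std::cout << allowsNewIndexPlugins(PDFILE_VERSION_MINOR_22_AND_OLDER) << "\n";  // 0
        std::cout << allowsNewIndexPlugins(PDFILE_VERSION_MINOR_24_AND_NEWER) << "\n";  // 1
        return 0;
    }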
pipeline.h | pipeline.h | |||
---|---|---|---|---|
skipping to change at line 59 | skipping to change at line 59 | |||
struct OpDesc; // local private struct | struct OpDesc; // local private struct | |||
class Privilege; | class Privilege; | |||
/** mongodb "commands" (sent via db.$cmd.findOne(...)) | /** mongodb "commands" (sent via db.$cmd.findOne(...)) | |||
subclass to make a command. define a singleton object for it. | subclass to make a command. define a singleton object for it. | |||
*/ | */ | |||
class Pipeline : | class Pipeline : | |||
public IntrusiveCounterUnsigned { | public IntrusiveCounterUnsigned { | |||
public: | public: | |||
/** | /** | |||
Create a pipeline from the command. | * Create a pipeline from the command. | |||
* | ||||
@param errmsg where to write errors, if there are any | * @param errmsg where to write errors, if there are any | |||
@param cmdObj the command object sent from the client | * @param cmdObj the command object sent from the client | |||
@returns the pipeline, if created, otherwise a NULL reference | * @returns the pipeline, if created, otherwise a NULL reference | |||
*/ | */ | |||
static intrusive_ptr<Pipeline> parseCommand( | static intrusive_ptr<Pipeline> parseCommand( | |||
string &errmsg, BSONObj &cmdObj, | string& errmsg, | |||
const intrusive_ptr<ExpressionContext> &pCtx); | const BSONObj& cmdObj, | |||
const intrusive_ptr<ExpressionContext>& pCtx); | ||||
/// Helper to implement Command::addRequiredPrivileges | /// Helper to implement Command::addRequiredPrivileges | |||
static void addRequiredPrivileges(Command* commandTemplate, | static void addRequiredPrivileges(Command* commandTemplate, | |||
const string& dbname, | const string& dbname, | |||
BSONObj cmdObj, | BSONObj cmdObj, | |||
vector<Privilege>* out); | vector<Privilege>* out); | |||
intrusive_ptr<ExpressionContext> getContext() const { return pCtx; } | intrusive_ptr<ExpressionContext> getContext() const { return pCtx; } | |||
/** | /** | |||
Split the current Pipeline into a Pipeline for each shard, and | Split the current Pipeline into a Pipeline for each shard, and | |||
a Pipeline that combines the results within mongos. | a Pipeline that combines the results within mongos. | |||
This permanently alters this pipeline for the merging operation. | This permanently alters this pipeline for the merging operation. | |||
@returns the Spec for the pipeline command that should be sent | @returns the Spec for the pipeline command that should be sent | |||
to the shards | to the shards | |||
*/ | */ | |||
intrusive_ptr<Pipeline> splitForSharded(); | intrusive_ptr<Pipeline> splitForSharded(); | |||
/** | /** If the pipeline starts with a $match, return its BSON predicate.
If the pipeline starts with a $match, dump its BSON predicate | * Returns empty BSON if the first stage isn't $match.
specification to the supplied builder and return true. |
@param pQueryBuilder the builder to put the match BSON into |
@returns true if a match was found and dumped to pQueryBuilder, false otherwise |
*/ | */
bool getInitialQuery(BSONObjBuilder *pQueryBuilder) const; | BSONObj getInitialQuery() const;
/** | /** | |||
Write the Pipeline as a BSONObj command. This should be the | Write the Pipeline as a BSONObj command. This should be the | |||
inverse of parseCommand(). | inverse of parseCommand(). | |||
This is only intended to be used by the shard command obtained | This is only intended to be used by the shard command obtained | |||
from splitForSharded(). Some pipeline operations in the merge | from splitForSharded(). Some pipeline operations in the merge | |||
process do not have equivalent command forms, and using this on | process do not have equivalent command forms, and using this on | |||
the mongos Pipeline will cause assertions. | the mongos Pipeline will cause assertions. | |||
skipping to change at line 157 | skipping to change at line 153 | |||
for the Pipeline. It exists because of linkage requirements. | for the Pipeline. It exists because of linkage requirements. | |||
Pipeline needs to function in mongod and mongos. PipelineD | Pipeline needs to function in mongod and mongos. PipelineD | |||
contains extra functionality required in mongod, and which can't | contains extra functionality required in mongod, and which can't | |||
appear in mongos because the required symbols are unavailable | appear in mongos because the required symbols are unavailable | |||
for linking there. Consider PipelineD to be an extension of this | for linking there. Consider PipelineD to be an extension of this | |||
class for mongod only. | class for mongod only. | |||
*/ | */ | |||
friend class PipelineD; | friend class PipelineD; | |||
private: | private: | |||
class Optimizations {
public:
    // These contain static functions that optimize pipelines in various ways.
    // They are classes rather than namespaces so that they can be friends of Pipeline.
    // Classes are defined in pipeline_optimizations.h.
    class Local;
    class Sharded;
};
friend class Optimizations::Local;
friend class Optimizations::Sharded;
static const char pipelineName[]; | static const char pipelineName[]; | |||
static const char explainName[]; | static const char explainName[]; | |||
static const char fromRouterName[]; | static const char fromRouterName[]; | |||
static const char serverPipelineName[]; | static const char serverPipelineName[]; | |||
static const char mongosPipelineName[]; | static const char mongosPipelineName[]; | |||
Pipeline(const intrusive_ptr<ExpressionContext> &pCtx); | Pipeline(const intrusive_ptr<ExpressionContext> &pCtx); | |||
typedef std::deque<boost::intrusive_ptr<DocumentSource> > SourceCon tainer; | typedef std::deque<boost::intrusive_ptr<DocumentSource> > SourceCon tainer; | |||
SourceContainer sources; | SourceContainer sources; | |||
bool explain; | bool explain; | |||
boost::intrusive_ptr<ExpressionContext> pCtx; | boost::intrusive_ptr<ExpressionContext> pCtx; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 6 change blocks. | ||||
16 lines changed or deleted | 26 lines changed or added | |||
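A hedged sketch of driving the Pipeline interface declared above (not code from the tree): the ExpressionContext is assumed to be supplied by the surrounding command implementation, and the collection name and $match predicate are made up for illustration.

    #include "mongo/db/jsobj.h"
    #include "mongo/db/pipeline/expression_context.h"   // assumed locations
    #include "mongo/db/pipeline/pipeline.h"

    namespace mongo {

        BSONObj initialMatchFor(const intrusive_ptr<ExpressionContext>& pCtx) {
            // A minimal aggregate command: { aggregate: "orders", pipeline: [ { $match: ... } ] }
            BSONObj cmdObj = BSON("aggregate" << "orders"
                               << "pipeline" << BSON_ARRAY(BSON("$match" << BSON("status" << "A"))));

            std::string errmsg;
            intrusive_ptr<Pipeline> pipeline = Pipeline::parseCommand(errmsg, cmdObj, pCtx);
            if (!pipeline.get()) {
                return BSONObj();          // errmsg describes why parsing failed
            }
            // Empty BSON comes back if the first stage isn't a $match.
            return pipeline->getInitialQuery();
        }

    } // namespace mongo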
plan_ranker.h | plan_ranker.h | |||
---|---|---|---|---|
skipping to change at line 31 | skipping to change at line 31 | |||
* all of the code used other than as permitted herein. If you modify file(s) | * all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <deque> | #include <list> | |||
#include <vector> | #include <vector> | |||
#include "mongo/db/exec/plan_stage.h" | #include "mongo/db/exec/plan_stage.h" | |||
#include "mongo/db/exec/plan_stats.h" | #include "mongo/db/exec/plan_stats.h" | |||
#include "mongo/db/exec/working_set.h" | #include "mongo/db/exec/working_set.h" | |||
#include "mongo/db/query/query_solution.h" | #include "mongo/db/query/query_solution.h" | |||
namespace mongo { | namespace mongo { | |||
struct CandidatePlan; | struct CandidatePlan; | |||
skipping to change at line 75 | skipping to change at line 75 | |||
*/ | */ | |||
struct CandidatePlan { | struct CandidatePlan { | |||
CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w) | CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w) | |||
: solution(s), root(r), ws(w), failed(false) { } | : solution(s), root(r), ws(w), failed(false) { } | |||
QuerySolution* solution; | QuerySolution* solution; | |||
PlanStage* root; | PlanStage* root; | |||
WorkingSet* ws; | WorkingSet* ws; | |||
// Any results produced during the plan's execution prior to ranking are retained here. | // Any results produced during the plan's execution prior to ranking are retained here.
std::deque<WorkingSetID> results; | std::list<WorkingSetID> results; | |||
bool failed; | bool failed; | |||
}; | }; | |||
/** | /** | |||
* Information about why a plan was picked to be the best. Data here is placed into the cache | * Information about why a plan was picked to be the best. Data here is placed into the cache
* and used by the CachedPlanRunner to compare expected performance with actual. | * and used by the CachedPlanRunner to compare expected performance with actual.
*/ | */ | |||
struct PlanRankingDecision { | struct PlanRankingDecision { | |||
PlanRankingDecision() : statsOfWinner(NULL), onlyOneSolution(false) { } | PlanRankingDecision() : statsOfWinner(NULL), onlyOneSolution(false) { } | |||
End of changes. 2 change blocks. | ||||
2 lines changed or deleted | 2 lines changed or added | |||
plan_stats.h | plan_stats.h | |||
---|---|---|---|---|
skipping to change at line 262 | skipping to change at line 262 | |||
virtual ~MergeSortStats() { } | virtual ~MergeSortStats() { } | |||
uint64_t dupsTested; | uint64_t dupsTested; | |||
uint64_t dupsDropped; | uint64_t dupsDropped; | |||
// How many records were we forced to fetch as the result of an invalidation? | // How many records were we forced to fetch as the result of an invalidation?
uint64_t forcedFetches; | uint64_t forcedFetches; | |||
}; | }; | |||
struct ShardingFilterStats : public SpecificStats { | ||||
ShardingFilterStats() : chunkSkips(0) { } | ||||
uint64_t chunkSkips; | ||||
}; | ||||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 6 lines changed or added | |||
prefetch.h | prefetch.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/diskloc.h" | #include "mongo/db/diskloc.h" | |||
namespace mongo { | namespace mongo { | |||
class NamespaceDetails; | class Collection; | |||
// page in both index and data pages for an op from the oplog | // page in both index and data pages for an op from the oplog | |||
void prefetchPagesForReplicatedOp(const BSONObj& op); | void prefetchPagesForReplicatedOp(const BSONObj& op); | |||
// page in pages needed for all index lookups on a given object | // page in pages needed for all index lookups on a given object | |||
void prefetchIndexPages(NamespaceDetails *nsd, const BSONObj& obj); | void prefetchIndexPages(Collection *nsd, const BSONObj& obj); | |||
// page in the data pages for a record associated with an object | // page in the data pages for a record associated with an object | |||
void prefetchRecordPages(const char *ns, const BSONObj& obj); | void prefetchRecordPages(const char *ns, const BSONObj& obj); | |||
} | } | |||
End of changes. 2 change blocks. | ||||
2 lines changed or deleted | 2 lines changed or added | |||
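A hedged sketch (not the replication code itself) of how the helpers declared above might be used while applying an oplog entry; the Collection lookup and locking are assumed to have been done by the caller, and the "o" field is the document carried by the oplog entry.

    #include "mongo/db/jsobj.h"
    #include "mongo/db/prefetch.h"   // assumed location

    namespace mongo {

        void warmCachesForOp(Collection* collection, const char* ns, const BSONObj& op) {
            BSONObj doc = op.getObjectField("o");   // the document the op applies to
            prefetchIndexPages(collection, doc);    // touch every index entry the doc would need
            prefetchRecordPages(ns, doc);           // then the document's own data pages
        }

    } // namespace mongo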
qlog.h | qlog.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <ostream> | #include <ostream> | |||
namespace mongo { | namespace mongo { | |||
std::ostream& QLOG(); | std::ostream& QLOG(); | |||
bool qlogOff(); | ||||
bool qlogOn(); | ||||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 3 lines changed or added | |||
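Since QLOG() hands back a std::ostream&, query-planning code can stream diagnostics to it like any other stream; a minimal hedged sketch (header path assumed):

    #include "mongo/db/query/qlog.h"   // assumed location

    namespace mongo {

        void logPlanCount(size_t numPlans) {
            QLOG() << "considering " << numPlans << " candidate plan(s)" << std::endl;
        }

    } // namespace mongo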
query_planner.h | query_planner.h | |||
---|---|---|---|---|
skipping to change at line 37 | skipping to change at line 37 | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/db/query/canonical_query.h" | #include "mongo/db/query/canonical_query.h" | |||
#include "mongo/db/query/index_entry.h" | #include "mongo/db/query/index_entry.h" | |||
#include "mongo/db/query/query_solution.h" | #include "mongo/db/query/query_solution.h" | |||
namespace mongo { | namespace mongo { | |||
/** | struct QueryPlannerParams { | |||
* QueryPlanner's job is to provide an entry point to the query plannin | ||||
g and optimization | ||||
* process. | ||||
*/ | ||||
class QueryPlanner { | ||||
public: | ||||
enum Options { | enum Options { | |||
// You probably want to set this. | // You probably want to set this. | |||
DEFAULT = 0, | DEFAULT = 0, | |||
// Set this if you don't want a table scan. | // Set this if you don't want a table scan. | |||
// See http://docs.mongodb.org/manual/reference/parameters/ | // See http://docs.mongodb.org/manual/reference/parameters/ | |||
NO_TABLE_SCAN = 1, | NO_TABLE_SCAN = 1, | |||
// Set this if you want a collscan outputted even if there's an ixscan. | // Set this if you want a collscan outputted even if there's an ixscan. | |||
INCLUDE_COLLSCAN = 2, | INCLUDE_COLLSCAN = 2, | |||
// Set this if you're running on a sharded cluster. We'll add a "drop all docs that
// shouldn't be on this shard" stage before projection.
//
// In order to set this, you must check
// shardingState.needCollectionMetadata(current_namespace) in the same lock that you use
// to build the query runner.
INCLUDE_SHARD_FILTER = 4,
}; | }; | |||
// See Options enum above. | ||||
size_t options; | ||||
// What indices are available for planning? | ||||
vector<IndexEntry> indices; | ||||
// What's our shard key? If INCLUDE_SHARD_FILTER is set we will create a shard filtering
// stage. If we know the shard key, we can perform covering analysis instead of always
// forcing a fetch.
BSONObj shardKey; | ||||
}; | ||||
/** | ||||
* QueryPlanner's job is to provide an entry point to the query planning and optimization
* process. | ||||
*/ | ||||
class QueryPlanner { | ||||
public: | ||||
/** | /** | |||
* Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the | * Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the | |||
* provided indices to generate a solution. | * indices and other data in 'params' to plan with. | |||
* | * | |||
* Caller owns pointers in *out. | * Caller owns pointers in *out. | |||
*/ | */ | |||
static void plan(const CanonicalQuery& query, | static void plan(const CanonicalQuery& query, | |||
const vector<IndexEntry>& indices, | const QueryPlannerParams& params, | |||
size_t options, | ||||
vector<QuerySolution*>* out); | vector<QuerySolution*>* out); | |||
private: | private: | |||
// | // | |||
// Index Selection methods. | // Index Selection methods. | |||
// | // | |||
/** | /** | |||
* Return all the fields in the tree rooted at 'node' that we can use an index on | * Return all the fields in the tree rooted at 'node' that we can use an index on
* in order to answer the query. | * in order to answer the query. | |||
skipping to change at line 123 | skipping to change at line 143 | |||
static void rateIndices(MatchExpression* node, string prefix, | static void rateIndices(MatchExpression* node, string prefix, | |||
const vector<IndexEntry>& indices); | const vector<IndexEntry>& indices); | |||
// | // | |||
// Collection Scan Data Access method. | // Collection Scan Data Access method. | |||
// | // | |||
/** | /** | |||
* Return a CollectionScanNode that scans as requested in 'query'. | * Return a CollectionScanNode that scans as requested in 'query'. | |||
*/ | */ | |||
static QuerySolution* makeCollectionScan(const CanonicalQuery& query, bool tailable); | static QuerySolution* makeCollectionScan(const CanonicalQuery& query, bool tailable, const QueryPlannerParams& params);
// | // | |||
// Indexed Data Access methods. | // Indexed Data Access methods. | |||
// | // | |||
// The inArrayOperator flag deserves some attention. It is set when we're processing a child of | // The inArrayOperator flag deserves some attention. It is set when we're processing a child of
// a MatchExpression::ALL or MatchExpression::ELEM_MATCH_OBJECT. | // a MatchExpression::ALL or MatchExpression::ELEM_MATCH_OBJECT. | |||
// | // | |||
// When true, the following behavior changes for all methods below that take it as an argument: | // When true, the following behavior changes for all methods below that take it as an argument: | |||
// 0. No deletion of MatchExpression(s). In fact, | // 0. No deletion of MatchExpression(s). In fact, | |||
// 1. No mutation of the MatchExpression at all. We need the tree as-is in order to perform | // 1. No mutation of the MatchExpression at all. We need the tree as-is in order to perform | |||
// a filter on the entire tree. | // a filter on the entire tree. | |||
// 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess | // 2. No fetches performed. There will be a final fetch by the caller of buildIndexedDataAccess
// who set the value of inArrayOperator to true. | // who set the value of inArrayOperator to true. | |||
// 3. No compound indices are used and no bounds are combined. These are incorrect in the context | // 3. No compound indices are used and no bounds are combined. These are incorrect in the context
// of these operators. | // of these operators. | |||
// | // | |||
/** | /** | |||
* If 'inArrayOperator' is false, takes ownership of 'root'. | * If 'inArrayOperator' is false, takes ownership of 'root'. | |||
*/ | */ | |||
static QuerySolutionNode* buildIndexedDataAccess(MatchExpression* root, | static QuerySolutionNode* buildIndexedDataAccess(const CanonicalQuery& query,
| MatchExpression* root,
bool inArrayOperator, | bool inArrayOperator,
const vector<IndexEntry>& indices); | const vector<IndexEntry>& indices);
/** | /** | |||
* Takes ownership of 'root'. | * Takes ownership of 'root'. | |||
*/ | */ | |||
static QuerySolutionNode* buildIndexedAnd(MatchExpression* root, | static QuerySolutionNode* buildIndexedAnd(const CanonicalQuery& query,
| MatchExpression* root,
bool inArrayOperator, | bool inArrayOperator, | |||
const vector<IndexEntry>& indices); | const vector<IndexEntry>& indices); | |||
/** | /** | |||
* Takes ownership of 'root'. | * Takes ownership of 'root'. | |||
*/ | */ | |||
static QuerySolutionNode* buildIndexedOr(MatchExpression* root, | static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& query,
| MatchExpression* root,
bool inArrayOperator, | bool inArrayOperator, | |||
const vector<IndexEntry>& indices); | const vector<IndexEntry>& indices); | |||
/** | /** | |||
* Helper used by buildIndexedAnd and buildIndexedOr. | * Helper used by buildIndexedAnd and buildIndexedOr. | |||
* | * | |||
* The children of AND and OR nodes are sorted by the index that th e subtree rooted at | * The children of AND and OR nodes are sorted by the index that th e subtree rooted at | |||
* that node uses. Child nodes that use the same index are adjacent to one another to | * that node uses. Child nodes that use the same index are adjacent to one another to
* facilitate grouping of index scans. As such, the processing for AND and OR is | * facilitate grouping of index scans. As such, the processing for AND and OR is | |||
* almost identical. | * almost identical. | |||
* | * | |||
* See tagForSort and sortUsingTags in index_tag.h for details on ordering the children | * See tagForSort and sortUsingTags in index_tag.h for details on ordering the children
* of OR and AND. | * of OR and AND. | |||
* | * | |||
* Does not take ownership of 'root' but may remove children from it. | * Does not take ownership of 'root' but may remove children from it.
*/ | */ | |||
static bool processIndexScans(MatchExpression* root, | static bool processIndexScans(const CanonicalQuery& query,
| MatchExpression* root,
bool inArrayOperator, | bool inArrayOperator, | |||
const vector<IndexEntry>& indices, | const vector<IndexEntry>& indices, | |||
vector<QuerySolutionNode*>* out); | vector<QuerySolutionNode*>* out); | |||
// | // | |||
// Helpers for creating an index scan. | // Helpers for creating an index scan. | |||
// | // | |||
/** | /** | |||
* Create a new data access node. | * Create a new data access node. | |||
* | * | |||
* If the node is an index scan, the bounds for 'expr' are computed and placed into the | * If the node is an index scan, the bounds for 'expr' are computed and placed into the | |||
* first field's OIL position. The rest of the OILs are allocated but uninitialized. | * first field's OIL position. The rest of the OILs are allocated but uninitialized. | |||
* | * | |||
* If the node is a geo node, XXX. | * If the node is a geo node, grab the geo data from 'expr' and stuff it into the
| * geo solution node of the appropriate type.
*/ | */ | |||
static QuerySolutionNode* makeLeafNode(const IndexEntry& index, | static QuerySolutionNode* makeLeafNode(const IndexEntry& index, | |||
MatchExpression* expr, | MatchExpression* expr, | |||
bool* exact); | bool* exact); | |||
/** | /** | |||
* Merge the predicate 'expr' with the leaf node 'node'. | * Merge the predicate 'expr' with the leaf node 'node'. | |||
*/ | */ | |||
static void mergeWithLeafNode(MatchExpression* expr, const IndexEntry& index, | static void mergeWithLeafNode(MatchExpression* expr, const IndexEntry& index,
size_t pos, bool* exactOut, QuerySolutionNode* node, | size_t pos, bool* exactOut, QuerySolutionNode* node,
MatchExpression::MatchType mergeType); | MatchExpression::MatchType mergeType);
/** | /** | |||
* If index scan, fill in any bounds that are missing in 'node' with the "all values for | * If index scan (regular or expression index), fill in any bounds that are missing in
* this field" interval. | * 'node' with the "all values for this field" interval.
* | * | |||
* If geo, XXX. | * If geo, do nothing. | |||
*/ | */ | |||
static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index); | static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index);
// | // | |||
// Analysis of Data Access | // Analysis of Data Access | |||
// | // | |||
/** | /** | |||
* In brief: performs sort and covering analysis. | * In brief: performs sort and covering analysis. | |||
* | * | |||
skipping to change at line 230 | skipping to change at line 257 | |||
* to perform sorting, projection, or other operations that are independent of the source | * to perform sorting, projection, or other operations that are independent of the source
* of the data. These stages are added atop 'solnRoot'. | * of the data. These stages are added atop 'solnRoot'. | |||
* | * | |||
* 'taggedRoot' is a copy of the parse tree. Nodes in 'solnRoot' may point into it. | * 'taggedRoot' is a copy of the parse tree. Nodes in 'solnRoot' may point into it.
* | * | |||
* Takes ownership of 'solnRoot' and 'taggedRoot'. | * Takes ownership of 'solnRoot' and 'taggedRoot'. | |||
* | * | |||
* Caller owns the returned QuerySolution. | * Caller owns the returned QuerySolution. | |||
*/ | */ | |||
static QuerySolution* analyzeDataAccess(const CanonicalQuery& query, | static QuerySolution* analyzeDataAccess(const CanonicalQuery& query,
| const QueryPlannerParams& params,
QuerySolutionNode* solnRoot); | QuerySolutionNode* solnRoot);
/**
* Return a plan that uses the provided index as a proxy for a collection scan.
*/
static QuerySolution* scanWholeIndex(const IndexEntry& index,
                                     const CanonicalQuery& query,
                                     const QueryPlannerParams& params,
                                     int direction = 1);
/**
* Traverse the tree rooted at 'root' reversing ixscans and other sorts.
*/
static void reverseScans(QuerySolutionNode* root);
/**
* Assumes each OIL in bounds is increasing.
*
* Aligns OILs (and bounds) according to the kp direction * the scanDir.
*/
static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int scanDir = 1);
/**
* Does the index with key pattern 'kp' provide the sort that 'query' wants?
*/
static bool providesSort(const CanonicalQuery& query, const BSONObj& kp);
/**
* Get the bounds for the sort in 'query' used by the sort stage. Output the bounds
* in 'node'.
*/
static void getBoundsForSort(const CanonicalQuery& query, SortNode* node);
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 15 change blocks. | ||||
22 lines changed or deleted | 99 lines changed or added | |||
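A hedged sketch (not from the tree) of filling in QueryPlannerParams and invoking the planner as declared above; the CanonicalQuery and the IndexEntry list are assumed to have been built elsewhere, and ownership of the returned solutions follows the doc comment.

    #include <vector>

    #include "mongo/db/query/canonical_query.h"   // assumed locations
    #include "mongo/db/query/index_entry.h"
    #include "mongo/db/query/query_planner.h"
    #include "mongo/db/query/query_solution.h"

    namespace mongo {

        void planAndDiscard(const CanonicalQuery& query,
                            const std::vector<IndexEntry>& availableIndices) {
            QueryPlannerParams params;
            params.options = QueryPlannerParams::NO_TABLE_SCAN;   // refuse collscan-only plans
            params.indices = availableIndices;

            std::vector<QuerySolution*> solutions;
            QueryPlanner::plan(query, params, &solutions);

            // Caller owns the pointers in 'solutions'.
            for (size_t i = 0; i < solutions.size(); ++i) {
                delete solutions[i];
            }
        }

    } // namespace mongo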
query_solution.h | query_solution.h | |||
---|---|---|---|---|
skipping to change at line 31 | skipping to change at line 31 | |||
* all of the code used other than as permitted herein. If you modify file(s) | * all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/db/jsobj.h" | ||||
#include "mongo/db/matcher/expression.h" | #include "mongo/db/matcher/expression.h" | |||
#include "mongo/db/geo/geoquery.h" | #include "mongo/db/geo/geoquery.h" | |||
#include "mongo/db/fts/fts_query.h" | #include "mongo/db/fts/fts_query.h" | |||
#include "mongo/db/query/index_bounds.h" | #include "mongo/db/query/index_bounds.h" | |||
#include "mongo/db/query/projection_parser.h" | #include "mongo/db/query/lite_projection.h" | |||
#include "mongo/db/query/stage_types.h" | #include "mongo/db/query/stage_types.h" | |||
namespace mongo { | namespace mongo { | |||
using mongo::fts::FTSQuery; | using mongo::fts::FTSQuery; | |||
/** | /** | |||
* This is an abstract representation of a query plan. It can be transcribed into a tree of | * This is an abstract representation of a query plan. It can be transcribed into a tree of
* PlanStages, which can then be handed to a PlanRunner for execution. | * PlanStages, which can then be handed to a PlanRunner for execution. | |||
*/ | */ | |||
struct QuerySolutionNode { | struct QuerySolutionNode { | |||
QuerySolutionNode() { } | QuerySolutionNode() { } | |||
virtual ~QuerySolutionNode() { } | virtual ~QuerySolutionNode() { | |||
for (size_t i = 0; i < children.size(); ++i) { | ||||
delete children[i]; | ||||
} | ||||
} | ||||
/** | ||||
* Return a string representation of this node and any children. | ||||
*/ | ||||
string toString() const; | ||||
/** | /** | |||
* What stage should this be transcribed to? See stage_types.h. | * What stage should this be transcribed to? See stage_types.h. | |||
*/ | */ | |||
virtual StageType getType() const = 0; | virtual StageType getType() const = 0; | |||
string toString() const { | ||||
stringstream ss; | ||||
appendToString(&ss, 0); | ||||
return ss.str(); | ||||
} | ||||
/** | /** | |||
* Internal function called by toString() | * Internal function called by toString() | |||
* | * | |||
* TODO: Consider outputting into a BSONObj or builder thereof. | * TODO: Consider outputting into a BSONObj or builder thereof. | |||
*/ | */ | |||
virtual void appendToString(stringstream* ss, int indent) const = 0 ; | virtual void appendToString(stringstream* ss, int indent) const = 0 ; | |||
// | ||||
// Computed properties | ||||
// | ||||
/** | ||||
* Must be called before any properties are examined. | ||||
*/ | ||||
virtual void computeProperties() { | ||||
for (size_t i = 0; i < children.size(); ++i) { | ||||
children[i]->computeProperties(); | ||||
} | ||||
} | ||||
/** | /** | |||
* If true, one of these are true: | * If true, one of these are true: | |||
* 1. All outputs are already fetched, or | * 1. All outputs are already fetched, or | |||
* 2. There is a projection in place and a fetch is not required. | * 2. There is a projection in place and a fetch is not required.
* | * | |||
* If false, a fetch needs to be placed above the root in order to provide results. | * If false, a fetch needs to be placed above the root in order to provide results. | |||
* | * | |||
* Usage: To determine if every possible result that might reach the root | * Usage: To determine if every possible result that might reach the root
* will be fully-fetched or not. We don't want any surplus fetches. | * will be fully-fetched or not. We don't want any surplus fetches.
*/ | */ | |||
skipping to change at line 101 | skipping to change at line 118 | |||
/** | /** | |||
* Returns true if the tree rooted at this node provides data that is sorted by the | * Returns true if the tree rooted at this node provides data that is sorted by the | |||
* its location on disk. | * its location on disk. | |||
* | * | |||
* Usage: If all the children of an STAGE_AND_HASH have this property, we can compute the | * Usage: If all the children of an STAGE_AND_HASH have this property, we can compute the
* AND faster by replacing the STAGE_AND_HASH with STAGE_AND_SORTED . | * AND faster by replacing the STAGE_AND_HASH with STAGE_AND_SORTED . | |||
*/ | */ | |||
virtual bool sortedByDiskLoc() const = 0; | virtual bool sortedByDiskLoc() const = 0; | |||
/** | /** | |||
* Return a BSONObj representing the sort order of the data stream from this node. If the data | * Return a BSONObjSet representing the possible sort orders of the data stream from this
* is not sorted in any particular fashion, returns BSONObj(). | * node. If the data is not sorted in any particular fashion, returns an empty set.
* |
* TODO: Is BSONObj really the best way to represent this? |
* | * | |||
* Usage: | * Usage: | |||
* 1. If our plan gives us a sort order, we don't have to add a sort stage. | * 1. If our plan gives us a sort order, we don't have to add a sort stage.
* 2. If all the children of an OR have the same sort order, we can maintain that | * 2. If all the children of an OR have the same sort order, we can maintain that | |||
* sort order with a STAGE_SORT_MERGE instead of STAGE_OR. | * sort order with a STAGE_SORT_MERGE instead of STAGE_OR. | |||
*/ | */ | |||
virtual BSONObj getSort() const = 0; | virtual const BSONObjSet& getSort() const = 0; | |||
// These are owned here. | ||||
vector<QuerySolutionNode*> children; | ||||
scoped_ptr<MatchExpression> filter; | ||||
protected: | protected: | |||
static void addIndent(stringstream* ss, int level) { | /** | |||
for (int i = 0; i < level; ++i) { | * Formatting helper used by toString(). | |||
*ss << "---"; | */ | |||
} | static void addIndent(stringstream* ss, int level); | |||
} | ||||
/**
* Every solution node has properties and this adds the debug info for the
* properties.
*/
void addCommon(stringstream* ss, int indent) const;
private: | private: | |||
MONGO_DISALLOW_COPYING(QuerySolutionNode); | MONGO_DISALLOW_COPYING(QuerySolutionNode); | |||
}; | }; | |||
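As an illustration (assumed header path, not library code), a small sketch of walking a planned tree through the QuerySolutionNode interface above; computeProperties() has to run before the property accessors are consulted.

    #include <iostream>

    #include "mongo/db/query/query_solution.h"   // assumed location

    namespace mongo {

        void describeSolution(QuerySolutionNode* root) {
            root->computeProperties();

            std::cout << root->toString() << std::endl;
            if (!root->fetched()) {
                // A fetch stage would be needed above 'root' before whole documents can be returned.
                std::cout << "results are not fully fetched" << std::endl;
            }
            // Each element of getSort() is a sort order this subtree can provide without a SORT stage.
            std::cout << "available sort orders: " << root->getSort().size() << std::endl;
        }

    } // namespace mongo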
/** | /** | |||
* A QuerySolution must be entirely self-contained and own everything inside of it. | * A QuerySolution must be entirely self-contained and own everything inside of it.
* | *
* A tree of stages may be built from a QuerySolution. The QuerySolution must outlive the tree | * A tree of stages may be built from a QuerySolution. The QuerySolution must outlive the tree
* of stages. | * of stages. | |||
skipping to change at line 172 | skipping to change at line 197 | |||
TextNode() : _numWanted(100) { } | TextNode() : _numWanted(100) { } | |||
virtual ~TextNode() { } | virtual ~TextNode() { } | |||
virtual StageType getType() const { return STAGE_TEXT; } | virtual StageType getType() const { return STAGE_TEXT; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return false; } | bool fetched() const { return false; } | |||
bool hasField(const string& field) const { return false; } | bool hasField(const string& field) const { return false; } | |||
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return _indexKeyPattern; } | const BSONObjSet& getSort() const { return _sort; } | |||
BSONObjSet _sort; | ||||
uint32_t _numWanted; | uint32_t _numWanted; | |||
BSONObj _indexKeyPattern; | BSONObj _indexKeyPattern; | |||
std::string _query; | std::string _query; | |||
std::string _language; | std::string _language; | |||
scoped_ptr<MatchExpression> _filter; | ||||
}; | }; | |||
struct CollectionScanNode : public QuerySolutionNode { | struct CollectionScanNode : public QuerySolutionNode { | |||
CollectionScanNode(); | CollectionScanNode(); | |||
virtual ~CollectionScanNode() { } | virtual ~CollectionScanNode() { } | |||
virtual StageType getType() const { return STAGE_COLLSCAN; } | virtual StageType getType() const { return STAGE_COLLSCAN; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return true; } | bool fetched() const { return true; } | |||
bool hasField(const string& field) const { return true; } | bool hasField(const string& field) const { return true; } | |||
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return BSONObj(); } | const BSONObjSet& getSort() const { return _sort; } | |||
BSONObjSet _sort; | ||||
// Name of the namespace. | // Name of the namespace. | |||
string name; | string name; | |||
// Should we make a tailable cursor? | // Should we make a tailable cursor? | |||
bool tailable; | bool tailable; | |||
int direction; | int direction; | |||
scoped_ptr<MatchExpression> filter; | ||||
}; | }; | |||
struct AndHashNode : public QuerySolutionNode { | struct AndHashNode : public QuerySolutionNode { | |||
AndHashNode(); | AndHashNode(); | |||
virtual ~AndHashNode(); | virtual ~AndHashNode(); | |||
virtual StageType getType() const { return STAGE_AND_HASH; } | virtual StageType getType() const { return STAGE_AND_HASH; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const; | bool fetched() const; | |||
bool hasField(const string& field) const; | bool hasField(const string& field) const; | |||
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return BSONObj(); } | const BSONObjSet& getSort() const { return _sort; } | |||
scoped_ptr<MatchExpression> filter; | BSONObjSet _sort; | |||
vector<QuerySolutionNode*> children; | ||||
}; | }; | |||
struct AndSortedNode : public QuerySolutionNode { | struct AndSortedNode : public QuerySolutionNode { | |||
AndSortedNode(); | AndSortedNode(); | |||
virtual ~AndSortedNode(); | virtual ~AndSortedNode(); | |||
virtual StageType getType() const { return STAGE_AND_SORTED; } | virtual StageType getType() const { return STAGE_AND_SORTED; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const; | bool fetched() const; | |||
bool hasField(const string& field) const; | bool hasField(const string& field) const; | |||
bool sortedByDiskLoc() const { return true; } | bool sortedByDiskLoc() const { return true; } | |||
BSONObj getSort() const { return BSONObj(); } | const BSONObjSet& getSort() const { return _sort; } | |||
scoped_ptr<MatchExpression> filter; | BSONObjSet _sort; | |||
vector<QuerySolutionNode*> children; | ||||
}; | }; | |||
struct OrNode : public QuerySolutionNode { | struct OrNode : public QuerySolutionNode { | |||
OrNode(); | OrNode(); | |||
virtual ~OrNode(); | virtual ~OrNode(); | |||
virtual StageType getType() const { return STAGE_OR; } | virtual StageType getType() const { return STAGE_OR; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const; | bool fetched() const; | |||
bool hasField(const string& field) const; | bool hasField(const string& field) const; | |||
bool sortedByDiskLoc() const { | bool sortedByDiskLoc() const { | |||
// Even if our children are sorted by their diskloc or other fields, we don't maintain | // Even if our children are sorted by their diskloc or other fields, we don't maintain
// any order on the output. | // any order on the output. | |||
return false; | return false; | |||
} | } | |||
BSONObj getSort() const { return BSONObj(); } | const BSONObjSet& getSort() const { return _sort; } | |||
BSONObjSet _sort; | ||||
bool dedup; | bool dedup; | |||
// XXX why is this here | ||||
scoped_ptr<MatchExpression> filter; | ||||
vector<QuerySolutionNode*> children; | ||||
}; | }; | |||
struct MergeSortNode : public QuerySolutionNode { | struct MergeSortNode : public QuerySolutionNode { | |||
MergeSortNode(); | MergeSortNode(); | |||
virtual ~MergeSortNode(); | virtual ~MergeSortNode(); | |||
virtual StageType getType() const { return STAGE_SORT_MERGE; } | virtual StageType getType() const { return STAGE_SORT_MERGE; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const; | bool fetched() const; | |||
bool hasField(const string& field) const; | bool hasField(const string& field) const; | |||
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return sort; } | ||||
const BSONObjSet& getSort() const { return _sorts; } | ||||
virtual void computeProperties() { | ||||
for (size_t i = 0; i < children.size(); ++i) { | ||||
children[i]->computeProperties(); | ||||
} | ||||
_sorts.clear(); | ||||
_sorts.insert(sort); | ||||
} | ||||
BSONObjSet _sorts; | ||||
BSONObj sort; | BSONObj sort; | |||
bool dedup; | bool dedup; | |||
// XXX why is this here | ||||
scoped_ptr<MatchExpression> filter; | ||||
vector<QuerySolutionNode*> children; | ||||
}; | }; | |||
struct FetchNode : public QuerySolutionNode { | struct FetchNode : public QuerySolutionNode { | |||
FetchNode(); | FetchNode(); | |||
virtual ~FetchNode() { } | virtual ~FetchNode() { } | |||
virtual StageType getType() const { return STAGE_FETCH; } | virtual StageType getType() const { return STAGE_FETCH; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return true; } | bool fetched() const { return true; } | |||
bool hasField(const string& field) const { return true; } | bool hasField(const string& field) const { return true; } | |||
bool sortedByDiskLoc() const { return child->sortedByDiskLoc(); } | bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
BSONObj getSort() const { return child->getSort(); } | const BSONObjSet& getSort() const { return children[0]->getSort(); }
scoped_ptr<MatchExpression> filter; | BSONObjSet _sorts; | |||
scoped_ptr<QuerySolutionNode> child; | ||||
}; | }; | |||
struct IndexScanNode : public QuerySolutionNode { | struct IndexScanNode : public QuerySolutionNode { | |||
IndexScanNode(); | IndexScanNode(); | |||
virtual ~IndexScanNode() { } | virtual ~IndexScanNode() { } | |||
virtual void computeProperties(); | ||||
virtual StageType getType() const { return STAGE_IXSCAN; } | virtual StageType getType() const { return STAGE_IXSCAN; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return false; } | bool fetched() const { return false; } | |||
bool hasField(const string& field) const; | bool hasField(const string& field) const; | |||
bool sortedByDiskLoc() const; | bool sortedByDiskLoc() const; | |||
const BSONObjSet& getSort() const { return _sorts; } | ||||
// XXX: We need a better way of dealing with sorting and equalities on a prefix of the key | BSONObjSet _sorts;
// pattern. If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's
// sorted both by the index key pattern and by the pattern {b: 1}. How do we expose this?
// Perhaps migrate to sortedBy(...) instead of getSort(). In this case, the ixscan can
// return true for both of those sort orders.
//
// This doesn't work for detecting that we can use a merge sort, though. Perhaps we should
// just pick one sort order and miss out on the other case? For the golden query we want
// our sort order to be {b: 1}.
BSONObj getSort() const { return indexKeyPattern; }
BSONObj indexKeyPattern; | BSONObj indexKeyPattern; | |||
bool indexIsMultiKey; | bool indexIsMultiKey; | |||
scoped_ptr<MatchExpression> filter; | ||||
// Only set for 2d. | // Only set for 2d. | |||
int limit; | int limit; | |||
int direction; | int direction; | |||
// BIG NOTE: | // BIG NOTE: | |||
// If you use simple bounds, we'll use whatever index access method the keypattern implies. | // If you use simple bounds, we'll use whatever index access method the keypattern implies. | |||
// If you use the complex bounds, we force Btree access. | // If you use the complex bounds, we force Btree access. | |||
// The complex bounds require Btree access. | // The complex bounds require Btree access. | |||
IndexBounds bounds; | IndexBounds bounds; | |||
}; | }; | |||
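The removed XXX comment above is the motivation for replacing the single-pattern getSort() with the _sorts set: one index scan can provide several sort orders at once. Below is a hedged, illustrative sketch of that idea only; sortsProvidedByScan and equalityFields are invented names, and this is not the real computeProperties() implementation.

    #include <set>
    #include <string>
    #include <vector>
    #include "mongo/db/jsobj.h"

    // For an index scan over 'keyPattern' where every field in 'equalityFields' is bound to a
    // single point (e.g. {a: 10} over the index {a: 1, b: 1}), the scan output is sorted both
    // by the full key pattern and by the suffix that follows the equality-bound prefix ({b: 1}).
    std::vector<mongo::BSONObj> sortsProvidedByScan(const mongo::BSONObj& keyPattern,
                                                    const std::set<std::string>& equalityFields) {
        std::vector<mongo::BSONObj> sorts;
        sorts.push_back(keyPattern);                 // e.g. {a: 1, b: 1}
        mongo::BSONObjBuilder suffix;
        bool inEqualityPrefix = true;
        mongo::BSONObjIterator it(keyPattern);
        while (it.more()) {
            mongo::BSONElement elt = it.next();
            if (inEqualityPrefix && equalityFields.count(elt.fieldName())) {
                continue;                            // drop the pinned prefix field(s)
            }
            inEqualityPrefix = false;
            suffix.append(elt);                      // keep the remaining fields in index order
        }
        sorts.push_back(suffix.obj());               // e.g. {b: 1} for the "golden query"
        return sorts;
    }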
struct ProjectionNode : public QuerySolutionNode { | struct ProjectionNode : public QuerySolutionNode { | |||
ProjectionNode() : projection(NULL) { } | ProjectionNode() : liteProjection(NULL) { } | |||
virtual ~ProjectionNode() { } | virtual ~ProjectionNode() { } | |||
virtual StageType getType() const { return STAGE_PROJECTION; } | virtual StageType getType() const { return STAGE_PROJECTION; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
/** | /** | |||
* This node changes the type to OWNED_OBJ.  There's no fetching possible after this. | * This node changes the type to OWNED_OBJ.  There's no fetching possible after this.
*/ | */ | |||
bool fetched() const { return true; } | bool fetched() const { return true; } | |||
skipping to change at line 367 | skipping to change at line 386 | |||
// know what we're dropping.  Until we push projection down this doesn't matter. | // know what we're dropping.  Until we push projection down this doesn't matter.
return false; | return false; | |||
} | } | |||
bool sortedByDiskLoc() const { | bool sortedByDiskLoc() const { | |||
// Projections destroy the DiskLoc.  By returning true here, this kind of implies that a | // Projections destroy the DiskLoc.  By returning true here, this kind of implies that a
// fetch could still be done upstream. | // fetch could still be done upstream. | |||
// | // | |||
// Perhaps this should be false to not imply that there *is* a DiskLoc? Kind of a | // Perhaps this should be false to not imply that there *is* a DiskLoc? Kind of a | |||
// corner case. | // corner case. | |||
return child->sortedByDiskLoc(); | return children[0]->sortedByDiskLoc(); | |||
} | } | |||
BSONObj getSort() const { | const BSONObjSet& getSort() const { | |||
// TODO: If we're applying a projection that maintains sort order, the prefix of the | // TODO: If we're applying a projection that maintains sort order, the prefix of the
// sort order we project is the sort order. | // sort order we project is the sort order. | |||
return BSONObj(); | return _sorts; | |||
} | } | |||
// Points into the CanonicalQuery. | BSONObjSet _sorts; | |||
ParsedProjection* projection; | ||||
scoped_ptr<QuerySolutionNode> child; | // Points into the CanonicalQuery, not owned here. | |||
LiteProjection* liteProjection; | ||||
// TODO: Filter | // The full query tree. Needed when we have positional operators. | |||
// Owned in the CanonicalQuery, not here. | ||||
MatchExpression* fullExpression; | ||||
}; | }; | |||
struct SortNode : public QuerySolutionNode { | struct SortNode : public QuerySolutionNode { | |||
SortNode() { } | SortNode() : hasBounds(false) { } | |||
virtual ~SortNode() { } | virtual ~SortNode() { } | |||
virtual StageType getType() const { return STAGE_SORT; } | virtual StageType getType() const { return STAGE_SORT; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return child->fetched(); } | bool fetched() const { return children[0]->fetched(); } | |||
bool hasField(const string& field) const { return child->hasField(field); } | bool hasField(const string& field) const { return children[0]->hasField(field); }
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return pattern; } | ||||
const BSONObjSet& getSort() const { return _sorts; } | ||||
virtual void computeProperties() { | ||||
for (size_t i = 0; i < children.size(); ++i) { | ||||
children[i]->computeProperties(); | ||||
} | ||||
_sorts.clear(); | ||||
_sorts.insert(pattern); | ||||
} | ||||
BSONObjSet _sorts; | ||||
BSONObj pattern; | BSONObj pattern; | |||
scoped_ptr<QuerySolutionNode> child; | ||||
// TODO: Filter | bool hasBounds; | |||
// XXX | ||||
IndexBounds bounds; | ||||
}; | }; | |||
struct LimitNode : public QuerySolutionNode { | struct LimitNode : public QuerySolutionNode { | |||
LimitNode() { } | LimitNode() { } | |||
virtual ~LimitNode() { } | virtual ~LimitNode() { } | |||
virtual StageType getType() const { return STAGE_LIMIT; } | virtual StageType getType() const { return STAGE_LIMIT; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return child->fetched(); } | bool fetched() const { return children[0]->fetched(); } | |||
bool hasField(const string& field) const { return child->hasField(field); } | bool hasField(const string& field) const { return children[0]->hasField(field); }
bool sortedByDiskLoc() const { return child->sortedByDiskLoc(); } | bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
BSONObj getSort() const { return child->getSort(); } | const BSONObjSet& getSort() const { return children[0]->getSort(); }
int limit; | int limit; | |||
scoped_ptr<QuerySolutionNode> child; | ||||
}; | }; | |||
struct SkipNode : public QuerySolutionNode { | struct SkipNode : public QuerySolutionNode { | |||
SkipNode() { } | SkipNode() { } | |||
virtual ~SkipNode() { } | virtual ~SkipNode() { } | |||
virtual StageType getType() const { return STAGE_SKIP; } | virtual StageType getType() const { return STAGE_SKIP; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return child->fetched(); } | bool fetched() const { return children[0]->fetched(); } | |||
bool hasField(const string& field) const { return child->hasField(field); } | bool hasField(const string& field) const { return children[0]->hasField(field); }
bool sortedByDiskLoc() const { return child->sortedByDiskLoc(); } | bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); }
BSONObj getSort() const { return child->getSort(); } | const BSONObjSet& getSort() const { return children[0]->getSort(); }
int skip; | int skip; | |||
scoped_ptr<QuerySolutionNode> child; | ||||
}; | }; | |||
// | // | |||
// Geo nodes.  A thin wrapper above an IXSCAN until we can yank functionality out of | // Geo nodes.  A thin wrapper above an IXSCAN until we can yank functionality out of
// the IXSCAN layer into the stage layer. | // the IXSCAN layer into the stage layer. | |||
// | // | |||
struct GeoNear2DNode : public QuerySolutionNode { | // TODO: This is probably an expression index. | |||
GeoNear2DNode() : numWanted(100) { } | struct Geo2DNode : public QuerySolutionNode { | |||
virtual ~GeoNear2DNode() { } | Geo2DNode() { } | |||
virtual ~Geo2DNode() { } | ||||
virtual StageType getType() const { return STAGE_GEO_NEAR_2D; } | virtual StageType getType() const { return STAGE_GEO_2D; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return false; } | bool fetched() const { return false; } | |||
bool hasField(const string& field) const; | bool hasField(const string& field) const; | |||
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return BSONObj(); } | const BSONObjSet& getSort() const { return _sorts; } | |||
BSONObjSet _sorts; | ||||
int numWanted; | ||||
BSONObj indexKeyPattern; | BSONObj indexKeyPattern; | |||
BSONObj seek; | GeoQuery gq; | |||
}; | }; | |||
// TODO: This is probably an expression index. | // This is a standalone stage. | |||
struct Geo2DNode : public QuerySolutionNode { | struct GeoNear2DNode : public QuerySolutionNode { | |||
Geo2DNode() { } | GeoNear2DNode() : numWanted(100) { } | |||
virtual ~Geo2DNode() { } | virtual ~GeoNear2DNode() { } | |||
virtual StageType getType() const { return STAGE_GEO_2D; } | virtual StageType getType() const { return STAGE_GEO_NEAR_2D; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return false; } | bool fetched() const { return true; } | |||
bool hasField(const string& field) const; | bool hasField(const string& field) const { return true; } | |||
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return BSONObj(); } | const BSONObjSet& getSort() const { return _sorts; } | |||
BSONObjSet _sorts; | ||||
NearQuery nq; | ||||
int numWanted; | ||||
BSONObj indexKeyPattern; | BSONObj indexKeyPattern; | |||
BSONObj seek; | ||||
}; | }; | |||
// This is actually its own standalone stage. | // This is actually its own standalone stage. | |||
struct GeoNear2DSphereNode : public QuerySolutionNode { | struct GeoNear2DSphereNode : public QuerySolutionNode { | |||
GeoNear2DSphereNode() { } | GeoNear2DSphereNode() { } | |||
virtual ~GeoNear2DSphereNode() { } | virtual ~GeoNear2DSphereNode() { } | |||
virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; } | virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; } | |||
virtual void appendToString(stringstream* ss, int indent) const; | virtual void appendToString(stringstream* ss, int indent) const; | |||
bool fetched() const { return true; } | bool fetched() const { return true; } | |||
bool hasField(const string& field) const { return true; } | bool hasField(const string& field) const { return true; } | |||
bool sortedByDiskLoc() const { return false; } | bool sortedByDiskLoc() const { return false; } | |||
BSONObj getSort() const { return BSONObj(); } | const BSONObjSet& getSort() const { return _sorts; } | |||
BSONObjSet _sorts; | ||||
NearQuery nq; | NearQuery nq; | |||
IndexBounds baseBounds; | IndexBounds baseBounds; | |||
BSONObj indexKeyPattern; | BSONObj indexKeyPattern; | |||
scoped_ptr<MatchExpression> filter; | }; | |||
// | ||||
// Internal nodes used to provide functionality | ||||
// | ||||
/** | ||||
* If we're answering a query on a sharded cluster, docs must be checked against the shard key |
* to ensure that we don't return data that shouldn't be there.  This must be done prior to |
* projection, and in fact should be done as early as possible to avoid propagating stale data |
* through the pipeline. |
*/ | ||||
struct ShardingFilterNode : public QuerySolutionNode { | ||||
ShardingFilterNode() { } | ||||
virtual ~ShardingFilterNode() { } | ||||
virtual StageType getType() const { return STAGE_SHARDING_FILTER; } | ||||
virtual void appendToString(stringstream* ss, int indent) const; | ||||
bool fetched() const { return children[0]->fetched(); } |
bool hasField(const string& field) const { return children[0]->hasField(field); } |
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc(); } |
const BSONObjSet& getSort() const { return children[0]->getSort(); } |
}; | }; | |||
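To make the ordering constraint in the ShardingFilterNode comment concrete, here is a hedged sketch of assembling a plan fragment bottom-up so the sharding filter sits below the projection. Ownership handling is simplified and the raw-pointer wiring is illustrative only.

    // Hedged sketch: IXSCAN -> FETCH -> SHARDING_FILTER -> PROJECTION, built bottom-up.
    // Orphaned (stale) documents are dropped by the filter before the projection runs.
    IndexScanNode* ixscan = new IndexScanNode();
    FetchNode* fetch = new FetchNode();
    fetch->children.push_back(ixscan);
    ShardingFilterNode* shardFilter = new ShardingFilterNode();
    shardFilter->children.push_back(fetch);
    ProjectionNode* proj = new ProjectionNode();    // root of the fragment
    proj->children.push_back(shardFilter);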
} // namespace mongo | } // namespace mongo | |||
End of changes. 55 change blocks. | ||||
108 lines changed or deleted | 176 lines changed or added | |||
record_store.h | record_store.h | |||
---|---|---|---|---|
skipping to change at line 43 | skipping to change at line 43 | |||
#include "mongo/db/diskloc.h" | #include "mongo/db/diskloc.h" | |||
namespace mongo { | namespace mongo { | |||
class ExtentManager; | class ExtentManager; | |||
class NamespaceDetails; | class NamespaceDetails; | |||
class Record; | class Record; | |||
class RecordStore { | class RecordStore { | |||
public: | public: | |||
RecordStore(); | RecordStore( const StringData& ns ); | |||
void init( NamespaceDetails* details, | void init( NamespaceDetails* details, | |||
ExtentManager* em, | ExtentManager* em, | |||
bool isSystemIndexes ); | bool isSystemIndexes ); | |||
void deallocRecord( const DiskLoc& dl, Record* todelete ); | void deallocRecord( const DiskLoc& dl, Record* todelete ); | |||
StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int quotaMax ); |
private: | private: | |||
std::string _ns; | ||||
NamespaceDetails* _details; | NamespaceDetails* _details; | |||
ExtentManager* _extentManager; | ExtentManager* _extentManager; | |||
bool _isSystemIndexes; | bool _isSystemIndexes; | |||
}; | }; | |||
} | } | |||
End of changes. 3 change blocks. | ||||
1 lines changed or deleted | 5 lines changed or added | |||
rename_collection.h | rename_collection.h | |||
---|---|---|---|---|
skipping to change at line 38 | skipping to change at line 38 | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include <vector> | #include <vector> | |||
#include "mongo/db/auth/privilege.h" | #include "mongo/db/auth/privilege.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
namespace mongo { | namespace mongo { | |||
class ClientBasic; | ||||
namespace rename_collection { | namespace rename_collection { | |||
void addPrivilegesRequiredForRenameCollection(const BSONObj& cmdObj, | Status checkAuthForRenameCollectionCommand(ClientBasic* client,
std::vector<Privilege>* out); | const std::string& dbname,
const BSONObj& cmdObj); |
} // namespace rename_collection | } // namespace rename_collection | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 2 change blocks. | ||||
3 lines changed or deleted | 6 lines changed or added | |||
role_graph.h | role_graph.h | |||
---|---|---|---|---|
skipping to change at line 32 | skipping to change at line 32 | |||
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <algorithm> | #include <algorithm> | |||
#include <set> | ||||
#include <vector> | #include <vector> | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/db/auth/privilege.h" | #include "mongo/db/auth/privilege.h" | |||
#include "mongo/db/auth/role_name.h" | #include "mongo/db/auth/role_name.h" | |||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
#include "mongo/platform/unordered_set.h" | #include "mongo/platform/unordered_set.h" | |||
namespace mongo { | namespace mongo { | |||
class BSONOBj; | ||||
/** | /** | |||
* A graph of role and privilege relationships. | * A graph of role and privilege relationships. | |||
* | * | |||
* This structure is used to store an in-memory representation of the admin.system.roledata | * This structure is used to store an in-memory representation of the admin.system.roledata
* collection, specifically the graph of which roles are members of other roles and what | * collection, specifically the graph of which roles are members of other roles and what
* privileges each role has, both directly and transitively through membership in other roles. | * privileges each role has, both directly and transitively through membership in other roles.
* There are some restrictions on calls to getAllPrivileges(), specifically, one must call | * There are some restrictions on calls to getAllPrivileges(), specifically, one must call
* recomputePrivilegeData() before calling getAllPrivileges() if any of the mutation methods | * recomputePrivilegeData() before calling getAllPrivileges() if any of the mutation methods | |||
* have been called on the instance since the later of its construction or the last call to | * have been called on the instance since the later of its construction or the last call to | |||
* recomputePrivilegeData() on the object. | * recomputePrivilegeData() on the object. | |||
skipping to change at line 93 | skipping to change at line 92 | |||
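A minimal sketch of the call pattern the class comment above requires: mutate first, then recomputePrivilegeData(), and only then read the aggregated privilege data. The helper name refreshAfterMutation is invented, and the getAllPrivileges() call is left commented out because only its name, not its exact signature, appears here.

    // Hedged usage sketch, assuming some mutations were just applied to 'graph'.
    mongo::Status refreshAfterMutation(mongo::RoleGraph& graph, const mongo::RoleName& role) {
        mongo::Status status = graph.recomputePrivilegeData();   // must precede getAllPrivileges()
        if (!status.isOK()) {
            return status;   // e.g. ErrorCodes::GraphContainsCycle, with the cycle in the message
        }
        // const PrivilegeVector& all = graph.getAllPrivileges(role);   // now safe to read
        return mongo::Status::OK();
    }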
/** | /** | |||
* Returns an iterator over the RoleNames of the "members" of the g iven role. | * Returns an iterator over the RoleNames of the "members" of the g iven role. | |||
* Members of a role are roles that have been granted this role dir ectly (roles that are | * Members of a role are roles that have been granted this role dir ectly (roles that are | |||
* members transitively through another role are not included). Th ese are the "parents" of | * members transitively through another role are not included). Th ese are the "parents" of | |||
* this node in the graph. | * this node in the graph. | |||
*/ | */ | |||
RoleNameIterator getDirectMembers(const RoleName& role); | RoleNameIterator getDirectMembers(const RoleName& role); | |||
/** | /** | |||
* Returns an iterator over the RoleNames of the "subordninates" of the given role. | * Returns an iterator over the RoleNames of the "subordinates" of the given role. | |||
* Subordinate roles are the roles that this role has been granted directly (roles | * Subordinate roles are the roles that this role has been granted directly (roles | |||
* that have been granted transitively through another role are not included). These are | * that have been granted transitively through another role are not included). These are | |||
* the "children" of this node in the graph. | * the "children" of this node in the graph. | |||
*/ | */ | |||
RoleNameIterator getDirectSubordinates(const RoleName& role); | RoleNameIterator getDirectSubordinates(const RoleName& role); | |||
/** | /** | |||
* Returns an iterator that can be used to get a full list of roles that this role inherits | * Returns an iterator that can be used to get a full list of roles that this role inherits | |||
* privileges from. This includes its direct subordinate roles as well as the subordinates | * privileges from. This includes its direct subordinate roles as well as the subordinates | |||
* of its subordinates, and so on. | * of its subordinates, and so on. | |||
*/ | */ | |||
RoleNameIterator getIndirectSubordinates(const RoleName& role); | RoleNameIterator getIndirectSubordinates(const RoleName& role); | |||
/** | /** | |||
* Returns an iterator that can be used to get a full list of roles (in lexicographical |
* order) that are defined on the given database. | ||||
*/ | ||||
RoleNameIterator getRolesForDatabase(const std::string& dbname); | ||||
/** | ||||
* Returns a vector of the privileges that the given role has been directly granted. | * Returns a vector of the privileges that the given role has been directly granted. | |||
* Privileges that have been granted transitively through this role 's subordinate roles are | * Privileges that have been granted transitively through this role 's subordinate roles are | |||
* not included. | * not included. | |||
*/ | */ | |||
const PrivilegeVector& getDirectPrivileges(const RoleName& role); | const PrivilegeVector& getDirectPrivileges(const RoleName& role); | |||
/** | /** | |||
* Returns a vector of all privileges that the given role contains. This includes both the | * Returns a vector of all privileges that the given role contains. This includes both the | |||
* privileges that have been granted to this role directly, as well as any privileges | * privileges that have been granted to this role directly, as well as any privileges | |||
* inherited from the role's subordinate roles. | * inherited from the role's subordinate roles. | |||
skipping to change at line 257 | skipping to change at line 262 | |||
* | * | |||
* Must be called between calls to any of the mutation functions an d calls | * Must be called between calls to any of the mutation functions an d calls | |||
* to getAllPrivileges(). | * to getAllPrivileges(). | |||
* | * | |||
* Returns Status::OK() on success. If a cycle is detected, return s | * Returns Status::OK() on success. If a cycle is detected, return s | |||
* ErrorCodes::GraphContainsCycle, and the status message reveals t he cycle. | * ErrorCodes::GraphContainsCycle, and the status message reveals t he cycle. | |||
*/ | */ | |||
Status recomputePrivilegeData(); | Status recomputePrivilegeData(); | |||
private: | private: | |||
// Helper method for recursively doing a topological DFS to compute the indirect privilege | // Helper method doing a topological DFS to compute the indirect privilege
// data and look for cycles | // data and look for cycles | |||
Status _recomputePrivilegeDataHelper(const RoleName& currentRole, | Status _recomputePrivilegeDataHelper(const RoleName& currentRole, | |||
std::vector<RoleName>& inProgressRoles, |
unordered_set<RoleName>& visitedRoles); | unordered_set<RoleName>& visitedRoles);
/** | /** | |||
* If the role name given is not a built-in role, or it is but it's already in the role | * If the role name given is not a built-in role, or it is but it's already in the role | |||
* graph, then this does nothing. If it *is* a built-in role and t his is the first time | * graph, then this does nothing. If it *is* a built-in role and t his is the first time | |||
* this function has been called for this role, it will add the rol e into the role graph. | * this function has been called for this role, it will add the rol e into the role graph. | |||
*/ | */ | |||
void _createBuiltinRoleIfNeeded(const RoleName& role); | void _createBuiltinRoleIfNeeded(const RoleName& role); | |||
/** | /** | |||
* Adds the built-in roles for the given database name to the role graph if they aren't |
* already present. | ||||
*/ | ||||
void _createBuiltinRolesForDBIfNeeded(const std::string& dbname); | ||||
/** | ||||
* Returns whether or not the given role exists strictly within the role graph. | * Returns whether or not the given role exists strictly within the role graph. | |||
*/ | */ | |||
bool _roleExistsDontCreateBuiltin(const RoleName& role); | bool _roleExistsDontCreateBuiltin(const RoleName& role); | |||
/** | /** | |||
* Just creates the role in the role graph, without checking whethe r or not the role already | * Just creates the role in the role graph, without checking whethe r or not the role already | |||
* exists. | * exists. | |||
*/ | */ | |||
void _createRoleDontCheckIfRoleExists(const RoleName& role); | void _createRoleDontCheckIfRoleExists(const RoleName& role); | |||
skipping to change at line 297 | skipping to change at line 307 | |||
// Represents all the outgoing edges to other roles from any given role. | // Represents all the outgoing edges to other roles from any given role. | |||
typedef unordered_map<RoleName, std::vector<RoleName> > EdgeSet; | typedef unordered_map<RoleName, std::vector<RoleName> > EdgeSet; | |||
// Maps a role name to a list of privileges associated with that ro le. | // Maps a role name to a list of privileges associated with that ro le. | |||
typedef unordered_map<RoleName, PrivilegeVector> RolePrivilegeMap; | typedef unordered_map<RoleName, PrivilegeVector> RolePrivilegeMap; | |||
EdgeSet _roleToSubordinates; | EdgeSet _roleToSubordinates; | |||
unordered_map<RoleName, unordered_set<RoleName> > _roleToIndirectSu bordinates; | unordered_map<RoleName, unordered_set<RoleName> > _roleToIndirectSu bordinates; | |||
EdgeSet _roleToMembers; | EdgeSet _roleToMembers; | |||
RolePrivilegeMap _directPrivilegesForRole; | RolePrivilegeMap _directPrivilegesForRole; | |||
RolePrivilegeMap _allPrivilegesForRole; | RolePrivilegeMap _allPrivilegesForRole; | |||
set<RoleName> _allRoles; | ||||
}; | }; | |||
void swap(RoleGraph& lhs, RoleGraph& rhs); | void swap(RoleGraph& lhs, RoleGraph& rhs); | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 8 change blocks. | ||||
5 lines changed or deleted | 18 lines changed or added | |||
role_name.h | role_name.h | |||
---|---|---|---|---|
skipping to change at line 92 | skipping to change at line 92 | |||
static inline bool operator==(const RoleName& lhs, const RoleName& rhs) { | static inline bool operator==(const RoleName& lhs, const RoleName& rhs) { | |||
return lhs.getFullName() == rhs.getFullName(); | return lhs.getFullName() == rhs.getFullName(); | |||
} | } | |||
static inline bool operator!=(const RoleName& lhs, const RoleName& rhs) { | static inline bool operator!=(const RoleName& lhs, const RoleName& rhs) { | |||
return lhs.getFullName() != rhs.getFullName(); | return lhs.getFullName() != rhs.getFullName(); | |||
} | } | |||
static inline bool operator<(const RoleName& lhs, const RoleName& rhs) { | static inline bool operator<(const RoleName& lhs, const RoleName& rhs) { | |||
return lhs.getFullName() < rhs.getFullName(); | if (lhs.getDB() == rhs.getDB()) { | |||
return lhs.getRole() < rhs.getRole(); | ||||
} | ||||
return lhs.getDB() < rhs.getDB(); | ||||
} | } | |||
std::ostream& operator<<(std::ostream& os, const RoleName& name); | std::ostream& operator<<(std::ostream& os, const RoleName& name); | |||
/** | /** | |||
* Iterator over an unspecified container of RoleName objects. | * Iterator over an unspecified container of RoleName objects. | |||
*/ | */ | |||
class RoleNameIterator { | class RoleNameIterator { | |||
public: | public: | |||
class Impl { | class Impl { | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 4 lines changed or added | |||
rs.h | rs.h | |||
---|---|---|---|---|
skipping to change at line 33 | skipping to change at line 33 | |||
* all of the code used other than as permitted herein. If you modify fil e(s) | * all of the code used other than as permitted herein. If you modify fil e(s) | |||
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also de lete | * exception statement from all source files in the program, then also de lete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/bson/optime.h" | ||||
#include "mongo/db/commands.h" | #include "mongo/db/commands.h" | |||
#include "mongo/db/index.h" | #include "mongo/db/index/index_descriptor.h" | |||
#include "mongo/db/storage/index_details.h" | ||||
#include "mongo/db/repl/oplogreader.h" | #include "mongo/db/repl/oplogreader.h" | |||
#include "mongo/bson/optime.h" | ||||
#include "mongo/db/repl/rs_config.h" | #include "mongo/db/repl/rs_config.h" | |||
#include "mongo/db/repl/rs_exception.h" | #include "mongo/db/repl/rs_exception.h" | |||
#include "mongo/db/repl/rs_member.h" | #include "mongo/db/repl/rs_member.h" | |||
#include "mongo/db/repl/rs_sync.h" | #include "mongo/db/repl/rs_sync.h" | |||
#include "mongo/db/repl/sync_source_feedback.h" | #include "mongo/db/repl/sync_source_feedback.h" | |||
#include "mongo/util/concurrency/list.h" | #include "mongo/util/concurrency/list.h" | |||
#include "mongo/util/concurrency/msg.h" | #include "mongo/util/concurrency/msg.h" | |||
#include "mongo/util/concurrency/thread_pool.h" | #include "mongo/util/concurrency/thread_pool.h" | |||
#include "mongo/util/concurrency/value.h" | #include "mongo/util/concurrency/value.h" | |||
#include "mongo/util/net/hostandport.h" | #include "mongo/util/net/hostandport.h" | |||
skipping to change at line 174 | skipping to change at line 175 | |||
* unreachable, e.g., S1--->S2--->S3--->P.  S2 should ghost sync from S3 | * unreachable, e.g., S1--->S2--->S3--->P.  S2 should ghost sync from S3
* and S3 can ghost sync from the primary. | * and S3 can ghost sync from the primary. | |||
* | * | |||
* Say we have an S1--->S2--->P situation and this node is S2. rid | * Say we have an S1--->S2--->P situation and this node is S2. rid | |||
* would refer to S1.  S2 would create a ghost slave of S1 and connect | * would refer to S1.  S2 would create a ghost slave of S1 and connect
* it to P (_currentSyncTarget). Then it would use this connection to | * it to P (_currentSyncTarget). Then it would use this connection to | |||
* pretend to be S1, replicating off of P. | * pretend to be S1, replicating off of P. | |||
*/ | */ | |||
void percolate(const mongo::OID& rid, const OpTime& last); | void percolate(const mongo::OID& rid, const OpTime& last); | |||
void associateSlave(const BSONObj& rid, const int memberId); | void associateSlave(const BSONObj& rid, const int memberId); | |||
void updateSlave(const mongo::OID& id, const OpTime& last); | bool updateSlave(const mongo::OID& id, const OpTime& last); | |||
void clearCache(); | void clearCache(); | |||
}; | }; | |||
class Consensus { | class Consensus { | |||
ReplSetImpl &rs; | ReplSetImpl &rs; | |||
struct LastYea { | struct LastYea { | |||
LastYea() : when(0), who(0xffffffff) { } | LastYea() : when(0), who(0xffffffff) { } | |||
time_t when; | time_t when; | |||
unsigned who; | unsigned who; | |||
}; | }; | |||
skipping to change at line 534 | skipping to change at line 535 | |||
bool setMaintenanceMode(const bool inc); | bool setMaintenanceMode(const bool inc); | |||
// Records a new slave's id in the GhostSlave map, at handshake tim e. | // Records a new slave's id in the GhostSlave map, at handshake tim e. | |||
void registerSlave(const BSONObj& rid, const int memberId); | void registerSlave(const BSONObj& rid, const int memberId); | |||
private: | private: | |||
Member* head() const { return _members.head(); } | Member* head() const { return _members.head(); } | |||
public: | public: | |||
const Member* findById(unsigned id) const; | const Member* findById(unsigned id) const; | |||
Member* getMutableMember(unsigned id); | Member* getMutableMember(unsigned id); | |||
Member* findByName(const std::string& hostname) const; | Member* findByName(const std::string& hostname) const; | |||
/** | ||||
* Cause the node to resync from scratch. | ||||
*/ | ||||
bool resync(std::string& errmsg); | ||||
private: | private: | |||
void _getTargets(list<Target>&, int &configVersion); | void _getTargets(list<Target>&, int &configVersion); | |||
void getTargets(list<Target>&, int &configVersion); | void getTargets(list<Target>&, int &configVersion); | |||
void startThreads(); | void startThreads(); | |||
friend class FeedbackThread; | friend class FeedbackThread; | |||
friend class CmdReplSetElect; | friend class CmdReplSetElect; | |||
friend class Member; | friend class Member; | |||
friend class Manager; | friend class Manager; | |||
friend class GhostSync; | friend class GhostSync; | |||
friend class Consensus; | friend class Consensus; | |||
skipping to change at line 611 | skipping to change at line 617 | |||
* minValid, to indicate that we are in a consistent state when the batch has been fully | * minValid, to indicate that we are in a consistent state when the batch has been fully | |||
* applied. | * applied. | |||
*/ | */ | |||
static void setMinValid(BSONObj obj); | static void setMinValid(BSONObj obj); | |||
static OpTime getMinValid(); | static OpTime getMinValid(); | |||
static void clearInitialSyncFlag(); | static void clearInitialSyncFlag(); | |||
static bool getInitialSyncFlag(); | static bool getInitialSyncFlag(); | |||
static void setInitialSyncFlag(); | static void setInitialSyncFlag(); | |||
int oplogVersion; | int oplogVersion; | |||
// bool for indicating resync need on this node and the mutex that protects it |
bool initialSyncRequested; | ||||
boost::mutex initialSyncMutex; | ||||
private: | private: | |||
IndexPrefetchConfig _indexPrefetchConfig; | IndexPrefetchConfig _indexPrefetchConfig; | |||
static const char* _initialSyncFlagString; | static const char* _initialSyncFlagString; | |||
static const BSONObj _initialSyncFlag; | static const BSONObj _initialSyncFlag; | |||
}; | }; | |||
class ReplSet : public ReplSetImpl { | class ReplSet : public ReplSetImpl { | |||
public: | public: | |||
static ReplSet* make(ReplSetCmdline& replSetCmdline); | static ReplSet* make(ReplSetCmdline& replSetCmdline); | |||
skipping to change at line 731 | skipping to change at line 741 | |||
/** inlines ----------------- */ | /** inlines ----------------- */ | |||
inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig: :MemberCfg *c, bool self) : | inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig: :MemberCfg *c, bool self) : | |||
_config(*c), _h(h), _hbinfo(ord) { | _config(*c), _h(h), _hbinfo(ord) { | |||
verify(c); | verify(c); | |||
if( self ) | if( self ) | |||
_hbinfo.health = 1.0; | _hbinfo.health = 1.0; | |||
} | } | |||
inline bool ignoreUniqueIndex(IndexDescriptor* idx) { | ||||
if (!idx->unique()) { | ||||
return false; | ||||
} | ||||
if (!theReplSet) { | ||||
return false; | ||||
} | ||||
// see SERVER-6671 | ||||
MemberState ms = theReplSet->state(); | ||||
if (! ((ms == MemberState::RS_STARTUP2) || | ||||
(ms == MemberState::RS_RECOVERING) || | ||||
(ms == MemberState::RS_ROLLBACK))) { | ||||
return false; | ||||
} | ||||
// 2 is the oldest oplog version where operations | ||||
// are fully idempotent. | ||||
if (theReplSet->oplogVersion < 2) { | ||||
return false; | ||||
} | ||||
// Never ignore _id index | ||||
if (idx->isIdIndex()) { | ||||
return false; | ||||
} | ||||
return true; | ||||
} | ||||
inline bool ignoreUniqueIndex(IndexDetails& idx) { | inline bool ignoreUniqueIndex(IndexDetails& idx) { | |||
if (!idx.unique()) { | if (!idx.unique()) { | |||
return false; | return false; | |||
} | } | |||
if (!theReplSet) { | if (!theReplSet) { | |||
return false; | return false; | |||
} | } | |||
// see SERVER-6671 | // see SERVER-6671 | |||
MemberState ms = theReplSet->state(); | MemberState ms = theReplSet->state(); | |||
if (! ((ms == MemberState::RS_STARTUP2) || | if (! ((ms == MemberState::RS_STARTUP2) || | |||
End of changes. 7 change blocks. | ||||
3 lines changed or deleted | 41 lines changed or added | |||
rwlock.h | rwlock.h | |||
---|---|---|---|---|
skipping to change at line 33 | skipping to change at line 33 | |||
* for all of the code used other than as permitted herein. If you modif y | * for all of the code used other than as permitted herein. If you modif y | |||
* file(s) with this exception, you may extend this exception to your | * file(s) with this exception, you may extend this exception to your | |||
* version of the file(s), but you are not obligated to do so. If you do not | * version of the file(s), but you are not obligated to do so. If you do not | |||
* wish to do so, delete this exception statement from your version. If you | * wish to do so, delete this exception statement from your version. If you | |||
* delete this exception statement from all source files in the program, | * delete this exception statement from all source files in the program, | |||
* then also delete it in the license file. | * then also delete it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mutex.h" | #include "mongo/util/concurrency/mutex.h" | |||
#include "../time_support.h" | #include "mongo/util/concurrency/rwlockimpl.h" | |||
#include "rwlockimpl.h" | #include "mongo/util/concurrency/simplerwlock.h" | |||
#include "mongo/util/debug_util.h" | ||||
#include "mongo/util/log.h" | ||||
#include "mongo/util/time_support.h" | ||||
#if defined(_DEBUG) | #if defined(_DEBUG) | |||
#include "mutexdebugger.h" | #include "mongo/util/concurrency/mutexdebugger.h" | |||
#endif | #endif | |||
#include "simplerwlock.h" | ||||
namespace mongo { | namespace mongo { | |||
class RWLock : public RWLockBase { | class RWLock : public RWLockBase { | |||
enum { NilState, UpgradableState, Exclusive } x; // only bother to set when doing upgradable related things | enum { NilState, UpgradableState, Exclusive } x; // only bother to set when doing upgradable related things | |||
public: | public: | |||
const char * const _name; | const char * const _name; | |||
RWLock(const char *name) : _name(name) { | RWLock(const char *name) : _name(name) { | |||
x = NilState; | x = NilState; | |||
} | } | |||
skipping to change at line 249 | skipping to change at line 252 | |||
got = true; | got = true; | |||
break; | break; | |||
} | } | |||
int sleep = 1; | int sleep = 1; | |||
if ( i > ( lowPriorityWaitMS / 20 ) ) | if ( i > ( lowPriorityWaitMS / 20 ) ) | |||
sleep = 10; | sleep = 10; | |||
sleepmillis(sleep); | sleepmillis(sleep); | |||
i += ( sleep - 1 ); | i += ( sleep - 1 ); | |||
} | } | |||
if ( ! got ) { | if ( ! got ) { | |||
log() << "couldn't lazily get rwlock" << endl; | log() << "couldn't lazily get rwlock"; | |||
RWLockBase::lock(); | RWLockBase::lock(); | |||
} | } | |||
} | } | |||
public: | public: | |||
const int lowPriorityWaitMS; | const int lowPriorityWaitMS; | |||
RWLockRecursiveNongreedy(const char *nm, int lpwaitms) : RWLockRecu rsive(nm), lowPriorityWaitMS(lpwaitms) { } | RWLockRecursiveNongreedy(const char *nm, int lpwaitms) : RWLockRecu rsive(nm), lowPriorityWaitMS(lpwaitms) { } | |||
const char * implType() const { return RWLockRecursive::implType(); } | const char * implType() const { return RWLockRecursive::implType(); } | |||
//just for testing: | //just for testing: | |||
End of changes. 4 change blocks. | ||||
6 lines changed or deleted | 9 lines changed or added | |||
sasl_client_authenticate.h | sasl_client_authenticate.h | |||
---|---|---|---|---|
skipping to change at line 44 | skipping to change at line 44 | |||
* | * | |||
* The "saslParameters" BSONObj should be initialized with zero or more of the | * The "saslParameters" BSONObj should be initialized with zero or more of the | |||
* fields below. Which fields are required depends on the mechanism. Consult the | * fields below. Which fields are required depends on the mechanism. Consult the | |||
* relevant IETF standards. | * relevant IETF standards. | |||
* | * | |||
* "mechanism": The string name of the sasl mechanism to use. Mand atory. | * "mechanism": The string name of the sasl mechanism to use. Mand atory. | |||
* "autoAuthorize": Truthy values tell the server to automatically acquire privileges on | * "autoAuthorize": Truthy values tell the server to automatically acquire privileges on | |||
* all resources after successful authentication, which is the default. Falsey values | * all resources after successful authentication, which is the default. Falsey values | |||
* instruct the server to await separate privilege-acquisition commands. | * instruct the server to await separate privilege-acquisition commands. | |||
* "user": The string name of the user to authenticate. | * "user": The string name of the user to authenticate. | |||
* "userSource": The database target of the auth command, which ide ntifies the location | * "db": The database target of the auth command, which identifies the location | |||
* of the credential information for the user. May be "$extern al" if credential | * of the credential information for the user. May be "$extern al" if credential | |||
* information is stored outside of the mongo cluster. | * information is stored outside of the mongo cluster. | |||
* "pwd": The password. | * "pwd": The password. | |||
* "serviceName": The GSSAPI service name to use. Defaults to "mon godb". | * "serviceName": The GSSAPI service name to use. Defaults to "mon godb". | |||
* "serviceHostname": The GSSAPI hostname to use. Defaults to the name of the remote host. | * "serviceHostname": The GSSAPI hostname to use. Defaults to the name of the remote host. | |||
* | * | |||
* Other fields in saslParameters are silently ignored. | * Other fields in saslParameters are silently ignored. | |||
* | * | |||
* Returns an OK status on success, and ErrorCodes::AuthenticationFailed if authentication is | * Returns an OK status on success, and ErrorCodes::AuthenticationFailed if authentication is
* rejected.  Other failures, all of which are tantamount to authentication failure, may also be | * rejected.  Other failures, all of which are tantamount to authentication failure, may also be
skipping to change at line 116 | skipping to change at line 116 | |||
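As a concrete illustration of the parameter list documented above, here is a hedged sketch of building a saslParameters object. The key strings come from the comment; the mechanism and credential values are invented.

    // Illustrative only; a real caller would pass this to saslClientAuthenticate().
    mongo::BSONObj saslParameters = BSON("mechanism" << "PLAIN"
                                         << "user" << "jane"
                                         << "db" << "admin"
                                         << "pwd" << "correct horse battery staple");
    // "autoAuthorize", "serviceName" and "serviceHostname" are optional and keep their
    // documented defaults when omitted.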
/// Field containing sasl payloads passed to and from the server. | /// Field containing sasl payloads passed to and from the server. | |||
extern const char* const saslCommandPayloadFieldName; | extern const char* const saslCommandPayloadFieldName; | |||
/// Field containing the string identifier of the user to authenticate in | /// Field containing the string identifier of the user to authenticate in | |||
/// saslClientAuthenticate(). | /// saslClientAuthenticate(). | |||
extern const char* const saslCommandUserFieldName; | extern const char* const saslCommandUserFieldName; | |||
/// Field containing the string identifier of the database containing c redential information, | /// Field containing the string identifier of the database containing c redential information, | |||
/// or "$external" if the credential information is stored outside of t he mongo cluster. | /// or "$external" if the credential information is stored outside of t he mongo cluster. | |||
extern const char* const saslCommandUserSourceFieldName; | extern const char* const saslCommandUserDBFieldName; | |||
/// Field overriding the FQDN of the hostname hosting the mongodb srevi ce in | /// Field overriding the FQDN of the hostname hosting the mongodb srevi ce in | |||
/// saslClientAuthenticate(). | /// saslClientAuthenticate(). | |||
extern const char* const saslCommandServiceHostnameFieldName; | extern const char* const saslCommandServiceHostnameFieldName; | |||
/// Field overriding the name of the mongodb service saslClientAuthenti cate(). | /// Field overriding the name of the mongodb service saslClientAuthenti cate(). | |||
extern const char* const saslCommandServiceNameFieldName; | extern const char* const saslCommandServiceNameFieldName; | |||
/// Default database against which sasl authentication commands should run. | /// Default database against which sasl authentication commands should run. | |||
extern const char* const saslDefaultDBName; | extern const char* const saslDefaultDBName; | |||
End of changes. 2 change blocks. | ||||
2 lines changed or deleted | 2 lines changed or added | |||
sequence_util.h | sequence_util.h | |||
---|---|---|---|---|
skipping to change at line 27 | skipping to change at line 27 | |||
* This file declares utility methods for operating on sequence containers, such as vectors, lists | * This file declares utility methods for operating on sequence containers, such as vectors, lists | |||
* and deques. | * and deques. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <algorithm> | #include <algorithm> | |||
namespace mongo { | namespace mongo { | |||
/* | /** | |||
* Returns true if "container" contains "value". | * Returns true if "container" contains "value". | |||
*/ | */ | |||
template <typename C> | template <typename C, typename T> | |||
bool sequenceContains(const C& container, typename C::const_reference value) { | bool sequenceContains(const C& container, const T& value) {
using std::find; | using std::find; | |||
return find(container.begin(), container.end(), value) != container.end (); | return find(container.begin(), container.end(), value) != container.end (); | |||
} | } | |||
} // namespace mongo | } // namespace mongo | |||
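A small usage sketch of the helper above (values and include path assumed). With the relaxed template parameters of the new signature, the probe value no longer has to be exactly the container's value_type, so a string literal can be checked against a vector of std::string:

    #include <string>
    #include <vector>
    #include "mongo/util/sequence_util.h"   // assumed include path

    std::vector<std::string> names;
    names.push_back("alpha");
    names.push_back("beta");
    bool hasBeta  = mongo::sequenceContains(names, "beta");                 // true
    bool hasGamma = mongo::sequenceContains(names, std::string("gamma"));   // false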
End of changes. 2 change blocks. | ||||
4 lines changed or deleted | 3 lines changed or added | |||
server_options.h | server_options.h | |||
---|---|---|---|---|
/* | /* Copyright 2013 10gen Inc. | |||
* Copyright (C) 2013 10gen Inc. | ||||
* | * | |||
* This program is free software: you can redistribute it and/or modify | * Licensed under the Apache License, Version 2.0 (the "License"); | |||
* it under the terms of the GNU Affero General Public License, version 3, | * you may not use this file except in compliance with the License.
* as published by the Free Software Foundation. | * You may obtain a copy of the License at
* | * | |||
* This program is distributed in the hope that it will be useful, | * http://www.apache.org/licenses/LICENSE-2.0 | |||
* but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||||
* GNU Affero General Public License for more details. | ||||
* | * | |||
* You should have received a copy of the GNU Affero General Public License | * Unless required by applicable law or agreed to in writing, software
* along with this program.  If not, see <http://www.gnu.org/licenses/>. | * distributed under the License is distributed on an "AS IS" BASIS,
* | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* As a special exception, the copyright holders give permission to link the | * See the License for the specific language governing permissions and
* | * limitations under the License.
* code of portions of this program with the OpenSSL library under certain |
* conditions as described in each individual source file and distribute |
* linked combinations including the program with the OpenSSL library. You |
* must comply with the GNU Affero General Public License in all respects for |
* all of the code used other than as permitted herein. If you modify file(s) |
* with this exception, you may extend this exception to your version of the |
* file(s), but you are not obligated to do so. If you do not wish to do so, |
* delete this exception statement from your version. If you delete this |
* exception statement from all source files in the program, then also delete |
* it in the license file. |
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/status.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/platform/process_id.h" | #include "mongo/platform/process_id.h" | |||
#include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN | #include "mongo/util/net/listen.h" // For DEFAULT_MAX_CONN | |||
#include "mongo/util/options_parser/environment.h" | ||||
#include "mongo/util/options_parser/option_section.h" | ||||
namespace mongo { | namespace mongo { | |||
namespace optionenvironment { | ||||
class OptionSection; | ||||
class Environment; | ||||
} // namespace optionenvironment | ||||
namespace moe = mongo::optionenvironment; | ||||
struct ServerGlobalParams { | struct ServerGlobalParams { | |||
ServerGlobalParams() : | ServerGlobalParams() : | |||
port(DefaultDBPort), rest(false), jsonp(false), indexBuildRetry (true), quiet(false), | port(DefaultDBPort), rest(false), jsonp(false), indexBuildRetry (true), quiet(false), | |||
configsvr(false), cpu(false), objcheck(true), defaultProfile(0) , | configsvr(false), cpu(false), objcheck(true), defaultProfile(0) , | |||
slowMS(100), defaultLocalThresholdMillis(15), moveParanoia(true ), | slowMS(100), defaultLocalThresholdMillis(15), moveParanoia(true ), | |||
noUnixSocket(false), doFork(0), socket("/tmp"), maxConns(DEFAUL T_MAX_CONN), | noUnixSocket(false), doFork(0), socket("/tmp"), maxConns(DEFAUL T_MAX_CONN), | |||
logAppend(false), logWithSyslog(false), isHttpInterfaceEnabled( false) | logAppend(false), logWithSyslog(false), isHttpInterfaceEnabled( false) | |||
{ | { | |||
started = time(0); | started = time(0); | |||
skipping to change at line 101 | skipping to change at line 79 | |||
int maxConns; // Maximum number of simultaneous open connections. | int maxConns; // Maximum number of simultaneous open connections.
std::string keyFile; // Path to keyfile, or empty if none. | std::string keyFile; // Path to keyfile, or empty if none. | |||
std::string pidFile; // Path to pid file, or empty if none. | std::string pidFile; // Path to pid file, or empty if none. | |||
std::string logpath; // Path to log file, if logging to a file; otherwise, empty. | std::string logpath; // Path to log file, if logging to a file; otherwise, empty.
bool logAppend; // True if logging to a file in append mode. | bool logAppend; // True if logging to a file in append mode. | |||
bool logWithSyslog; // True if logging to syslog; must not be set if logpath is set. | bool logWithSyslog; // True if logging to syslog; must not be set if logpath is set.
int syslogFacility; // Facility used when appending messages to the syslog. | int syslogFacility; // Facility used when appending messages to the syslog. | |||
std::string clusterAuthMode; // Cluster authentication mode | ||||
bool isHttpInterfaceEnabled; // True if the dbwebserver should be enabled. | bool isHttpInterfaceEnabled; // True if the dbwebserver should be enabled.
#ifndef _WIN32 | #ifndef _WIN32 | |||
ProcessId parentProc; // --fork pid of initial process | ProcessId parentProc; // --fork pid of initial process | |||
ProcessId leaderProc; // --fork pid of leader process | ProcessId leaderProc; // --fork pid of leader process | |||
#endif | #endif | |||
/** | /** | |||
* Switches to enable experimental (unsupported) features. | * Switches to enable experimental (unsupported) features. | |||
skipping to change at line 126 | skipping to change at line 103 | |||
, storageDetailsCmdEnabled(false) | , storageDetailsCmdEnabled(false) | |||
{} | {} | |||
bool indexStatsCmdEnabled; // -- enableExperimentalIndexStatsCmd | bool indexStatsCmdEnabled; // -- enableExperimentalIndexStatsCmd
bool storageDetailsCmdEnabled; // -- enableExperimentalStorageDetailsCmd | bool storageDetailsCmdEnabled; // -- enableExperimentalStorageDetailsCmd
} experimental; | } experimental; | |||
time_t started; | time_t started; | |||
BSONArray argvArray; | BSONArray argvArray; | |||
BSONObj parsedOpts; | BSONObj parsedOpts; | |||
AtomicInt32 clusterAuthMode; // --clusterAuthMode, the internal cluster auth mode |
enum ClusterAuthModes { | ||||
ClusterAuthMode_undefined, | ||||
/** | ||||
* Authenticate using keyfile, accept only keyfiles | ||||
*/ | ||||
ClusterAuthMode_keyFile, | ||||
/** | ||||
* Authenticate using keyfile, accept both keyfiles and X.509 | ||||
*/ | ||||
ClusterAuthMode_sendKeyFile, | ||||
/** | ||||
* Authenticate using X.509, accept both keyfiles and X.509 | ||||
*/ | ||||
ClusterAuthMode_sendX509, | ||||
/** | ||||
* Authenticate using X.509, accept only X.509 | ||||
*/ | ||||
ClusterAuthMode_x509 | ||||
}; | ||||
}; | }; | |||
extern ServerGlobalParams serverGlobalParams; | extern ServerGlobalParams serverGlobalParams; | |||
Status addGeneralServerOptions(moe::OptionSection* options); | ||||
Status addWindowsServerOptions(moe::OptionSection* options); | ||||
Status addSSLServerOptions(moe::OptionSection* options); | ||||
Status storeServerOptions(const moe::Environment& params, | ||||
const std::vector<std::string>& args); | ||||
void printCommandLineOpts(); | ||||
// This function should eventually go away, but needs to be here now because we have a lot of |
// code that is shared between mongod and mongos that must know at runtime which binary it is in |
bool isMongos(); | ||||
} | } | |||
End of changes. 10 change blocks. | ||||
61 lines changed or deleted | 37 lines changed or added | |||
shapes.h | shapes.h | |||
---|---|---|---|---|
skipping to change at line 152 | skipping to change at line 152 | |||
// point (lng/lat in bounds).  In this case, we can use FLAT data with SPHERE predicates. | // point (lng/lat in bounds).  In this case, we can use FLAT data with SPHERE predicates.
bool flatUpgradedToSphere; | bool flatUpgradedToSphere; | |||
}; | }; | |||
struct LineWithCRS { | struct LineWithCRS { | |||
S2Polyline line; | S2Polyline line; | |||
CRS crs; | CRS crs; | |||
}; | }; | |||
struct CapWithCRS { | struct CapWithCRS { | |||
// Only one of {cap, circle} is filled out depending on the CRS. | ||||
S2Cap cap; | S2Cap cap; | |||
Circle circle; | Circle circle; | |||
CRS crs; | CRS crs; | |||
}; | }; | |||
struct BoxWithCRS { | struct BoxWithCRS { | |||
Box box; | Box box; | |||
CRS crs; | CRS crs; | |||
}; | }; | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 0 lines changed or added | |||
shard.h | shard.h | |||
---|---|---|---|---|
skipping to change at line 78 | skipping to change at line 78 | |||
: _name( other->_name ) , _addr( other->_addr ), _cs( other->_c s ) , | : _name( other->_name ) , _addr( other->_addr ), _cs( other->_c s ) , | |||
_maxSize( other->_maxSize ) , _isDraining( other->_isDraining ) { | _maxSize( other->_maxSize ) , _isDraining( other->_isDraining ) { | |||
} | } | |||
static Shard make( const string& ident ) { | static Shard make( const string& ident ) { | |||
Shard s; | Shard s; | |||
s.reset( ident ); | s.reset( ident ); | |||
return s; | return s; | |||
} | } | |||
static Shard findIfExists( const string& shardName ); | ||||
/** | /** | |||
* @param ident either name or address | * @param ident either name or address | |||
*/ | */ | |||
void reset( const string& ident ); | void reset( const string& ident ); | |||
void setAddress( const ConnectionString& cs ); | void setAddress( const ConnectionString& cs ); | |||
ConnectionString getAddress() const { return _cs; } | ConnectionString getAddress() const { return _cs; } | |||
string getName() const { | string getName() const { | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 2 lines changed or added | |||
shardkey.h | shardkey.h | |||
---|---|---|---|---|
skipping to change at line 38 | skipping to change at line 38 | |||
* then also delete it in the license file. | * then also delete it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/db/keypattern.h" | #include "mongo/db/keypattern.h" | |||
namespace mongo { | namespace mongo { | |||
/** | ||||
* THIS FUNCTIONALITY IS DEPRECATED | ||||
* Everything BSON related in this file should migrate gradually to s/shard_key_pattern.h, new |
* functionality should not go here. | ||||
*/ | ||||
class Chunk; | class Chunk; | |||
class FieldRangeSet; | class FieldRangeSet; | |||
/* A ShardKeyPattern is a pattern indicating what data to extract from the object to make the shard key from. | /* A ShardKeyPattern is a pattern indicating what data to extract from the object to make the shard key from. | |||
Analogous to an index key pattern. | Analogous to an index key pattern. | |||
*/ | */ | |||
class ShardKeyPattern { | class ShardKeyPattern { | |||
public: | public: | |||
ShardKeyPattern( BSONObj p = BSONObj() ); | ShardKeyPattern( BSONObj p = BSONObj() ); | |||
skipping to change at line 101 | skipping to change at line 107 | |||
bool partOfShardKey(const StringData& key ) const { | bool partOfShardKey(const StringData& key ) const { | |||
return pattern.hasField(key); | return pattern.hasField(key); | |||
} | } | |||
BSONObj extendRangeBound( const BSONObj& bound , bool makeUpperInclusive ) const { | BSONObj extendRangeBound( const BSONObj& bound , bool makeUpperInclusive ) const {
return pattern.extendRangeBound( bound , makeUpperInclusive ); | return pattern.extendRangeBound( bound , makeUpperInclusive ); | |||
} | } | |||
/** | /** | |||
* @return | * @return | |||
* true if 'this' is a prefix (not necessarily contained) of 'otherPattern'. |
*/ | ||||
bool isPrefixOf( const KeyPattern& otherPattern ) const; | ||||
/** | ||||
* @return | ||||
* true if this shard key is compatible with a unique index on 'uni queIndexPattern'. | * true if this shard key is compatible with a unique index on 'uni queIndexPattern'. | |||
* Primarily this just checks whether 'this' is a prefix of 'u niqueIndexPattern', | * Primarily this just checks whether 'this' is a prefix of 'u niqueIndexPattern', | |||
* However it does not need to be an exact syntactic prefix du e to "hashed" | * However it does not need to be an exact syntactic prefix du e to "hashed" | |||
* indexes or mismatches in ascending/descending order. Also, uniqueness of the | * indexes or mismatches in ascending/descending order. Also, uniqueness of the | |||
 * _id field is guaranteed by the generation process (or by the user) so every | * _id field is guaranteed by the generation process (or by the user) so every | |||
* index that begins with _id is unique index compatible with any shard key. | * index that begins with _id is unique index compatible with any shard key. | |||
* Examples: | * Examples: | |||
 * shard key {a : 1} is compatible with a unique index on {_id : 1} | * shard key {a : 1} is compatible with a unique index on {_id : 1} | |||
* shard key {a : 1} is compatible with a unique index on {a : 1 , b : 1} | * shard key {a : 1} is compatible with a unique index on {a : 1 , b : 1} | |||
* shard key {a : 1} is compatible with a unique index on {a : -1 , b : 1 } | * shard key {a : 1} is compatible with a unique index on {a : -1 , b : 1 } | |||
End of changes. 2 change blocks. | ||||
7 lines changed or deleted | 7 lines changed or added | |||
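The prefix-compatibility rules described in the shardkey.h comment above (a shard key {a : 1} is compatible with unique indexes such as {_id : 1}, {a : 1, b : 1}, or {a : -1, b : 1}, ignoring sort direction) can be illustrated with a small standalone sketch. This is not the MongoDB implementation; modeling a key pattern as an ordered list of field names is an assumption made purely for illustration.

```cpp
#include <cassert>
#include <string>
#include <vector>

// A key pattern reduced to its ordered field names; direction and "hashed"
// modifiers are deliberately ignored, mirroring the comment's note that the
// prefix need not match syntactically.
typedef std::vector<std::string> KeyFields;

// Returns true if 'shardKey' is a prefix of 'uniqueIndex', or if the unique
// index begins with "_id" (uniqueness of _id is already guaranteed).
bool isUniqueIndexCompatible(const KeyFields& shardKey, const KeyFields& uniqueIndex) {
    if (!uniqueIndex.empty() && uniqueIndex[0] == "_id")
        return true;
    if (shardKey.size() > uniqueIndex.size())
        return false;
    for (size_t i = 0; i < shardKey.size(); ++i) {
        if (shardKey[i] != uniqueIndex[i])
            return false;
    }
    return true;
}

int main() {
    KeyFields shardKey;
    shardKey.push_back("a");

    KeyFields idIndex;   // {_id : 1}
    idIndex.push_back("_id");
    KeyFields abIndex;   // {a : 1, b : 1} or {a : -1, b : 1}
    abIndex.push_back("a");
    abIndex.push_back("b");
    KeyFields bIndex;    // {b : 1} -- not a prefix match
    bIndex.push_back("b");

    assert(isUniqueIndexCompatible(shardKey, idIndex));
    assert(isUniqueIndexCompatible(shardKey, abIndex));
    assert(!isUniqueIndexCompatible(shardKey, bIndex));
    return 0;
}
```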
shell_options.h | shell_options.h | |||
---|---|---|---|---|
skipping to change at line 52 | skipping to change at line 52 | |||
std::string authenticationMechanism; | std::string authenticationMechanism; | |||
std::string authenticationDatabase; | std::string authenticationDatabase; | |||
bool runShell; | bool runShell; | |||
bool nodb; | bool nodb; | |||
bool norc; | bool norc; | |||
std::string script; | std::string script; | |||
bool autoKillOp; | bool autoKillOp; | |||
bool useWriteCommandsDefault; | ||||
ShellGlobalParams() : autoKillOp(false) { } | ShellGlobalParams() : autoKillOp(false), useWriteCommandsDefault(true) { } | |||
}; | }; | |||
extern ShellGlobalParams shellGlobalParams; | extern ShellGlobalParams shellGlobalParams; | |||
Status addMongoShellOptions(moe::OptionSection* options); | Status addMongoShellOptions(moe::OptionSection* options); | |||
std::string getMongoShellHelp(const StringData& name, const moe::Option Section& options); | std::string getMongoShellHelp(const StringData& name, const moe::Option Section& options); | |||
 Status handlePreValidationMongoShellOptions(const moe::Environment& params, | /** | |||
 * Handle options that should come before validation, such as "help". | ||||
 * | ||||
 * Returns false if an option was found that implies we should prematurely exit with success. | ||||
 */ | ||||
 bool handlePreValidationMongoShellOptions(const moe::Environment& params, | ||||
 const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
Status storeMongoShellOptions(const moe::Environment& params, | Status storeMongoShellOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 3 change blocks. | ||||
3 lines changed or deleted | 10 lines changed or added | |||
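The new comment in shell_options.h above says the pre-validation handler now returns a bool, where false means an option such as --help was already handled and the process should exit successfully. A hedged sketch of how a caller might honor that contract; the function and argument handling here are stand-ins, not the actual mongo shell startup code.

```cpp
#include <cstdlib>
#include <string>
#include <vector>

// Stand-in for the documented contract: returns false when an option like
// "--help" was already handled and the caller should exit with success.
bool handlePreValidationOptions(const std::vector<std::string>& args) {
    for (size_t i = 0; i < args.size(); ++i) {
        if (args[i] == "--help") {
            // help text would be printed here
            return false;
        }
    }
    return true;
}

int main(int argc, char** argv) {
    std::vector<std::string> args(argv, argv + argc);
    if (!handlePreValidationOptions(args)) {
        return EXIT_SUCCESS;  // premature but successful exit, per the comment
    }
    // ... continue with option validation and storage ...
    return EXIT_SUCCESS;
}
```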
sock.h | sock.h | |||
---|---|---|---|---|
skipping to change at line 239 | skipping to change at line 239 | |||
void setHandshakeReceived() { | void setHandshakeReceived() { | |||
_awaitingHandshake = false; | _awaitingHandshake = false; | |||
} | } | |||
bool isAwaitingHandshake() { | bool isAwaitingHandshake() { | |||
return _awaitingHandshake; | return _awaitingHandshake; | |||
} | } | |||
#ifdef MONGO_SSL | #ifdef MONGO_SSL | |||
/** secures inline */ | /** secures inline | |||
bool secure( SSLManagerInterface* ssl ); | * ssl - Pointer to the global SSLManager. | |||
* remoteHost - The hostname of the remote server. | ||||
*/ | ||||
 bool secure( SSLManagerInterface* ssl, const std::string& remoteHost); | ||||
void secureAccepted( SSLManagerInterface* ssl ); | void secureAccepted( SSLManagerInterface* ssl ); | |||
#endif | #endif | |||
/** | /** | |||
* This function calls SSL_accept() if SSL-encrypted sockets | * This function calls SSL_accept() if SSL-encrypted sockets | |||
* are desired. SSL_accept() waits until the remote host calls | * are desired. SSL_accept() waits until the remote host calls | |||
* SSL_connect(). The return value is the subject name of any | * SSL_connect(). The return value is the subject name of any | |||
* client certificate provided during the handshake. | * client certificate provided during the handshake. | |||
* | * | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 6 lines changed or added | |||
sort.h | sort.h | |||
---|---|---|---|---|
skipping to change at line 31 | skipping to change at line 31 | |||
* all of the code used other than as permitted herein. If you modify fi le(s) | * all of the code used other than as permitted herein. If you modify fi le(s) | |||
* with this exception, you may extend this exception to your version of the | * with this exception, you may extend this exception to your version of the | |||
* file(s), but you are not obligated to do so. If you do not wish to do so, | * file(s), but you are not obligated to do so. If you do not wish to do so, | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also d elete | * exception statement from all source files in the program, then also d elete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/scoped_ptr.hpp> | ||||
#include <vector> | #include <vector> | |||
#include "mongo/db/diskloc.h" | #include "mongo/db/diskloc.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/matcher.h" | #include "mongo/db/matcher.h" | |||
#include "mongo/db/exec/plan_stage.h" | #include "mongo/db/exec/plan_stage.h" | |||
#include "mongo/db/exec/working_set.h" | ||||
#include "mongo/db/query/index_bounds.h" | ||||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
namespace mongo { | namespace mongo { | |||
class BtreeKeyGenerator; | ||||
// External params for the sort stage. Declared below. | // External params for the sort stage. Declared below. | |||
class SortStageParams; | class SortStageParams; | |||
/** | /** | |||
* Sorts the input received from the child according to the sort patter n provided. | * Sorts the input received from the child according to the sort patter n provided. | |||
* | * | |||
* Preconditions: For each field in 'pattern', all inputs in the child must handle a | * Preconditions: For each field in 'pattern', all inputs in the child must handle a | |||
* getFieldDotted for that field. | * getFieldDotted for that field. | |||
*/ | */ | |||
class SortStage : public PlanStage { | class SortStage : public PlanStage { | |||
skipping to change at line 75 | skipping to change at line 80 | |||
private: | private: | |||
// Not owned by us. | // Not owned by us. | |||
WorkingSet* _ws; | WorkingSet* _ws; | |||
// Where we're reading data to sort from. | // Where we're reading data to sort from. | |||
scoped_ptr<PlanStage> _child; | scoped_ptr<PlanStage> _child; | |||
// Our sort pattern. | // Our sort pattern. | |||
BSONObj _pattern; | BSONObj _pattern; | |||
 // We read the child into this. | // Have we sorted our data? If so, we can access _resultIterator. If not, | |||
 vector<WorkingSetID> _data; | // we're still populating _data. | |||
 // Have we sorted our data? | ||||
bool _sorted; | bool _sorted; | |||
 // Collection of working set members to sort with their respective sort key. | ||||
struct SortableDataItem { | ||||
WorkingSetID wsid; | ||||
BSONObj sortKey; | ||||
 // Since we must replicate the behavior of a covered sort as much as possible we use the | ||||
// DiskLoc to break sortKey ties. | ||||
// See sorta.js. | ||||
DiskLoc loc; | ||||
}; | ||||
vector<SortableDataItem> _data; | ||||
// Iterates through _data post-sort returning it. | // Iterates through _data post-sort returning it. | |||
vector<WorkingSetID>::iterator _resultIterator; | vector<SortableDataItem>::iterator _resultIterator; | |||
 // We buffer a lot of data and we want to look it up by DiskLoc quickly upon invalidation. | // We buffer a lot of data and we want to look it up by DiskLoc quickly upon invalidation. | |||
 typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataMap; | typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataMap; | |||
DataMap _wsidByDiskLoc; | DataMap _wsidByDiskLoc; | |||
// | ||||
// Sort Apparatus | ||||
// | ||||
// A comparator for SortableDataItems. | ||||
struct WorkingSetComparator; | ||||
boost::scoped_ptr<WorkingSetComparator> _cmp; | ||||
// Bounds we should consider before sorting. | ||||
IndexBounds _bounds; | ||||
bool _hasBounds; | ||||
 // Helper to extract sorting keys from documents containing dotted fields, arrays, | ||||
 // or both. | ||||
 boost::scoped_ptr<BtreeKeyGenerator> _keyGen; | ||||
 // Helper to filter keys, thus enforcing _bounds over whatever keys generated with | ||||
// _keyGen. | ||||
boost::scoped_ptr<IndexBoundsChecker> _boundsChecker; | ||||
// | ||||
// Stats | // Stats | |||
// | ||||
CommonStats _commonStats; | CommonStats _commonStats; | |||
SortStats _specificStats; | SortStats _specificStats; | |||
 // The usage in bytes of all buffered data that we're sorting. | // The usage in bytes of all buffered data that we're sorting. | |||
size_t _memUsage; | size_t _memUsage; | |||
}; | }; | |||
// Parameters that must be provided to a SortStage | // Parameters that must be provided to a SortStage | |||
class SortStageParams { | class SortStageParams { | |||
public: | public: | |||
//SortStageParams() : limit(0) { } | SortStageParams() : hasBounds(false) { } | |||
// How we're sorting. | // How we're sorting. | |||
BSONObj pattern; | BSONObj pattern; | |||
IndexBounds bounds; | ||||
bool hasBounds; | ||||
// TODO: Implement this. | // TODO: Implement this. | |||
// Must be >= 0. Equal to 0 for no limit. | // Must be >= 0. Equal to 0 for no limit. | |||
// int limit; | // int limit; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 10 change blocks. | ||||
6 lines changed or deleted | 53 lines changed or added | |||
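The new SortStage members above keep each buffered result together with its sort key and its DiskLoc, and the comment notes the DiskLoc is used to break sortKey ties so a blocking sort mimics a covered (index-provided) sort. Below is a minimal standalone sketch of that comparator idea, using plain integers in place of BSONObj sort keys and DiskLocs; that simplification is an assumption for illustration, not the real comparator.

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Simplified stand-ins for the real types: the sort key is an int and the
// record location is an int, enough to show the tie-breaking rule.
struct SortableDataItem {
    int sortKey;
    int loc;  // plays the role of the DiskLoc tie-breaker
};

struct WorkingSetComparator {
    bool operator()(const SortableDataItem& lhs, const SortableDataItem& rhs) const {
        if (lhs.sortKey != rhs.sortKey)
            return lhs.sortKey < rhs.sortKey;   // primary order: the sort key
        return lhs.loc < rhs.loc;               // ties broken by record location
    }
};

int main() {
    std::vector<SortableDataItem> data;
    SortableDataItem a = {5, 30}, b = {5, 10}, c = {1, 20};
    data.push_back(a); data.push_back(b); data.push_back(c);

    std::sort(data.begin(), data.end(), WorkingSetComparator());
    for (size_t i = 0; i < data.size(); ++i)
        std::printf("key=%d loc=%d\n", data[i].sortKey, data[i].loc);
    // Prints key=1 loc=20, then key=5 loc=10, then key=5 loc=30.
    return 0;
}
```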
spaces.h | spaces.h | |||
---|---|---|---|---|
skipping to change at line 320 | skipping to change at line 320 | |||
MemoryChunk* next_chunk() const { return next_chunk_; } | MemoryChunk* next_chunk() const { return next_chunk_; } | |||
MemoryChunk* prev_chunk() const { return prev_chunk_; } | MemoryChunk* prev_chunk() const { return prev_chunk_; } | |||
void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } | void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } | |||
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } | void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } | |||
Space* owner() const { | Space* owner() const { | |||
if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == | if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == | |||
kFailureTag) { | kFailureTag) { | |||
return reinterpret_cast<Space*>(owner_ - kFailureTag); | return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - | |||
kFailureTag); | ||||
} else { | } else { | |||
return NULL; | return NULL; | |||
} | } | |||
} | } | |||
void set_owner(Space* space) { | void set_owner(Space* space) { | |||
ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); | ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); | |||
owner_ = reinterpret_cast<Address>(space) + kFailureTag; | owner_ = reinterpret_cast<Address>(space) + kFailureTag; | |||
ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == | ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == | |||
kFailureTag); | kFailureTag); | |||
End of changes. 1 change blocks. | ||||
1 lines changed or deleted | 2 lines changed or added | |||
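The owner()/set_owner() pair in spaces.h above stores a Space pointer with a low-bit failure tag folded into it, and the change on the right casts through intptr_t before subtracting the tag so the arithmetic is done on an integer rather than a pointer. Here is a small self-contained sketch of the same tag-and-recover pattern; kTag, kTagMask, and the Space struct are placeholders, not V8's actual constants.

```cpp
#include <stdint.h>
#include <cassert>

struct Space { int id; };

// Low-bit tag folded into the stored pointer, mirroring the kFailureTag usage.
const intptr_t kTag = 0x3;
const intptr_t kTagMask = 0x3;

intptr_t store_tagged(Space* space) {
    // The pointer must be aligned so the low tag bits are free.
    assert((reinterpret_cast<intptr_t>(space) & kTagMask) == 0);
    return reinterpret_cast<intptr_t>(space) + kTag;
}

Space* load_tagged(intptr_t stored) {
    if ((stored & kTagMask) == kTag) {
        // Subtract the tag on the integer value, then cast back to a pointer.
        return reinterpret_cast<Space*>(stored - kTag);
    }
    return 0;  // NULL when the tag is absent
}

int main() {
    Space s = {42};
    intptr_t stored = store_tagged(&s);
    assert(load_tagged(stored) == &s);
    assert(load_tagged(0) == 0);
    return 0;
}
```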
ssl_manager.h | ssl_manager.h | |||
---|---|---|---|---|
skipping to change at line 74 | skipping to change at line 74 | |||
* Throws SocketException on failure. | * Throws SocketException on failure. | |||
 * @return a pointer to an SSLConnection. Resources are freed in SSLConnection's destructor | * @return a pointer to an SSLConnection. Resources are freed in SSLConnection's destructor | |||
 */ | */ | |||
 virtual SSLConnection* accept(Socket* socket, const char* initialBytes, int len) = 0; | virtual SSLConnection* accept(Socket* socket, const char* initialBytes, int len) = 0; | |||
/** | /** | |||
* Fetches a peer certificate and validates it if it exists | * Fetches a peer certificate and validates it if it exists | |||
* Throws SocketException on failure | * Throws SocketException on failure | |||
* @return a std::string containing the certificate's subject name. | * @return a std::string containing the certificate's subject name. | |||
*/ | */ | |||
 virtual std::string validatePeerCertificate(const SSLConnection* conn) = 0; | virtual std::string parseAndValidatePeerCertificate(const SSLConnection* conn, | |||
 const std::string& remoteHost) = 0; | ||||
/** | /** | |||
* Cleans up SSL thread local memory; use at thread exit | * Cleans up SSL thread local memory; use at thread exit | |||
* to avoid memory leaks | * to avoid memory leaks | |||
*/ | */ | |||
virtual void cleanupThreadLocals() = 0; | virtual void cleanupThreadLocals() = 0; | |||
/** | /** | |||
* Gets the subject name of our own server certificate | * Gets the subject name of our own server certificate | |||
* @return the subject name. | * @return the subject name. | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 4 lines changed or added | |||
ssl_options.h | ssl_options.h | |||
---|---|---|---|---|
skipping to change at line 41 | skipping to change at line 41 | |||
AtomicInt32 sslMode; // --sslMode - the SSL operation mode, see enum SSLModes | AtomicInt32 sslMode; // --sslMode - the SSL operation mode, see enum SSLModes | |||
bool sslOnNormalPorts; // --sslOnNormalPorts (deprecated) | bool sslOnNormalPorts; // --sslOnNormalPorts (deprecated) | |||
std::string sslPEMKeyFile; // --sslPEMKeyFile | std::string sslPEMKeyFile; // --sslPEMKeyFile | |||
std::string sslPEMKeyPassword; // --sslPEMKeyPassword | std::string sslPEMKeyPassword; // --sslPEMKeyPassword | |||
std::string sslClusterFile; // --sslInternalKeyFile | std::string sslClusterFile; // --sslInternalKeyFile | |||
std::string sslClusterPassword; // --sslInternalKeyPassword | std::string sslClusterPassword; // --sslInternalKeyPassword | |||
std::string sslCAFile; // --sslCAFile | std::string sslCAFile; // --sslCAFile | |||
std::string sslCRLFile; // --sslCRLFile | std::string sslCRLFile; // --sslCRLFile | |||
 bool sslWeakCertificateValidation; // --sslWeakCertificateValidation | bool sslWeakCertificateValidation; // --sslWeakCertificateValidation | |||
 bool sslFIPSMode; // --sslFIPSMode | bool sslFIPSMode; // --sslFIPSMode | |||
 bool sslAllowInvalidCertificates; // --sslIgnoreCertificateValidation | ||||
SSLGlobalParams() { | SSLGlobalParams() { | |||
sslMode.store(SSLMode_noSSL); | sslMode.store(SSLMode_disabled); | |||
} | } | |||
enum SSLModes { | enum SSLModes { | |||
/** | /** | |||
 * Make unencrypted outgoing connections and do not accept incoming SSL-connections | * Make unencrypted outgoing connections and do not accept incoming SSL-connections | |||
*/ | */ | |||
SSLMode_noSSL, | SSLMode_disabled, | |||
/** | /** | |||
 * Make unencrypted outgoing connections and accept both unencrypted and SSL-connections | * Make unencrypted outgoing connections and accept both unencrypted and SSL-connections | |||
*/ | */ | |||
SSLMode_acceptSSL, | SSLMode_allowSSL, | |||
/** | /** | |||
 * Make outgoing SSL-connections and accept both unencrypted and SSL-connections | * Make outgoing SSL-connections and accept both unencrypted and SSL-connections | |||
*/ | */ | |||
SSLMode_sendAcceptSSL, | SSLMode_preferSSL, | |||
/** | /** | |||
 * Make outgoing SSL-connections and only accept incoming SSL-connections | * Make outgoing SSL-connections and only accept incoming SSL-connections | |||
*/ | */ | |||
SSLMode_sslOnly | SSLMode_requireSSL | |||
}; | }; | |||
}; | }; | |||
extern SSLGlobalParams sslGlobalParams; | extern SSLGlobalParams sslGlobalParams; | |||
Status addSSLServerOptions(moe::OptionSection* options); | Status addSSLServerOptions(moe::OptionSection* options); | |||
Status addSSLClientOptions(moe::OptionSection* options); | Status addSSLClientOptions(moe::OptionSection* options); | |||
Status storeSSLServerOptions(const moe::Environment& params); | Status storeSSLServerOptions(const moe::Environment& params); | |||
End of changes. 6 change blocks. | ||||
5 lines changed or deleted | 6 lines changed or added | |||
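The renamed SSLModes above (disabled, allowSSL, preferSSL, requireSSL) encode two independent decisions: whether outgoing connections use SSL, and whether unencrypted incoming connections are still accepted. A hedged sketch of that mapping as two small predicates; the enum values are copied from the header, but the helper functions are illustrative and are not mongod's actual connection logic.

```cpp
#include <cassert>

// Mirrors the four modes declared in SSLGlobalParams::SSLModes.
enum SSLModes {
    SSLMode_disabled,   // plain outgoing, reject incoming SSL
    SSLMode_allowSSL,   // plain outgoing, accept plain or SSL incoming
    SSLMode_preferSSL,  // SSL outgoing, accept plain or SSL incoming
    SSLMode_requireSSL  // SSL outgoing, accept only SSL incoming
};

bool outgoingUsesSSL(SSLModes mode) {
    return mode == SSLMode_preferSSL || mode == SSLMode_requireSSL;
}

bool acceptsUnencryptedIncoming(SSLModes mode) {
    return mode != SSLMode_requireSSL;
}

int main() {
    assert(!outgoingUsesSSL(SSLMode_allowSSL));
    assert(outgoingUsesSSL(SSLMode_preferSSL));
    assert(acceptsUnencryptedIncoming(SSLMode_preferSSL));
    assert(!acceptsUnencryptedIncoming(SSLMode_requireSSL));
    return 0;
}
```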
stage_types.h | stage_types.h | |||
---|---|---|---|---|
skipping to change at line 53 | skipping to change at line 53 | |||
// STAGE_2DSPHERE to straighten out. | // STAGE_2DSPHERE to straighten out. | |||
STAGE_GEO_2D, | STAGE_GEO_2D, | |||
 // The two $geoNear impls imply a fetch+sort and as such are not IXSCANs. | // The two $geoNear impls imply a fetch+sort and as such are not IXSCANs. | |||
STAGE_GEO_NEAR_2D, | STAGE_GEO_NEAR_2D, | |||
STAGE_GEO_NEAR_2DSPHERE, | STAGE_GEO_NEAR_2DSPHERE, | |||
STAGE_IXSCAN, | STAGE_IXSCAN, | |||
STAGE_LIMIT, | STAGE_LIMIT, | |||
STAGE_OR, | STAGE_OR, | |||
STAGE_PROJECTION, | STAGE_PROJECTION, | |||
STAGE_SHARDING_FILTER, | ||||
STAGE_SKIP, | STAGE_SKIP, | |||
STAGE_SORT, | STAGE_SORT, | |||
STAGE_SORT_MERGE, | STAGE_SORT_MERGE, | |||
STAGE_TEXT, | STAGE_TEXT, | |||
STAGE_UNKNOWN, | STAGE_UNKNOWN, | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 1 lines changed or added | |||
startup_options.h | startup_options.h | |||
---|---|---|---|---|
skipping to change at line 43 | skipping to change at line 43 | |||
namespace optionenvironment { | namespace optionenvironment { | |||
/* | /* | |||
* This structure stores information about all the command line options . The parser will use | * This structure stores information about all the command line options . The parser will use | |||
 * this description when it parses the command line, the INI config file, and the JSON config | * this description when it parses the command line, the INI config file, and the JSON config | |||
* file. See the OptionSection and OptionDescription classes for more details. | * file. See the OptionSection and OptionDescription classes for more details. | |||
* | * | |||
* Example: | * Example: | |||
 * MONGO_MODULE_STARTUP_OPTIONS_REGISTER(MongodOptions)(InitializerContext* context) { | * MONGO_MODULE_STARTUP_OPTIONS_REGISTER(MongodOptions)(InitializerContext* context) { | |||
* return addMongodOptions(&moe::startupOptions); | * return addMongodOptions(&moe::startupOptions); | |||
 * ret = startupOptions.addOption(OD("option", "option", moe::String, "description")) | * startupOptions.addOptionChaining("option", "option", moe::String, "description"); | |||
 * if (!ret.isOK()) { | ||||
 * return ret; | ||||
 * } | ||||
* return Status::OK(); | * return Status::OK(); | |||
* } | * } | |||
*/ | */ | |||
extern OptionSection startupOptions; | extern OptionSection startupOptions; | |||
/* | /* | |||
 * This structure stores the parsed command line options. After the "default" group of the | * This structure stores the parsed command line options. After the "default" group of the | |||
* MONGO_INITIALIZERS, this structure should be fully validated from an option perspective. See | * MONGO_INITIALIZERS, this structure should be fully validated from an option perspective. See | |||
* the Environment, Constraint, and Value classes for more details. | * the Environment, Constraint, and Value classes for more details. | |||
* | * | |||
End of changes. 1 change blocks. | ||||
5 lines changed or deleted | 2 lines changed or added | |||
stemmer.h | stemmer.h | |||
---|---|---|---|---|
skipping to change at line 36 | skipping to change at line 36 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also de lete | * exception statement from all source files in the program, then also de lete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/db/fts/fts_language.h" | ||||
#include "third_party/libstemmer_c/include/libstemmer.h" | #include "third_party/libstemmer_c/include/libstemmer.h" | |||
namespace mongo { | namespace mongo { | |||
namespace fts { | namespace fts { | |||
/** | /** | |||
* maintains case | * maintains case | |||
* but works | * but works | |||
* running/Running -> run/Run | * running/Running -> run/Run | |||
*/ | */ | |||
class Stemmer { | class Stemmer { | |||
public: | public: | |||
Stemmer( const std::string& language ); | Stemmer( const FTSLanguage language ); | |||
~Stemmer(); | ~Stemmer(); | |||
std::string stem( const StringData& word ) const; | std::string stem( const StringData& word ) const; | |||
private: | private: | |||
struct sb_stemmer* _stemmer; | struct sb_stemmer* _stemmer; | |||
}; | }; | |||
} | } | |||
} | } | |||
End of changes. 2 change blocks. | ||||
1 lines changed or deleted | 2 lines changed or added | |||
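The Stemmer class above wraps the bundled libstemmer_c and, per its comment, "maintains case" (running/Running -> run/Run). Below is a minimal sketch of driving the libstemmer C API directly and then copying the original word's casing back onto the stem. The case-restoration loop is an assumption about how the described behavior could be achieved, not a copy of mongo's implementation, and it only holds when the stem is a prefix of the word, as in the examples.

```cpp
#include <cctype>
#include <cstdio>
#include <string>
#include "third_party/libstemmer_c/include/libstemmer.h"

// Stems 'word' with the given stemmer (e.g. created for "english") and copies
// the casing of the original characters onto the stem.
std::string stemPreservingCase(sb_stemmer* stemmer, const std::string& word) {
    std::string lowered(word);
    for (size_t i = 0; i < lowered.size(); ++i)
        lowered[i] = static_cast<char>(std::tolower(static_cast<unsigned char>(lowered[i])));

    const sb_symbol* out = sb_stemmer_stem(
        stemmer, reinterpret_cast<const sb_symbol*>(lowered.c_str()),
        static_cast<int>(lowered.size()));
    int len = sb_stemmer_length(stemmer);

    std::string stem(reinterpret_cast<const char*>(out), static_cast<size_t>(len));
    // Re-apply the original casing character by character (assumes the stem is
    // a prefix of the original word, which holds for the cases shown below).
    for (size_t i = 0; i < stem.size() && i < word.size(); ++i)
        stem[i] = word[i];
    return stem;
}

int main() {
    sb_stemmer* stemmer = sb_stemmer_new("english", NULL);  // NULL selects UTF-8
    if (!stemmer)
        return 1;
    std::printf("%s\n", stemPreservingCase(stemmer, "running").c_str());  // run
    std::printf("%s\n", stemPreservingCase(stemmer, "Running").c_str());  // Run
    sb_stemmer_delete(stemmer);
    return 0;
}
```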
stop_words.h | stop_words.h | |||
---|---|---|---|---|
skipping to change at line 36 | skipping to change at line 36 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also de lete | * exception statement from all source files in the program, then also de lete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <set> | #include <set> | |||
#include <string> | #include <string> | |||
#include "mongo/db/fts/fts_language.h" | ||||
#include "mongo/platform/unordered_set.h" | #include "mongo/platform/unordered_set.h" | |||
namespace mongo { | namespace mongo { | |||
namespace fts { | namespace fts { | |||
class StopWords { | class StopWords { | |||
public: | public: | |||
StopWords(); | StopWords(); | |||
StopWords( const std::set<std::string>& words ); | StopWords( const std::set<std::string>& words ); | |||
bool isStopWord( const std::string& word ) const { | bool isStopWord( const std::string& word ) const { | |||
return _words.count( word ) > 0; | return _words.count( word ) > 0; | |||
} | } | |||
size_t numStopWords() const { return _words.size(); } | size_t numStopWords() const { return _words.size(); } | |||
 static const StopWords* getStopWords( const std::string& language ); | static const StopWords* getStopWords( const FTSLanguage language ); | |||
private: | private: | |||
~StopWords(){} | ~StopWords(){} | |||
unordered_set<std::string> _words; | unordered_set<std::string> _words; | |||
}; | }; | |||
} | } | |||
} | } | |||
End of changes. 2 change blocks. | ||||
1 lines changed or deleted | 2 lines changed or added | |||
strategy.h | strategy.h | |||
---|---|---|---|---|
skipping to change at line 70 | skipping to change at line 70 | |||
{ | { | |||
// Only call this from sharded, for now. | // Only call this from sharded, for now. | |||
// TODO: Refactor all this. | // TODO: Refactor all this. | |||
verify( false ); | verify( false ); | |||
} | } | |||
// These interfaces will merge soon, so make it easy to share logic | // These interfaces will merge soon, so make it easy to share logic | |||
friend class ShardStrategy; | friend class ShardStrategy; | |||
friend class SingleStrategy; | friend class SingleStrategy; | |||
static bool useClusterWriteCommands; | ||||
protected: | protected: | |||
void doWrite( int op , Request& r , const Shard& shard , bool check Version = true ); | void doWrite( int op , Request& r , const Shard& shard , bool check Version = true ); | |||
void doIndexQuery( Request& r , const Shard& shard ); | void doIndexQuery( Request& r , const Shard& shard ); | |||
void broadcastWrite(int op, Request& r); // Sends to all shards in cluster. DOESN'T CHECK VERSION | void broadcastWrite(int op, Request& r); // Sends to all shards in cluster. DOESN'T CHECK VERSION | |||
void insert( const Shard& shard , const char * ns , const vector<BS ONObj>& v , int flags=0 , bool safe=false ); | void insert( const Shard& shard , const char * ns , const vector<BS ONObj>& v , int flags=0 , bool safe=false ); | |||
void update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags=0, bool safe=false ); | void update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags=0, bool safe=false ); | |||
}; | }; | |||
End of changes. 1 change blocks. | ||||
0 lines changed or deleted | 2 lines changed or added | |||
threadlocal.h | threadlocal.h | |||
---|---|---|---|---|
skipping to change at line 88 | skipping to change at line 88 | |||
boost::thread_specific_ptr<T> _val; | boost::thread_specific_ptr<T> _val; | |||
const T _default; | const T _default; | |||
}; | }; | |||
/* TSP | /* TSP | |||
 These macros use intrinsics which are faster than boost::thread_specific_ptr. | These macros use intrinsics which are faster than boost::thread_specific_ptr. | |||
 However the intrinsics don't free up objects on thread closure. Thus we use | However the intrinsics don't free up objects on thread closure. Thus we use | |||
 a combination here, with the assumption that resets are infrequent, so that | a combination here, with the assumption that resets are infrequent, so that | |||
 gets are fast. | gets are fast. | |||
*/ | */ | |||
#if defined(_WIN32) || (defined(__GNUC__) && defined(__linux__)) | #if defined(MONGO_HAVE___THREAD) || defined(MONGO_HAVE___DECLSPEC_THREAD) | |||
template< class T > | template< class T > | |||
struct TSP { | struct TSP { | |||
boost::thread_specific_ptr<T> tsp; | boost::thread_specific_ptr<T> tsp; | |||
public: | public: | |||
T* get() const; | T* get() const; | |||
void reset(T* v); | void reset(T* v); | |||
T* getMake() { | T* getMake() { | |||
T *t = get(); | T *t = get(); | |||
if( t == 0 ) | if( t == 0 ) | |||
reset( t = new T() ); | reset( t = new T() ); | |||
return t; | return t; | |||
} | } | |||
}; | }; | |||
# if defined(_WIN32) | # if defined(MONGO_HAVE___DECLSPEC_THREAD) | |||
# define TSP_DECLARE(T,p) extern TSP<T> p; | # define TSP_DECLARE(T,p) extern TSP<T> p; | |||
# define TSP_DEFINE(T,p) __declspec( thread ) T* _ ## p; \ | # define TSP_DEFINE(T,p) __declspec( thread ) T* _ ## p; \ | |||
TSP<T> p; \ | TSP<T> p; \ | |||
template<> T* TSP<T>::get() const { return _ ## p; } \ | template<> T* TSP<T>::get() const { return _ ## p; } \ | |||
void TSP<T>::reset(T* v) { \ | void TSP<T>::reset(T* v) { \ | |||
tsp.reset(v); \ | tsp.reset(v); \ | |||
_ ## p = v; \ | _ ## p = v; \ | |||
} | } | |||
skipping to change at line 131 | skipping to change at line 131 | |||
# define TSP_DEFINE(T,p) \ | # define TSP_DEFINE(T,p) \ | |||
__thread T* _ ## p; \ | __thread T* _ ## p; \ | |||
template<> void TSP<T>::reset(T* v) { \ | template<> void TSP<T>::reset(T* v) { \ | |||
tsp.reset(v); \ | tsp.reset(v); \ | |||
_ ## p = v; \ | _ ## p = v; \ | |||
} \ | } \ | |||
TSP<T> p; | TSP<T> p; | |||
# endif | # endif | |||
#elif defined(__APPLE__) | #elif defined(_POSIX_THREADS) && (_POSIX_THREADS >= 0) | |||
template< class T> | template< class T> | |||
struct TSP { | struct TSP { | |||
pthread_key_t _key; | pthread_key_t _key; | |||
public: | public: | |||
TSP() { | TSP() { | |||
verify( pthread_key_create( &_key, TSP::dodelete ) == 0 ); | verify( pthread_key_create( &_key, TSP::dodelete ) == 0 ); | |||
} | } | |||
~TSP() { | ~TSP() { | |||
pthread_key_delete( _key ); | pthread_key_delete( _key ); | |||
End of changes. 3 change blocks. | ||||
3 lines changed or deleted | 3 lines changed or added | |||
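The TSP comment above describes combining a compiler-level thread-local pointer (fast reads) with boost::thread_specific_ptr or a pthread key (so the object is still destroyed at thread exit). A standalone sketch of that same idea using GCC/Clang __thread plus a pthread key purely for cleanup; the names and structure are illustrative and are not the TSP_DECLARE/TSP_DEFINE macros themselves.

```cpp
// Fast thread-local access: reads go through a __thread raw pointer, while a
// pthread key exists solely to run the destructor when the thread exits.
#include <pthread.h>
#include <cstdio>

struct Counter { int n; Counter() : n(0) {} };

static __thread Counter* tlCounter = 0;   // fast-path pointer (no function call)
static pthread_key_t tlKey;
static pthread_once_t tlOnce = PTHREAD_ONCE_INIT;

static void destroyCounter(void* p) { delete static_cast<Counter*>(p); }
static void makeKey() { pthread_key_create(&tlKey, destroyCounter); }

Counter* getCounter() {
    if (tlCounter == 0) {                      // slow path: first use on this thread
        pthread_once(&tlOnce, makeKey);
        tlCounter = new Counter();
        pthread_setspecific(tlKey, tlCounter); // registers cleanup only
    }
    return tlCounter;                          // fast path thereafter
}

static void* worker(void*) {
    for (int i = 0; i < 1000; ++i)
        getCounter()->n++;
    std::printf("thread-local count: %d\n", getCounter()->n);
    return 0;
}

int main() {
    pthread_t a, b;
    pthread_create(&a, 0, worker, 0);
    pthread_create(&b, 0, worker, 0);
    pthread_join(a, 0);
    pthread_join(b, 0);
    return 0;
}
```

Build with -pthread; each thread prints 1000 because the counter is per thread.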
time_support.h | time_support.h | |||
---|---|---|---|---|
skipping to change at line 81 | skipping to change at line 81 | |||
*/ | */ | |||
std::string dateToISOStringLocal(Date_t date); | std::string dateToISOStringLocal(Date_t date); | |||
/** | /** | |||
* Formats "date" in fixed width in the local time zone. | * Formats "date" in fixed width in the local time zone. | |||
* | * | |||
* Sample format: "Wed Oct 31 13:34:47.996" | * Sample format: "Wed Oct 31 13:34:47.996" | |||
*/ | */ | |||
std::string dateToCtimeString(Date_t date); | std::string dateToCtimeString(Date_t date); | |||
/** | ||||
 * Converts millis to time_t, doing correct division for negative millis, and uasserting that | ||||
* the result falls within the valid range of a time_t. | ||||
*/ | ||||
time_t millisToTimeT(long long millis); | ||||
/** | ||||
 * Returns the millis since the last whole second of the given millis since epoch, and correctly | ||||
* handles dates before epoch. | ||||
*/ | ||||
int extractMillisPortion(long long millisSinceEpoch); | ||||
boost::gregorian::date currentDate(); | boost::gregorian::date currentDate(); | |||
// parses time of day in "hh:mm" format assuming 'hh' is 00-23 | // parses time of day in "hh:mm" format assuming 'hh' is 00-23 | |||
bool toPointInTime( const std::string& str , boost::posix_time::ptime* timeOfDay ); | bool toPointInTime( const std::string& str , boost::posix_time::ptime* timeOfDay ); | |||
void sleepsecs(int s); | void sleepsecs(int s); | |||
void sleepmillis(long long ms); | void sleepmillis(long long ms); | |||
void sleepmicros(long long micros); | void sleepmicros(long long micros); | |||
class Backoff { | class Backoff { | |||
End of changes. 1 change blocks. | ||||
14 lines changed or deleted | 0 lines changed or added | |||
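The removed declarations above documented the subtle part of splitting a millisecond count into (time_t seconds, millisecond remainder): plain integer division truncates toward zero, so dates before the epoch need floor semantics to keep the millis portion in [0, 999]. A small sketch of that arithmetic; the function names follow the removed declarations, but the bodies are illustrative, with a plain assert standing in for the original uassert.

```cpp
#include <cassert>
#include <ctime>

// Floor-divides millis by 1000 so that, e.g., -1 ms maps to second -1
// rather than second 0 (truncation would give 0 and a negative remainder).
time_t millisToTimeT(long long millis) {
    long long seconds = millis / 1000;
    if (millis < 0 && millis % 1000 != 0)
        --seconds;                           // correct division for negative millis
    time_t result = static_cast<time_t>(seconds);
    assert(static_cast<long long>(result) == seconds);  // fits in a time_t
    return result;
}

// Millis since the last whole second, always in [0, 999], even before the epoch.
int extractMillisPortion(long long millisSinceEpoch) {
    int remainder = static_cast<int>(millisSinceEpoch % 1000);
    if (remainder < 0)
        remainder += 1000;
    return remainder;
}

int main() {
    assert(millisToTimeT(1500) == 1 && extractMillisPortion(1500) == 500);
    assert(millisToTimeT(-1) == -1 && extractMillisPortion(-1) == 999);
    assert(millisToTimeT(-1000) == -1 && extractMillisPortion(-1000) == 0);
    return 0;
}
```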
tokenizer.h | tokenizer.h | |||
---|---|---|---|---|
skipping to change at line 36 | skipping to change at line 36 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also de lete | * exception statement from all source files in the program, then also de lete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/db/fts/fts_language.h" | ||||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
#include "mongo/platform/unordered_set.h" | #include "mongo/platform/unordered_set.h" | |||
namespace mongo { | namespace mongo { | |||
namespace fts { | namespace fts { | |||
struct Token { | struct Token { | |||
enum Type { WHITESPACE, DELIMITER, TEXT, INVALID }; | enum Type { WHITESPACE, DELIMITER, TEXT, INVALID }; | |||
Token( Type type, const StringData& data, unsigned offset, bool previousWhiteSpace ) | Token( Type type, const StringData& data, unsigned offset, bool previousWhiteSpace ) | |||
skipping to change at line 62 | skipping to change at line 63 | |||
Type type; | Type type; | |||
StringData data; | StringData data; | |||
unsigned offset; | unsigned offset; | |||
bool previousWhiteSpace; | bool previousWhiteSpace; | |||
}; | }; | |||
class Tokenizer { | class Tokenizer { | |||
public: | public: | |||
Tokenizer( const std::string& language, const StringData& str ) ; | Tokenizer( const FTSLanguage language, const StringData& str ); | |||
bool more() const; | bool more() const; | |||
Token next(); | Token next(); | |||
private: | private: | |||
Token::Type _type( char c ) const; | Token::Type _type( char c ) const; | |||
bool _skipWhitespace(); | bool _skipWhitespace(); | |||
unsigned _pos; | unsigned _pos; | |||
bool _previousWhiteSpace; | bool _previousWhiteSpace; | |||
End of changes. 2 change blocks. | ||||
1 lines changed or deleted | 2 lines changed or added | |||
tool_options.h | tool_options.h | |||
---|---|---|---|---|
skipping to change at line 91 | skipping to change at line 91 | |||
Status addBSONToolOptions(moe::OptionSection* options); | Status addBSONToolOptions(moe::OptionSection* options); | |||
Status addFieldOptions(moe::OptionSection* options); | Status addFieldOptions(moe::OptionSection* options); | |||
// Legacy interface for getting options in tools | // Legacy interface for getting options in tools | |||
// TODO: Remove this when we use the new interface everywhere | // TODO: Remove this when we use the new interface everywhere | |||
std::string getParam(std::string name, string def=""); | std::string getParam(std::string name, string def=""); | |||
int getParam(std::string name, int def); | int getParam(std::string name, int def); | |||
bool hasParam(std::string name); | bool hasParam(std::string name); | |||
 Status handlePreValidationGeneralToolOptions(const moe::Environment& params); | /** | |||
 * Handle options that should come before validation, such as "help". | ||||
 * | ||||
 * Returns false if an option was found that implies we should prematurely exit with success. | ||||
 */ | ||||
 bool handlePreValidationGeneralToolOptions(const moe::Environment& params); | ||||
Status storeGeneralToolOptions(const moe::Environment& params, | Status storeGeneralToolOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
Status storeFieldOptions(const moe::Environment& params, | Status storeFieldOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
Status storeBSONToolOptions(const moe::Environment& params, | Status storeBSONToolOptions(const moe::Environment& params, | |||
const std::vector<std::string>& args); | const std::vector<std::string>& args); | |||
} | } | |||
End of changes. 1 change blocks. | ||||
2 lines changed or deleted | 8 lines changed or added | |||
touch_pages.h | touch_pages.h | |||
---|---|---|---|---|
skipping to change at line 18 | skipping to change at line 18 | |||
* | * | |||
* Unless required by applicable law or agreed to in writing, software | * Unless required by applicable law or agreed to in writing, software | |||
* distributed under the License is distributed on an "AS IS" BASIS, | * distributed under the License is distributed on an "AS IS" BASIS, | |||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. | |||
* See the License for the specific language governing permissions and | * See the License for the specific language governing permissions and | |||
* limitations under the License. | * limitations under the License. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <cstdlib> | |||
#include <fcntl.h> | ||||
namespace mongo { | namespace mongo { | |||
class Extent; | ||||
// Given a namespace, page in all pages associated with that namespace | ||||
void touchNs( const std::string& ns ); | ||||
// Touch a range of pages using an OS-specific method. | // Touch a range of pages using an OS-specific method. | |||
// Takes a file descriptor, offset, and length, for Linux use. | // Takes a file descriptor, offset, and length, for Linux use. | |||
// Additionally takes an Extent pointer for use on other platforms. | // Additionally takes an Extent pointer for use on other platforms. | |||
 void touch_pages( HANDLE fd, int offset, size_t length, const Extent* ext ); | void touch_pages( const char* buf, size_t length ); | |||
} | } | |||
End of changes. 3 change blocks. | ||||
6 lines changed or deleted | 3 lines changed or added | |||
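touch_pages above now takes a plain (buf, length) pair; a common portable way to pre-fault such a range is simply to read one byte per page. A hedged sketch of that approach follows; the page-size query and the volatile sink are the illustrative parts, and the real function may use an OS-specific call instead.

```cpp
#include <unistd.h>
#include <cstddef>
#include <vector>

// Reads one byte from every page covered by [buf, buf + length) so the OS
// faults the pages in before they are needed on a hot path.
void touch_pages_example(const char* buf, size_t length) {
    const size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    volatile char sink = 0;                    // keeps the reads from being optimized away
    for (size_t offset = 0; offset < length; offset += pageSize)
        sink += buf[offset];
    if (length > 0)
        sink += buf[length - 1];               // last byte, in case length is not page-aligned
    (void)sink;
}

int main() {
    std::vector<char> data(1 << 20, 1);        // 1 MiB buffer
    touch_pages_example(&data[0], data.size());
    return 0;
}
```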
update_driver.h | update_driver.h | |||
---|---|---|---|---|
skipping to change at line 41 | skipping to change at line 41 | |||
#include <string> | #include <string> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/status.h" | #include "mongo/base/status.h" | |||
#include "mongo/base/owned_pointer_vector.h" | #include "mongo/base/owned_pointer_vector.h" | |||
#include "mongo/bson/mutable/document.h" | #include "mongo/bson/mutable/document.h" | |||
#include "mongo/db/field_ref_set.h" | #include "mongo/db/field_ref_set.h" | |||
#include "mongo/db/index_set.h" | #include "mongo/db/index_set.h" | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/ops/modifier_interface.h" | #include "mongo/db/ops/modifier_interface.h" | |||
#include "mongo/db/ops/modifier_table.h" | ||||
namespace mongo { | namespace mongo { | |||
class UpdateDriver { | class UpdateDriver { | |||
public: | public: | |||
struct Options; | struct Options; | |||
UpdateDriver(const Options& opts); | UpdateDriver(const Options& opts); | |||
~UpdateDriver(); | ~UpdateDriver(); | |||
skipping to change at line 69 | skipping to change at line 70 | |||
* Fills in document with any fields in the query which are valid. | * Fills in document with any fields in the query which are valid. | |||
* | * | |||
* Valid fields include equality matches like "a":1, or "a.b":false | * Valid fields include equality matches like "a":1, or "a.b":false | |||
* | * | |||
* Each valid field will be expanded (from dot notation) and confli cts will be | * Each valid field will be expanded (from dot notation) and confli cts will be | |||
* checked for all fields added to the underlying document. | * checked for all fields added to the underlying document. | |||
* | * | |||
* Returns Status::OK() if the document can be used. If there are a ny error or | * Returns Status::OK() if the document can be used. If there are a ny error or | |||
* conflicts along the way then those errors will be returned. | * conflicts along the way then those errors will be returned. | |||
*/ | */ | |||
 static Status createFromQuery(const BSONObj& query, mutablebson::Document& doc); | Status populateDocumentWithQueryFields(const BSONObj& query, mutablebson::Document& doc) const; | |||
/** | /** | |||
* return a BSONObj with the _id field of the doc passed in, or the doc itself. | * return a BSONObj with the _id field of the doc passed in, or the doc itself. | |||
* If no _id and multi, error. | * If no _id and multi, error. | |||
*/ | */ | |||
BSONObj makeOplogEntryQuery(const BSONObj doc, bool multi) const; | BSONObj makeOplogEntryQuery(const BSONObj& doc, bool multi) const; | |||
/** | /** | |||
* Returns OK and executes '_mods' over 'doc', generating 'newObj'. If any mod is | * Returns OK and executes '_mods' over 'doc', generating 'newObj'. If any mod is | |||
* positional, use 'matchedField' (index of the array item matched) . If doc allows | * positional, use 'matchedField' (index of the array item matched) . If doc allows | |||
* mods to be applied in place and no index updating is involved, t hen the mods may | * mods to be applied in place and no index updating is involved, t hen the mods may | |||
* be applied "in place" over 'doc'. | * be applied "in place" over 'doc'. | |||
* | * | |||
* If the driver's '_logOp' mode is turned on, and if 'logOpRec' is not NULL, fills in | * If the driver's '_logOp' mode is turned on, and if 'logOpRec' is not NULL, fills in | |||
* the latter with the oplog entry corresponding to the update. If '_mods' can't be | * the latter with the oplog entry corresponding to the update. If '_mods' can't be | |||
* applied, returns an error status with a corresponding descriptio n. | * applied, returns an error status with a corresponding descriptio n. | |||
* | ||||
* If a non-NULL updatedField vector* is supplied, | ||||
* then all updated fields will be added to it. | ||||
*/ | */ | |||
Status update(const StringData& matchedField, | Status update(const StringData& matchedField, | |||
mutablebson::Document* doc, | mutablebson::Document* doc, | |||
BSONObj* logOpRec); | BSONObj* logOpRec = NULL, | |||
FieldRefSet* updatedFields = NULL); | ||||
// | // | |||
// Accessors | // Accessors | |||
// | // | |||
size_t numMods() const; | size_t numMods() const; | |||
bool isDocReplacement() const; | bool isDocReplacement() const; | |||
bool modsAffectIndices() const; | bool modsAffectIndices() const; | |||
void refreshIndexKeys(const IndexPathSet& indexedFields); | void refreshIndexKeys(const IndexPathSet& indexedFields); | |||
 /** Inform the update driver of which fields are shard keys so that attempts to modify | ||||
 * those fields can be rejected by the driver. Pass an empty object to indicate that | ||||
 * no shard keys are in play. | ||||
 */ | ||||
 void refreshShardKeyPattern(const BSONObj& shardKeyPattern); | ||||
 /** After calling 'update' above, this will return true if it appears that the modifier | ||||
 * updates may have altered any shard keys. If this returns 'true', | ||||
 * 'verifyShardKeysUnaltered' should be called with the original unmutated object so | ||||
 * field comparisons can be made and illegal mutations detected. | ||||
 */ | ||||
 bool modsAffectShardKeys() const; | ||||
 /** If the mods were detected to have potentially affected shard keys during a | ||||
 * non-upsert update, call this method, providing the original unaltered document so | ||||
 * that the apparently altered fields can be verified to have not actually changed. A | ||||
 * non-OK status indicates that at least one mutation to a shard key was detected, and | ||||
 * the update should be rejected rather than applied. You may pass an empty original | ||||
 * object on an upsert, since there is not an original object against which to | ||||
 * compare. In that case, only the existence of shard keys in 'updated' is verified. | ||||
 */ | ||||
 Status checkShardKeysUnaltered(const BSONObj& original, | ||||
 const mutablebson::Document& updated) const; | ||||
bool multi() const; | bool multi() const; | |||
void setMulti(bool multi); | void setMulti(bool multi); | |||
bool upsert() const; | bool upsert() const; | |||
void setUpsert(bool upsert); | void setUpsert(bool upsert); | |||
bool logOp() const; | bool logOp() const; | |||
void setLogOp(bool logOp); | void setLogOp(bool logOp); | |||
ModifierInterface::Options modOptions() const; | ModifierInterface::Options modOptions() const; | |||
void setModOptions(ModifierInterface::Options modOpts); | void setModOptions(ModifierInterface::Options modOpts); | |||
ModifierInterface::ExecInfo::UpdateContext context() const; | ModifierInterface::ExecInfo::UpdateContext context() const; | |||
void setContext(ModifierInterface::ExecInfo::UpdateContext context) ; | void setContext(ModifierInterface::ExecInfo::UpdateContext context) ; | |||
private: | private: | |||
/** Resets the state of the class associated with mods (not the err or state) */ | /** Resets the state of the class associated with mods (not the err or state) */ | |||
void clear(); | void clear(); | |||
 /** Create the modifier and add it to the back of the modifiers vector */ | ||||
inline Status addAndParse(const modifiertable::ModifierType type, | ||||
const BSONElement& elem); | ||||
// | // | |||
// immutable properties after parsing | // immutable properties after parsing | |||
// | // | |||
// Is there a list of $mod's on '_mods' or is it just full object r eplacement? | // Is there a list of $mod's on '_mods' or is it just full object r eplacement? | |||
bool _replacementMode; | bool _replacementMode; | |||
// Collection of update mod instances. Owned here. | // Collection of update mod instances. Owned here. | |||
vector<ModifierInterface*> _mods; | vector<ModifierInterface*> _mods; | |||
skipping to change at line 182 | skipping to change at line 167 | |||
// Should this driver generate an oplog record when it applies the update? | // Should this driver generate an oplog record when it applies the update? | |||
bool _logOp; | bool _logOp; | |||
// The options to initiate the mods with | // The options to initiate the mods with | |||
ModifierInterface::Options _modOptions; | ModifierInterface::Options _modOptions; | |||
// Are any of the fields mentioned in the mods participating in any index? Is set anew | // Are any of the fields mentioned in the mods participating in any index? Is set anew | |||
// at each call to update. | // at each call to update. | |||
bool _affectIndices; | bool _affectIndices; | |||
// Holds the fields relevant to any optional shard key state. | ||||
struct ShardKeyState { | ||||
// The current shard key pattern | ||||
BSONObj pattern; | ||||
 // A vector owning the FieldRefs parsed from the pattern field names. | ||||
 OwnedPointerVector<FieldRef> keys; | ||||
 // A FieldRefSet containing pointers to the FieldRefs in 'keys'. | ||||
 FieldRefSet keySet; | ||||
 // The current set of keys known to be affected by the current update. This is | ||||
// reset on each call to 'update'. | ||||
FieldRefSet affectedKeySet; | ||||
}; | ||||
// If shard keys have been set, holds the relevant state. | ||||
boost::scoped_ptr<ShardKeyState> _shardKeyState; | ||||
// Is this update going to be an upsert? | // Is this update going to be an upsert? | |||
ModifierInterface::ExecInfo::UpdateContext _context; | ModifierInterface::ExecInfo::UpdateContext _context; | |||
mutablebson::Document _logDoc; | mutablebson::Document _logDoc; | |||
}; | }; | |||
struct UpdateDriver::Options { | struct UpdateDriver::Options { | |||
bool multi; | bool multi; | |||
bool upsert; | bool upsert; | |||
bool logOp; | bool logOp; | |||
End of changes. 8 change blocks. | ||||
62 lines changed or deleted | 13 lines changed or added | |||
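populateDocumentWithQueryFields, described in the update_driver.h comments above, seeds an upsert document from the equality parts of the query, expanding dotted paths and checking for conflicts between them. Below is a standalone sketch of just the conflict check over dotted field paths (the nesting step is omitted); the map-of-paths "document" and the conflict rule shown are assumptions for illustration, whereas the real code builds a mutablebson::Document.

```cpp
#include <cstdio>
#include <map>
#include <string>

// Toy "document": a map from full dotted path to a scalar value.
typedef std::map<std::string, int> Doc;

// Inserts 'path' = 'value', reporting a conflict if an existing path and the
// new one overlap, e.g. "a" conflicts with "a.b" in either direction.
bool addEqualityField(Doc& doc, const std::string& path, int value) {
    for (Doc::const_iterator it = doc.begin(); it != doc.end(); ++it) {
        const std::string& existing = it->first;
        bool existingIsPrefix = path.compare(0, existing.size(), existing) == 0 &&
                                (path.size() == existing.size() || path[existing.size()] == '.');
        bool newIsPrefix = existing.compare(0, path.size(), path) == 0 &&
                           (existing.size() == path.size() || existing[path.size()] == '.');
        if (existingIsPrefix || newIsPrefix)
            return false;  // conflicting paths, as the header comment describes
    }
    doc[path] = value;
    return true;
}

int main() {
    Doc doc;
    bool ok1 = addEqualityField(doc, "a.b", 1);   // would expand to {a: {b: 1}}
    bool ok2 = addEqualityField(doc, "c", 2);
    bool ok3 = addEqualityField(doc, "a", 3);     // conflicts with "a.b"
    std::printf("%d %d %d\n", ok1, ok2, ok3);     // prints 1 1 0
    return 0;
}
```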
update_request.h | update_request.h | |||
---|---|---|---|---|
skipping to change at line 41 | skipping to change at line 41 | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
#include "mongo/db/curop.h" | #include "mongo/db/curop.h" | |||
#include "mongo/db/namespace_string.h" | #include "mongo/db/namespace_string.h" | |||
#include "mongo/db/query_plan_selection_policy.h" | #include "mongo/db/query_plan_selection_policy.h" | |||
#include "mongo/util/mongoutils/str.h" | #include "mongo/util/mongoutils/str.h" | |||
namespace mongo { | namespace mongo { | |||
namespace str = mongoutils::str; | namespace str = mongoutils::str; | |||
class FieldRef; | ||||
class UpdateLifecycle; | ||||
class UpdateRequest { | class UpdateRequest { | |||
public: | public: | |||
inline UpdateRequest( | inline UpdateRequest( | |||
const NamespaceString& nsString, | const NamespaceString& nsString, | |||
const QueryPlanSelectionPolicy& policy = QueryPlanSelectionPoli cy::any() ) | const QueryPlanSelectionPolicy& policy = QueryPlanSelectionPoli cy::any() ) | |||
: _nsString(nsString) | : _nsString(nsString) | |||
, _queryPlanPolicy(policy) | , _queryPlanPolicy(policy) | |||
, _god(false) | , _god(false) | |||
, _upsert(false) | , _upsert(false) | |||
, _multi(false) | , _multi(false) | |||
, _updateOpLog(false) | , _callLogOp(false) | |||
, _fromMigration(false) | , _fromMigration(false) | |||
, _fromReplication(false) {} | , _fromReplication(false) | |||
, _lifecycle(NULL) {} | ||||
const NamespaceString& getNamespaceString() const { | const NamespaceString& getNamespaceString() const { | |||
return _nsString; | return _nsString; | |||
} | } | |||
const QueryPlanSelectionPolicy& getQueryPlanSelectionPolicy() const { | const QueryPlanSelectionPolicy& getQueryPlanSelectionPolicy() const { | |||
return _queryPlanPolicy; | return _queryPlanPolicy; | |||
} | } | |||
inline void setQuery(const BSONObj& query) { | inline void setQuery(const BSONObj& query) { | |||
skipping to change at line 107 | skipping to change at line 111 | |||
inline void setMulti(bool value = true) { | inline void setMulti(bool value = true) { | |||
_multi = value; | _multi = value; | |||
} | } | |||
bool isMulti() const { | bool isMulti() const { | |||
return _multi; | return _multi; | |||
} | } | |||
inline void setUpdateOpLog(bool value = true) { | inline void setUpdateOpLog(bool value = true) { | |||
_updateOpLog = value; | _callLogOp = value; | |||
} | } | |||
bool shouldUpdateOpLog() const { | bool shouldCallLogOp() const { | |||
return _updateOpLog; | return _callLogOp; | |||
} | } | |||
inline void setFromMigration(bool value = true) { | inline void setFromMigration(bool value = true) { | |||
_fromMigration = value; | _fromMigration = value; | |||
} | } | |||
bool isFromMigration() const { | bool isFromMigration() const { | |||
return _fromMigration; | return _fromMigration; | |||
} | } | |||
inline void setFromReplication(bool value = true) { | inline void setFromReplication(bool value = true) { | |||
_fromReplication = value; | _fromReplication = value; | |||
} | } | |||
bool isFromReplication() const { | bool isFromReplication() const { | |||
return _fromReplication; | return _fromReplication; | |||
} | } | |||
inline void setLifecycle(const UpdateLifecycle* value) { | ||||
_lifecycle = value; | ||||
} | ||||
inline const UpdateLifecycle* getLifecycle() const { | ||||
return _lifecycle; | ||||
} | ||||
const std::string toString() const { | const std::string toString() const { | |||
return str::stream() | return str::stream() | |||
<< " query: " << _query | << " query: " << _query | |||
<< " updated: " << _updates | << " updated: " << _updates | |||
<< " god: " << _god | << " god: " << _god | |||
<< " upsert: " << _upsert | << " upsert: " << _upsert | |||
<< " multi: " << _multi | << " multi: " << _multi | |||
<< " logToOplog: " << _updateOpLog | << " callLogOp: " << _callLogOp | |||
<< " fromMigration: " << _fromMigration | << " fromMigration: " << _fromMigration | |||
<< " fromReplications: " << _fromReplication; | << " fromReplications: " << _fromReplication; | |||
} | } | |||
private: | private: | |||
const NamespaceString& _nsString; | const NamespaceString& _nsString; | |||
const QueryPlanSelectionPolicy& _queryPlanPolicy; | const QueryPlanSelectionPolicy& _queryPlanPolicy; | |||
// Contains the query that selects documents to update. | // Contains the query that selects documents to update. | |||
BSONObj _query; | BSONObj _query; | |||
skipping to change at line 165 | skipping to change at line 177 | |||
// updates, never user updates. | // updates, never user updates. | |||
bool _god; | bool _god; | |||
// True if this should insert if no matching document is found. | // True if this should insert if no matching document is found. | |||
bool _upsert; | bool _upsert; | |||
// True if this update is allowed to affect more than one document. | // True if this update is allowed to affect more than one document. | |||
bool _multi; | bool _multi; | |||
// True if the effects of the update should be written to the oplog . | // True if the effects of the update should be written to the oplog . | |||
bool _updateOpLog; | bool _callLogOp; | |||
// True if this update is on behalf of a chunk migration. | // True if this update is on behalf of a chunk migration. | |||
bool _fromMigration; | bool _fromMigration; | |||
// True if this update is being applied during the application for the oplog. | // True if this update is being applied during the application for the oplog. | |||
bool _fromReplication; | bool _fromReplication; | |||
// The lifecycle data, and events used during the update request. | ||||
const UpdateLifecycle* _lifecycle; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 9 change blocks. | ||||
7 lines changed or deleted | 22 lines changed or added | |||
user.h | user.h | |||
---|---|---|---|---|
skipping to change at line 28 | skipping to change at line 28 | |||
#include <string> | #include <string> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/disallow_copying.h" | #include "mongo/base/disallow_copying.h" | |||
#include "mongo/db/auth/privilege.h" | #include "mongo/db/auth/privilege.h" | |||
#include "mongo/db/auth/resource_pattern.h" | #include "mongo/db/auth/resource_pattern.h" | |||
#include "mongo/db/auth/role_name.h" | #include "mongo/db/auth/role_name.h" | |||
#include "mongo/db/auth/user_name.h" | #include "mongo/db/auth/user_name.h" | |||
#include "mongo/platform/atomic_word.h" | #include "mongo/platform/atomic_word.h" | |||
#include "mongo/platform/unordered_map.h" | #include "mongo/platform/unordered_map.h" | |||
#include "mongo/platform/unordered_set.h" | ||||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
 * Represents a MongoDB user. Stores information about the user necessary for access control | * Represents a MongoDB user. Stores information about the user necessary for access control | |||
 * checks and authentications, such as what privileges this user has, as well as what roles | * checks and authentications, such as what privileges this user has, as well as what roles | |||
* the user belongs to. | * the user belongs to. | |||
* | * | |||
 * Every User object is owned by an AuthorizationManager. The AuthorizationManager is the only | * Every User object is owned by an AuthorizationManager. The AuthorizationManager is the only | |||
 * one that should construct, modify, or delete a User object. All other consumers of User must | * one that should construct, modify, or delete a User object. All other consumers of User must | |||
skipping to change at line 53 | skipping to change at line 54 | |||
* user from the AuthorizationManager. | * user from the AuthorizationManager. | |||
*/ | */ | |||
class User { | class User { | |||
MONGO_DISALLOW_COPYING(User); | MONGO_DISALLOW_COPYING(User); | |||
public: | public: | |||
struct CredentialData { | struct CredentialData { | |||
std::string password; | std::string password; | |||
bool isExternal; | bool isExternal; | |||
}; | }; | |||
struct RoleData { | ||||
RoleName name; | ||||
bool hasRole; | ||||
bool canDelegate; | ||||
RoleData() : hasRole(false), canDelegate(false) {} | ||||
 RoleData(const RoleName& _name, bool _hasRole, bool _canDelegate) : | ||||
 name(_name), hasRole(_hasRole), canDelegate(_canDelegate) {} | ||||
}; | ||||
typedef unordered_map<ResourcePattern, Privilege> ResourcePrivilege Map; | typedef unordered_map<ResourcePattern, Privilege> ResourcePrivilege Map; | |||
typedef unordered_map<RoleName, RoleData> RoleDataMap; | ||||
explicit User(const UserName& name); | explicit User(const UserName& name); | |||
~User(); | ~User(); | |||
/** | /** | |||
* Returns the user name for this user. | * Returns the user name for this user. | |||
*/ | */ | |||
const UserName& getName() const; | const UserName& getName() const; | |||
/** | /** | |||
 * Returns a reference to the information about the users' role membership. | * Returns an iterator over the names of the user's direct roles | |||
*/ | */ | |||
const RoleDataMap& getRoles() const; | RoleNameIterator getRoles() const; | |||
/** | /** | |||
* Returns true if this user is a member of the given role. | * Returns true if this user is a member of the given role. | |||
*/ | */ | |||
bool hasRole(const RoleName& roleName) const; | bool hasRole(const RoleName& roleName) const; | |||
/** | /** | |||
* Returns a reference to the information about the user's privileg es. | * Returns a reference to the information about the user's privileg es. | |||
*/ | */ | |||
const ResourcePrivilegeMap& getPrivileges() const { return _privile ges; } | const ResourcePrivilegeMap& getPrivileges() const { return _privile ges; } | |||
skipping to change at line 99 | skipping to change at line 90 | |||
* Returns the CredentialData for this user. | * Returns the CredentialData for this user. | |||
*/ | */ | |||
const CredentialData& getCredentials() const; | const CredentialData& getCredentials() const; | |||
/** | /** | |||
* Gets the set of actions this user is allowed to perform on the g iven resource. | * Gets the set of actions this user is allowed to perform on the g iven resource. | |||
*/ | */ | |||
const ActionSet getActionsForResource(const ResourcePattern& resour ce) const; | const ActionSet getActionsForResource(const ResourcePattern& resour ce) const; | |||
/** | /** | |||
* Gets the schema version of user documents used to build this use | ||||
r. See comment on | ||||
* _schemaVersion field, below. | ||||
*/ | ||||
int getSchemaVersion() const { return _schemaVersion; } | ||||
/** | ||||
* Returns true if this user object, generated from V1-schema user | ||||
documents, | ||||
* has been probed for privileges on database "dbname", according t | ||||
o the V1 | ||||
* implicit privilge acquisition rules. | ||||
*/ | ||||
bool hasProbedV1(const StringData& dbname) const; | ||||
/** | ||||
* Returns true if this copy of information about this user is still valid. If this returns | * Returns true if this copy of information about this user is still valid. If this returns | |||
* false, this object should no longer be used and should be returned to the | * false, this object should no longer be used and should be returned to the | |||
* AuthorizationManager and a new User object for this user should be requested. | * AuthorizationManager and a new User object for this user should be requested. | |||
*/ | */ | |||
bool isValid() const; | bool isValid() const; | |||
/** | /** | |||
* This returns the reference count for this User. The AuthorizationManager should be the | * This returns the reference count for this User. The AuthorizationManager should be the | |||
* only caller of this. | * only caller of this. | |||
*/ | */ | |||
uint32_t getRefCount() const; | uint32_t getRefCount() const; | |||
// Mutators below. Mutation functions should *only* be called by the AuthorizationManager | ||||
/** | /** | |||
* Copies the contents of other into this User. | * Clones this user into a new, valid User object with refcount of 0. | |||
*/ | */ | |||
void copyFrom(const User& other); | User* clone() const; | |||
// Mutators below. Mutation functions should *only* be called by the AuthorizationManager | ||||
/** | /** | |||
* Sets this user's authentication credentials. | * Sets this user's authentication credentials. | |||
*/ | */ | |||
void setCredentials(const CredentialData& credentials); | void setCredentials(const CredentialData& credentials); | |||
/** | /** | |||
* Replaces any existing user role membership information with "roles". | * Replaces any existing user role membership information with the roles from "roles". | |||
*/ | */ | |||
void setRoleData(const std::vector<RoleData>& roles); | void setRoles(RoleNameIterator roles); | |||
/** | /** | |||
* Replaces any existing user privilege information with "privileges". | * Replaces any existing user privilege information with "privileges". | |||
*/ | */ | |||
void setPrivileges(const PrivilegeVector& privileges); | void setPrivileges(const PrivilegeVector& privileges); | |||
/** | /** | |||
* Adds the given role name to the list of roles of which this user is a member. | * Adds the given role name to the list of roles of which this user is a member. | |||
*/ | */ | |||
void addRole(const RoleName& role); | void addRole(const RoleName& role); | |||
/** | /** | |||
* Adds the given role names to the list of roles that this user belongs to. | * Adds the given role names to the list of roles that this user belongs to. | |||
*/ | */ | |||
void addRoles(const std::vector<RoleName>& roles); | void addRoles(const std::vector<RoleName>& roles); | |||
/** | /** | |||
* Adds the given role name to the list of roles that this user is allowed to delegate. | * Adds the given privilege to the list of privileges this user is authorized for. | |||
*/ | */ | |||
void addDelegatableRole(const RoleName& role); | void addPrivilege(const Privilege& privilege); | |||
/** | /** | |||
* Adds the given role names to the list of roles that this user is allowed to delegate. | * Adds the given privileges to the list of privileges this user is authorized for. | |||
*/ | */ | |||
void addDelegatableRoles(const std::vector<RoleName>& roles); | void addPrivileges(const PrivilegeVector& privileges); | |||
/** | /** | |||
* Adds the given privilege to the list of privileges this user is authorized for. | * Sets the schema version of documents used for building this user to 1, for V1 and V0 | |||
| * documents. The default value is 2, for V2 documents. | |||
*/ | */ | |||
void addPrivilege(const Privilege& privilege); | void setSchemaVersion1(); | |||
/** | /** | |||
* Adds the given privileges to the list of privileges this user is authorized for. | * Marks that this user object, generated from V1-schema user documents, | |||
| * has been probed for privileges on database "dbname", according to the V1 | |||
| * implicit privilege acquisition rules. | |||
*/ | */ | |||
void addPrivileges(const PrivilegeVector& privileges); | void markProbedV1(const StringData& dbname); | |||
/** | /** | |||
* Marks this instance of the User object as invalid, most likely because information about | * Marks this instance of the User object as invalid, most likely because information about | |||
* the user has been updated and needs to be reloaded from the AuthorizationManager. | * the user has been updated and needs to be reloaded from the AuthorizationManager. | |||
* | * | |||
* This method should *only* be called by the AuthorizationManager. | * This method should *only* be called by the AuthorizationManager. | |||
*/ | */ | |||
void invalidate(); | void invalidate(); | |||
/** | /** | |||
skipping to change at line 195 | skipping to change at line 202 | |||
*/ | */ | |||
void decrementRefCount(); | void decrementRefCount(); | |||
private: | private: | |||
UserName _name; | UserName _name; | |||
// Maps resource name to privilege on that resource | // Maps resource name to privilege on that resource | |||
ResourcePrivilegeMap _privileges; | ResourcePrivilegeMap _privileges; | |||
// Roles the user has privileges from and/or can delegate | // Roles the user has privileges from | |||
RoleDataMap _roles; | unordered_set<RoleName> _roles; | |||
// List of databases already probed for privilege information for this user. Only | ||||
// meaningful for V2.4-schema users. | ||||
std::vector<std::string> _probedDatabases; | ||||
// Credential information. | ||||
CredentialData _credentials; | CredentialData _credentials; | |||
// Schema version of user documents used to build this user. Valid values are | ||||
// AuthorizationManager::schemaVersion24 and schemaVersion26Final. | ||||
int _schemaVersion; | ||||
// _refCount and _isInvalidated are modified exclusively by the AuthorizationManager | // _refCount and _isInvalidated are modified exclusively by the AuthorizationManager | |||
// _isInvalidated can be read by any consumer of User, but _refCount can only be | // _isInvalidated can be read by any consumer of User, but _refCount can only be | |||
// meaningfully read by the AuthorizationManager, as _refCount is guarded by the AM's _lock | // meaningfully read by the AuthorizationManager, as _refCount is guarded by the AM's _lock | |||
uint32_t _refCount; | uint32_t _refCount; | |||
AtomicUInt32 _isValid; // Using as a boolean | AtomicUInt32 _isValid; // Using as a boolean | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 23 change blocks. | ||||
34 lines changed or deleted | 53 lines changed or added | |||
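The role-membership API above changes shape in this revision: the RoleDataMap and copyFrom() are removed, and the user's direct roles are now an unordered_set<RoleName> exposed through a RoleNameIterator, with clone() replacing copyFrom(). A minimal sketch of walking the new iterator follows; it assumes the MongoDB source tree for user.h and role_name.h, the more()/next() interface of RoleNameIterator is assumed rather than shown in this header, and the helper name is hypothetical.

    #include "mongo/db/auth/role_name.h"
    #include "mongo/db/auth/user.h"

    namespace mongo {

        // Hypothetical helper: walk the user's direct roles through the new
        // iterator-based accessor (the old code would have iterated the
        // removed RoleDataMap instead).
        bool userHasAnyRoleOnDB(const User& user, const StringData& dbname) {
            for (RoleNameIterator it = user.getRoles(); it.more();) {
                if (it.next().getDB() == dbname)
                    return true;
            }
            return false;
        }

    }  // namespace mongo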
user_document_parser.h | user_document_parser.h | |||
---|---|---|---|---|
skipping to change at line 45 | skipping to change at line 45 | |||
#include "mongo/db/jsobj.h" | #include "mongo/db/jsobj.h" | |||
namespace mongo { | namespace mongo { | |||
class V1UserDocumentParser { | class V1UserDocumentParser { | |||
MONGO_DISALLOW_COPYING(V1UserDocumentParser); | MONGO_DISALLOW_COPYING(V1UserDocumentParser); | |||
public: | public: | |||
V1UserDocumentParser() {} | V1UserDocumentParser() {} | |||
std::string extractUserNameFromUserDocument(const BSONObj& doc) const; | std::string extractUserNameFromUserDocument(const BSONObj& doc) const; | |||
Status initializeUserFromUserDocument(const BSONObj& userDoc, User* user) const; | ||||
Status initializeUserCredentialsFromUserDocument(User* user, | Status initializeUserCredentialsFromUserDocument(User* user, | |||
const BSONObj& privDoc) const; | const BSONObj& privDoc) const; | |||
Status initializeUserRolesFromUserDocument( | Status initializeUserRolesFromUserDocument( | |||
User* user, const BSONObj& privDoc, const StringData& dbname) const; | User* user, const BSONObj& privDoc, const StringData& dbname) const; | |||
}; | }; | |||
class V2UserDocumentParser { | class V2UserDocumentParser { | |||
MONGO_DISALLOW_COPYING(V2UserDocumentParser); | MONGO_DISALLOW_COPYING(V2UserDocumentParser); | |||
public: | public: | |||
V2UserDocumentParser() {} | V2UserDocumentParser() {} | |||
Status checkValidUserDocument(const BSONObj& doc) const; | Status checkValidUserDocument(const BSONObj& doc) const; | |||
Status initializeUserFromUserDocument(const BSONObj& doc, User* user); | ||||
/** | /** | |||
* Returns Status::OK() iff the given BSONObj describes a valid element from a roles array. | * Returns Status::OK() iff the given BSONObj describes a valid element from a roles array. | |||
*/ | */ | |||
static Status checkValidRoleObject(const BSONObj& roleObject); | static Status checkValidRoleObject(const BSONObj& roleObject); | |||
static Status parseRoleData(const BSONObj& roleObject, User::RoleData* result); | static Status parseRoleName(const BSONObj& roleObject, RoleName* result); | |||
static Status parseRoleVector(const BSONArray& rolesArray, | static Status parseRoleVector(const BSONArray& rolesArray, std::vector<RoleName>* result); | |||
std::vector<User::RoleData>* result); | | |||
std::string extractUserNameFromUserDocument(const BSONObj& doc) const; | std::string extractUserNameFromUserDocument(const BSONObj& doc) const; | |||
Status initializeUserFromUserDocument(const BSONObj& userDoc, User* user) const; | ||||
Status initializeUserCredentialsFromUserDocument(User* user, const BSONObj& privDoc) const; | Status initializeUserCredentialsFromUserDocument(User* user, const BSONObj& privDoc) const; | |||
Status initializeUserRolesFromUserDocument(const BSONObj& doc, User * user) const; | Status initializeUserRolesFromUserDocument(const BSONObj& doc, User * user) const; | |||
Status initializeUserPrivilegesFromUserDocument(const BSONObj& doc, User* user) const; | Status initializeUserPrivilegesFromUserDocument(const BSONObj& doc, User* user) const; | |||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 5 change blocks. | ||||
12 lines changed or deleted | 3 lines changed or added | |||
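Matching the User changes, the V2 parser now emits RoleName values directly: parseRoleData() becomes parseRoleName(), and parseRoleVector() fills a std::vector<RoleName>. A hedged sketch of validating and converting one roles-array element, assuming the MongoDB source tree; the wrapper function is made up for illustration.

    #include "mongo/db/auth/user_document_parser.h"
    #include "mongo/db/jsobj.h"

    namespace mongo {

        // Hypothetical wrapper: check a single roles-array element, then parse
        // it into a RoleName with the renamed static helper.
        Status roleNameFromRoleObject(const BSONObj& roleObject, RoleName* out) {
            Status status = V2UserDocumentParser::checkValidRoleObject(roleObject);
            if (!status.isOK())
                return status;
            return V2UserDocumentParser::parseRoleName(roleObject, out);
        }

    }  // namespace mongo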
user_management_commands_parser.h | user_management_commands_parser.h | |||
---|---|---|---|---|
skipping to change at line 51 | skipping to change at line 51 | |||
namespace mongo { | namespace mongo { | |||
namespace auth { | namespace auth { | |||
struct CreateOrUpdateUserArgs { | struct CreateOrUpdateUserArgs { | |||
UserName userName; | UserName userName; | |||
bool hasHashedPassword; | bool hasHashedPassword; | |||
std::string hashedPassword; | std::string hashedPassword; | |||
bool hasCustomData; | bool hasCustomData; | |||
BSONObj customData; | BSONObj customData; | |||
bool hasRoles; | bool hasRoles; | |||
std::vector<User::RoleData> roles; | std::vector<RoleName> roles; | |||
BSONObj writeConcern; | BSONObj writeConcern; | |||
CreateOrUpdateUserArgs() : | CreateOrUpdateUserArgs() : | |||
hasHashedPassword(false), hasCustomData(false), hasRoles(false) {} | hasHashedPassword(false), hasCustomData(false), hasRoles(false) {} | |||
}; | }; | |||
/** | /** | |||
* Takes a command object describing an invocation of the "createUser" or "updateUser" commands | * Takes a command object describing an invocation of the "createUser" or "updateUser" commands | |||
* (which command it is is specified in "cmdName") on the database "dbname", and parses out all | * (which command it is is specified in "cmdName") on the database "dbname", and parses out all | |||
* the arguments into the "parsedArgs" output param. | * the arguments into the "parsedArgs" output param. | |||
*/ | */ | |||
skipping to change at line 77 | skipping to change at line 77 | |||
/** | /** | |||
* Takes a command object describing an invocation of one of "grantRolesToUser", | * Takes a command object describing an invocation of one of "grantRolesToUser", | |||
* "revokeRolesFromUser", "grantDelegateRolesToUser", "revokeDelegateRolesFromUser", | * "revokeRolesFromUser", "grantDelegateRolesToUser", "revokeDelegateRolesFromUser", | |||
* "grantRolesToRole", and "revokeRolesFromRoles" (which command it is is specified in the | * "grantRolesToRole", and "revokeRolesFromRoles" (which command it is is specified in the | |||
* "cmdName" argument), and parses out (into the parsedName out param) the user/role name of | * "cmdName" argument), and parses out (into the parsedName out param) the user/role name of | |||
* the user/roles being modified, the roles being granted or revoked, and the write concern to | * the user/roles being modified, the roles being granted or revoked, and the write concern to | |||
* use. | * use. | |||
*/ | */ | |||
Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj, | Status parseRolePossessionManipulationCommands(const BSONObj& cmdObj, | |||
const StringData& cmdName, | const StringData& cmdName, | |||
const StringData& rolesFieldName, | ||||
const std::string& dbname, | const std::string& dbname, | |||
std::string* parsedName, | std::string* parsedName, | |||
vector<RoleName>* parsedRoleNames, | vector<RoleName>* parsedRoleNames, | |||
BSONObj* parsedWriteConcern); | BSONObj* parsedWriteConcern); | |||
/** | /** | |||
* Takes a command object describing an invocation of the "dropUser" co mmand and parses out | * Takes a command object describing an invocation of the "dropUser" co mmand and parses out | |||
* the UserName of the user to be removed and the writeConcern. | * the UserName of the user to be removed and the writeConcern. | |||
* Also validates the input and returns a non-ok Status if there is any thing wrong. | * Also validates the input and returns a non-ok Status if there is any thing wrong. | |||
*/ | */ | |||
Status parseAndValidateDropUserCommand(const BSONObj& cmdObj, | Status parseAndValidateDropUserCommand(const BSONObj& cmdObj, | |||
const std::string& dbname, | const std::string& dbname, | |||
UserName* parsedUserName, | UserName* parsedUserName, | |||
BSONObj* parsedWriteConcern); | BSONObj* parsedWriteConcern); | |||
/** | /** | |||
* Takes a command object describing an invocation of the "dropUsersFro mDatabase" command and | * Takes a command object describing an invocation of the "dropAllUsers FromDatabase" command and | |||
* parses out the write concern. | * parses out the write concern. | |||
* Also validates the input and returns a non-ok Status if there is any thing wrong. | * Also validates the input and returns a non-ok Status if there is any thing wrong. | |||
*/ | */ | |||
Status parseAndValidateDropUsersFromDatabaseCommand(const BSONObj& cmdO bj, | Status parseAndValidateDropAllUsersFromDatabaseCommand(const BSONObj& c mdObj, | |||
const std::string& dbname, | const std::string& dbname, | |||
BSONObj* parsedWrit eConcern); | BSONObj* parsedWrit eConcern); | |||
struct UsersInfoArgs { | struct UsersInfoArgs { | |||
std::vector<UserName> userNames; | std::vector<UserName> userNames; | |||
bool allForDB; | bool allForDB; | |||
bool showPrivileges; | bool showPrivileges; | |||
bool showCredentials; | bool showCredentials; | |||
UsersInfoArgs() : allForDB(false), showPrivileges(false), showCredentials(false) {} | UsersInfoArgs() : allForDB(false), showPrivileges(false), showCredentials(false) {} | |||
}; | }; | |||
/** | /** | |||
* Takes a command object describing an invocation of the "usersInfo" command and parses out | * Takes a command object describing an invocation of the "usersInfo" command and parses out | |||
* all the arguments into the "parsedArgs" output param. | * all the arguments into the "parsedArgs" output param. | |||
*/ | */ | |||
Status parseUsersInfoCommand(const BSONObj& cmdObj, | Status parseUsersInfoCommand(const BSONObj& cmdObj, | |||
const StringData& dbname, | const StringData& dbname, | |||
UsersInfoArgs* parsedArgs); | UsersInfoArgs* parsedArgs); | |||
struct RolesInfoArgs { | ||||
std::vector<RoleName> roleNames; | ||||
bool allForDB; | ||||
bool showPrivileges; | ||||
bool showBuiltinRoles; | ||||
RolesInfoArgs() : allForDB(false), showPrivileges(false), showBuiltinRoles(false) {} | ||||
}; | ||||
/** | /** | |||
* Takes a command object describing an invocation of the "rolesInfo" c ommand and parses out | * Takes a command object describing an invocation of the "rolesInfo" c ommand and parses out | |||
* the role names requested into the "parsedRoleNames" output param. | * the arguments into the "parsedArgs" output param. | |||
*/ | */ | |||
Status parseRolesInfoCommand(const BSONObj& cmdObj, | Status parseRolesInfoCommand(const BSONObj& cmdObj, | |||
const StringData& dbname, | const StringData& dbname, | |||
std::vector<RoleName>* parsedRoleNames); | RolesInfoArgs* parsedArgs); | |||
struct CreateOrUpdateRoleArgs { | struct CreateOrUpdateRoleArgs { | |||
RoleName roleName; | RoleName roleName; | |||
bool hasRoles; | bool hasRoles; | |||
std::vector<RoleName> roles; | std::vector<RoleName> roles; | |||
bool hasPrivileges; | bool hasPrivileges; | |||
PrivilegeVector privileges; | PrivilegeVector privileges; | |||
BSONObj writeConcern; | BSONObj writeConcern; | |||
CreateOrUpdateRoleArgs() : hasRoles(false), hasPrivileges(false) {} | CreateOrUpdateRoleArgs() : hasRoles(false), hasPrivileges(false) {} | |||
}; | }; | |||
skipping to change at line 168 | skipping to change at line 175 | |||
/** | /** | |||
* Takes a command object describing an invocation of the "dropRole" co mmand and parses out | * Takes a command object describing an invocation of the "dropRole" co mmand and parses out | |||
* the RoleName of the role to be removed and the writeConcern. | * the RoleName of the role to be removed and the writeConcern. | |||
*/ | */ | |||
Status parseDropRoleCommand(const BSONObj& cmdObj, | Status parseDropRoleCommand(const BSONObj& cmdObj, | |||
const std::string& dbname, | const std::string& dbname, | |||
RoleName* parsedRoleName, | RoleName* parsedRoleName, | |||
BSONObj* parsedWriteConcern); | BSONObj* parsedWriteConcern); | |||
/** | /** | |||
* Takes a command object describing an invocation of the "dropRolesFro | * Takes a command object describing an invocation of the "dropAllRoles | |||
mDatabase" command and | FromDatabase" command and | |||
* parses out the write concern. | ||||
*/ | ||||
Status parseDropAllRolesFromDatabaseCommand(const BSONObj& cmdObj, | ||||
const std::string& dbname, | ||||
BSONObj* parsedWriteConcern); | ||||
/** | ||||
* Takes a command object describing an invocation of the "authSchemaUp | ||||
gradeStep" command and | ||||
* parses out the write concern. | * parses out the write concern. | |||
*/ | */ | |||
Status parseDropRolesFromDatabaseCommand(const BSONObj& cmdObj, | Status parseAuthSchemaUpgradeStepCommand(const BSONObj& cmdObj, | |||
const std::string& dbname, | const std::string& dbname, | |||
BSONObj* parsedWriteConcern); | BSONObj* parsedWriteConcern); | |||
/** | /** | |||
* Parses the privileges described in "privileges" into a vector of Pri vilege objects. | * Parses the privileges described in "privileges" into a vector of Pri vilege objects. | |||
* Returns Status::OK() upon successfully parsing all the elements of "privileges". | * Returns Status::OK() upon successfully parsing all the elements of "privileges". | |||
*/ | */ | |||
Status parseAndValidatePrivilegeArray(const BSONArray& privileges, | Status parseAndValidatePrivilegeArray(const BSONArray& privileges, | |||
PrivilegeVector* parsedPrivileges); | PrivilegeVector* parsedPrivileges); | |||
/** | /** | |||
End of changes. 9 change blocks. | ||||
9 lines changed or deleted | 27 lines changed or added | |||
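rolesInfo parsing now returns a RolesInfoArgs struct instead of a bare vector of RoleName, mirroring UsersInfoArgs. A hedged sketch of the new call shape, assuming the MongoDB source tree; the wrapper name is illustrative only and the command object contents are whatever the caller received.

    #include "mongo/db/auth/user_management_commands_parser.h"
    #include "mongo/db/jsobj.h"

    namespace mongo {

        // Hypothetical wrapper: parse a rolesInfo invocation into the new
        // RolesInfoArgs struct (role names plus the show* flags).
        Status parseRolesInfoExample(const BSONObj& cmdObj, const StringData& dbname) {
            auth::RolesInfoArgs args;
            Status status = auth::parseRolesInfoCommand(cmdObj, dbname, &args);
            if (!status.isOK())
                return status;
            // args.roleNames, args.showPrivileges and args.showBuiltinRoles are
            // now populated for the command implementation to act on.
            return Status::OK();
        }

    }  // namespace mongo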
working_set.h | working_set.h | |||
---|---|---|---|---|
skipping to change at line 80 | skipping to change at line 80 | |||
* The DiskLoc in WSM 'i' was invalidated while being processed. Any predicates over the | * The DiskLoc in WSM 'i' was invalidated while being processed. Any predicates over the | |||
* WSM could not be fully evaluated, so the WSM may or may not satisfy them. As such, if we | * WSM could not be fully evaluated, so the WSM may or may not satisfy them. As such, if we | |||
* wish to output the WSM, we must do some clean-up work later. Adds the WSM with id 'i' to | * wish to output the WSM, we must do some clean-up work later. Adds the WSM with id 'i' to | |||
* the list of flagged WSIDs. | * the list of flagged WSIDs. | |||
* | * | |||
* The WSM must be in the state OWNED_OBJ. | * The WSM must be in the state OWNED_OBJ. | |||
*/ | */ | |||
void flagForReview(const WorkingSetID& i); | void flagForReview(const WorkingSetID& i); | |||
/** | /** | |||
* Return a vector of all WSIDs passed to flagForReview. | * Return a set of all WSIDs passed to flagForReview. | |||
*/ | */ | |||
const vector<WorkingSetID>& getFlagged() const; | const unordered_set<WorkingSetID>& getFlagged() const; | |||
/** | ||||
* Return true if the provided ID is flagged. | ||||
*/ | ||||
bool isFlagged(WorkingSetID id) const; | ||||
private: | private: | |||
typedef unordered_map<WorkingSetID, WorkingSetMember*> DataMap; | typedef unordered_map<WorkingSetID, WorkingSetMember*> DataMap; | |||
DataMap _data; | DataMap _data; | |||
// The WorkingSetID returned by the next call to allocate(). Should refer to the next valid | // The WorkingSetID returned by the next call to allocate(). Should refer to the next valid | |||
// ID. IDs allocated contiguously. Should never point at an in-use ID. | // ID. IDs allocated contiguously. Should never point at an in-use ID. | |||
WorkingSetID _nextId; | WorkingSetID _nextId; | |||
// All WSIDs invalidated during evaluation of a predicate (AND). | // All WSIDs invalidated during evaluation of a predicate (AND). | |||
vector<WorkingSetID> _flagged; | unordered_set<WorkingSetID> _flagged; | |||
}; | }; | |||
/** | /** | |||
* The key data extracted from an index. Keeps track of both the key (currently a BSONObj) and | * The key data extracted from an index. Keeps track of both the key (currently a BSONObj) and | |||
* the index that provided the key. The index key pattern is required to correctly interpret | * the index that provided the key. The index key pattern is required to correctly interpret | |||
* the key. | * the key. | |||
*/ | */ | |||
struct IndexKeyDatum { | struct IndexKeyDatum { | |||
IndexKeyDatum(const BSONObj& keyPattern, const BSONObj& key) : indexKeyPattern(keyPattern), | IndexKeyDatum(const BSONObj& keyPattern, const BSONObj& key) : indexKeyPattern(keyPattern), | |||
keyData(key) { } | keyData(key) { } | |||
// This is not owned and points into the IndexDescriptor's data. | // This is not owned and points into the IndexDescriptor's data. | |||
BSONObj indexKeyPattern; | BSONObj indexKeyPattern; | |||
// This is the BSONObj for the key that we put into the index. Owned by us. | // This is the BSONObj for the key that we put into the index. Owned by us. | |||
BSONObj keyData; | BSONObj keyData; | |||
}; | }; | |||
/** | /** | |||
* What types of computed data can we have? | ||||
*/ | ||||
enum WorkingSetComputedDataType { | ||||
WSM_COMPUTED_TEXT_SCORE = 0, | ||||
WSM_COMPUTED_GEO_DISTANCE = 1, | ||||
}; | ||||
/** | ||||
* Data that is a computed function of a WSM. | ||||
*/ | ||||
class WorkingSetComputedData { | ||||
public: | ||||
WorkingSetComputedData(const WorkingSetComputedDataType type) : _type(type) { } | ||||
virtual ~WorkingSetComputedData() { } | ||||
WorkingSetComputedDataType type() const { return _type; } | ||||
virtual WorkingSetComputedData* clone() const = 0; | ||||
private: | ||||
WorkingSetComputedDataType _type; | ||||
}; | ||||
/** | ||||
* The type of the data passed between query stages. In particular: | * The type of the data passed between query stages. In particular: | |||
* | * | |||
* Index scan stages return a WorkingSetMember in the LOC_AND_IDX state. | * Index scan stages return a WorkingSetMember in the LOC_AND_IDX state. | |||
* | * | |||
* Collection scan stages return the LOC_AND_UNOWNED_OBJ state. | * Collection scan stages return the LOC_AND_UNOWNED_OBJ state. | |||
* | * | |||
* A WorkingSetMember may have any of the data above. | * A WorkingSetMember may have any of the data above. | |||
*/ | */ | |||
struct WorkingSetMember { | struct WorkingSetMember { | |||
WorkingSetMember(); | WorkingSetMember(); | |||
~WorkingSetMember(); | ||||
enum MemberState { | enum MemberState { | |||
// Initial state. | // Initial state. | |||
INVALID, | INVALID, | |||
// Data is from 1 or more indices. | // Data is from 1 or more indices. | |||
LOC_AND_IDX, | LOC_AND_IDX, | |||
// Data is from a collection scan, or data is from an index scan and was fetched. | // Data is from a collection scan, or data is from an index scan and was fetched. | |||
LOC_AND_UNOWNED_OBJ, | LOC_AND_UNOWNED_OBJ, | |||
// DiskLoc has been invalidated, or the obj doesn't correspond to an on-disk document | // DiskLoc has been invalidated, or the obj doesn't correspond to an on-disk document | |||
// anymore (e.g. is a computed expression). | // anymore (e.g. is a computed expression). | |||
OWNED_OBJ, | OWNED_OBJ, | |||
}; | }; | |||
// | ||||
// Core attributes | ||||
// | ||||
DiskLoc loc; | DiskLoc loc; | |||
BSONObj obj; | BSONObj obj; | |||
vector<IndexKeyDatum> keyData; | vector<IndexKeyDatum> keyData; | |||
MemberState state; | MemberState state; | |||
bool hasLoc() const; | bool hasLoc() const; | |||
bool hasObj() const; | bool hasObj() const; | |||
bool hasOwnedObj() const; | bool hasOwnedObj() const; | |||
bool hasUnownedObj() const; | bool hasUnownedObj() const; | |||
// | ||||
// Computed data | ||||
// | ||||
bool hasComputed(const WorkingSetComputedDataType type) const; | ||||
const WorkingSetComputedData* getComputed(const WorkingSetComputedDataType type) const; | ||||
void addComputed(WorkingSetComputedData* data); | ||||
/** | /** | |||
* getFieldDotted uses its state (obj or index data) to produce the field with the provided | * getFieldDotted uses its state (obj or index data) to produce the field with the provided | |||
* name. | * name. | |||
* | * | |||
* Returns true if the element is in an index key or in an (owned or unowned) | * Returns true if the element is in an index key or in an (owned or unowned) | |||
* object. *out is set to the element if so. | * object. *out is set to the element if so. | |||
* | * | |||
* Returns false otherwise. Returning false indicates a query planning error. | * Returns false otherwise. Returning false indicates a query planning error. | |||
*/ | */ | |||
bool getFieldDotted(const string& field, BSONElement* out) const; | bool getFieldDotted(const string& field, BSONElement* out) const; | |||
private: | ||||
unordered_map<size_t, WorkingSetComputedData*> _computed; | ||||
}; | }; | |||
} // namespace mongo | } // namespace mongo | |||
End of changes. 8 change blocks. | ||||
3 lines changed or deleted | 50 lines changed or added | |||
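The computed-data hooks added above (hasComputed/getComputed/addComputed plus the WorkingSetComputedData base with its virtual clone()) let a stage attach derived values such as a text score or geo distance to a member. A hedged sketch of a subclass follows, assuming the MongoDB source tree; TextScoreComputedData, the helper function, and the include path are illustrative, and the ownership comment is an assumption suggested by the destructor added in this revision.

    #include "mongo/db/exec/working_set.h"  // include path assumed

    namespace mongo {

        // Hypothetical subclass carrying a text score under the new
        // WSM_COMPUTED_TEXT_SCORE tag.
        class TextScoreComputedData : public WorkingSetComputedData {
        public:
            explicit TextScoreComputedData(double score)
                : WorkingSetComputedData(WSM_COMPUTED_TEXT_SCORE), _score(score) { }

            double getScore() const { return _score; }

            virtual WorkingSetComputedData* clone() const {
                return new TextScoreComputedData(_score);
            }

        private:
            double _score;
        };

        void tagWithTextScore(WorkingSetMember* member, double score) {
            // Presumably transfers ownership to the member; the destructor added
            // in this revision suggests the member frees its computed data.
            member->addComputed(new TextScoreComputedData(score));
        }

    }  // namespace mongo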
write_commands.h | write_commands.h | |||
---|---|---|---|---|
skipping to change at line 34 | skipping to change at line 34 | |||
* delete this exception statement from your version. If you delete this | * delete this exception statement from your version. If you delete this | |||
* exception statement from all source files in the program, then also delete | * exception statement from all source files in the program, then also delete | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <string> | #include <string> | |||
#include "mongo/db/commands.h" | #include "mongo/db/commands.h" | |||
#include "mongo/s/batched_command_request.h" | #include "mongo/db/client_basic.h" | |||
#include "mongo/s/write_ops/batched_command_request.h" | ||||
namespace mongo { | namespace mongo { | |||
/** | /** | |||
* Base class for write commands. Write commands support batch writes and write concern, | * Base class for write commands. Write commands support batch writes and write concern, | |||
* and return per-item error information. All write commands use the (non-virtual) entry | * and return per-item error information. All write commands use the (non-virtual) entry | |||
* point WriteCmd::run(). | * point WriteCmd::run(). | |||
* | * | |||
* Command parsing is performed by the WriteBatch class (command syntax documented there), | * Command parsing is performed by the WriteBatch class (command syntax documented there), | |||
* and command execution is performed by the WriteBatchExecutor class. | * and command execution is performed by the WriteBatchExecutor class. | |||
skipping to change at line 57 | skipping to change at line 58 | |||
MONGO_DISALLOW_COPYING(WriteCmd); | MONGO_DISALLOW_COPYING(WriteCmd); | |||
public: | public: | |||
virtual ~WriteCmd() {} | virtual ~WriteCmd() {} | |||
protected: | protected: | |||
/** | /** | |||
* Instantiates a command that can be invoked by "name", which will be capable of issuing | * Instantiates a command that can be invoked by "name", which will be capable of issuing | |||
* write batches of type "writeType", and will require privilege "action" to run. | * write batches of type "writeType", and will require privilege "action" to run. | |||
*/ | */ | |||
WriteCmd( const StringData& name, | WriteCmd( const StringData& name, BatchedCommandRequest::BatchType writeType ); | |||
BatchedCommandRequest::BatchType writeType, | | |||
ActionType action ); | | |||
private: | private: | |||
virtual bool logTheOp(); | virtual bool logTheOp(); | |||
virtual bool slaveOk() const; | virtual bool slaveOk() const; | |||
virtual LockType locktype() const; | virtual LockType locktype() const; | |||
virtual void addRequiredPrivileges(const std::string& dbname, | virtual Status checkAuthForCommand( ClientBasic* client, | |||
const BSONObj& cmdObj, | const std::string& dbname, | |||
std::vector<Privilege>* out); | const BSONObj& cmdObj ); | |||
virtual bool shouldAffectCommandCounter() const; | virtual bool shouldAffectCommandCounter() const; | |||
// Write command entry point. | // Write command entry point. | |||
virtual bool run(const string& dbname, | virtual bool run(const string& dbname, | |||
BSONObj& cmdObj, | BSONObj& cmdObj, | |||
int options, | int options, | |||
string& errmsg, | string& errmsg, | |||
BSONObjBuilder& result, | BSONObjBuilder& result, | |||
bool fromRepl); | bool fromRepl); | |||
// Privilege required to execute command. | ||||
ActionType _action; | ||||
// Type of batch (e.g. insert). | // Type of batch (e.g. insert). | |||
BatchedCommandRequest::BatchType _writeType; | BatchedCommandRequest::BatchType _writeType; | |||
}; | }; | |||
class CmdInsert : public WriteCmd { | class CmdInsert : public WriteCmd { | |||
MONGO_DISALLOW_COPYING(CmdInsert); | MONGO_DISALLOW_COPYING(CmdInsert); | |||
public: | public: | |||
CmdInsert(); | CmdInsert(); | |||
private: | private: | |||
End of changes. 4 change blocks. | ||||
10 lines changed or deleted | 7 lines changed or added | |||
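The auth hook on WriteCmd moves from addRequiredPrivileges(), which appended Privilege objects to an output vector, to checkAuthForCommand(), which returns a Status. The sketch below only shows the general shape of a Status-returning check as a free function; it is not the write_commands.cpp implementation, and the parse check and function name are invented for illustration.

    #include <string>
    #include "mongo/base/status.h"
    #include "mongo/db/jsobj.h"

    namespace mongo {

        // Illustrative only: reject an obviously malformed request up front and
        // otherwise report success, the way a Status-returning auth hook can
        // fold validation and authorization into a single result.
        Status exampleCheckAuthForWrite( const std::string& dbname,
                                         const BSONObj& cmdObj ) {
            if ( cmdObj.firstElement().eoo() ) {
                return Status( ErrorCodes::FailedToParse,
                               "empty write command for " + dbname );
            }
            // A real implementation would consult the client's
            // AuthorizationSession for the privileges implied by the batch.
            return Status::OK();
        }

    }  // namespace mongo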
write_op.h | write_op.h | |||
---|---|---|---|---|
skipping to change at line 36 | skipping to change at line 36 | |||
* it in the license file. | * it in the license file. | |||
*/ | */ | |||
#pragma once | #pragma once | |||
#include <boost/scoped_ptr.hpp> | #include <boost/scoped_ptr.hpp> | |||
#include <vector> | #include <vector> | |||
#include "mongo/base/string_data.h" | #include "mongo/base/string_data.h" | |||
#include "mongo/bson/bsonobj.h" | #include "mongo/bson/bsonobj.h" | |||
#include "mongo/s/batched_error_detail.h" | ||||
#include "mongo/s/batched_command_request.h" | ||||
#include "mongo/s/ns_targeter.h" | #include "mongo/s/ns_targeter.h" | |||
#include "mongo/s/write_ops/batched_error_detail.h" | ||||
#include "mongo/s/write_ops/batched_command_request.h" | ||||
namespace mongo { | namespace mongo { | |||
struct TargetedWrite; | struct TargetedWrite; | |||
struct ChildWriteOp; | struct ChildWriteOp; | |||
enum WriteOpState { | enum WriteOpState { | |||
// Item is ready to be targeted | // Item is ready to be targeted | |||
WriteOpState_Ready, | WriteOpState_Ready, | |||
skipping to change at line 131 | skipping to change at line 131 | |||
* Returns !OK if the targeting process itself fails | * Returns !OK if the targeting process itself fails | |||
* (no TargetedWrites will be added, state unchanged) | * (no TargetedWrites will be added, state unchanged) | |||
*/ | */ | |||
Status targetWrites( const NSTargeter& targeter, | Status targetWrites( const NSTargeter& targeter, | |||
std::vector<TargetedWrite*>* targetedWrites ); | std::vector<TargetedWrite*>* targetedWrites ); | |||
/** | /** | |||
* Resets the state of this write op to _Ready and stops waiting for any outstanding | * Resets the state of this write op to _Ready and stops waiting for any outstanding | |||
* TargetedWrites. Optional error can be provided for reporting. | * TargetedWrites. Optional error can be provided for reporting. | |||
* | * | |||
* Can only be called when state is _Pending and no TargetedWrites have been noted. | * Can only be called when state is _Pending and no TargetedWrites have been noted, or is a | |||
| * no-op if called when the state is still _Ready (and therefore no writes are pending). | |||
*/ | */ | |||
void cancelWrites( const BatchedErrorDetail* why ); | void cancelWrites( const BatchedErrorDetail* why ); | |||
/** | /** | |||
* Marks the targeted write as finished for this write op. | * Marks the targeted write as finished for this write op. | |||
* | * | |||
* One of noteWriteComplete or noteWriteError should be called exactly once for every | * One of noteWriteComplete or noteWriteError should be called exactly once for every | |||
* TargetedWrite. | * TargetedWrite. | |||
*/ | */ | |||
void noteWriteComplete( const TargetedWrite& targetedWrite ); | void noteWriteComplete( const TargetedWrite& targetedWrite ); | |||
End of changes. 3 change blocks. | ||||
4 lines changed or deleted | 6 lines changed or added | |||
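cancelWrites() is relaxed in this revision: besides the _Pending-with-no-noted-writes case, it is now also a no-op while the op is still _Ready. A hedged sketch of the target-then-cancel flow, assuming the MongoDB source tree; the include path follows the new write_ops layout, the function is illustrative, and the caller's handling of TargetedWrite lifetimes is an assumption rather than the batch executor's actual code.

    #include <cstddef>
    #include <vector>
    #include "mongo/s/write_ops/write_op.h"  // path assumed from the new layout

    namespace mongo {

        // Illustrative flow: target one write op, then roll it back to _Ready
        // (for example if the dispatch layer decides to retarget the batch).
        Status targetThenCancel( WriteOp* writeOp, const NSTargeter& targeter ) {
            std::vector<TargetedWrite*> targeted;
            Status status = writeOp->targetWrites( targeter, &targeted );
            if ( !status.isOK() ) {
                // Per the header, targeting failure adds no TargetedWrites and
                // leaves the state unchanged.
                return status;
            }
            // No child writes have been noted yet, so cancelling is allowed.
            writeOp->cancelWrites( NULL /* no error detail to report */ );
            // A real caller must also manage the TargetedWrite objects in
            // 'targeted' according to the batch executor's ownership rules.
            return Status::OK();
        }

    }  // namespace mongo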