2d.h   2d.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#include "mongo/db/exec/2dcommon.h" #include "mongo/db/exec/2dcommon.h"
#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stage.h"
#include "mongo/db/geo/geoquery.h"
#pragma once #pragma once
namespace mongo { namespace mongo {
struct TwoDParams { struct TwoDParams {
TwoDParams() : filter(NULL) { } TwoDParams() : filter(NULL) { }
GeoQuery gq; GeoQuery gq;
MatchExpression* filter; MatchExpression* filter;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
skipping to change at line 54 skipping to change at line 55
class TwoD : public PlanStage { class TwoD : public PlanStage {
public: public:
TwoD(const TwoDParams& params, WorkingSet* ws); TwoD(const TwoDParams& params, WorkingSet* ws);
virtual ~TwoD(); virtual ~TwoD();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
scoped_ptr<mongo::twod_exec::GeoBrowse> _browse; scoped_ptr<mongo::twod_exec::GeoBrowse> _browse;
TwoDParams _params; TwoDParams _params;
WorkingSet* _workingSet; WorkingSet* _workingSet;
bool _initted; bool _initted;
IndexDescriptor* _descriptor; IndexDescriptor* _descriptor;
TwoDAccessMethod* _am; TwoDAccessMethod* _am;
CommonStats _commonStats; CommonStats _commonStats;
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 2d_access_method.h   2d_access_method.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/index/2d_common.h" #include "mongo/db/index/2d_common.h"
#include "mongo/db/index/btree_access_method_internal.h" #include "mongo/db/index/btree_based_access_method.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class IndexCatalogEntry;
class IndexCursor; class IndexCursor;
class IndexDescriptor; class IndexDescriptor;
struct TwoDIndexingParams; struct TwoDIndexingParams;
namespace twod_exec { namespace twod_exec {
class GeoPoint; class GeoPoint;
class GeoAccumulator; class GeoAccumulator;
class GeoBrowse; class GeoBrowse;
class GeoHopper; class GeoHopper;
class GeoSearch; class GeoSearch;
skipping to change at line 70 skipping to change at line 71
class GeoCircleBrowse; class GeoCircleBrowse;
class GeoBoxBrowse; class GeoBoxBrowse;
class GeoPolygonBrowse; class GeoPolygonBrowse;
class TwoDGeoNearRunner; class TwoDGeoNearRunner;
} }
class TwoDAccessMethod : public BtreeBasedAccessMethod { class TwoDAccessMethod : public BtreeBasedAccessMethod {
public: public:
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
using BtreeBasedAccessMethod::_interface; using BtreeBasedAccessMethod::_interface;
using BtreeBasedAccessMethod::_ordering;
TwoDAccessMethod(IndexDescriptor* descriptor); TwoDAccessMethod(IndexCatalogEntry* btreeState);
virtual ~TwoDAccessMethod() { } virtual ~TwoDAccessMethod() { }
virtual Status newCursor(IndexCursor** out);
private: private:
friend class TwoDIndexCursor; friend class TwoDIndexCursor;
friend class twod_internal::GeoPoint; friend class twod_internal::GeoPoint;
friend class twod_internal::GeoAccumulator; friend class twod_internal::GeoAccumulator;
friend class twod_internal::GeoBrowse; friend class twod_internal::GeoBrowse;
friend class twod_internal::GeoHopper; friend class twod_internal::GeoHopper;
friend class twod_internal::GeoSearch; friend class twod_internal::GeoSearch;
friend class twod_internal::GeoCircleBrowse; friend class twod_internal::GeoCircleBrowse;
friend class twod_internal::GeoBoxBrowse; friend class twod_internal::GeoBoxBrowse;
friend class twod_internal::GeoPolygonBrowse; friend class twod_internal::GeoPolygonBrowse;
skipping to change at line 100 skipping to change at line 98
friend class twod_exec::GeoBrowse; friend class twod_exec::GeoBrowse;
friend class twod_exec::GeoHopper; friend class twod_exec::GeoHopper;
friend class twod_exec::GeoSearch; friend class twod_exec::GeoSearch;
friend class twod_exec::GeoCircleBrowse; friend class twod_exec::GeoCircleBrowse;
friend class twod_exec::GeoBoxBrowse; friend class twod_exec::GeoBoxBrowse;
friend class twod_exec::GeoPolygonBrowse; friend class twod_exec::GeoPolygonBrowse;
friend class twod_internal::TwoDGeoNearRunner; friend class twod_internal::TwoDGeoNearRunner;
BtreeInterface* getInterface() { return _interface; } BtreeInterface* getInterface() { return _interface; }
IndexDescriptor* getDescriptor() { return _descriptor; } const IndexDescriptor* getDescriptor() { return _descriptor; }
TwoDIndexingParams& getParams() { return _params; } TwoDIndexingParams& getParams() { return _params; }
// This really gets the 'locs' from the provided obj. // This really gets the 'locs' from the provided obj.
void getKeys(const BSONObj& obj, vector<BSONObj>& locs) const; void getKeys(const BSONObj& obj, vector<BSONObj>& locs) const;
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// This is called by the two getKeys above. // This is called by the two getKeys above.
void getKeys(const BSONObj &obj, BSONObjSet* keys, vector<BSONObj>* locs) const; void getKeys(const BSONObj &obj, BSONObjSet* keys, vector<BSONObj>* locs) const;
 End of changes. 6 change blocks. 
6 lines changed or deleted 4 lines changed or added


 2dcommon.h   2dcommon.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/index_scan.h"
#include "mongo/db/geo/core.h" #include "mongo/db/geo/core.h"
#include "mongo/db/geo/geonear.h"
#include "mongo/db/geo/hash.h" #include "mongo/db/geo/hash.h"
#include "mongo/db/geo/shapes.h" #include "mongo/db/geo/shapes.h"
#include "mongo/db/pdfile.h" #include "mongo/db/pdfile.h"
#include "mongo/db/index/2d_access_method.h" #include "mongo/db/index/2d_access_method.h"
#pragma once #pragma once
namespace mongo { namespace mongo {
namespace twod_exec { namespace twod_exec {
skipping to change at line 135 skipping to change at line 134
static bool hasPrefix(const BSONObj& key, const GeoHash& hash); static bool hasPrefix(const BSONObj& key, const GeoHash& hash);
void advance(); void advance();
void prepareToYield() { _scan->prepareToYield(); } void prepareToYield() { _scan->prepareToYield(); }
void recoverFromYield() { _scan->recoverFromYield(); } void recoverFromYield() { _scan->recoverFromYield(); }
// Returns the min and max keys which bound a particular location. // Returns the min and max keys which bound a particular location.
// The only time these may be equal is when we actually equal the l ocation // The only time these may be equal is when we actually equal the l ocation
// itself, otherwise our expanding algorithm will fail. // itself, otherwise our expanding algorithm will fail.
static bool initial(IndexDescriptor* descriptor, const TwoDIndexing static bool initial(const IndexDescriptor* descriptor, const TwoDIn
Params& params, dexingParams& params,
BtreeLocation& min, BtreeLocation& max, GeoHas BtreeLocation& min, BtreeLocation& max, GeoHash
h start); start);
}; };
// //
// Execution // Execution
// //
class GeoAccumulator { class GeoAccumulator {
public: public:
GeoAccumulator(TwoDAccessMethod* accessMethod, MatchExpression* fil GeoAccumulator(TwoDAccessMethod* accessMethod, MatchExpression* fil
ter, bool uniqueDocs, ter);
bool needDistance);
virtual ~GeoAccumulator(); virtual ~GeoAccumulator();
enum KeyResult { BAD, BORDER, GOOD }; enum KeyResult { BAD, BORDER, GOOD };
virtual void add(const GeoIndexEntry& node); virtual void add(const GeoIndexEntry& node);
long long found() const { return _found; } long long found() const { return _found; }
virtual void getPointsFor(const BSONObj& key, const BSONObj& obj, virtual void getPointsFor(const BSONObj& key, const BSONObj& obj,
skipping to change at line 175 skipping to change at line 173
shared_ptr<GeoHashConverter> _converter; shared_ptr<GeoHashConverter> _converter;
map<DiskLoc, bool> _matched; map<DiskLoc, bool> _matched;
MatchExpression* _filter; MatchExpression* _filter;
long long _lookedAt; long long _lookedAt;
long long _matchesPerfd; long long _matchesPerfd;
long long _objectsLoaded; long long _objectsLoaded;
long long _pointsLoaded; long long _pointsLoaded;
long long _found; long long _found;
bool _uniqueDocs;
bool _needDistance;
}; };
class GeoBrowse : public GeoAccumulator { class GeoBrowse : public GeoAccumulator {
public: public:
// The max points which should be added to an expanding box at one time // The max points which should be added to an expanding box at one time
static const int maxPointsHeuristic = 50; static const int maxPointsHeuristic = 50;
// Expand states // Expand states
enum State { enum State {
START, START,
DOING_EXPAND, DOING_EXPAND,
DONE_NEIGHBOR, DONE_NEIGHBOR,
DONE DONE
} _state; } _state;
GeoBrowse(TwoDAccessMethod* accessMethod, string type, MatchExpress GeoBrowse(TwoDAccessMethod* accessMethod, string type, MatchExpress
ion* filter, ion* filter);
bool uniqueDocs = true, bool needDistance = false);
virtual bool ok(); virtual bool ok();
virtual bool advance(); virtual bool advance();
virtual void noteLocation(); virtual void noteLocation();
/* called before query getmore block is iterated */ /* called before query getmore block is iterated */
virtual void checkLocation(); virtual void checkLocation();
virtual Record* _current(); virtual Record* _current();
virtual BSONObj current(); virtual BSONObj current();
skipping to change at line 241 skipping to change at line 235
virtual int addSpecific(const GeoIndexEntry& node, const Point& key P, bool onBounds, virtual int addSpecific(const GeoIndexEntry& node, const Point& key P, bool onBounds,
double keyD, bool potentiallyNewDoc); double keyD, bool potentiallyNewDoc);
virtual long long nscanned(); virtual long long nscanned();
virtual void explainDetails(BSONObjBuilder& b); virtual void explainDetails(BSONObjBuilder& b);
void notePrefix() { _expPrefixes.push_back(_prefix); } void notePrefix() { _expPrefixes.push_back(_prefix); }
void invalidate(const DiskLoc& dl); /**
* Returns true if the result was actually invalidated, false other
wise.
*/
bool invalidate(const DiskLoc& dl);
string _type; string _type;
list<GeoPoint> _stack; list<GeoPoint> _stack;
set<BSONObj> _seenIds; set<BSONObj> _seenIds;
GeoPoint _cur; GeoPoint _cur;
bool _firstCall; bool _firstCall;
long long _nscanned; long long _nscanned;
skipping to change at line 272 skipping to change at line 269
list<string> _fringe; list<string> _fringe;
int recurseDepth; int recurseDepth;
Box _centerBox; Box _centerBox;
// Start and end of our search range in the current box // Start and end of our search range in the current box
BtreeLocation _min; BtreeLocation _min;
BtreeLocation _max; BtreeLocation _max;
shared_ptr<GeoHash> _expPrefix; shared_ptr<GeoHash> _expPrefix;
mutable vector<GeoHash> _expPrefixes; mutable vector<GeoHash> _expPrefixes;
IndexDescriptor* _descriptor; const IndexDescriptor* _descriptor;
shared_ptr<GeoHashConverter> _converter; shared_ptr<GeoHashConverter> _converter;
TwoDIndexingParams _params; TwoDIndexingParams _params;
}; };
} // namespace twod_exec } // namespace twod_exec
} // namespace mongo } // namespace mongo
 End of changes. 7 change blocks. 
16 lines changed or deleted 14 lines changed or added


 2dnear.h   2dnear.h 
skipping to change at line 48 skipping to change at line 48
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
namespace mongo { namespace mongo {
struct TwoDNearParams { struct TwoDNearParams {
NearQuery nearQuery; NearQuery nearQuery;
string ns; string ns;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
MatchExpression* filter; MatchExpression* filter;
int numWanted; int numWanted;
bool uniqueDocs; bool addPointMeta;
bool addDistMeta;
}; };
struct Result { struct Result {
Result(WorkingSetID wsid, double dist) : id(wsid), distance(dist) { } Result(WorkingSetID wsid, double dist) : id(wsid), distance(dist) { }
bool operator<(const Result& other) const { bool operator<(const Result& other) const {
// We want increasing distance, not decreasing, so we reverse t he <. // We want increasing distance, not decreasing, so we reverse t he <.
return distance > other.distance; return distance > other.distance;
} }
WorkingSetID id; WorkingSetID id;
double distance; double distance;
}; };
class TwoDNear : public PlanStage { class TwoDNear : public PlanStage {
public: public:
TwoDNear(const TwoDNearParams& params, WorkingSet* ws); TwoDNear(const TwoDNearParams& params, WorkingSet* ws);
virtual ~TwoDNear(); virtual ~TwoDNear();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
WorkingSet* _workingSet; WorkingSet* _workingSet;
MatchExpression* _filter;
// Stats // Stats
CommonStats _commonStats; CommonStats _commonStats;
TwoDNearStats _specificStats;
// We compute an annulus of results and cache it here. // We compute an annulus of results and cache it here.
priority_queue<Result> _results; priority_queue<Result> _results;
// For fast invalidation. Perhaps not worth it. // For fast invalidation. Perhaps not worth it.
// //
// Multi-location docs mean that this is not one diskloc -> one WSI D but one DiskLoc -> many // Multi-location docs mean that this is not one diskloc -> one WSI D but one DiskLoc -> many
// WSIDs. // WSIDs.
multimap<DiskLoc, WorkingSetID> _invalidationMap; multimap<DiskLoc, WorkingSetID> _invalidationMap;
skipping to change at line 113 skipping to change at line 114
class GeoHopper : public GeoBrowse { class GeoHopper : public GeoBrowse {
public: public:
typedef multiset<GeoPoint> Holder; typedef multiset<GeoPoint> Holder;
GeoHopper(TwoDAccessMethod* accessMethod, GeoHopper(TwoDAccessMethod* accessMethod,
unsigned max, unsigned max,
const Point& n, const Point& n,
MatchExpression* filter, MatchExpression* filter,
double maxDistance = numeric_limits<double>::max(), double maxDistance = numeric_limits<double>::max(),
GeoDistType type = GEO_PLANE, GeoDistType type = GEO_PLANE);
bool uniqueDocs = false,
bool needDistance = true);
virtual KeyResult approxKeyCheck(const Point& p, double& d); virtual KeyResult approxKeyCheck(const Point& p, double& d);
virtual bool exactDocCheck(const Point& p, double& d); virtual bool exactDocCheck(const Point& p, double& d);
// Always in distance units, whether radians or normal // Always in distance units, whether radians or normal
double farthest() const { return _farthest; } double farthest() const { return _farthest; }
virtual int addSpecific(const GeoIndexEntry& node, const Point& key P, bool onBounds, virtual int addSpecific(const GeoIndexEntry& node, const Point& key P, bool onBounds,
double keyD, bool potentiallyNewDoc); double keyD, bool potentiallyNewDoc);
skipping to change at line 152 skipping to change at line 151
map<DiskLoc, Holder::iterator> _seenPts; map<DiskLoc, Holder::iterator> _seenPts;
}; };
class GeoSearch : public GeoHopper { class GeoSearch : public GeoHopper {
public: public:
GeoSearch(TwoDAccessMethod* accessMethod, GeoSearch(TwoDAccessMethod* accessMethod,
const Point& startPt, const Point& startPt,
int numWanted = 100, int numWanted = 100,
MatchExpression* filter = NULL, MatchExpression* filter = NULL,
double maxDistance = numeric_limits<double>::max(), double maxDistance = numeric_limits<double>::max(),
GeoDistType type = GEO_PLANE, GeoDistType type = GEO_PLANE);
bool uniqueDocs = false,
bool needDistance = false);
void exec(); void exec();
void addExactPoints(const GeoPoint& pt, Holder& points, bool force) ; void addExactPoints(const GeoPoint& pt, Holder& points, bool force) ;
void addExactPoints(const GeoPoint& pt, Holder& points, int& before , int& after, void addExactPoints(const GeoPoint& pt, Holder& points, int& before , int& after,
bool force); bool force);
// TODO: Refactor this back into holder class, allow to run periodi cally when we are seeing // TODO: Refactor this back into holder class, allow to run periodi cally when we are seeing
// a lot of pts // a lot of pts
 End of changes. 7 change blocks. 
10 lines changed or deleted 7 lines changed or added


 action_type.h   action_type.h 
skipping to change at line 52 skipping to change at line 52
// Takes the string representation of a single action type and retu rns the corresponding // Takes the string representation of a single action type and retu rns the corresponding
// ActionType enum. // ActionType enum.
static Status parseActionFromString(const std::string& actionString , ActionType* result); static Status parseActionFromString(const std::string& actionString , ActionType* result);
// Takes an ActionType and returns the string representation // Takes an ActionType and returns the string representation
static std::string actionToString(const ActionType& action); static std::string actionToString(const ActionType& action);
static const ActionType addShard; static const ActionType addShard;
static const ActionType anyAction; static const ActionType anyAction;
static const ActionType appendOplogNote;
static const ActionType applicationMessage; static const ActionType applicationMessage;
static const ActionType auditLogRotate; static const ActionType auditLogRotate;
static const ActionType authCheck; static const ActionType authCheck;
static const ActionType authSchemaUpgrade; static const ActionType authSchemaUpgrade;
static const ActionType authenticate; static const ActionType authenticate;
static const ActionType captrunc;
static const ActionType changeCustomData; static const ActionType changeCustomData;
static const ActionType changePassword; static const ActionType changePassword;
static const ActionType changeOwnPassword; static const ActionType changeOwnPassword;
static const ActionType changeOwnCustomData; static const ActionType changeOwnCustomData;
static const ActionType clean;
static const ActionType cleanupOrphaned; static const ActionType cleanupOrphaned;
static const ActionType closeAllDatabases; static const ActionType closeAllDatabases;
static const ActionType collMod; static const ActionType collMod;
static const ActionType collStats; static const ActionType collStats;
static const ActionType compact; static const ActionType compact;
static const ActionType connPoolStats; static const ActionType connPoolStats;
static const ActionType connPoolSync; static const ActionType connPoolSync;
static const ActionType convertToCapped; static const ActionType convertToCapped;
static const ActionType cpuProfiler; static const ActionType cpuProfiler;
static const ActionType createCollection; static const ActionType createCollection;
skipping to change at line 103 skipping to change at line 102
static const ActionType fsync; static const ActionType fsync;
static const ActionType getCmdLineOpts; static const ActionType getCmdLineOpts;
static const ActionType getLog; static const ActionType getLog;
static const ActionType getParameter; static const ActionType getParameter;
static const ActionType getShardMap; static const ActionType getShardMap;
static const ActionType getShardVersion; static const ActionType getShardVersion;
static const ActionType grantRole; static const ActionType grantRole;
static const ActionType grantPrivilegesToRole; static const ActionType grantPrivilegesToRole;
static const ActionType grantRolesToRole; static const ActionType grantRolesToRole;
static const ActionType grantRolesToUser; static const ActionType grantRolesToUser;
static const ActionType handshake;
static const ActionType hostInfo; static const ActionType hostInfo;
static const ActionType impersonate;
static const ActionType indexStats; static const ActionType indexStats;
static const ActionType inprog; static const ActionType inprog;
static const ActionType insert; static const ActionType insert;
static const ActionType internal;
static const ActionType invalidateUserCache; static const ActionType invalidateUserCache;
static const ActionType killCursors; static const ActionType killCursors;
static const ActionType killop; static const ActionType killop;
static const ActionType listDatabases; static const ActionType listDatabases;
static const ActionType listShards; static const ActionType listShards;
static const ActionType logRotate; static const ActionType logRotate;
static const ActionType mapReduceShardedFinish;
static const ActionType moveChunk; static const ActionType moveChunk;
static const ActionType netstat; static const ActionType netstat;
static const ActionType planCacheIndexFilter;
static const ActionType planCacheRead;
static const ActionType planCacheWrite;
static const ActionType reIndex; static const ActionType reIndex;
static const ActionType remove; static const ActionType remove;
static const ActionType removeShard; static const ActionType removeShard;
static const ActionType renameCollection; static const ActionType renameCollection;
static const ActionType renameCollectionSameDB; static const ActionType renameCollectionSameDB;
static const ActionType repairDatabase; static const ActionType repairDatabase;
static const ActionType replSetConfigure; static const ActionType replSetConfigure;
static const ActionType replSetElect;
static const ActionType replSetFresh;
static const ActionType replSetGetRBID;
static const ActionType replSetGetStatus; static const ActionType replSetGetStatus;
static const ActionType replSetHeartbeat; static const ActionType replSetHeartbeat;
static const ActionType replSetReconfig; static const ActionType replSetReconfig;
static const ActionType replSetStateChange; static const ActionType replSetStateChange;
static const ActionType replSetUpdatePosition;
static const ActionType resync; static const ActionType resync;
static const ActionType revokeRole; static const ActionType revokeRole;
static const ActionType revokePrivilegesFromRole; static const ActionType revokePrivilegesFromRole;
static const ActionType revokeRolesFromRole; static const ActionType revokeRolesFromRole;
static const ActionType revokeRolesFromUser; static const ActionType revokeRolesFromUser;
static const ActionType serverStatus; static const ActionType serverStatus;
static const ActionType setParameter; static const ActionType setParameter;
static const ActionType setShardVersion; static const ActionType shardCollection;
static const ActionType shardingState; static const ActionType shardingState;
static const ActionType shutdown; static const ActionType shutdown;
static const ActionType splitChunk; static const ActionType splitChunk;
static const ActionType splitVector; static const ActionType splitVector;
static const ActionType storageDetails; static const ActionType storageDetails;
static const ActionType top; static const ActionType top;
static const ActionType touch; static const ActionType touch;
static const ActionType unlock; static const ActionType unlock;
static const ActionType unsetSharding;
static const ActionType update; static const ActionType update;
static const ActionType updateRole; static const ActionType updateRole;
static const ActionType updateUser; static const ActionType updateUser;
static const ActionType validate; static const ActionType validate;
static const ActionType viewRole; static const ActionType viewRole;
static const ActionType viewUser; static const ActionType viewUser;
static const ActionType writebacklisten;
static const ActionType writeBacksQueued;
static const ActionType _migrateClone;
static const ActionType _recvChunkAbort;
static const ActionType _recvChunkCommit;
static const ActionType _recvChunkStart;
static const ActionType _recvChunkStatus;
static const ActionType _transferMods;
enum ActionTypeIdentifier { enum ActionTypeIdentifier {
addShardValue, addShardValue,
anyActionValue, anyActionValue,
appendOplogNoteValue,
applicationMessageValue, applicationMessageValue,
auditLogRotateValue, auditLogRotateValue,
authCheckValue, authCheckValue,
authSchemaUpgradeValue, authSchemaUpgradeValue,
authenticateValue, authenticateValue,
captruncValue,
changeCustomDataValue, changeCustomDataValue,
changePasswordValue, changePasswordValue,
changeOwnPasswordValue, changeOwnPasswordValue,
changeOwnCustomDataValue, changeOwnCustomDataValue,
cleanValue,
cleanupOrphanedValue, cleanupOrphanedValue,
closeAllDatabasesValue, closeAllDatabasesValue,
collModValue, collModValue,
collStatsValue, collStatsValue,
compactValue, compactValue,
connPoolStatsValue, connPoolStatsValue,
connPoolSyncValue, connPoolSyncValue,
convertToCappedValue, convertToCappedValue,
cpuProfilerValue, cpuProfilerValue,
createCollectionValue, createCollectionValue,
skipping to change at line 218 skipping to change at line 206
fsyncValue, fsyncValue,
getCmdLineOptsValue, getCmdLineOptsValue,
getLogValue, getLogValue,
getParameterValue, getParameterValue,
getShardMapValue, getShardMapValue,
getShardVersionValue, getShardVersionValue,
grantRoleValue, grantRoleValue,
grantPrivilegesToRoleValue, grantPrivilegesToRoleValue,
grantRolesToRoleValue, grantRolesToRoleValue,
grantRolesToUserValue, grantRolesToUserValue,
handshakeValue,
hostInfoValue, hostInfoValue,
impersonateValue,
indexStatsValue, indexStatsValue,
inprogValue, inprogValue,
insertValue, insertValue,
internalValue,
invalidateUserCacheValue, invalidateUserCacheValue,
killCursorsValue, killCursorsValue,
killopValue, killopValue,
listDatabasesValue, listDatabasesValue,
listShardsValue, listShardsValue,
logRotateValue, logRotateValue,
mapReduceShardedFinishValue,
moveChunkValue, moveChunkValue,
netstatValue, netstatValue,
planCacheIndexFilterValue,
planCacheReadValue,
planCacheWriteValue,
reIndexValue, reIndexValue,
removeValue, removeValue,
removeShardValue, removeShardValue,
renameCollectionValue, renameCollectionValue,
renameCollectionSameDBValue, renameCollectionSameDBValue,
repairDatabaseValue, repairDatabaseValue,
replSetConfigureValue, replSetConfigureValue,
replSetElectValue,
replSetFreshValue,
replSetGetRBIDValue,
replSetGetStatusValue, replSetGetStatusValue,
replSetHeartbeatValue, replSetHeartbeatValue,
replSetReconfigValue, replSetReconfigValue,
replSetStateChangeValue, replSetStateChangeValue,
replSetUpdatePositionValue,
resyncValue, resyncValue,
revokeRoleValue, revokeRoleValue,
revokePrivilegesFromRoleValue, revokePrivilegesFromRoleValue,
revokeRolesFromRoleValue, revokeRolesFromRoleValue,
revokeRolesFromUserValue, revokeRolesFromUserValue,
serverStatusValue, serverStatusValue,
setParameterValue, setParameterValue,
setShardVersionValue, shardCollectionValue,
shardingStateValue, shardingStateValue,
shutdownValue, shutdownValue,
splitChunkValue, splitChunkValue,
splitVectorValue, splitVectorValue,
storageDetailsValue, storageDetailsValue,
topValue, topValue,
touchValue, touchValue,
unlockValue, unlockValue,
unsetShardingValue,
updateValue, updateValue,
updateRoleValue, updateRoleValue,
updateUserValue, updateUserValue,
validateValue, validateValue,
viewRoleValue, viewRoleValue,
viewUserValue, viewUserValue,
writebacklistenValue,
writeBacksQueuedValue,
_migrateCloneValue,
_recvChunkAbortValue,
_recvChunkCommitValue,
_recvChunkStartValue,
_recvChunkStatusValue,
_transferModsValue,
actionTypeEndValue, // Should always be last in this enum actionTypeEndValue, // Should always be last in this enum
}; };
static const int NUM_ACTION_TYPES = actionTypeEndValue; static const int NUM_ACTION_TYPES = actionTypeEndValue;
private: private:
uint32_t _identifier; // unique identifier for this action. uint32_t _identifier; // unique identifier for this action.
}; };
 End of changes. 26 change blocks. 
36 lines changed or deleted 14 lines changed or added


 algorithm.h   algorithm.h 
skipping to change at line 47 skipping to change at line 47
* matches, then the 'ok' method on the returned Element will return f alse. * matches, then the 'ok' method on the returned Element will return f alse.
*/ */
template<typename ElementType, typename Predicate> template<typename ElementType, typename Predicate>
inline ElementType findElement(ElementType first, Predicate predicate) { inline ElementType findElement(ElementType first, Predicate predicate) {
while (first.ok() && !predicate(first)) while (first.ok() && !predicate(first))
first = first.rightSibling(); first = first.rightSibling();
return first; return first;
} }
/** A predicate for findElement that matches on the field name of Eleme nts. */ /** A predicate for findElement that matches on the field name of Eleme nts. */
class FieldNameEquals { struct FieldNameEquals {
public:
// The lifetime of this object must be a subset of the lifetime of 'fieldName'. // The lifetime of this object must be a subset of the lifetime of 'fieldName'.
explicit FieldNameEquals(const StringData& fieldName) explicit FieldNameEquals(const StringData& fieldName)
: _fieldName(fieldName) {} : fieldName(fieldName) {}
bool operator()(const ConstElement& element) const { bool operator()(const ConstElement& element) const {
return (_fieldName == element.getFieldName()); return (fieldName == element.getFieldName());
} }
private: const StringData& fieldName;
const StringData& _fieldName;
}; };
/** An overload of findElement that delegates to the special implementa
tion
* Element::findElementNamed to reduce traffic across the Element API.
*/
template<typename ElementType>
inline ElementType findElement(ElementType first, FieldNameEquals predi
cate) {
return first.ok() ? first.findElementNamed(predicate.fieldName) : f
irst;
}
/** A convenience wrapper around findElement<ElementType, FieldNameEqua ls>. */ /** A convenience wrapper around findElement<ElementType, FieldNameEqua ls>. */
template<typename ElementType> template<typename ElementType>
inline ElementType findElementNamed(ElementType first, const StringData & fieldName) { inline ElementType findElementNamed(ElementType first, const StringData & fieldName) {
return findElement(first, FieldNameEquals(fieldName)); return findElement(first, FieldNameEquals(fieldName));
} }
/** Finds the first child under 'parent' that matches the given predica
te. If no such child
* Element is found, the returned Element's 'ok' method will return fa
lse.
*/
template<typename ElementType, typename Predicate>
inline ElementType findFirstChild(ElementType parent, Predicate predica
te) {
return findElement(parent.leftchild(), predicate);
}
/** An overload of findFirstChild that delegates to the special impleme
ntation
* Element::findFirstChildNamed to reduce traffic across the Element A
PI.
*/
template<typename ElementType>
inline ElementType findFirstChild(ElementType parent, FieldNameEquals p
redicate) {
return parent.ok() ? parent.findFirstChildNamed(predicate.fieldName
) : parent;
}
/** Finds the first child under 'parent' that matches the given field n ame. If no such child /** Finds the first child under 'parent' that matches the given field n ame. If no such child
* Element is found, the returned Element's 'ok' method will return fa lse. * Element is found, the returned Element's 'ok' method will return fa lse.
*/ */
template<typename ElementType> template<typename ElementType>
inline ElementType findFirstChildNamed(ElementType parent, const String Data& fieldName) { inline ElementType findFirstChildNamed(ElementType parent, const String Data& fieldName) {
return findElementNamed(parent.leftChild(), fieldName); return findFirstChild(parent, FieldNameEquals(fieldName));
} }
/** A less-than ordering for Elements that compares based on the Elemen t field names. */ /** A less-than ordering for Elements that compares based on the Elemen t field names. */
class FieldNameLessThan { class FieldNameLessThan {
// TODO: This should possibly derive from std::binary_function. // TODO: This should possibly derive from std::binary_function.
public: public:
inline bool operator()(const ConstElement& left, const ConstElement & right) const { inline bool operator()(const ConstElement& left, const ConstElement & right) const {
return left.getFieldName() < right.getFieldName(); return left.getFieldName() < right.getFieldName();
} }
}; };
skipping to change at line 193 skipping to change at line 215
} }
inline bool operator()(const ConstElement& elt) const { inline bool operator()(const ConstElement& elt) const {
return _value.compareWithElement(elt, _considerFieldName) == 0; return _value.compareWithElement(elt, _considerFieldName) == 0;
} }
private: private:
const ConstElement& _value; const ConstElement& _value;
const bool _considerFieldName; const bool _considerFieldName;
}; };
// NOTE: Originally, these truly were algorithms, in that they executed
the loop over a
// generic ElementType. However, these operations were later made intri
nsic to
// Element/Document for performance reasons. These functions hare here
for backward
// compatibility, and just delegate to the appropriate Element or Const
Element method of
// the same name.
/** Return the element that is 'n' Elements to the left in the sibling chain of 'element'. */ /** Return the element that is 'n' Elements to the left in the sibling chain of 'element'. */
template<typename ElementType> template<typename ElementType>
ElementType getNthLeftSibling(ElementType element, std::size_t n) { ElementType getNthLeftSibling(ElementType element, std::size_t n) {
while (element.ok() && (n-- != 0)) return element.leftSibling(n);
element = element.leftSibling();
return element;
} }
/** Return the element that is 'n' Elements to the right in the sibling chain of 'element'. */ /** Return the element that is 'n' Elements to the right in the sibling chain of 'element'. */
template<typename ElementType> template<typename ElementType>
ElementType getNthRightSibling(ElementType element, std::size_t n) { ElementType getNthRightSibling(ElementType element, std::size_t n) {
while (element.ok() && (n-- != 0)) return element.rightSibling(n);
element = element.rightSibling();
return element;
} }
/** Move 'n' Elements left or right in the sibling chain of 'element' * / /** Move 'n' Elements left or right in the sibling chain of 'element' * /
template<typename ElementType> template<typename ElementType>
ElementType getNthSibling(ElementType element, int n) { ElementType getNthSibling(ElementType element, int n) {
return (n < 0) ? return (n < 0) ?
getNthLeftSibling(element, -n) : getNthLeftSibling(element, -n) :
getNthRightSibling(element, n); getNthRightSibling(element, n);
} }
/** Get the child that is 'n' Elements to the right of 'element's left child. */ /** Get the child that is 'n' Elements to the right of 'element's left child. */
template<typename ElementType> template<typename ElementType>
ElementType getNthChild(ElementType element, std::size_t n) { ElementType getNthChild(ElementType element, std::size_t n) {
return getNthRightSibling(element.leftChild(), n); return element.findNthChild(n);
} }
/** Returns the number of valid siblings to the left of 'element'. */ /** Returns the number of valid siblings to the left of 'element'. */
template<typename ElementType> template<typename ElementType>
std::size_t countSiblingsLeft(ElementType element) { std::size_t countSiblingsLeft(ElementType element) {
std::size_t result = 0; return element.countSiblingsLeft();
element = element.leftSibling();
while (element.ok()) {
element = element.leftSibling();
++result;
}
return result;
} }
/** Returns the number of valid siblings to the right of 'element'. */ /** Returns the number of valid siblings to the right of 'element'. */
template<typename ElementType> template<typename ElementType>
std::size_t countSiblingsRight(ElementType element) { std::size_t countSiblingsRight(ElementType element) {
std::size_t result = 0; return element.countSiblingsRight();
element = element.rightSibling();
while (element.ok()) {
element = element.rightSibling();
++result;
}
return result;
} }
/** Return the number of children of 'element'. */ /** Return the number of children of 'element'. */
template<typename ElementType> template<typename ElementType>
std::size_t countChildren(ElementType element) { std::size_t countChildren(ElementType element) {
element = element.leftChild(); return element.countChildren();
return element.ok() ? (1 + countSiblingsRight(element)) : 0;
} }
} // namespace mutablebson } // namespace mutablebson
} // namespace mongo } // namespace mongo
 End of changes. 14 change blocks. 
30 lines changed or deleted 55 lines changed or added


 and_common-inl.h   and_common-inl.h 
skipping to change at line 36 skipping to change at line 36
* it in the license file. * it in the license file.
*/ */
namespace mongo { namespace mongo {
class AndCommon { class AndCommon {
public: public:
/** /**
* If src has any data dest doesn't, add that data to dest. * If src has any data dest doesn't, add that data to dest.
*/ */
static void mergeFrom(WorkingSetMember* dest, WorkingSetMember* src ) { static void mergeFrom(WorkingSetMember* dest, const WorkingSetMembe r& src) {
verify(dest->hasLoc()); verify(dest->hasLoc());
verify(src->hasLoc()); verify(src.hasLoc());
verify(dest->loc == src->loc); verify(dest->loc == src.loc);
// This is N^2 but N is probably pretty small. Easy enough to revisit. // This is N^2 but N is probably pretty small. Easy enough to revisit.
// Merge key data. // Merge key data.
for (size_t i = 0; i < src->keyData.size(); ++i) { for (size_t i = 0; i < src.keyData.size(); ++i) {
bool found = false; bool found = false;
for (size_t j = 0; j < dest->keyData.size(); ++j) { for (size_t j = 0; j < dest->keyData.size(); ++j) {
if (dest->keyData[j].indexKeyPattern == src->keyData[i] .indexKeyPattern) { if (dest->keyData[j].indexKeyPattern == src.keyData[i]. indexKeyPattern) {
found = true; found = true;
break; break;
} }
} }
if (!found) { dest->keyData.push_back(src->keyData[i]); } if (!found) { dest->keyData.push_back(src.keyData[i]); }
} }
// Merge computed data. // Merge computed data.
if (!dest->hasComputed(WSM_COMPUTED_TEXT_SCORE) && src->hasComp typedef WorkingSetComputedDataType WSCD;
uted(WSM_COMPUTED_TEXT_SCORE)) { for (WSCD i = WSCD(0); i < WSM_COMPUTED_NUM_TYPES; i = WSCD(i +
dest->addComputed(src->getComputed(WSM_COMPUTED_TEXT_SCORE) 1)) {
->clone()); if (!dest->hasComputed(i) && src.hasComputed(i)) {
} dest->addComputed(src.getComputed(i)->clone());
}
if (!dest->hasComputed(WSM_COMPUTED_GEO_DISTANCE) && src->hasCo
mputed(WSM_COMPUTED_GEO_DISTANCE)) {
dest->addComputed(src->getComputed(WSM_COMPUTED_GEO_DISTANC
E)->clone());
} }
} }
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
16 lines changed or deleted 12 lines changed or added


 and_hash.h   and_hash.h 
skipping to change at line 65 skipping to change at line 65
AndHashStage(WorkingSet* ws, const MatchExpression* filter); AndHashStage(WorkingSet* ws, const MatchExpression* filter);
virtual ~AndHashStage(); virtual ~AndHashStage();
void addChild(PlanStage* child); void addChild(PlanStage* child);
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
StageState readFirstChild(WorkingSetID* out); StageState readFirstChild(WorkingSetID* out);
StageState hashOtherChildren(WorkingSetID* out); StageState hashOtherChildren(WorkingSetID* out);
// Not owned by us. // Not owned by us.
WorkingSet* _ws; WorkingSet* _ws;
// Not owned by us. // Not owned by us.
const MatchExpression* _filter; const MatchExpression* _filter;
// The stages we read from. Owned by us. // The stages we read from. Owned by us.
vector<PlanStage*> _children; vector<PlanStage*> _children;
// _dataMap is filled out by the first child and probed by subseque // _dataMap is filled out by the first child and probed by subseque
nt children. nt children. This is the
// hash table that we create by intersecting _children and probe wi
th the last child.
typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap; typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap;
DataMap _dataMap; DataMap _dataMap;
// Keeps track of what elements from _dataMap subsequent children h ave seen. // Keeps track of what elements from _dataMap subsequent children h ave seen.
// Only used while _hashingChildren.
typedef unordered_set<DiskLoc, DiskLoc::Hasher> SeenMap; typedef unordered_set<DiskLoc, DiskLoc::Hasher> SeenMap;
SeenMap _seenMap; SeenMap _seenMap;
// Iterator over the members of _dataMap that survive. // True if we're still intersecting _children[0..._children.size()-
DataMap::iterator _resultIterator; 1].
bool _hashingChildren;
// True if we're still scanning _children for results.
bool _shouldScanChildren;
// Which child are we currently working on? // Which child are we currently working on?
size_t _currentChild; size_t _currentChild;
// Stats // Stats
CommonStats _commonStats; CommonStats _commonStats;
AndHashStats _specificStats; AndHashStats _specificStats;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
8 lines changed or deleted 9 lines changed or added


 and_sorted.h   and_sorted.h 
skipping to change at line 66 skipping to change at line 66
AndSortedStage(WorkingSet* ws, const MatchExpression* filter); AndSortedStage(WorkingSet* ws, const MatchExpression* filter);
virtual ~AndSortedStage(); virtual ~AndSortedStage();
void addChild(PlanStage* child); void addChild(PlanStage* child);
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
// Find a node to AND against. // Find a node to AND against.
PlanStage::StageState getTargetLoc(WorkingSetID* out); PlanStage::StageState getTargetLoc(WorkingSetID* out);
// Move a child which hasn't advanced to the target node forward. // Move a child which hasn't advanced to the target node forward.
// Returns the target node in 'out' if all children successfully ad vance to it. // Returns the target node in 'out' if all children successfully ad vance to it.
PlanStage::StageState moveTowardTargetLoc(WorkingSetID* out); PlanStage::StageState moveTowardTargetLoc(WorkingSetID* out);
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 assert_util.h   assert_util.h 
skipping to change at line 26 skipping to change at line 26
*/ */
#pragma once #pragma once
#include <iostream> #include <iostream>
#include <typeinfo> #include <typeinfo>
#include <string> #include <string>
#include "mongo/base/status.h" // NOTE: This is safe as utils depend on bas e #include "mongo/base/status.h" // NOTE: This is safe as utils depend on bas e
#include "mongo/bson/inline_decls.h" #include "mongo/bson/inline_decls.h"
#include "mongo/client/export_macros.h"
#include "mongo/platform/compiler.h" #include "mongo/platform/compiler.h"
namespace mongo { namespace mongo {
enum CommonErrorCodes { enum CommonErrorCodes {
OkCode = 0, OkCode = 0,
DatabaseDifferCaseCode = 13297 , // uassert( 13297 ) DatabaseDifferCaseCode = 13297 , // uassert( 13297 )
InterruptedAtShutdown = 11600 , // uassert( 11600 )
SendStaleConfigCode = 13388 , // uassert( 13388 ) SendStaleConfigCode = 13388 , // uassert( 13388 )
RecvStaleConfigCode = 9996, // uassert( 9996 ) RecvStaleConfigCode = 9996, // uassert( 9996 )
PrepareConfigsFailedCode = 13104, // uassert( 13104 ) PrepareConfigsFailedCode = 13104, // uassert( 13104 )
NotMasterOrSecondaryCode = 13436, // uassert( 13436 ) NotMasterOrSecondaryCode = 13436, // uassert( 13436 )
NotMasterNoSlaveOkCode = 13435, // uassert( 13435 ) NotMasterNoSlaveOkCode = 13435, // uassert( 13435 )
NotMaster = 10107 // uassert( 10107 ) NotMaster = 10107 // uassert( 10107 )
}; };
class AssertionCount { class MONGO_CLIENT_API AssertionCount {
public: public:
AssertionCount(); AssertionCount();
void rollover(); void rollover();
void condrollover( int newValue ); void condrollover( int newValue );
int regular; int regular;
int warning; int warning;
int msg; int msg;
int user; int user;
int rollovers; int rollovers;
}; };
extern AssertionCount assertionCount; extern AssertionCount assertionCount;
class BSONObjBuilder; class BSONObjBuilder;
struct ExceptionInfo { struct MONGO_CLIENT_API ExceptionInfo {
ExceptionInfo() : msg(""),code(-1) {} ExceptionInfo() : msg(""),code(-1) {}
ExceptionInfo( const char * m , int c ) ExceptionInfo( const char * m , int c )
: msg( m ) , code( c ) { : msg( m ) , code( c ) {
} }
ExceptionInfo( const std::string& m , int c ) ExceptionInfo( const std::string& m , int c )
: msg( m ) , code( c ) { : msg( m ) , code( c ) {
} }
void append( BSONObjBuilder& b , const char * m = "$err" , const ch ar * c = "code" ) const ; void append( BSONObjBuilder& b , const char * m = "$err" , const ch ar * c = "code" ) const ;
std::string toString() const; std::string toString() const;
bool empty() const { return msg.empty(); } bool empty() const { return msg.empty(); }
skipping to change at line 80 skipping to change at line 82
std::string msg; std::string msg;
int code; int code;
}; };
/** helper class that builds error strings. lighter weight than a Stri ngBuilder, albeit less flexible. /** helper class that builds error strings. lighter weight than a Stri ngBuilder, albeit less flexible.
NOINLINE_DECL used in the constructor implementations as we are ass uming this is a cold code path when used. NOINLINE_DECL used in the constructor implementations as we are ass uming this is a cold code path when used.
example: example:
throw UserException(123, ErrorMsg("blah", num_val)); throw UserException(123, ErrorMsg("blah", num_val));
*/ */
class ErrorMsg { class MONGO_CLIENT_API ErrorMsg {
public: public:
ErrorMsg(const char *msg, char ch); ErrorMsg(const char *msg, char ch);
ErrorMsg(const char *msg, unsigned val); ErrorMsg(const char *msg, unsigned val);
operator std::string() const { return buf; } operator std::string() const { return buf; }
private: private:
char buf[256]; char buf[256];
}; };
class DBException; class DBException;
std::string causedBy( const DBException& e ); MONGO_CLIENT_API std::string causedBy( const DBException& e );
std::string causedBy( const std::string& e ); MONGO_CLIENT_API std::string causedBy( const std::string& e );
bool inShutdown(); MONGO_CLIENT_API bool inShutdown();
/** Most mongo exceptions inherit from this; this is commonly caught in most threads */ /** Most mongo exceptions inherit from this; this is commonly caught in most threads */
class DBException : public std::exception { class MONGO_CLIENT_API DBException : public std::exception {
public: public:
DBException( const ExceptionInfo& ei ) : _ei(ei) { traceIfNeeded(*t his); } DBException( const ExceptionInfo& ei ) : _ei(ei) { traceIfNeeded(*t his); }
DBException( const char * msg , int code ) : _ei(msg,code) { traceI fNeeded(*this); } DBException( const char * msg , int code ) : _ei(msg,code) { traceI fNeeded(*this); }
DBException( const std::string& msg , int code ) : _ei(msg,code) { traceIfNeeded(*this); } DBException( const std::string& msg , int code ) : _ei(msg,code) { traceIfNeeded(*this); }
virtual ~DBException() throw() { } virtual ~DBException() throw() { }
virtual const char* what() const throw() { return _ei.msg.c_str(); } virtual const char* what() const throw() { return _ei.msg.c_str(); }
virtual int getCode() const { return _ei.code; } virtual int getCode() const { return _ei.code; }
virtual void appendPrefix( std::stringstream& ss ) const { } virtual void appendPrefix( std::stringstream& ss ) const { }
virtual void addContext( const std::string& str ) { virtual void addContext( const std::string& str ) {
skipping to change at line 134 skipping to change at line 136
const ExceptionInfo& getInfo() const { return _ei; } const ExceptionInfo& getInfo() const { return _ei; }
private: private:
static void traceIfNeeded( const DBException& e ); static void traceIfNeeded( const DBException& e );
public: public:
static bool traceExceptions; static bool traceExceptions;
protected: protected:
ExceptionInfo _ei; ExceptionInfo _ei;
}; };
class AssertionException : public DBException { class MONGO_CLIENT_API AssertionException : public DBException {
public: public:
AssertionException( const ExceptionInfo& ei ) : DBException(ei) {} AssertionException( const ExceptionInfo& ei ) : DBException(ei) {}
AssertionException( const char * msg , int code ) : DBException(msg ,code) {} AssertionException( const char * msg , int code ) : DBException(msg ,code) {}
AssertionException( const std::string& msg , int code ) : DBExcepti on(msg,code) {} AssertionException( const std::string& msg , int code ) : DBExcepti on(msg,code) {}
virtual ~AssertionException() throw() { } virtual ~AssertionException() throw() { }
virtual bool severe() { return true; } virtual bool severe() const { return true; }
virtual bool isUserAssertion() { return false; } virtual bool isUserAssertion() const { return false; }
/* true if an interrupted exception - see KillCurrentOp */ /* true if an interrupted exception - see KillCurrentOp */
bool interrupted() { bool interrupted() {
return _ei.code == 11600 || _ei.code == 11601 || return _ei.code == InterruptedAtShutdown || _ei.code == 11601 | |
_ei.code == ErrorCodes::ExceededTimeLimit; _ei.code == ErrorCodes::ExceededTimeLimit;
} }
}; };
/* UserExceptions are valid errors that a user can cause, like out of d isk space or duplicate key */ /* UserExceptions are valid errors that a user can cause, like out of d isk space or duplicate key */
class UserException : public AssertionException { class MONGO_CLIENT_API UserException : public AssertionException {
public: public:
UserException(int c , const std::string& m) : AssertionException( m , c ) {} UserException(int c , const std::string& m) : AssertionException( m , c ) {}
virtual bool severe() { return false; } virtual bool severe() const { return false; }
virtual bool isUserAssertion() { return true; } virtual bool isUserAssertion() const { return true; }
virtual void appendPrefix( std::stringstream& ss ) const; virtual void appendPrefix( std::stringstream& ss ) const;
}; };
class MsgAssertionException : public AssertionException { class MONGO_CLIENT_API MsgAssertionException : public AssertionExceptio n {
public: public:
MsgAssertionException( const ExceptionInfo& ei ) : AssertionExcepti on( ei ) {} MsgAssertionException( const ExceptionInfo& ei ) : AssertionExcepti on( ei ) {}
MsgAssertionException(int c, const std::string& m) : AssertionExcep tion( m , c ) {} MsgAssertionException(int c, const std::string& m) : AssertionExcep tion( m , c ) {}
virtual bool severe() { return false; } virtual bool severe() const { return false; }
virtual void appendPrefix( std::stringstream& ss ) const; virtual void appendPrefix( std::stringstream& ss ) const;
}; };
MONGO_COMPILER_NORETURN void verifyFailed(const char *msg, const char * MONGO_CLIENT_API MONGO_COMPILER_NORETURN void verifyFailed(const char *
file, unsigned line); msg, const char *file, unsigned line);
void wasserted(const char *msg, const char *file, unsigned line); MONGO_CLIENT_API MONGO_COMPILER_NORETURN void invariantFailed(const cha
MONGO_COMPILER_NORETURN void fassertFailed( int msgid ); r *msg, const char *file, unsigned line);
MONGO_COMPILER_NORETURN void fassertFailedNoTrace( int msgid ); MONGO_CLIENT_API void wasserted(const char *msg, const char *file, unsi
MONGO_COMPILER_NORETURN void fassertFailedWithStatus(int msgid, const S gned line);
tatus& status); MONGO_CLIENT_API MONGO_COMPILER_NORETURN void fassertFailed( int msgid
);
MONGO_CLIENT_API MONGO_COMPILER_NORETURN void fassertFailedNoTrace( int
msgid );
MONGO_CLIENT_API MONGO_COMPILER_NORETURN void fassertFailedWithStatus(
int msgid, const Status& status);
/** a "user assertion". throws UserAssertion. logs. typically used f or errors that a user /** a "user assertion". throws UserAssertion. logs. typically used f or errors that a user
could cause, such as duplicate key, disk full, etc. could cause, such as duplicate key, disk full, etc.
*/ */
MONGO_COMPILER_NORETURN void uasserted(int msgid, const char *msg); MONGO_CLIENT_API MONGO_COMPILER_NORETURN void uasserted(int msgid, cons
MONGO_COMPILER_NORETURN void uasserted(int msgid , const std::string &m t char *msg);
sg); MONGO_CLIENT_API MONGO_COMPILER_NORETURN void uasserted(int msgid , con
st std::string &msg);
/** msgassert and massert are for errors that are internal but have a w /** msgassert and massert are for errors that are internal but have a w
ell defined error text std::string. ell defined error text
a stack trace is logged. std::string. a stack trace is logged.
*/ */
MONGO_COMPILER_NORETURN void msgassertedNoTrace(int msgid, const char * MONGO_CLIENT_API MONGO_COMPILER_NORETURN void msgassertedNoTrace(int ms
msg); gid, const char *msg);
MONGO_COMPILER_NORETURN inline void msgassertedNoTrace(int msgid, const MONGO_CLIENT_API MONGO_COMPILER_NORETURN void msgasserted(int msgid, co
std::string& msg) { nst char *msg);
msgassertedNoTrace( msgid , msg.c_str() ); MONGO_CLIENT_API MONGO_COMPILER_NORETURN void msgasserted(int msgid, co
} nst std::string &msg);
MONGO_COMPILER_NORETURN void msgasserted(int msgid, const char *msg);
MONGO_COMPILER_NORETURN void msgasserted(int msgid, const std::string &
msg);
/* convert various types of exceptions to strings */ /* convert various types of exceptions to strings */
inline std::string causedBy( const char* e ){ return (std::string)" :: MONGO_CLIENT_API std::string causedBy( const char* e );
caused by :: " + e; } MONGO_CLIENT_API std::string causedBy( const DBException& e );
inline std::string causedBy( const DBException& e ){ return causedBy( e MONGO_CLIENT_API std::string causedBy( const std::exception& e );
.toString().c_str() ); } MONGO_CLIENT_API std::string causedBy( const std::string& e );
inline std::string causedBy( const std::exception& e ){ return causedBy MONGO_CLIENT_API std::string causedBy( const std::string* e );
( e.what() ); } MONGO_CLIENT_API std::string causedBy( const Status& e );
inline std::string causedBy( const std::string& e ){ return causedBy( e
.c_str() ); }
inline std::string causedBy( const std::string* e ){
return (e && *e != "") ? causedBy(*e) : "";
}
inline std::string causedBy( const Status& e ){ return causedBy( e.reas
on() ); }
/** aborts on condition failure */ /** aborts on condition failure */
inline void fassert(int msgid, bool testOK) {if (MONGO_unlikely(!testOK MONGO_CLIENT_API inline void fassert(int msgid, bool testOK) {
)) fassertFailed(msgid);} if (MONGO_unlikely(!testOK)) fassertFailed(msgid);
inline void fassert(int msgid, const Status& status) { }
MONGO_CLIENT_API inline void fassert(int msgid, const Status& status) {
if (MONGO_unlikely(!status.isOK())) { if (MONGO_unlikely(!status.isOK())) {
fassertFailedWithStatus(msgid, status); fassertFailedWithStatus(msgid, status);
} }
} }
/* "user assert". if asserts, user did something wrong, not our code * / /* "user assert". if asserts, user did something wrong, not our code * /
#define MONGO_uassert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || ( ::mongo::uasserted(msgid, msg), 0) ) #define MONGO_uassert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || ( ::mongo::uasserted(msgid, msg), 0) )
inline void uassertStatusOK(const Status& status) { MONGO_CLIENT_API inline void uassertStatusOK(const Status& status) {
if (MONGO_unlikely(!status.isOK())) { if (MONGO_unlikely(!status.isOK())) {
uasserted((status.location() != 0 ? status.location() : status. code()), uasserted((status.location() != 0 ? status.location() : status. code()),
status.reason()); status.reason());
} }
} }
/* warning only - keeps going */ /* warning only - keeps going */
#define MONGO_wassert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (::mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) ) #define MONGO_wassert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (::mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) )
/* display a message, no context, and throw assertionexception /* display a message, no context, and throw assertionexception
easy way to throw an exception and log something without our stack t race easy way to throw an exception and log something without our stack t race
display happening. display happening.
*/ */
#define MONGO_massert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || ( ::mongo::msgasserted(msgid, msg), 0) ) #define MONGO_massert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || ( ::mongo::msgasserted(msgid, msg), 0) )
/* same as massert except no msgid */ /* same as massert except no msgid */
#define MONGO_verify(_Expression) (void)( MONGO_likely(!!(_Expression)) || (::mongo::verifyFailed(#_Expression, __FILE__, __LINE__), 0) ) #define MONGO_verify(_Expression) (void)( MONGO_likely(!!(_Expression)) || (::mongo::verifyFailed(#_Expression, __FILE__, __LINE__), 0) )
#define MONGO_invariant(_Expression) (void)( MONGO_likely(!!(_Expression))
|| (::mongo::invariantFailed(#_Expression, __FILE__, __LINE__), 0) )
/* dassert is 'debug assert' -- might want to turn off for production a s these /* dassert is 'debug assert' -- might want to turn off for production a s these
could be slow. could be slow.
*/ */
#if defined(_DEBUG) #if defined(_DEBUG)
# define MONGO_dassert(x) fassert(16199, (x)) # define MONGO_dassert(x) fassert(16199, (x))
#else #else
# define MONGO_dassert(x) # define MONGO_dassert(x)
#endif #endif
/** Allows to jump code during exeuction. */
inline bool debugCompare(bool inDebug, bool condition) { return inDebug
&& condition; }
#if defined(_DEBUG)
# define MONGO_debug_and(x) debugCompare(true, (x))
#else
# define MONGO_debug_and(x) debugCompare(false, (x))
#endif
#ifdef MONGO_EXPOSE_MACROS #ifdef MONGO_EXPOSE_MACROS
# define dcompare MONGO_debug_and
# define dassert MONGO_dassert # define dassert MONGO_dassert
# define verify MONGO_verify # define verify MONGO_verify
# define invariant MONGO_invariant
# define uassert MONGO_uassert # define uassert MONGO_uassert
# define wassert MONGO_wassert # define wassert MONGO_wassert
# define massert MONGO_massert # define massert MONGO_massert
#endif #endif
// some special ids that we want to duplicate // some special ids that we want to duplicate
// > 10000 asserts // > 10000 asserts
// < 10000 UserException // < 10000 UserException
 End of changes. 25 change blocks. 
66 lines changed or deleted 59 lines changed or added


 atomic_intrinsics.h   atomic_intrinsics.h 
skipping to change at line 46 skipping to change at line 46
* *
* The behavior of the functions is analogous to the same-named member func tions of the AtomicWord * The behavior of the functions is analogous to the same-named member func tions of the AtomicWord
* template type in atomic_word.h. * template type in atomic_word.h.
*/ */
#pragma once #pragma once
#if defined(_WIN32) #if defined(_WIN32)
#include "mongo/platform/atomic_intrinsics_win32.h" #include "mongo/platform/atomic_intrinsics_win32.h"
#elif defined(__GNUC__) #elif defined(__GNUC__)
#include "mongo/platform/atomic_intrinsics_gcc.h" #if defined(__i386__) || defined(__x86_64__)
#include "mongo/platform/atomic_intrinsics_gcc_intel.h"
#else
#include "mongo/platform/atomic_intrinsics_gcc_generic.h"
#endif
#else #else
#error "Unsupported os/compiler family" #error "Unsupported os/compiler family"
#endif #endif
 End of changes. 1 change blocks. 
1 lines changed or deleted 5 lines changed or added


 atomic_intrinsics_win32.h   atomic_intrinsics_win32.h 
skipping to change at line 84 skipping to change at line 84
static T fetchAndAdd(volatile T* dest, T increment) { static T fetchAndAdd(volatile T* dest, T increment) {
return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>( dest), LONG(increment)); return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>( dest), LONG(increment));
} }
private: private:
AtomicIntrinsics(); AtomicIntrinsics();
~AtomicIntrinsics(); ~AtomicIntrinsics();
}; };
/** namespace details {
* Instantiation of AtomicIntrinsics<> for 64-bit word sizes.
*/
template <typename T>
class AtomicIntrinsics<T, typename boost::enable_if_c<sizeof(T) == size
of(LONGLONG)>::type> {
public:
#if defined(NTDDI_VERSION) && defined(NTDDI_WS03SP2) && (NTDDI_VERSION >= N
TDDI_WS03SP2)
static const bool kHaveInterlocked64 = true;
#else
static const bool kHaveInterlocked64 = false;
#endif
static T compareAndSwap(volatile T* dest, T expected, T newValue) {
return InterlockedImpl<kHaveInterlocked64>::compareAndSwap(dest
, expected, newValue);
}
static T swap(volatile T* dest, T newValue) { template <typename T, bool HaveInterlocked64Ops>
return InterlockedImpl<kHaveInterlocked64>::swap(dest, newValue struct InterlockedImpl64;
);
}
static T load(volatile const T* value) {
return LoadStoreImpl<T>::load(value);
}
static void store(volatile T* dest, T newValue) {
LoadStoreImpl<T>::store(dest, newValue);
}
static T fetchAndAdd(volatile T* dest, T increment) {
return InterlockedImpl<kHaveInterlocked64>::fetchAndAdd(dest, i
ncrement);
}
private:
AtomicIntrinsics();
~AtomicIntrinsics();
template <bool>
struct InterlockedImpl;
// Implementation of 64-bit Interlocked operations via Windows API calls. // Implementation of 64-bit Interlocked operations via Windows API calls.
template<> template<typename T>
struct InterlockedImpl<true> { struct InterlockedImpl64<T, true> {
static T compareAndSwap(volatile T* dest, T expected, T newValu e) { static T compareAndSwap(volatile T* dest, T expected, T newValu e) {
return InterlockedCompareExchange64( return InterlockedCompareExchange64(
reinterpret_cast<volatile LONGLONG*>(dest), reinterpret_cast<volatile LONGLONG*>(dest),
LONGLONG(newValue), LONGLONG(newValue),
LONGLONG(expected)); LONGLONG(expected));
} }
static T swap(volatile T* dest, T newValue) { static T swap(volatile T* dest, T newValue) {
return InterlockedExchange64( return InterlockedExchange64(
reinterpret_cast<volatile LONGLONG*>(dest), reinterpret_cast<volatile LONGLONG*>(dest),
skipping to change at line 149 skipping to change at line 114
static T fetchAndAdd(volatile T* dest, T increment) { static T fetchAndAdd(volatile T* dest, T increment) {
return InterlockedExchangeAdd64( return InterlockedExchangeAdd64(
reinterpret_cast<volatile LONGLONG*>(dest), reinterpret_cast<volatile LONGLONG*>(dest),
LONGLONG(increment)); LONGLONG(increment));
} }
}; };
// Implementation of 64-bit Interlocked operations for systems wher e the API does not // Implementation of 64-bit Interlocked operations for systems wher e the API does not
// yet provide the Interlocked...64 operations. // yet provide the Interlocked...64 operations.
template<> template<typename T>
struct InterlockedImpl<false> { struct InterlockedImpl64<T, false> {
static T compareAndSwap(volatile T* dest, T expected, T newValu e) { static T compareAndSwap(volatile T* dest, T expected, T newValu e) {
// NOTE: We must use the compiler intrinsic here: WinXP doe s not offer // NOTE: We must use the compiler intrinsic here: WinXP doe s not offer
// InterlockedCompareExchange64 as an API call. // InterlockedCompareExchange64 as an API call.
return _InterlockedCompareExchange64( return _InterlockedCompareExchange64(
reinterpret_cast<volatile LONGLONG*>(dest), reinterpret_cast<volatile LONGLONG*>(dest),
LONGLONG(newValue), LONGLONG(newValue),
LONGLONG(expected)); LONGLONG(expected));
} }
static T swap(volatile T* dest, T newValue) { static T swap(volatile T* dest, T newValue) {
skipping to change at line 195 skipping to change at line 160
currentValue = result; currentValue = result;
} }
} }
}; };
// On 32-bit IA-32 systems, 64-bit load and store must be implement ed in terms of // On 32-bit IA-32 systems, 64-bit load and store must be implement ed in terms of
// Interlocked operations, but on 64-bit systems they support a sim pler, native // Interlocked operations, but on 64-bit systems they support a sim pler, native
// implementation. The LoadStoreImpl type represents the abstract implementation of // implementation. The LoadStoreImpl type represents the abstract implementation of
// loading and storing 64-bit values. // loading and storing 64-bit values.
template <typename U, typename _IsTTooBig=void> template <typename U, typename _IsTTooBig=void>
class LoadStoreImpl{}; struct LoadStoreImpl;
// Implementation on 64-bit systems. // Implementation on 64-bit systems.
template <typename U> template <typename U>
class LoadStoreImpl<U, typename boost::enable_if_c<sizeof(U) <= siz struct LoadStoreImpl<U, typename boost::enable_if_c<sizeof(U) <= si
eof(void*)>::type> { zeof(void*)>::type> {
public:
static U load(volatile const U* value) { static U load(volatile const U* value) {
MemoryBarrier(); MemoryBarrier();
U result = *value; U result = *value;
MemoryBarrier(); MemoryBarrier();
return result; return result;
} }
static void store(volatile U* dest, U newValue) { static void store(volatile U* dest, U newValue) {
MemoryBarrier(); MemoryBarrier();
*dest = newValue; *dest = newValue;
MemoryBarrier(); MemoryBarrier();
} }
}; };
// Implementation on 32-bit systems. // Implementation on 32-bit systems.
template <typename U> template <typename U>
class LoadStoreImpl<U, typename boost::disable_if_c<sizeof(U) <= si struct LoadStoreImpl<U, typename boost::disable_if_c<sizeof(U) <= s
zeof(void*)>::type> { izeof(void*)>::type> {
public: // NOTE: Implemented out-of-line below since the implementation
static U load(volatile const U* value) { relies on
return AtomicIntrinsics<U>::compareAndSwap(const_cast<volat // AtomicIntrinsics.
ile U*>(value), static U load(volatile const U* value);
U(0), static void store(volatile U* dest, U newValue);
U(0));
}
static void store(volatile U* dest, U newValue) {
AtomicIntrinsics<U>::swap(dest, newValue);
}
}; };
} // namespace details
/**
* Instantiation of AtomicIntrinsics<> for 64-bit word sizes.
*/
template <typename T>
class AtomicIntrinsics<T, typename boost::enable_if_c<sizeof(T) == size
of(LONGLONG)>::type> {
public:
#if defined(NTDDI_VERSION) && defined(NTDDI_WS03SP2) && (NTDDI_VERSION >= N
TDDI_WS03SP2)
static const bool kHaveInterlocked64 = true;
#else
static const bool kHaveInterlocked64 = false;
#endif
typedef details::InterlockedImpl64<T, kHaveInterlocked64> Interlock
edImpl;
typedef details::LoadStoreImpl<T> LoadStoreImpl;
static T compareAndSwap(volatile T* dest, T expected, T newValue) {
return InterlockedImpl::compareAndSwap(dest, expected, newValue
);
}
static T swap(volatile T* dest, T newValue) {
return InterlockedImpl::swap(dest, newValue);
}
static T load(volatile const T* value) {
return LoadStoreImpl::load(value);
}
static void store(volatile T* dest, T newValue) {
LoadStoreImpl::store(dest, newValue);
}
static T fetchAndAdd(volatile T* dest, T increment) {
return InterlockedImpl::fetchAndAdd(dest, increment);
}
private:
AtomicIntrinsics();
~AtomicIntrinsics();
}; };
namespace details {
template <typename U>
U LoadStoreImpl<U, typename boost::disable_if_c<sizeof(U) <= sizeof
(void*)>::type>
::load(volatile const U* value) {
return AtomicIntrinsics<U>::compareAndSwap(const_cast<volatile
U*>(value),
U(0),
U(0));
}
template<typename U>
void LoadStoreImpl<U, typename boost::disable_if_c<sizeof(U) <= siz
eof(void*)>::type>
::store(volatile U* dest, U newValue) {
AtomicIntrinsics<U>::swap(dest, newValue);
}
} // namespace details
} // namespace mongo } // namespace mongo
 End of changes. 9 change blocks. 
64 lines changed or deleted 84 lines changed or added


 audit.h   audit.h 
skipping to change at line 42 skipping to change at line 42
*/ */
#pragma once #pragma once
#include "mongo/base/error_codes.h" #include "mongo/base/error_codes.h"
#include "mongo/db/auth/privilege.h" #include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/user.h" #include "mongo/db/auth/user.h"
namespace mongo { namespace mongo {
class AuthorizationSession;
class BSONObj; class BSONObj;
class ClientBasic; class ClientBasic;
class NamespaceString; class NamespaceString;
class ReplSetConfig; class ReplSetConfig;
class StringData; class StringData;
class UserName; class UserName;
namespace mutablebson { namespace mutablebson {
class Document; class Document;
} // namespace mutablebson } // namespace mutablebson
skipping to change at line 335 skipping to change at line 336
*/ */
void logEnableSharding(ClientBasic* client, void logEnableSharding(ClientBasic* client,
const StringData& dbname); const StringData& dbname);
/** /**
* Logs the result of a addShard command. * Logs the result of a addShard command.
*/ */
void logAddShard(ClientBasic* client, void logAddShard(ClientBasic* client,
const StringData& name, const StringData& name,
const std::string& servers, const std::string& servers,
long long maxsize); long long maxSize);
/** /**
* Logs the result of a removeShard command. * Logs the result of a removeShard command.
*/ */
void logRemoveShard(ClientBasic* client, void logRemoveShard(ClientBasic* client,
const StringData& shardname); const StringData& shardname);
/** /**
* Logs the result of a shardCollection command. * Logs the result of a shardCollection command.
*/ */
void logShardCollection(ClientBasic* client, void logShardCollection(ClientBasic* client,
const StringData& ns, const StringData& ns,
const BSONObj& keyPattern, const BSONObj& keyPattern,
bool unique); bool unique);
/*
* Appends an array of user/db pairs to the provided Document.
* The users are extracted from the current client. They are to be the
* impersonated users for a Command run by an internal user.
*/
void appendImpersonatedUsers(BSONObjBuilder* cmd);
const char cmdOptionImpersonatedUsers[] = "impersonatedUsers";
/*
* Looks for an 'impersonatedUsers' field. This field is used by mongo
s to
* transmit the usernames of the currently authenticated user when it r
uns commands
* on a shard using internal user authentication. Auditing uses this i
nformation
* to properly ascribe users to actions. This is necessary only for im
plicit actions that
* mongos cannot properly audit itself; examples are implicit collectio
n and database creation.
* This function requires that the field is the last field in the bson
object; it edits the
* command BSON to efficiently remove the field before returning.
*
* cmdObj [in, out]: If any impersonated users field exists, it will be
parsed and removed.
* authSession [in]: current authorization session
* parsedUserNames [out]: populated with parsed usernames
* fieldIsPresent [out]: true if impersonatedUsers field was present in
the object
*/
void parseAndRemoveImpersonatedUserField(BSONObj cmdObj,
AuthorizationSession* authSess
ion,
std::vector<UserName>* parsedU
serNames,
bool* fieldIsPresent);
} // namespace audit } // namespace audit
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
1 lines changed or deleted 39 lines changed or added


 auth_helpers.h   auth_helpers.h 
skipping to change at line 18 skipping to change at line 18
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h"
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
namespace auth { namespace auth {
/** /**
* Hashes the password so that it can be stored in a user object or use d for MONGODB-CR * Hashes the password so that it can be stored in a user object or use d for MONGODB-CR
* authentication. * authentication.
*/ */
std::string createPasswordDigest(const StringData& username, std::string MONGO_CLIENT_API createPasswordDigest(const StringData& use rname,
const StringData& clearTextPassword); const StringData& clearTextPassword);
/**
* Retrieves the schema version of the persistent data describing users
and roles from the
* remote server connected to with conn.
*/
Status getRemoteStoredAuthorizationVersion(DBClientBase* conn, int* out
Version);
/**
* Given a schemaVersion24 user document and its source database, retur
n the query and update
* specifier needed to upsert a schemaVersion26 version of the user.
*/
void getUpdateToUpgradeUser(const StringData& sourceDB,
const BSONObj& oldUserDoc,
BSONObj* query,
BSONObj* update);
/**
* Name of the server parameter used to report the auth schema version
(via getParameter).
*/
extern const std::string schemaVersionServerParameter;
} // namespace auth } // namespace auth
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
1 lines changed or deleted 28 lines changed or added


 authlevel.h   authlevel.h 
skipping to change at line 22 skipping to change at line 22
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/* /*
* for a particular db * for a particular db
* levels * levels
* 0 : none * 0 : none
* 1 : read * 1 : read
* 2 : write * 2 : write
*/ */
struct Auth { struct MONGO_CLIENT_API Auth {
enum Level { NONE = 0 , enum Level { NONE = 0 ,
READ = 1 , READ = 1 ,
WRITE = 2 }; WRITE = 2 };
Auth() : level( NONE ) {} Auth() : level( NONE ) {}
Level level; Level level;
std::string user; std::string user;
}; };
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 authorization_manager.h   authorization_manager.h 
skipping to change at line 98 skipping to change at line 98
static const NamespaceString usersBackupCollectionNamespace; static const NamespaceString usersBackupCollectionNamespace;
static const NamespaceString usersCollectionNamespace; static const NamespaceString usersCollectionNamespace;
static const NamespaceString versionCollectionNamespace; static const NamespaceString versionCollectionNamespace;
/** /**
* Query to match the auth schema version document in the versionCo llectionNamespace. * Query to match the auth schema version document in the versionCo llectionNamespace.
*/ */
static const BSONObj versionDocumentQuery; static const BSONObj versionDocumentQuery;
/** /**
* Name of the server parameter used to report the auth schema vers
ion (via getParameter).
*/
static const std::string schemaVersionServerParameter;
/**
* Name of the field in the auth schema version document containing the current schema * Name of the field in the auth schema version document containing the current schema
* version. * version.
*/ */
static const std::string schemaVersionFieldName; static const std::string schemaVersionFieldName;
/** /**
* Value used to represent that the schema version is not cached or invalid. * Value used to represent that the schema version is not cached or invalid.
*/ */
static const int schemaVersionInvalid = 0; static const int schemaVersionInvalid = 0;
skipping to change at line 394 skipping to change at line 389
*/ */
bool tryAcquireAuthzUpdateLock(const StringData& why); bool tryAcquireAuthzUpdateLock(const StringData& why);
/** /**
* Releases the lock guarding modifications to persistent authoriza tion data, which must * Releases the lock guarding modifications to persistent authoriza tion data, which must
* already be held. * already be held.
*/ */
void releaseAuthzUpdateLock(); void releaseAuthzUpdateLock();
/** /**
* Perform one step in the process of upgrading the stored authoriz ation data to the * Performs one step in the process of upgrading the stored authori zation data to the
* newest schema. * newest schema.
* *
* On success, returns Status::OK(), and *isDone will indicate whet her there are more * On success, returns Status::OK(), and *isDone will indicate whet her there are more
* steps to perform. * steps to perform.
* *
* If the authorization data is already fully upgraded, returns Sta tus::OK and sets *isDone * If the authorization data is already fully upgraded, returns Sta tus::OK and sets *isDone
* to true, so this is safe to call on a fully upgraded system. * to true, so this is safe to call on a fully upgraded system.
* *
* On failure, returns a status other than Status::OK(). In this c ase, is is typically safe * On failure, returns a status other than Status::OK(). In this c ase, is is typically safe
* to try again. * to try again.
*/ */
Status upgradeSchemaStep(const BSONObj& writeConcern, bool* isDone) ; Status upgradeSchemaStep(const BSONObj& writeConcern, bool* isDone) ;
/** /**
* Performs up to maxSteps steps in the process of upgrading the st
ored authorization data
* to the newest schema. Behaves as if by repeatedly calling upgra
deSchemaStep up to
* maxSteps times until either it completes the upgrade or returns
a non-OK status.
*
* Invalidates the user cache before the first step and after each
attempted step.
*
* Returns Status::OK() to indicate that the upgrade process has co
mpleted successfully.
* Returns ErrorCodes::OperationIncomplete to indicate that progres
s was made, but that more
* steps must be taken to complete the process. Other returns indi
cate a failure to make
* progress performing the upgrade, and the specific code and messa
ge in the returned status
* may provide additional information.
*/
Status upgradeSchema(int maxSteps, const BSONObj& writeConcern);
/**
* Hook called by replication code to let the AuthorizationManager observe changes * Hook called by replication code to let the AuthorizationManager observe changes
* to relevant collections. * to relevant collections.
*/ */
void logOp(const char* opstr, void logOp(const char* opstr,
const char* ns, const char* ns,
const BSONObj& obj, const BSONObj& obj,
BSONObj* patt, BSONObj* patt,
bool* b); bool* b);
private: private:
 End of changes. 3 change blocks. 
7 lines changed or deleted 24 lines changed or added


 authorization_session.h   authorization_session.h 
skipping to change at line 92 skipping to change at line 92
// Returns the authenticated user with the given name. Returns NUL L // Returns the authenticated user with the given name. Returns NUL L
// if no such user is found. // if no such user is found.
// The user remains in the _authenticatedUsers set for this Authori zationSession, // The user remains in the _authenticatedUsers set for this Authori zationSession,
// and ownership of the user stays with the AuthorizationManager // and ownership of the user stays with the AuthorizationManager
User* lookupUser(const UserName& name); User* lookupUser(const UserName& name);
// Returns the number of authenticated users in this session. // Returns the number of authenticated users in this session.
size_t getNumAuthenticatedUsers(); size_t getNumAuthenticatedUsers();
// Gets an iterator over the names of all authenticated users store d in this manager. // Gets an iterator over the names of all authenticated users store d in this manager.
UserSet::NameIterator getAuthenticatedUserNames(); UserNameIterator getAuthenticatedUserNames();
// Returns a string representing all logged-in users on the current session. // Returns a string representing all logged-in users on the current session.
// WARNING: this string will contain NUL bytes so don't call c_str( )! // WARNING: this string will contain NUL bytes so don't call c_str( )!
std::string getAuthenticatedUserNamesToken(); std::string getAuthenticatedUserNamesToken();
// Removes any authenticated principals whose authorization credent ials came from the given // Removes any authenticated principals whose authorization credent ials came from the given
// database, and revokes any privileges that were granted via that principal. // database, and revokes any privileges that were granted via that principal.
void logoutDatabase(const std::string& dbname); void logoutDatabase(const std::string& dbname);
// Adds the internalSecurity user to the set of authenticated users . // Adds the internalSecurity user to the set of authenticated users .
skipping to change at line 183 skipping to change at line 183
const ActionSet& actions); const ActionSet& actions);
// Utility function for // Utility function for
// isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), action). // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), action).
bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, A ctionType action); bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, A ctionType action);
// Utility function for // Utility function for
// isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), actions). // isAuthorizedForActionsOnResource(ResourcePattern::forExactNamesp ace(ns), actions).
bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, c onst ActionSet& actions); bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, c onst ActionSet& actions);
// Replaces the vector of UserNames that a system user is impersona
ting with a new vector.
// The auditing system adds these to each audit record in the log.
void setImpersonatedUserNames(const std::vector<UserName>& names);
// Returns an iterator to a vector of impersonated usernames.
UserNameIterator getImpersonatedUserNames() const;
// Clears the vector of impersonated UserNames.
void clearImpersonatedUserNames();
// Tells whether impersonation is active or not. This state is set
when
// setImpersonatedUserNames is called and cleared when clearImperso
natedUserNames is
// called.
bool isImpersonating() const;
private: private:
// If any users authenticated on this session are marked as invalid this updates them with // If any users authenticated on this session are marked as invalid this updates them with
// up-to-date information. May require a read lock on the "admin" d b to read the user data. // up-to-date information. May require a read lock on the "admin" d b to read the user data.
void _refreshUserInfoAsNeeded(); void _refreshUserInfoAsNeeded();
// Checks if this connection is authorized for the given Privilege, ignoring whether or not // Checks if this connection is authorized for the given Privilege, ignoring whether or not
// we should even be doing authorization checks in general. Note: this may acquire a read // we should even be doing authorization checks in general. Note: this may acquire a read
// lock on the admin database (to update out-of-date user privilege information). // lock on the admin database (to update out-of-date user privilege information).
bool _isAuthorizedForPrivilege(const Privilege& privilege); bool _isAuthorizedForPrivilege(const Privilege& privilege);
scoped_ptr<AuthzSessionExternalState> _externalState; scoped_ptr<AuthzSessionExternalState> _externalState;
// All Users who have been authenticated on this connection // All Users who have been authenticated on this connection
UserSet _authenticatedUsers; UserSet _authenticatedUsers;
// A vector of impersonated UserNames. These are used in the audit
ing system.
// They are not used for authz checks.
std::vector<UserName> _impersonatedUserNames;
bool _impersonationFlag;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
1 lines changed or deleted 25 lines changed or added


 authz_manager_external_state_s.h   authz_manager_external_state_s.h 
skipping to change at line 38 skipping to change at line 38
#pragma once #pragma once
#include <boost/function.hpp> #include <boost/function.hpp>
#include <boost/thread/mutex.hpp> #include <boost/thread/mutex.hpp>
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <string> #include <string>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/client/distlock.h"
#include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/authz_manager_external_state.h"
#include "mongo/db/auth/user_name.h" #include "mongo/db/auth/user_name.h"
#include "mongo/s/distlock.h"
namespace mongo { namespace mongo {
/** /**
* The implementation of AuthzManagerExternalState functionality for mo ngos. * The implementation of AuthzManagerExternalState functionality for mo ngos.
*/ */
class AuthzManagerExternalStateMongos : public AuthzManagerExternalStat e{ class AuthzManagerExternalStateMongos : public AuthzManagerExternalStat e{
MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongos); MONGO_DISALLOW_COPYING(AuthzManagerExternalStateMongos);
public: public:
skipping to change at line 67 skipping to change at line 67
virtual Status getRoleDescription(const RoleName& roleName, virtual Status getRoleDescription(const RoleName& roleName,
bool showPrivileges, bool showPrivileges,
BSONObj* result); BSONObj* result);
virtual Status getRoleDescriptionsForDB(const std::string dbname, virtual Status getRoleDescriptionsForDB(const std::string dbname,
bool showPrivileges, bool showPrivileges,
bool showBuiltinRoles, bool showBuiltinRoles,
vector<BSONObj>* result); vector<BSONObj>* result);
virtual Status getAllDatabaseNames(std::vector<std::string>* dbname s); virtual Status getAllDatabaseNames(std::vector<std::string>* dbname s);
/**
* Implements findOne of the AuthzManagerExternalState interface
*
* NOTE: The data returned from this helper may be from any config
server or replica set
* node. The first config server or primary node is preferred, whe
n available.
*/
virtual Status findOne(const NamespaceString& collectionName, virtual Status findOne(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
BSONObj* result); BSONObj* result);
/**
* Implements query of the AuthzManagerExternalState interface
*
* NOTE: The data returned from this helper may be from any config
server or replica set
* node. The first config server or primary node is preferred, whe
n available.
*/
virtual Status query(const NamespaceString& collectionName, virtual Status query(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& projection, const BSONObj& projection,
const boost::function<void(const BSONObj&)>& r esultProcessor); const boost::function<void(const BSONObj&)>& r esultProcessor);
virtual Status insert(const NamespaceString& collectionName, virtual Status insert(const NamespaceString& collectionName,
const BSONObj& document, const BSONObj& document,
const BSONObj& writeConcern); const BSONObj& writeConcern);
virtual Status update(const NamespaceString& collectionName, virtual Status update(const NamespaceString& collectionName,
const BSONObj& query, const BSONObj& query,
const BSONObj& updatePattern, const BSONObj& updatePattern,
bool upsert, bool upsert,
bool multi, bool multi,
const BSONObj& writeConcern, const BSONObj& writeConcern,
int* numUpdated); int* numUpdated);
 End of changes. 5 change blocks. 
1 lines changed or deleted 19 lines changed or added


 balance.h   balance.h 
skipping to change at line 109 skipping to change at line 109
* @param secondaryThrottle wait for secondaries to catch up before pushing more deletes * @param secondaryThrottle wait for secondaries to catch up before pushing more deletes
* @param waitForDelete wait for deletes to complete after each chu nk move * @param waitForDelete wait for deletes to complete after each chu nk move
* @return number of chunks effectively moved * @return number of chunks effectively moved
*/ */
int _moveChunks(const vector<CandidateChunkPtr>* candidateChunks, int _moveChunks(const vector<CandidateChunkPtr>* candidateChunks,
bool secondaryThrottle, bool secondaryThrottle,
bool waitForDelete); bool waitForDelete);
/** /**
* Marks this balancer as being live on the config server(s). * Marks this balancer as being live on the config server(s).
*
* @param conn is the connection with the config server(s)
*/ */
void _ping( DBClientBase& conn, bool waiting = false ); void _ping( bool waiting = false );
/** /**
* @return true if all the servers listed in configdb as being shar ds are reachable and are distinct processes * @return true if all the servers listed in configdb as being shar ds are reachable and are distinct processes
*/ */
bool _checkOIDs(); bool _checkOIDs();
}; };
extern Balancer balancer; extern Balancer balancer;
} }
 End of changes. 2 change blocks. 
3 lines changed or deleted 1 lines changed or added


 basic.h   basic.h 
skipping to change at line 24 skipping to change at line 24
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#ifdef _WIN32 #ifdef _WIN32
#include "windows_basic.h" #include "windows_basic.h"
#endif #endif
#if defined(__linux__)
#include <cstring>
// glibc's optimized versions are better than g++ builtins
# define __builtin_strcmp strcmp
# define __builtin_strlen strlen
# define __builtin_memchr memchr
# define __builtin_memcmp memcmp
# define __builtin_memcpy memcpy
# define __builtin_memset memset
# define __builtin_memmove memmove
#endif
 End of changes. 1 change blocks. 
0 lines changed or deleted 0 lines changed or added


 batch_downconvert.h   batch_downconvert.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/bson/optime.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/db/lasterror.h" #include "mongo/s/multi_command_dispatch.h"
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batch_write_op.h" #include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/batched_error_detail.h" #include "mongo/s/write_ops/write_error_detail.h"
// TODO: Remove post-2.6 // TODO: Remove post-2.6
namespace mongo { namespace mongo {
/** /**
* Interface to execute a single safe write. * Interface to execute a single safe write and enforce write concern o n a connection.
*/ */
class SafeWriter { class SafeWriter {
public: public:
virtual ~SafeWriter() { virtual ~SafeWriter() {
} }
virtual void safeWrite( DBClientBase* conn, /**
const BatchItemRef& batchItem, * Sends a write to a remote host and returns a GLE response.
LastError* error ) = 0; */
virtual Status safeWrite( DBClientBase* conn,
// Helper exposed for testing const BatchItemRef& batchItem,
static void fillLastError( const BSONObj& gleResult, LastError* err const BSONObj& writeConcern,
or ); BSONObj* gleResponse ) = 0;
/**
* Purely enforces a write concern on a remote host by clearing the
previous error.
* This is more expensive than a normal safe write, but is sometime
s needed to support
* write command emulation.
*/
virtual Status enforceWriteConcern( DBClientBase* conn,
const StringData& dbName,
const BSONObj& writeConcern,
BSONObj* gleResponse ) = 0;
}; };
/** /**
* Executes a batch write using safe writes. * Executes a batch write using safe writes.
* *
* The actual safe write operation is done via an interface to allow te sting the rest of the * The actual safe write operation is done via an interface to allow te sting the rest of the
* aggregation functionality. * aggregation functionality.
*/ */
class BatchSafeWriter { class BatchSafeWriter {
public: public:
skipping to change at line 82 skipping to change at line 96
BatchSafeWriter( SafeWriter* safeWriter ) : BatchSafeWriter( SafeWriter* safeWriter ) :
_safeWriter( safeWriter ) { _safeWriter( safeWriter ) {
} }
// Testable static dispatching method, defers to SafeWriter for act ual writes over the // Testable static dispatching method, defers to SafeWriter for act ual writes over the
// connection. // connection.
void safeWriteBatch( DBClientBase* conn, void safeWriteBatch( DBClientBase* conn,
const BatchedCommandRequest& request, const BatchedCommandRequest& request,
BatchedCommandResponse* response ); BatchedCommandResponse* response );
// Helper exposed for testing // Helper that acts as an auto-ptr for write and wc errors
static bool isFailedOp( const LastError& error ); struct GLEErrors {
auto_ptr<WriteErrorDetail> writeError;
// Helper exposed for testing auto_ptr<WCErrorDetail> wcError;
static BatchedErrorDetail* lastErrorToBatchError( const LastError& };
error );
/**
* Given a GLE response, extracts a write error and a write concern
error for the previous
* operation.
*
* Returns !OK if the GLE itself failed in an unknown way.
*/
static Status extractGLEErrors( const BSONObj& gleResponse, GLEErro
rs* errors );
struct GLEStats {
GLEStats() :
n( 0 ) {
}
int n;
BSONObj upsertedId;
OpTime lastOp;
};
/**
* Given a GLE response, pulls out stats for the previous write ope
ration.
*/
static void extractGLEStats( const BSONObj& gleResponse, GLEStats*
stats );
/**
* Given a GLE response, strips out all non-write-concern related i
nformation
*/
static BSONObj stripNonWCInfo( const BSONObj& gleResponse );
private: private:
SafeWriter* _safeWriter; SafeWriter* _safeWriter;
}; };
// Used for reporting legacy write concern responses
struct LegacyWCResponse {
string shardHost;
BSONObj gleResponse;
string errToReport;
};
/**
* Uses GLE and the shard hosts and opTimes last written by write comma
nds to enforce a
* write concern across the previously used shards.
*
* Returns OK with the LegacyWCResponses containing only write concern
error information
* Returns !OK if there was an error getting a GLE response
*/
Status enforceLegacyWriteConcern( MultiCommandDispatch* dispatcher,
const StringData& dbName,
const BSONObj& options,
const HostOpTimeMap& hostOpTimes,
vector<LegacyWCResponse>* wcResponses
);
} }
 End of changes. 8 change blocks. 
16 lines changed or deleted 85 lines changed or added


 batch_executor.h   batch_executor.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/db/client.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/batched_delete_document.h" #include "mongo/s/write_ops/batched_delete_document.h"
#include "mongo/s/write_ops/batched_update_document.h" #include "mongo/s/write_ops/batched_update_document.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo { namespace mongo {
class BSONObjBuilder; class BSONObjBuilder;
class Client;
class CurOp; class CurOp;
class OpCounters; class OpCounters;
class OpDebug;
struct LastError; struct LastError;
struct WriteOpStats;
class WriteBatchStats;
/** /**
* An instance of WriteBatchExecutor is an object capable of issuing a write batch. * An instance of WriteBatchExecutor is an object capable of issuing a write batch.
*/ */
class WriteBatchExecutor { class WriteBatchExecutor {
MONGO_DISALLOW_COPYING(WriteBatchExecutor); MONGO_DISALLOW_COPYING(WriteBatchExecutor);
public: public:
WriteBatchExecutor( const BSONObj& defaultWriteConcern, WriteBatchExecutor( const BSONObj& defaultWriteConcern,
Client* client, Client* client,
OpCounters* opCounters, OpCounters* opCounters,
LastError* le ); LastError* le );
/** /**
* Issues writes with requested write concern. Fills response with errors if problems * Issues writes with requested write concern. Fills response with errors if problems
* occur. * occur.
*/ */
void executeBatch( const BatchedCommandRequest& request, BatchedCom mandResponse* response ); void executeBatch( const BatchedCommandRequest& request, BatchedCom mandResponse* response );
const WriteBatchStats& getStats() const;
private: private:
// TODO: This will change in the near future, but keep like this fo /**
r now * Executes the writes in the batch and returns upserted _ids and w
struct WriteStats { rite errors.
* Dispatches to one of the three functions below for DBLock, CurOp
, and stats management.
*/
void bulkExecute( const BatchedCommandRequest& request,
std::vector<BatchedUpsertDetail*>* upsertedIds,
std::vector<WriteErrorDetail*>* errors );
WriteStats() : /**
numInserted( 0 ), numUpdated( 0 ), numUpserted( 0 ), nu * Executes the inserts of an insert batch and returns the write er
mDeleted( 0 ) { rors.
} *
* Internally uses the DBLock of the request namespace.
int numInserted; * May execute multiple inserts inside the same DBLock, and/or take
int numUpdated; the DBLock multiple
int numUpserted; * times.
int numDeleted; */
}; void execInserts( const BatchedCommandRequest& request,
std::vector<WriteErrorDetail*>* errors );
/**
* Issues the single write 'itemRef'. Returns true iff write item w /**
as issued * Executes an update item (which may update many documents or upse
* sucessfully and increments 'stats'. If the item is an upsert, fi rt), and returns the
lls in the * upserted _id on upsert or error on failure.
* 'upsertedID' also, with the '_id' chosen for that update. If the *
write failed, * Internally uses the DBLock of the update namespace.
* returns false and populates 'error' * May take the DBLock multiple times.
*/ */
bool applyWriteItem( const BatchItemRef& itemRef, void execUpdate( const BatchItemRef& updateItem,
WriteStats* stats, BSONObj* upsertedId,
BSONObj* upsertedID, WriteErrorDetail** error );
BatchedErrorDetail* error );
/**
// * Executes a delete item (which may remove many documents) and ret
// Helpers to issue underlying write. urns an error on failure.
// Returns true iff write item was issued sucessfully and increment *
s stats, populates error * Internally uses the DBLock of the delete namespace.
// if not successful. * May take the DBLock multiple times.
// */
void execRemove( const BatchItemRef& removeItem, WriteErrorDetail**
bool doWrite( const string& ns, error );
const BatchItemRef& itemRef,
CurOp* currentOp, /**
WriteStats* stats, * Helper for incrementing stats on the next CurOp.
BSONObj* upsertedID, *
BatchedErrorDetail* error ); * No lock requirements.
*/
bool doInsert( const std::string& ns, void incOpStats( const BatchItemRef& currWrite );
const BSONObj& insertOp,
CurOp* currentOp, /**
WriteStats* stats, * Helper for incrementing stats after each individual write op.
BatchedErrorDetail* error ); *
* No lock requirements (though usually done inside write lock to m
bool doUpdate( const std::string& ns, ake stats update look
const BatchedUpdateDocument& updateOp, * atomic).
CurOp* currentOp, */
WriteStats* stats, void incWriteStats( const BatchItemRef& currWrite,
BSONObj* upsertedID, const WriteOpStats& stats,
BatchedErrorDetail* error ); const WriteErrorDetail* error,
CurOp* currentOp );
bool doDelete( const std::string& ns,
const BatchedDeleteDocument& deleteOp,
CurOp* currentOp,
WriteStats* stats,
BatchedErrorDetail* error );
// Default write concern, if one isn't provide in the batches. // Default write concern, if one isn't provide in the batches.
const BSONObj _defaultWriteConcern; const BSONObj _defaultWriteConcern;
// Client object to issue writes on behalf of. // Client object to issue writes on behalf of.
// Not owned here. // Not owned here.
Client* _client; Client* _client;
// OpCounters object to update. // OpCounters object to update - needed for stats reporting
// Not owned here. // Not owned here.
OpCounters* _opCounters; OpCounters* _opCounters;
// LastError object to use for preparing write results. // LastError object to use for preparing write results - needed for stats reporting
// Not owned here. // Not owned here.
LastError* _le; LastError* _le;
// Stats
scoped_ptr<WriteBatchStats> _stats;
};
/**
* Holds information about the result of a single write operation.
*/
struct WriteOpStats {
WriteOpStats() :
n( 0 ), nModified( 0 ) {
}
void reset() {
n = 0;
nModified = 0;
upsertedID = BSONObj();
}
// Num docs logically affected by this operation.
int n;
// Num docs actually modified by this operation, if applicable (upd
ate)
int nModified;
// _id of newly upserted document, if applicable (update)
BSONObj upsertedID;
};
/**
* Full stats accumulated by a write batch execution. Note that these
stats do not directly
* correspond to the stats accumulated in opCounters and LastError.
*/
class WriteBatchStats {
public:
WriteBatchStats() :
numInserted( 0 ), numUpserted( 0 ), numUpdated( 0 ), numModifie
d( 0 ), numDeleted( 0 ) {
}
int numInserted;
int numUpserted;
int numUpdated;
int numModified;
int numDeleted;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 12 change blocks. 
64 lines changed or deleted 119 lines changed or added


 batch_upconvert.h   batch_upconvert.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <vector>
#include "mongo/db/lasterror.h" #include "mongo/db/lasterror.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/util/net/message.h" #include "mongo/util/net/message.h"
namespace mongo { namespace mongo {
// //
// Utility functions for up-converting incoming write messages into bat ch write requests. // Utility functions for up-converting incoming write messages into bat ch write requests.
// NOTE: These functions throw on invalid message format. // NOTE: These functions throw on invalid message format.
// //
BatchedCommandRequest* msgToBatchRequest( const Message& msg ); void msgToBatchRequests( const Message& msg, std::vector<BatchedCommand Request*>* requests );
BatchedCommandRequest* msgToBatchInsert( const Message& insertMsg ); // Batch inserts may get mapped to multiple batch requests, to avoid sp
illing MaxBSONObjSize
void msgToBatchInserts( const Message& insertMsg,
std::vector<BatchedCommandRequest*>* insertRequ
ests );
BatchedCommandRequest* msgToBatchUpdate( const Message& updateMsg ); BatchedCommandRequest* msgToBatchUpdate( const Message& updateMsg );
BatchedCommandRequest* msgToBatchDelete( const Message& deleteMsg ); BatchedCommandRequest* msgToBatchDelete( const Message& deleteMsg );
// /**
// Utility function for recording completed batch writes into the LastE * Utility function for recording completed batch writes into the LastE
rror object. rror object.
// (Interpreting the response requires the request object as well.) * (Interpreting the response requires the request object as well.)
// *
* Returns true if an error occurred in the batch.
void batchErrorToLastError( const BatchedCommandRequest& request, */
bool batchErrorToLastError( const BatchedCommandRequest& request,
const BatchedCommandResponse& response, const BatchedCommandResponse& response,
LastError* error ); LastError* error );
} }
 End of changes. 4 change blocks. 
9 lines changed or deleted 16 lines changed or added


 batch_write_exec.h   batch_write_exec.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <map>
#include <string>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/bson/optime.h"
#include "mongo/s/ns_targeter.h" #include "mongo/s/ns_targeter.h"
#include "mongo/s/multi_command_dispatch.h" #include "mongo/s/multi_command_dispatch.h"
#include "mongo/s/shard_resolver.h" #include "mongo/s/shard_resolver.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/s/write_ops/batched_command_response.h"
namespace mongo { namespace mongo {
class BatchWriteExecStats;
/** /**
* The BatchWriteExec is able to execute client batch write requests, r esulting in a batch * The BatchWriteExec is able to execute client batch write requests, r esulting in a batch
* response to send back to the client. * response to send back to the client.
* *
* There are two main interfaces the exec uses to "run" the batch: * There are two main interfaces the exec uses to "run" the batch:
* *
* - the "targeter" used to generate child batch operations to send to particular shards * - the "targeter" used to generate child batch operations to send to particular shards
* *
* - the "dispatcher" used to send child batches to several shards at once, and retrieve the * - the "dispatcher" used to send child batches to several shards at once, and retrieve the
* results * results
skipping to change at line 63 skipping to change at line 69
* Both the targeter and dispatcher are assumed to be dedicated to this particular * Both the targeter and dispatcher are assumed to be dedicated to this particular
* BatchWriteExec instance. * BatchWriteExec instance.
* *
*/ */
class BatchWriteExec { class BatchWriteExec {
MONGO_DISALLOW_COPYING (BatchWriteExec); MONGO_DISALLOW_COPYING (BatchWriteExec);
public: public:
BatchWriteExec( NSTargeter* targeter, BatchWriteExec( NSTargeter* targeter,
ShardResolver* resolver, ShardResolver* resolver,
MultiCommandDispatch* dispatcher ) : MultiCommandDispatch* dispatcher );
_targeter( targeter ), _resolver( resolver ), _dispatcher( disp
atcher ) {
}
/** /**
* Executes a client batch write request by sending child batches t o several shard * Executes a client batch write request by sending child batches t o several shard
* endpoints, and returns a client batch write response. * endpoints, and returns a client batch write response.
* *
* Several network round-trips are generally required to execute a
write batch.
*
* This function does not throw, any errors are reported via the cl ientResponse. * This function does not throw, any errors are reported via the cl ientResponse.
*
* TODO: Stats?
*/ */
void executeBatch( const BatchedCommandRequest& clientRequest, void executeBatch( const BatchedCommandRequest& clientRequest,
BatchedCommandResponse* clientResponse ); BatchedCommandResponse* clientResponse );
const BatchWriteExecStats& getStats();
BatchWriteExecStats* releaseStats();
private: private:
// Not owned here // Not owned here
NSTargeter* _targeter; NSTargeter* _targeter;
// Not owned here // Not owned here
ShardResolver* _resolver; ShardResolver* _resolver;
// Not owned here // Not owned here
MultiCommandDispatch* _dispatcher; MultiCommandDispatch* _dispatcher;
// Stats
auto_ptr<BatchWriteExecStats> _stats;
};
// Useful comparator for using connection strings in ordered sets and m
aps
struct ConnectionStringComp {
bool operator()( const ConnectionString& connStrA,
const ConnectionString& connStrB ) const {
return connStrA.toString().compare( connStrB.toString() ) < 0;
}
};
struct HostOpTime {
HostOpTime(OpTime ot, OID e) : opTime(ot), electionId(e) {};
HostOpTime() {};
OpTime opTime;
OID electionId;
};
typedef std::map<ConnectionString, HostOpTime, ConnectionStringComp> Ho
stOpTimeMap;
class BatchWriteExecStats {
public:
BatchWriteExecStats() :
numRounds( 0 ), numTargetErrors( 0 ), numResolveErrors( 0 ), num
StaleBatches( 0 ) {
}
void noteWriteAt(const ConnectionString& host, OpTime opTime, const
OID& electionId);
const HostOpTimeMap& getWriteOpTimes() const;
// Expose via helpers if this gets more complex
// Number of round trips required for the batch
int numRounds;
// Number of times targeting failed
int numTargetErrors;
// Number of times host resolution failed
int numResolveErrors;
// Number of stale batches
int numStaleBatches;
private:
HostOpTimeMap _writeOpTimes;
}; };
} }
 End of changes. 8 change blocks. 
9 lines changed or deleted 62 lines changed or added


 batch_write_op.h   batch_write_op.h 
skipping to change at line 41 skipping to change at line 41
#include <set> #include <set>
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/base/owned_pointer_vector.h" #include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/platform/unordered_map.h" #include "mongo/platform/unordered_map.h"
#include "mongo/s/ns_targeter.h" #include "mongo/s/ns_targeter.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/s/write_ops/batched_command_response.h"
#include "mongo/s/write_ops/batched_error_detail.h" #include "mongo/s/write_ops/wc_error_detail.h"
#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/s/write_ops/write_op.h" #include "mongo/s/write_ops/write_op.h"
namespace mongo { namespace mongo {
class TargetedWriteBatch; class TargetedWriteBatch;
struct ShardError; struct ShardError;
struct ShardWCError;
class TrackedErrors; class TrackedErrors;
class BatchWriteStats; struct BatchWriteStats;
/** /**
* The BatchWriteOp class manages the lifecycle of a batched write rece ived by mongos. Each * The BatchWriteOp class manages the lifecycle of a batched write rece ived by mongos. Each
* item in a batch is tracked via a WriteOp, and the function of the Ba tchWriteOp is to * item in a batch is tracked via a WriteOp, and the function of the Ba tchWriteOp is to
* aggregate the dispatched requests and responses for the underlying W riteOps. * aggregate the dispatched requests and responses for the underlying W riteOps.
* *
* Overall, the BatchWriteOp lifecycle is similar to the WriteOp lifecy cle, with the following * Overall, the BatchWriteOp lifecycle is similar to the WriteOp lifecy cle, with the following
* stages: * stages:
* *
* 0) Client request comes in, batch write op is initialized * 0) Client request comes in, batch write op is initialized
skipping to change at line 133 skipping to change at line 135
*/ */
void noteBatchResponse( const TargetedWriteBatch& targetedBatch, void noteBatchResponse( const TargetedWriteBatch& targetedBatch,
const BatchedCommandResponse& response, const BatchedCommandResponse& response,
TrackedErrors* trackedErrors ); TrackedErrors* trackedErrors );
/** /**
* Stores an error that occurred while trying to send/recv a Target edWriteBatch for this * Stores an error that occurred while trying to send/recv a Target edWriteBatch for this
* BatchWriteOp, and so a response is not available. * BatchWriteOp, and so a response is not available.
*/ */
void noteBatchError( const TargetedWriteBatch& targetedBatch, void noteBatchError( const TargetedWriteBatch& targetedBatch,
const BatchedErrorDetail& error ); const WriteErrorDetail& error );
/**
* Sets a command error for this batch op directly.
*
* Should only be used when there are no outstanding batches to ret
urn.
*/
void setBatchError( const WriteErrorDetail& error );
/** /**
* Returns false if the batch write op needs more processing. * Returns false if the batch write op needs more processing.
*/ */
bool isFinished(); bool isFinished();
/** /**
* Fills a batch response to send back to the client. * Fills a batch response to send back to the client.
*/ */
void buildClientResponse( BatchedCommandResponse* batchResp ); void buildClientResponse( BatchedCommandResponse* batchResp );
//
// Accessors
//
int numWriteOps() const;
int numWriteOpsIn( WriteOpState state ) const;
private: private:
// Incoming client request, not owned here // Incoming client request, not owned here
const BatchedCommandRequest* _clientRequest; const BatchedCommandRequest* _clientRequest;
// Array of ops being processed from the client request // Array of ops being processed from the client request
WriteOp* _writeOps; WriteOp* _writeOps;
// Current outstanding batch op write requests // Current outstanding batch op write requests
// Not owned here but tracked for reporting // Not owned here but tracked for reporting
std::set<const TargetedWriteBatch*> _targeted; std::set<const TargetedWriteBatch*> _targeted;
// Write concern responses from all write batches so far // Write concern responses from all write batches so far
OwnedPointerVector<ShardError> _wcErrors; OwnedPointerVector<ShardWCError> _wcErrors;
// Upserted ids for the whole write batch // Upserted ids for the whole write batch
OwnedPointerVector<BatchedUpsertDetail> _upsertedIds; OwnedPointerVector<BatchedUpsertDetail> _upsertedIds;
// Use to store a top-level error indicating that the batch aborted
unexpectedly and we
// can't report on any of the writes sent. May also include a Shar
dEndpoint indicating
// where the root problem was.
scoped_ptr<ShardError> _batchError;
// Stats for the entire batch op // Stats for the entire batch op
scoped_ptr<BatchWriteStats> _stats; scoped_ptr<BatchWriteStats> _stats;
}; };
struct BatchWriteStats { struct BatchWriteStats {
BatchWriteStats(); BatchWriteStats();
int numInserted; int numInserted;
int numUpserted; int numUpserted;
int numUpdated; int numUpdated;
int numModified;
int numDeleted; int numDeleted;
}; };
/** /**
* Data structure representing the information needed to make a batch r equest, along with * Data structure representing the information needed to make a batch r equest, along with
* pointers to where the resulting responses should be placed. * pointers to where the resulting responses should be placed.
* *
* Internal support for storage as a doubly-linked list, to allow the T argetedWriteBatch to * Internal support for storage as a doubly-linked list, to allow the T argetedWriteBatch to
* efficiently be registered for reporting. * efficiently be registered for reporting.
skipping to change at line 225 skipping to change at line 248
OwnedPointerVector<TargetedWrite> _writes; OwnedPointerVector<TargetedWrite> _writes;
}; };
/** /**
* Simple struct for storing an error with an endpoint. * Simple struct for storing an error with an endpoint.
* *
* Certain types of errors are not stored in WriteOps or must be return ed to a caller. * Certain types of errors are not stored in WriteOps or must be return ed to a caller.
*/ */
struct ShardError { struct ShardError {
ShardError( const ShardEndpoint& endpoint, const BatchedErrorDetail ShardError( const ShardEndpoint& endpoint, const WriteErrorDetail&
& error ) : error ) :
endpoint( endpoint ) {
error.cloneTo( &this->error );
}
const ShardEndpoint endpoint;
WriteErrorDetail error;
};
/**
* Simple struct for storing a write concern error with an endpoint.
*
* Certain types of errors are not stored in WriteOps or must be return
ed to a caller.
*/
struct ShardWCError {
ShardWCError( const ShardEndpoint& endpoint, const WCErrorDetail& e
rror ) :
endpoint( endpoint ) { endpoint( endpoint ) {
error.cloneTo( &this->error ); error.cloneTo( &this->error );
} }
const ShardEndpoint endpoint; const ShardEndpoint endpoint;
BatchedErrorDetail error; WCErrorDetail error;
}; };
/** /**
* Helper class for tracking certain errors from batch operations * Helper class for tracking certain errors from batch operations
*/ */
class TrackedErrors { class TrackedErrors {
public: public:
~TrackedErrors(); ~TrackedErrors();
 End of changes. 10 change blocks. 
7 lines changed or deleted 51 lines changed or added


 batched_command_request.h   batched_command_request.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
#include "mongo/s/write_ops/batched_insert_request.h" #include "mongo/s/write_ops/batched_insert_request.h"
#include "mongo/s/write_ops/batched_update_request.h" #include "mongo/s/write_ops/batched_update_request.h"
#include "mongo/s/write_ops/batched_delete_request.h" #include "mongo/s/write_ops/batched_delete_request.h"
skipping to change at line 38 skipping to change at line 50
* This class wraps the different kinds of command requests into a gene rically usable write * This class wraps the different kinds of command requests into a gene rically usable write
* command request. * command request.
* *
* Designed to be a very thin wrapper that mimics the underlying reques ts exactly. Owns the * Designed to be a very thin wrapper that mimics the underlying reques ts exactly. Owns the
* wrapped request object once constructed. * wrapped request object once constructed.
*/ */
class BatchedCommandRequest : public BSONSerializable { class BatchedCommandRequest : public BSONSerializable {
MONGO_DISALLOW_COPYING(BatchedCommandRequest); MONGO_DISALLOW_COPYING(BatchedCommandRequest);
public: public:
// Maximum number of write ops supported per batch
static const int kMaxWriteBatchSize = 1000;
enum BatchType { enum BatchType {
BatchType_Insert, BatchType_Update, BatchType_Delete, BatchType _Unknown BatchType_Insert, BatchType_Update, BatchType_Delete, BatchType _Unknown
}; };
// //
// construction / destruction // construction / destruction
// //
BatchedCommandRequest( BatchType batchType ); BatchedCommandRequest( BatchType batchType );
/**
* insertReq ownership is transferred to here.
*/
BatchedCommandRequest( BatchedInsertRequest* insertReq ) : BatchedCommandRequest( BatchedInsertRequest* insertReq ) :
_batchType( BatchType_Insert ), _insertReq( insertReq ) { _batchType( BatchType_Insert ), _insertReq( insertReq ) {
} }
/**
* updateReq ownership is transferred to here.
*/
BatchedCommandRequest( BatchedUpdateRequest* updateReq ) : BatchedCommandRequest( BatchedUpdateRequest* updateReq ) :
_batchType( BatchType_Update ), _updateReq( updateReq ) { _batchType( BatchType_Update ), _updateReq( updateReq ) {
} }
/**
* deleteReq ownership is transferred to here.
*/
BatchedCommandRequest( BatchedDeleteRequest* deleteReq ) : BatchedCommandRequest( BatchedDeleteRequest* deleteReq ) :
_batchType( BatchType_Delete ), _deleteReq( deleteReq ) { _batchType( BatchType_Delete ), _deleteReq( deleteReq ) {
} }
virtual ~BatchedCommandRequest() {}; virtual ~BatchedCommandRequest() {};
/** Copies all the fields present in 'this' to 'other'. */ /** Copies all the fields present in 'this' to 'other'. */
void cloneTo( BatchedCommandRequest* other ) const; void cloneTo( BatchedCommandRequest* other ) const;
// //
skipping to change at line 98 skipping to change at line 124
// individual field accessors // individual field accessors
// //
bool isVerboseWC() const; bool isVerboseWC() const;
void setNS( const StringData& collName ); void setNS( const StringData& collName );
void unsetNS(); void unsetNS();
bool isNSSet() const; bool isNSSet() const;
const std::string& getNS() const; const std::string& getNS() const;
/**
* Write ops are BSONObjs, whose format depends on the type of requ
est
* TODO: Should be possible to further parse these ops generically
if we come up with a
* good scheme.
*/
void setWriteOps( const std::vector<BSONObj>& writeOps );
void unsetWriteOps();
bool isWriteOpsSet() const;
std::size_t sizeWriteOps() const; std::size_t sizeWriteOps() const;
std::vector<BSONObj> getWriteOps() const;
void setWriteConcern( const BSONObj& writeConcern ); void setWriteConcern( const BSONObj& writeConcern );
void unsetWriteConcern(); void unsetWriteConcern();
bool isWriteConcernSet() const; bool isWriteConcernSet() const;
const BSONObj& getWriteConcern() const; const BSONObj& getWriteConcern() const;
void setOrdered( bool ordered ); void setOrdered( bool ordered );
void unsetOrdered(); void unsetOrdered();
bool isOrderedSet() const; bool isOrderedSet() const;
bool getOrdered() const; bool getOrdered() const;
void setShardName(const StringData& shardName); void setMetadata(BatchedRequestMetadata* metadata);
void unsetShardName(); void unsetMetadata();
bool isShardNameSet() const; bool isMetadataSet() const;
const std::string& getShardName() const; BatchedRequestMetadata* getMetadata() const;
void setShardVersion( const ChunkVersion& shardVersion );
void unsetShardVersion();
bool isShardVersionSet() const;
const ChunkVersion& getShardVersion() const;
void setSession( long long session );
void unsetSession();
bool isSessionSet() const;
long long getSession() const;
// //
// Helpers for auth pre-parsing // Helpers for auth pre-parsing
// //
/** /**
* Helper to determine whether or not there are any upserts in the batch * Helper to determine whether or not there are any upserts in the batch
*/ */
static bool containsUpserts( const BSONObj& writeCmdObj ); static bool containsUpserts( const BSONObj& writeCmdObj );
skipping to change at line 173 skipping to change at line 180
* Similar to above, this class wraps the write items of a command requ est into a generically * Similar to above, this class wraps the write items of a command requ est into a generically
* usable type. Very thin wrapper, does not own the write item itself. * usable type. Very thin wrapper, does not own the write item itself.
* *
* TODO: Use in BatchedCommandRequest above * TODO: Use in BatchedCommandRequest above
*/ */
class BatchItemRef { class BatchItemRef {
public: public:
BatchItemRef( const BatchedCommandRequest* request, int itemIndex ) : BatchItemRef( const BatchedCommandRequest* request, int itemIndex ) :
_request( request ), _itemIndex( itemIndex ) { _request( request ), _itemIndex( itemIndex ) {
dassert( itemIndex < static_cast<int>( request->sizeWriteOps() ) );
} }
const BatchedCommandRequest* getRequest() const { const BatchedCommandRequest* getRequest() const {
return _request; return _request;
} }
int getItemIndex() const { int getItemIndex() const {
return _itemIndex; return _itemIndex;
} }
BatchedCommandRequest::BatchType getOpType() const { BatchedCommandRequest::BatchType getOpType() const {
return _request->getBatchType(); return _request->getBatchType();
} }
BSONObj getDocument() const { const BSONObj& getDocument() const {
dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps(
) ) );
return _request->getInsertRequest()->getDocumentsAt( _itemIndex ); return _request->getInsertRequest()->getDocumentsAt( _itemIndex );
} }
const BatchedUpdateDocument* getUpdate() const { const BatchedUpdateDocument* getUpdate() const {
dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps( ) ) );
return _request->getUpdateRequest()->getUpdatesAt( _itemIndex ) ; return _request->getUpdateRequest()->getUpdatesAt( _itemIndex ) ;
} }
const BatchedDeleteDocument* getDelete() const { const BatchedDeleteDocument* getDelete() const {
dassert( _itemIndex < static_cast<int>( _request->sizeWriteOps( ) ) );
return _request->getDeleteRequest()->getDeletesAt( _itemIndex ) ; return _request->getDeleteRequest()->getDeletesAt( _itemIndex ) ;
} }
private: private:
const BatchedCommandRequest* _request; const BatchedCommandRequest* _request;
const int _itemIndex; const int _itemIndex;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 12 change blocks. 
27 lines changed or deleted 42 lines changed or added


 batched_command_response.h   batched_command_response.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
#include "mongo/s/write_ops/batched_error_detail.h" #include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/s/write_ops/batched_upsert_detail.h" #include "mongo/s/write_ops/batched_upsert_detail.h"
#include "mongo/s/write_ops/wc_error_detail.h"
namespace mongo { namespace mongo {
/** /**
* This class represents the layout and content of a insert/update/dele te runCommand, * This class represents the layout and content of a insert/update/dele te runCommand,
* the response side. * the response side.
*/ */
class BatchedCommandResponse : public BSONSerializable { class BatchedCommandResponse : public BSONSerializable {
MONGO_DISALLOW_COPYING(BatchedCommandResponse); MONGO_DISALLOW_COPYING(BatchedCommandResponse);
public: public:
// //
// schema declarations // schema declarations
// //
static const BSONField<int> ok; static const BSONField<int> ok;
static const BSONField<int> errCode; static const BSONField<int> errCode;
static const BSONField<BSONObj> errInfo;
static const BSONField<string> errMessage; static const BSONField<string> errMessage;
static const BSONField<long long> n; static const BSONField<long long> n;
static const BSONField<BSONObj> singleUpserted; // ID type static const BSONField<long long> nModified;
static const BSONField<std::vector<BatchedUpsertDetail*> > upsertDe tails; static const BSONField<std::vector<BatchedUpsertDetail*> > upsertDe tails;
static const BSONField<Date_t> lastOp; static const BSONField<OpTime> lastOp;
static const BSONField<std::vector<BatchedErrorDetail*> > errDetail static const BSONField<OID> electionId;
s; static const BSONField<std::vector<WriteErrorDetail*> > writeErrors
;
static const BSONField<WCErrorDetail*> writeConcernError;
// //
// construction / destruction // construction / destruction
// //
BatchedCommandResponse(); BatchedCommandResponse();
virtual ~BatchedCommandResponse(); virtual ~BatchedCommandResponse();
/** Copies all the fields present in 'this' to 'other'. */ /** Copies all the fields present in 'this' to 'other'. */
void cloneTo(BatchedCommandResponse* other) const; void cloneTo(BatchedCommandResponse* other) const;
skipping to change at line 87 skipping to change at line 101
void setOk(int ok); void setOk(int ok);
void unsetOk(); void unsetOk();
bool isOkSet() const; bool isOkSet() const;
int getOk() const; int getOk() const;
void setErrCode(int errCode); void setErrCode(int errCode);
void unsetErrCode(); void unsetErrCode();
bool isErrCodeSet() const; bool isErrCodeSet() const;
int getErrCode() const; int getErrCode() const;
void setErrInfo(const BSONObj& errInfo);
void unsetErrInfo();
bool isErrInfoSet() const;
const BSONObj& getErrInfo() const;
void setErrMessage(const StringData& errMessage); void setErrMessage(const StringData& errMessage);
void unsetErrMessage(); void unsetErrMessage();
bool isErrMessageSet() const; bool isErrMessageSet() const;
const std::string& getErrMessage() const; const std::string& getErrMessage() const;
void setNModified(long long n);
void unsetNModified();
bool isNModified() const;
long long getNModified() const;
void setN(long long n); void setN(long long n);
void unsetN(); void unsetN();
bool isNSet() const; bool isNSet() const;
long long getN() const; long long getN() const;
void setSingleUpserted(const BSONObj& singleUpserted);
void unsetSingleUpserted();
bool isSingleUpsertedSet() const;
const BSONObj& getSingleUpserted() const;
void setUpsertDetails(const std::vector<BatchedUpsertDetail*>& upse rtDetails); void setUpsertDetails(const std::vector<BatchedUpsertDetail*>& upse rtDetails);
void addToUpsertDetails(BatchedUpsertDetail* upsertDetails); void addToUpsertDetails(BatchedUpsertDetail* upsertDetails);
void unsetUpsertDetails(); void unsetUpsertDetails();
bool isUpsertDetailsSet() const; bool isUpsertDetailsSet() const;
std::size_t sizeUpsertDetails() const; std::size_t sizeUpsertDetails() const;
const std::vector<BatchedUpsertDetail*>& getUpsertDetails() const; const std::vector<BatchedUpsertDetail*>& getUpsertDetails() const;
const BatchedUpsertDetail* getUpsertDetailsAt(std::size_t pos) cons t; const BatchedUpsertDetail* getUpsertDetailsAt(std::size_t pos) cons t;
void setLastOp(Date_t lastOp); void setLastOp(OpTime lastOp);
void unsetLastOp(); void unsetLastOp();
bool isLastOpSet() const; bool isLastOpSet() const;
Date_t getLastOp() const; OpTime getLastOp() const;
void setErrDetails(const std::vector<BatchedErrorDetail*>& errDetai void setElectionId(const OID& electionId);
ls); void unsetElectionId();
void addToErrDetails(BatchedErrorDetail* errDetails); bool isElectionIdSet() const;
OID getElectionId() const;
void setErrDetails(const std::vector<WriteErrorDetail*>& errDetails
);
// errDetails ownership is transferred to here.
void addToErrDetails(WriteErrorDetail* errDetails);
void unsetErrDetails(); void unsetErrDetails();
bool isErrDetailsSet() const; bool isErrDetailsSet() const;
std::size_t sizeErrDetails() const; std::size_t sizeErrDetails() const;
const std::vector<BatchedErrorDetail*>& getErrDetails() const; const std::vector<WriteErrorDetail*>& getErrDetails() const;
const BatchedErrorDetail* getErrDetailsAt(std::size_t pos) const; const WriteErrorDetail* getErrDetailsAt(std::size_t pos) const;
void setWriteConcernError(WCErrorDetail* error);
void unsetWriteConcernError();
bool isWriteConcernErrorSet() const;
const WCErrorDetail* getWriteConcernError() const;
private: private:
// Convention: (M)andatory, (O)ptional // Convention: (M)andatory, (O)ptional
// (M) 0 if batch didn't get to be applied for any reason // (M) 0 if batch didn't get to be applied for any reason
int _ok; int _ok;
bool _isOkSet; bool _isOkSet;
// (O) whether all items in the batch applied correctly // (O) whether all items in the batch applied correctly
int _errCode; int _errCode;
bool _isErrCodeSet; bool _isErrCodeSet;
// (O) further details about the error
BSONObj _errInfo;
bool _isErrInfoSet;
// (O) whether all items in the batch applied correctly // (O) whether all items in the batch applied correctly
string _errMessage; string _errMessage;
bool _isErrMessageSet; bool _isErrMessageSet;
// (O) number of documents affected // (M) number of documents affected
long long _n; long long _n;
bool _isNSet; bool _isNSet;
// (O) number of documents updated
long long _nModified;
bool _isNModifiedSet;
// (O) "promoted" _upserted, if the corresponding request containe d only one batch item // (O) "promoted" _upserted, if the corresponding request containe d only one batch item
// Should only be present if _upserted is not. // Should only be present if _upserted is not.
BSONObj _singleUpserted; BSONObj _singleUpserted;
bool _isSingleUpsertedSet; bool _isSingleUpsertedSet;
// (O) Array of upserted items' _id's // (O) Array of upserted items' _id's
// Should only be present if _singleUpserted is not. // Should only be present if _singleUpserted is not.
boost::scoped_ptr<std::vector<BatchedUpsertDetail*> >_upsertDetails ; boost::scoped_ptr<std::vector<BatchedUpsertDetail*> >_upsertDetails ;
// (O) XXX What is lastop? // (O) Timestamp assigned to the write op when it was written to t
Date_t _lastOp; he oplog.
// Normally, getLastError can use Client::_lastOp, but this is
not valid for
// mongos which loses track of the session due to RCAR. There
fore, we must
// keep track of the lastOp manually ourselves.
OpTime _lastOp;
bool _isLastOpSet; bool _isLastOpSet;
// (O) In addition to keeping track of the above lastOp timestamp,
we must also keep
// track of the primary we talked to. This is because if the
primary moves,
// subsequent calls to getLastError are invalid. The only way
we know if an
// election has occurred is to use the unique electionId.
OID _electionId;
bool _isElectionIdSet;
// (O) Array of item-level error information // (O) Array of item-level error information
boost::scoped_ptr<std::vector<BatchedErrorDetail*> >_errDetails; boost::scoped_ptr<std::vector<WriteErrorDetail*> >_writeErrorDetail
s;
// (O) errors that occurred while trying to satisfy the write conc
ern.
boost::scoped_ptr<WCErrorDetail> _wcErrDetails;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 19 change blocks. 
31 lines changed or deleted 79 lines changed or added


 batched_delete_document.h   batched_delete_document.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 batched_delete_request.h   batched_delete_request.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without xbeven the implied warranty of * but WITHOUT ANY WARRANTY; without xbeven the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
#include "mongo/s/chunk_version.h" #include "mongo/s/chunk_version.h"
#include "mongo/s/write_ops/batched_delete_document.h" #include "mongo/s/write_ops/batched_delete_document.h"
#include "mongo/s/write_ops/batched_request_metadata.h"
namespace mongo { namespace mongo {
/** /**
* This class represents the layout and content of a batched delete run Command, * This class represents the layout and content of a batched delete run Command,
* the request side. * the request side.
*/ */
class BatchedDeleteRequest : public BSONSerializable { class BatchedDeleteRequest : public BSONSerializable {
MONGO_DISALLOW_COPYING(BatchedDeleteRequest); MONGO_DISALLOW_COPYING(BatchedDeleteRequest);
public: public:
skipping to change at line 51 skipping to change at line 64
// //
// Name used for the batched delete invocation. // Name used for the batched delete invocation.
static const std::string BATCHED_DELETE_REQUEST; static const std::string BATCHED_DELETE_REQUEST;
// Field names and types in the batched delete command type. // Field names and types in the batched delete command type.
static const BSONField<std::string> collName; static const BSONField<std::string> collName;
static const BSONField<std::vector<BatchedDeleteDocument*> > delete s; static const BSONField<std::vector<BatchedDeleteDocument*> > delete s;
static const BSONField<BSONObj> writeConcern; static const BSONField<BSONObj> writeConcern;
static const BSONField<bool> ordered; static const BSONField<bool> ordered;
static const BSONField<string> shardName; static const BSONField<BSONObj> metadata;
static const BSONField<ChunkVersion> shardVersion;
static const BSONField<long long> session;
// //
// construction / destruction // construction / destruction
// //
BatchedDeleteRequest(); BatchedDeleteRequest();
virtual ~BatchedDeleteRequest(); virtual ~BatchedDeleteRequest();
/** Copies all the fields present in 'this' to 'other'. */ /** Copies all the fields present in 'this' to 'other'. */
void cloneTo(BatchedDeleteRequest* other) const; void cloneTo(BatchedDeleteRequest* other) const;
skipping to change at line 85 skipping to change at line 96
// //
// individual field accessors // individual field accessors
// //
void setCollName(const StringData& collName); void setCollName(const StringData& collName);
void unsetCollName(); void unsetCollName();
bool isCollNameSet() const; bool isCollNameSet() const;
const std::string& getCollName() const; const std::string& getCollName() const;
void setDeletes(const std::vector<BatchedDeleteDocument*>& deletes) ; void setDeletes(const std::vector<BatchedDeleteDocument*>& deletes) ;
/**
* deletes ownership is transferred to here.
*/
void addToDeletes(BatchedDeleteDocument* deletes); void addToDeletes(BatchedDeleteDocument* deletes);
void unsetDeletes(); void unsetDeletes();
bool isDeletesSet() const; bool isDeletesSet() const;
std::size_t sizeDeletes() const; std::size_t sizeDeletes() const;
const std::vector<BatchedDeleteDocument*>& getDeletes() const; const std::vector<BatchedDeleteDocument*>& getDeletes() const;
const BatchedDeleteDocument* getDeletesAt(std::size_t pos) const; const BatchedDeleteDocument* getDeletesAt(std::size_t pos) const;
void setWriteConcern(const BSONObj& writeConcern); void setWriteConcern(const BSONObj& writeConcern);
void unsetWriteConcern(); void unsetWriteConcern();
bool isWriteConcernSet() const; bool isWriteConcernSet() const;
const BSONObj& getWriteConcern() const; const BSONObj& getWriteConcern() const;
void setOrdered(bool ordered); void setOrdered(bool ordered);
void unsetOrdered(); void unsetOrdered();
bool isOrderedSet() const; bool isOrderedSet() const;
bool getOrdered() const; bool getOrdered() const;
void setShardName(const StringData& shardName); /*
void unsetShardName(); * metadata ownership will be transferred to this.
bool isShardNameSet() const; */
const std::string& getShardName() const; void setMetadata(BatchedRequestMetadata* metadata);
void unsetMetadata();
void setShardVersion(const ChunkVersion& shardVersion); bool isMetadataSet() const;
void unsetShardVersion(); BatchedRequestMetadata* getMetadata() const;
bool isShardVersionSet() const;
const ChunkVersion& getShardVersion() const;
void setSession(long long session);
void unsetSession();
bool isSessionSet() const;
long long getSession() const;
private: private:
// Convention: (M)andatory, (O)ptional // Convention: (M)andatory, (O)ptional
// (M) collection we're deleting from // (M) collection we're deleting from
std::string _collName; std::string _collName;
bool _isCollNameSet; bool _isCollNameSet;
// (M) array of individual deletes // (M) array of individual deletes
std::vector<BatchedDeleteDocument*> _deletes; std::vector<BatchedDeleteDocument*> _deletes;
bool _isDeletesSet; bool _isDeletesSet;
// (O) to be issued after the batch applied // (O) to be issued after the batch applied
BSONObj _writeConcern; BSONObj _writeConcern;
bool _isWriteConcernSet; bool _isWriteConcernSet;
// (O) whether batch is issued in parallel or not // (O) whether batch is issued in parallel or not
bool _ordered; bool _ordered;
bool _isOrderedSet; bool _isOrderedSet;
// (O) shard name we're sending this batch to // (O) metadata associated with this request for internal use.
std::string _shardName; scoped_ptr<BatchedRequestMetadata> _metadata;
bool _isShardNameSet;
// (O) version for this collection on a given shard
boost::scoped_ptr<ChunkVersion> _shardVersion;
// (O) session number the inserts belong to
long long _session;
bool _isSessionSet;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
27 lines changed or deleted 34 lines changed or added


 batched_insert_request.h   batched_insert_request.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
#include "mongo/s/chunk_version.h" #include "mongo/s/chunk_version.h"
#include "mongo/s/write_ops/batched_request_metadata.h"
namespace mongo { namespace mongo {
/** /**
* This class represents the layout and content of a batched insert run Command, * This class represents the layout and content of a batched insert run Command,
* the request side. * the request side.
*/ */
class BatchedInsertRequest : public BSONSerializable { class BatchedInsertRequest : public BSONSerializable {
MONGO_DISALLOW_COPYING(BatchedInsertRequest); MONGO_DISALLOW_COPYING(BatchedInsertRequest);
public: public:
skipping to change at line 50 skipping to change at line 63
// //
// Name used for the batched insert invocation. // Name used for the batched insert invocation.
static const std::string BATCHED_INSERT_REQUEST; static const std::string BATCHED_INSERT_REQUEST;
// Field names and types in the batched insert command type. // Field names and types in the batched insert command type.
static const BSONField<std::string> collName; static const BSONField<std::string> collName;
static const BSONField<std::vector<BSONObj> > documents; static const BSONField<std::vector<BSONObj> > documents;
static const BSONField<BSONObj> writeConcern; static const BSONField<BSONObj> writeConcern;
static const BSONField<bool> ordered; static const BSONField<bool> ordered;
static const BSONField<string> shardName; static const BSONField<BSONObj> metadata;
static const BSONField<ChunkVersion> shardVersion;
static const BSONField<long long> session;
// //
// construction / destruction // construction / destruction
// //
BatchedInsertRequest(); BatchedInsertRequest();
virtual ~BatchedInsertRequest(); virtual ~BatchedInsertRequest();
/** Copies all the fields present in 'this' to 'other'. */ /** Copies all the fields present in 'this' to 'other'. */
void cloneTo(BatchedInsertRequest* other) const; void cloneTo(BatchedInsertRequest* other) const;
skipping to change at line 83 skipping to change at line 94
// //
// individual field accessors // individual field accessors
// //
void setCollName(const StringData& collName); void setCollName(const StringData& collName);
void unsetCollName(); void unsetCollName();
bool isCollNameSet() const; bool isCollNameSet() const;
const std::string& getCollName() const; const std::string& getCollName() const;
void setDocuments(const std::vector<BSONObj>& documents);
void addToDocuments(const BSONObj& documents); void addToDocuments(const BSONObj& documents);
void unsetDocuments(); void unsetDocuments();
bool isDocumentsSet() const; bool isDocumentsSet() const;
std::size_t sizeDocuments() const; std::size_t sizeDocuments() const;
const std::vector<BSONObj>& getDocuments() const; const std::vector<BSONObj>& getDocuments() const;
const BSONObj& getDocumentsAt(std::size_t pos) const; const BSONObj& getDocumentsAt(std::size_t pos) const;
void setWriteConcern(const BSONObj& writeConcern); void setWriteConcern(const BSONObj& writeConcern);
void unsetWriteConcern(); void unsetWriteConcern();
bool isWriteConcernSet() const; bool isWriteConcernSet() const;
const BSONObj& getWriteConcern() const; const BSONObj& getWriteConcern() const;
void setOrdered(bool ordered); void setOrdered(bool ordered);
void unsetOrdered(); void unsetOrdered();
bool isOrderedSet() const; bool isOrderedSet() const;
bool getOrdered() const; bool getOrdered() const;
void setShardName(const StringData& shardName); /*
void unsetShardName(); * metadata ownership will be transferred to this.
bool isShardNameSet() const; */
const std::string& getShardName() const; void setMetadata(BatchedRequestMetadata* metadata);
void unsetMetadata();
void setShardVersion(const ChunkVersion& shardVersion); bool isMetadataSet() const;
void unsetShardVersion(); BatchedRequestMetadata* getMetadata() const;
bool isShardVersionSet() const;
const ChunkVersion& getShardVersion() const;
void setSession(long long session);
void unsetSession();
bool isSessionSet() const;
long long getSession() const;
private: private:
// Convention: (M)andatory, (O)ptional // Convention: (M)andatory, (O)ptional
// (M) collection we're inserting on // (M) collection we're inserting on
std::string _collName; std::string _collName;
bool _isCollNameSet; bool _isCollNameSet;
// (M) array of documents to be inserted // (M) array of documents to be inserted
std::vector<BSONObj> _documents; std::vector<BSONObj> _documents;
bool _isDocumentsSet; bool _isDocumentsSet;
// (O) to be issued after the batch applied // (O) to be issued after the batch applied
BSONObj _writeConcern; BSONObj _writeConcern;
bool _isWriteConcernSet; bool _isWriteConcernSet;
// (O) whether batch is issued in parallel or not // (O) whether batch is issued in parallel or not
bool _ordered; bool _ordered;
bool _isOrderedSet; bool _isOrderedSet;
// (O) shard name we're sending this batch to // (O) metadata associated with this request for internal use.
std::string _shardName; scoped_ptr<BatchedRequestMetadata> _metadata;
bool _isShardNameSet;
// (O) version for this collection on a given shard
boost::scoped_ptr<ChunkVersion> _shardVersion;
// (O) session number the inserts belong to
long long _session;
bool _isSessionSet;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
28 lines changed or deleted 30 lines changed or added


 batched_update_document.h   batched_update_document.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 batched_update_request.h   batched_update_request.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
#include "mongo/s/chunk_version.h" #include "mongo/s/chunk_version.h"
#include "mongo/s/write_ops/batched_request_metadata.h"
#include "mongo/s/write_ops/batched_update_document.h" #include "mongo/s/write_ops/batched_update_document.h"
namespace mongo { namespace mongo {
/** /**
* This class represents the layout and content of a batched update run Command, * This class represents the layout and content of a batched update run Command,
* the request side. * the request side.
*/ */
class BatchedUpdateRequest : public BSONSerializable { class BatchedUpdateRequest : public BSONSerializable {
MONGO_DISALLOW_COPYING(BatchedUpdateRequest); MONGO_DISALLOW_COPYING(BatchedUpdateRequest);
skipping to change at line 51 skipping to change at line 64
// //
// Name used for the batched update invocation. // Name used for the batched update invocation.
static const std::string BATCHED_UPDATE_REQUEST; static const std::string BATCHED_UPDATE_REQUEST;
// Field names and types in the batched update command type. // Field names and types in the batched update command type.
static const BSONField<std::string> collName; static const BSONField<std::string> collName;
static const BSONField<std::vector<BatchedUpdateDocument*> > update s; static const BSONField<std::vector<BatchedUpdateDocument*> > update s;
static const BSONField<BSONObj> writeConcern; static const BSONField<BSONObj> writeConcern;
static const BSONField<bool> ordered; static const BSONField<bool> ordered;
static const BSONField<string> shardName; static const BSONField<BSONObj> metadata;
static const BSONField<ChunkVersion> shardVersion;
static const BSONField<long long> session;
// //
// construction / destruction // construction / destruction
// //
BatchedUpdateRequest(); BatchedUpdateRequest();
virtual ~BatchedUpdateRequest(); virtual ~BatchedUpdateRequest();
/** Copies all the fields present in 'this' to 'other'. */ /** Copies all the fields present in 'this' to 'other'. */
void cloneTo(BatchedUpdateRequest* other) const; void cloneTo(BatchedUpdateRequest* other) const;
skipping to change at line 85 skipping to change at line 96
// //
// individual field accessors // individual field accessors
// //
void setCollName(const StringData& collName); void setCollName(const StringData& collName);
void unsetCollName(); void unsetCollName();
bool isCollNameSet() const; bool isCollNameSet() const;
const std::string& getCollName() const; const std::string& getCollName() const;
void setUpdates(const std::vector<BatchedUpdateDocument*>& updates) ; void setUpdates(const std::vector<BatchedUpdateDocument*>& updates) ;
/**
* updates ownership is transferred to here.
*/
void addToUpdates(BatchedUpdateDocument* updates); void addToUpdates(BatchedUpdateDocument* updates);
void unsetUpdates(); void unsetUpdates();
bool isUpdatesSet() const; bool isUpdatesSet() const;
std::size_t sizeUpdates() const; std::size_t sizeUpdates() const;
const std::vector<BatchedUpdateDocument*>& getUpdates() const; const std::vector<BatchedUpdateDocument*>& getUpdates() const;
const BatchedUpdateDocument* getUpdatesAt(std::size_t pos) const; const BatchedUpdateDocument* getUpdatesAt(std::size_t pos) const;
void setWriteConcern(const BSONObj& writeConcern); void setWriteConcern(const BSONObj& writeConcern);
void unsetWriteConcern(); void unsetWriteConcern();
bool isWriteConcernSet() const; bool isWriteConcernSet() const;
const BSONObj& getWriteConcern() const; const BSONObj& getWriteConcern() const;
void setOrdered(bool ordered); void setOrdered(bool ordered);
void unsetOrdered(); void unsetOrdered();
bool isOrderedSet() const; bool isOrderedSet() const;
bool getOrdered() const; bool getOrdered() const;
void setShardName(const StringData& shardName); /*
void unsetShardName(); * metadata ownership will be transferred to this.
bool isShardNameSet() const; */
const std::string& getShardName() const; void setMetadata(BatchedRequestMetadata* metadata);
void unsetMetadata();
void setShardVersion(const ChunkVersion& shardVersion); bool isMetadataSet() const;
void unsetShardVersion(); BatchedRequestMetadata* getMetadata() const;
bool isShardVersionSet() const;
const ChunkVersion& getShardVersion() const;
void setSession(long long session);
void unsetSession();
bool isSessionSet() const;
long long getSession() const;
private: private:
// Convention: (M)andatory, (O)ptional // Convention: (M)andatory, (O)ptional
// (M) collection we're updating from // (M) collection we're updating from
std::string _collName; std::string _collName;
bool _isCollNameSet; bool _isCollNameSet;
// (M) array of individual updates // (M) array of individual updates
std::vector<BatchedUpdateDocument*> _updates; std::vector<BatchedUpdateDocument*> _updates;
bool _isUpdatesSet; bool _isUpdatesSet;
// (O) to be issued after the batch applied // (O) to be issued after the batch applied
BSONObj _writeConcern; BSONObj _writeConcern;
bool _isWriteConcernSet; bool _isWriteConcernSet;
// (O) whether batch is issued in parallel or not // (O) whether batch is issued in parallel or not
bool _ordered; bool _ordered;
bool _isOrderedSet; bool _isOrderedSet;
// (O) shard name we're sending this batch to // (O) metadata associated with this request for internal use.
std::string _shardName; scoped_ptr<BatchedRequestMetadata> _metadata;
bool _isShardNameSet;
// (O) version for this collection on a given shard
boost::scoped_ptr<ChunkVersion> _shardVersion;
// (O) session number the inserts belong to
long long _session;
bool _isSessionSet;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
27 lines changed or deleted 34 lines changed or added


 bits.h   bits.h 
skipping to change at line 22 skipping to change at line 22
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
// figure out if we're on a 64 or 32 bit system // figure out if we're on a 64 or 32 bit system
#if defined(__x86_64__) || defined(__amd64__) || defined(_WIN64) #if defined(__x86_64__) || defined(__amd64__) || defined(_WIN64) || defined (__aarch64__)
#define MONGO_PLATFORM_64 #define MONGO_PLATFORM_64
#elif defined(__i386__) || defined(_WIN32) #elif defined(__i386__) || defined(_WIN32) || defined(__arm__)
#define MONGO_PLATFORM_32 #define MONGO_PLATFORM_32
#else #else
#error "unknown platform" #error "unknown platform"
#endif #endif
namespace mongo { namespace mongo {
// defined here so can test on linux // defined here so can test on linux
inline int mongo_firstBitSet( unsigned long long v ) { inline int mongo_firstBitSet( unsigned long long v ) {
if ( v == 0 ) if ( v == 0 )
return 0; return 0;
 End of changes. 2 change blocks. 
2 lines changed or deleted 2 lines changed or added


 bson-inl.h   bson-inl.h 
skipping to change at line 368 skipping to change at line 368
} }
inline Labeler BSONObjBuilderValueStream::operator<<( const Labeler::La bel &l ) { inline Labeler BSONObjBuilderValueStream::operator<<( const Labeler::La bel &l ) {
return Labeler( l, this ); return Labeler( l, this );
} }
inline void BSONObjBuilderValueStream::endField( const StringData& next FieldName ) { inline void BSONObjBuilderValueStream::endField( const StringData& next FieldName ) {
if ( haveSubobj() ) { if ( haveSubobj() ) {
verify( _fieldName.rawData() ); verify( _fieldName.rawData() );
_builder->append( _fieldName, subobj()->done() ); _builder->append( _fieldName, subobj()->done() );
_subobj.reset();
} }
_subobj.reset();
_fieldName = nextFieldName; _fieldName = nextFieldName;
} }
inline BSONObjBuilder *BSONObjBuilderValueStream::subobj() { inline BSONObjBuilder *BSONObjBuilderValueStream::subobj() {
if ( !haveSubobj() ) if ( !haveSubobj() )
_subobj.reset( new BSONObjBuilder() ); _subobj.reset( new BSONObjBuilder() );
return _subobj.get(); return _subobj.get();
} }
template<class T> inline template<class T> inline
skipping to change at line 475 skipping to change at line 475
BSONObjIterator i(*this); BSONObjIterator i(*this);
bool first = true; bool first = true;
while ( 1 ) { while ( 1 ) {
massert( 10327 , "Object does not end with EOO", i.moreWithEOO () ); massert( 10327 , "Object does not end with EOO", i.moreWithEOO () );
BSONElement e = i.next( true ); BSONElement e = i.next( true );
massert( 10328 , "Invalid element size", e.size() > 0 ); massert( 10328 , "Invalid element size", e.size() > 0 );
massert( 10329 , "Element too large", e.size() < ( 1 << 30 ) ) ; massert( 10329 , "Element too large", e.size() < ( 1 << 30 ) ) ;
int offset = (int) (e.rawdata() - this->objdata()); int offset = (int) (e.rawdata() - this->objdata());
massert( 10330 , "Element extends past end of object", massert( 10330 , "Element extends past end of object",
e.size() + offset <= this->objsize() ); e.size() + offset <= this->objsize() );
e.validate();
bool end = ( e.size() + offset == this->objsize() ); bool end = ( e.size() + offset == this->objsize() );
if ( e.eoo() ) { if ( e.eoo() ) {
massert( 10331 , "EOO Before end of object", end ); massert( 10331 , "EOO Before end of object", end );
break; break;
} }
if ( first ) if ( first )
first = false; first = false;
else else
s << ", "; s << ", ";
e.toString( s, !isArray, full, depth ); e.toString( s, !isArray, full, depth );
} }
s << ( isArray ? " ]" : " }" ); s << ( isArray ? " ]" : " }" );
} }
inline void BSONElement::validate() const {
const BSONType t = type();
switch( t ) {
case DBRef:
case Code:
case Symbol:
case mongo::String: {
unsigned x = (unsigned) valuestrsize();
bool lenOk = x > 0 && x < (unsigned) BSONObjMaxInternalSize;
if( lenOk && valuestr()[x-1] == 0 )
return;
StringBuilder buf;
buf << "Invalid dbref/code/string/symbol size: " << x;
if( lenOk )
buf << " strnlen:" << mongo::strnlen( valuestr() , x );
msgasserted( 10321 , buf.str() );
break;
}
case CodeWScope: {
int totalSize = *( int * )( value() );
massert( 10322 , "Invalid CodeWScope size", totalSize >= 8 );
int strSizeWNull = *( int * )( value() + 4 );
massert( 10323 , "Invalid CodeWScope string size", totalSize >
= strSizeWNull + 4 + 4 );
massert( 10324 , "Invalid CodeWScope string size",
strSizeWNull > 0 &&
(strSizeWNull - 1) == mongo::strnlen( codeWScopeCode()
, strSizeWNull ) );
massert( 10325 , "Invalid CodeWScope size", totalSize >= strSi
zeWNull + 4 + 4 + 4 );
int objSize = *( int * )( value() + 4 + 4 + strSizeWNull );
massert( 10326 , "Invalid CodeWScope object size", totalSize =
= 4 + 4 + strSizeWNull + objSize );
// Subobject validation handled elsewhere.
}
case Object:
// We expect Object size validation to be handled elsewhere.
default:
break;
}
}
inline int BSONElement::size( int maxLen ) const { inline int BSONElement::size( int maxLen ) const {
if ( totalSize >= 0 ) if ( totalSize >= 0 )
return totalSize; return totalSize;
int remain = maxLen - fieldNameSize() - 1; int remain = maxLen - fieldNameSize() - 1;
int x = 0; int x = 0;
switch ( type() ) { switch ( type() ) {
case EOO: case EOO:
case Undefined: case Undefined:
 End of changes. 4 change blocks. 
45 lines changed or deleted 1 lines changed or added


 bson_extract.h   bson_extract.h 
skipping to change at line 65 skipping to change at line 65
* Returns Status::OK() and sets *out to the found element's boolean va lue on success. Returns * Returns Status::OK() and sets *out to the found element's boolean va lue on success. Returns
* ErrorCodes::NoSuchKey if there are no matches for "fieldName", and E rrorCodes::TypeMismatch * ErrorCodes::NoSuchKey if there are no matches for "fieldName", and E rrorCodes::TypeMismatch
* if the type of the matching element is not Bool or a number type. F or return values other * if the type of the matching element is not Bool or a number type. F or return values other
* than Status::OK(), the resulting value of "*out" is undefined. * than Status::OK(), the resulting value of "*out" is undefined.
*/ */
Status bsonExtractBooleanField(const BSONObj& object, Status bsonExtractBooleanField(const BSONObj& object,
const StringData& fieldName, const StringData& fieldName,
bool* out); bool* out);
/** /**
* Finds an element named "fieldName" in "object" that represents an in
tegral value.
*
* Returns Status::OK() and sets *out to the element's 64-bit integer v
alue representation on
* success. Returns ErrorCodes::NoSuchKey if there are no matches for
"fieldName". Returns
* ErrorCodes::TypeMismatch if the value of the matching element is not
of a numeric type.
* Returns ErrorCodes::BadValue if the value does not have an exact 64-
bit integer
* representation. For return values other than Status::OK(), the resu
lting value of "*out" is
* undefined.
*/
Status bsonExtractIntegerField(const BSONObj& object,
const StringData& fieldName,
long long* out);
/**
* Finds a string-typed element named "fieldName" in "object" and store s its value in "out". * Finds a string-typed element named "fieldName" in "object" and store s its value in "out".
* *
* Returns Status::OK() and sets *out to the found element's string val ue on success. Returns * Returns Status::OK() and sets *out to the found element's string val ue on success. Returns
* ErrorCodes::NoSuchKey if there are no matches for "fieldName", and E rrorCodes::TypeMismatch * ErrorCodes::NoSuchKey if there are no matches for "fieldName", and E rrorCodes::TypeMismatch
* if the type of the matching element is not String. For return value s other than * if the type of the matching element is not String. For return value s other than
* Status::OK(), the resulting value of "*out" is undefined. * Status::OK(), the resulting value of "*out" is undefined.
*/ */
Status bsonExtractStringField(const BSONObj& object, Status bsonExtractStringField(const BSONObj& object,
const StringData& fieldName, const StringData& fieldName,
std::string* out); std::string* out);
skipping to change at line 92 skipping to change at line 106
* *
* If "fieldName" is present more than once, behavior is undefined. If the found field is not a * If "fieldName" is present more than once, behavior is undefined. If the found field is not a
* boolean or number, returns ErrorCodes::TypeMismatch. * boolean or number, returns ErrorCodes::TypeMismatch.
*/ */
Status bsonExtractBooleanFieldWithDefault(const BSONObj& object, Status bsonExtractBooleanFieldWithDefault(const BSONObj& object,
const StringData& fieldName, const StringData& fieldName,
bool defaultValue, bool defaultValue,
bool* out); bool* out);
/** /**
* Finds an element named "fieldName" in "object" that represents an in
tegral value.
*
* If a field named "fieldName" is present and is a value of numeric ty
pe with an exact 64-bit
* integer representation, returns that representation in *out and retu
rns Status::OK(). If
* there is no field named "fieldName", stores defaultValue into *out a
nd returns Status::OK().
* If the field is found, but has non-numeric type, returns ErrorCodes:
:TypeMismatch. If the
* value has numeric type, but cannot be represented as a 64-bit intege
r, returns
* ErrorCodes::BadValue.
*/
Status bsonExtractIntegerFieldWithDefault(const BSONObj& object,
const StringData& fieldName,
long long defaultValue,
long long* out);
/**
* Finds a string element named "fieldName" in "object". * Finds a string element named "fieldName" in "object".
* *
* If a field named "fieldName" is present, and is a string, stores the value of the field into * If a field named "fieldName" is present, and is a string, stores the value of the field into
* "*out". If no field named fieldName is present, sets "*out" to "def aultValue". In these * "*out". If no field named fieldName is present, sets "*out" to "def aultValue". In these
* cases, returns Status::OK(). * cases, returns Status::OK().
* *
* If "fieldName" is present more than once, behavior is undefined. If the found field is not a * If "fieldName" is present more than once, behavior is undefined. If the found field is not a
* string, returns ErrorCodes::TypeMismatch. * string, returns ErrorCodes::TypeMismatch.
*/ */
Status bsonExtractStringFieldWithDefault(const BSONObj& object, Status bsonExtractStringFieldWithDefault(const BSONObj& object,
 End of changes. 2 change blocks. 
0 lines changed or deleted 41 lines changed or added


 bsondump_options.h   bsondump_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 bsonelement.h   bsonelement.h 
skipping to change at line 26 skipping to change at line 26
*/ */
#pragma once #pragma once
#include <string.h> // strlen #include <string.h> // strlen
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/bson/bsontypes.h" #include "mongo/bson/bsontypes.h"
#include "mongo/bson/oid.h" #include "mongo/bson/oid.h"
#include "mongo/client/export_macros.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
#include "mongo/platform/float_utils.h" #include "mongo/platform/float_utils.h"
namespace mongo { namespace mongo {
class OpTime; class OpTime;
class BSONObj; class BSONObj;
class BSONElement; class BSONElement;
class BSONObjBuilder; class BSONObjBuilder;
} }
skipping to change at line 60 skipping to change at line 61
The BSONElement object points into the BSONObj's data. Thus the BS ONObj must stay in scope The BSONElement object points into the BSONObj's data. Thus the BS ONObj must stay in scope
for the life of the BSONElement. for the life of the BSONElement.
internals: internals:
<type><fieldName ><value> <type><fieldName ><value>
-------- size() ------------ -------- size() ------------
-fieldNameSize- -fieldNameSize-
value() value()
type() type()
*/ */
class BSONElement { class MONGO_CLIENT_API BSONElement {
public: public:
/** These functions, which start with a capital letter, throw a Msg AssertionException if the /** These functions, which start with a capital letter, throw a Msg AssertionException if the
element is not of the required type. Example: element is not of the required type. Example:
std::string foo = obj["foo"].String(); // std::exception if not a std::string type or DNE std::string foo = obj["foo"].String(); // std::exception if not a std::string type or DNE
*/ */
std::string String() const { return chk(mongo::String).str() ; } std::string String() const { return chk(mongo::String).str() ; }
Date_t Date() const { return chk(mongo::Date).date(); } Date_t Date() const { return chk(mongo::Date).date(); }
double Number() const { return chk(isNumber()).number() ; } double Number() const { return chk(isNumber()).number() ; }
double Double() const { return chk(NumberDouble)._numbe rDouble(); } double Double() const { return chk(NumberDouble)._numbe rDouble(); }
skipping to change at line 377 skipping to change at line 378
int woCompare( const BSONElement &e, bool considerFieldName = true ) const; int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
const char * rawdata() const { return data; } const char * rawdata() const { return data; }
/** 0 == Equality, just not defined yet */ /** 0 == Equality, just not defined yet */
int getGtLtOp( int def = 0 ) const; int getGtLtOp( int def = 0 ) const;
/** Constructs an empty element */ /** Constructs an empty element */
BSONElement(); BSONElement();
/** Check that data is internally consistent. */
void validate() const;
/** True if this element may contain subobjects. */ /** True if this element may contain subobjects. */
bool mayEncapsulate() const { bool mayEncapsulate() const {
switch ( type() ) { switch ( type() ) {
case Object: case Object:
case mongo::Array: case mongo::Array:
case CodeWScope: case CodeWScope:
return true; return true;
default: default:
return false; return false;
} }
skipping to change at line 411 skipping to change at line 409
} }
Date_t timestampTime() const { Date_t timestampTime() const {
unsigned long long t = ((unsigned int*)(value() + 4 ))[0]; unsigned long long t = ((unsigned int*)(value() + 4 ))[0];
return t * 1000; return t * 1000;
} }
unsigned int timestampInc() const { unsigned int timestampInc() const {
return ((unsigned int*)(value() ))[0]; return ((unsigned int*)(value() ))[0];
} }
unsigned long long timestampValue() const {
return reinterpret_cast<const unsigned long long*>( value() )[0
];
}
const char * dbrefNS() const { const char * dbrefNS() const {
uassert( 10063 , "not a dbref" , type() == DBRef ); uassert( 10063 , "not a dbref" , type() == DBRef );
return value() + 4; return value() + 4;
} }
const mongo::OID& dbrefOID() const { const mongo::OID& dbrefOID() const {
uassert( 10064 , "not a dbref" , type() == DBRef ); uassert( 10064 , "not a dbref" , type() == DBRef );
const char * start = value(); const char * start = value();
start += 4 + *reinterpret_cast< const int* >( start ); start += 4 + *reinterpret_cast< const int* >( start );
return *reinterpret_cast< const mongo::OID* >( start ); return *reinterpret_cast< const mongo::OID* >( start );
skipping to change at line 457 skipping to change at line 459
explicit BSONElement(const char *d) : data(d) { explicit BSONElement(const char *d) : data(d) {
fieldNameSize_ = -1; fieldNameSize_ = -1;
totalSize = -1; totalSize = -1;
if ( eoo() ) { if ( eoo() ) {
fieldNameSize_ = 0; fieldNameSize_ = 0;
totalSize = 1; totalSize = 1;
} }
} }
struct FieldNameSizeTag {}; // For disambiguation with ctor taking
'maxLen' above.
/** Construct a BSONElement where you already know the length of th
e name. The value
* passed here includes the null terminator. The data pointed to b
y 'd' must not
* represent an EOO. You may pass -1 to indicate that you don't ac
tually know the
* size.
*/
BSONElement(const char* d, int fieldNameSize, FieldNameSizeTag)
: data(d)
, fieldNameSize_(fieldNameSize) // internal size includes null
terminator
, totalSize(-1) {
}
std::string _asCode() const; std::string _asCode() const;
OpTime _opTime() const; OpTime _opTime() const;
template<typename T> bool coerce( T* out ) const; template<typename T> bool coerce( T* out ) const;
private: private:
const char *data; const char *data;
mutable int fieldNameSize_; // cached value mutable int fieldNameSize_; // cached value
mutable int totalSize; /* caches the computed size */ mutable int totalSize; /* caches the computed size */
 End of changes. 5 change blocks. 
4 lines changed or deleted 25 lines changed or added


 bsonmisc.h   bsonmisc.h 
skipping to change at line 23 skipping to change at line 23
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <memory> #include <memory>
#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
int getGtLtOp(const BSONElement& e); int getGtLtOp(const BSONElement& e);
struct BSONElementCmpWithoutField { struct BSONElementCmpWithoutField {
bool operator()( const BSONElement &l, const BSONElement &r ) const { bool operator()( const BSONElement &l, const BSONElement &r ) const {
return l.woCompare( r, false ) < 0; return l.woCompare( r, false ) < 0;
} }
}; };
skipping to change at line 85 skipping to change at line 86
BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARR AY( "bar" << "baz" << "qux" ) ) ); BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARR AY( "bar" << "baz" << "qux" ) ) );
*/ */
#define BSON_ARRAY(x) (( ::mongo::BSONArrayBuilder() << x ).arr()) #define BSON_ARRAY(x) (( ::mongo::BSONArrayBuilder() << x ).arr())
/* Utility class to auto assign object IDs. /* Utility class to auto assign object IDs.
Example: Example:
std::cout << BSON( GENOID << "z" << 3 ); // { _id : ..., z : 3 } std::cout << BSON( GENOID << "z" << 3 ); // { _id : ..., z : 3 }
*/ */
extern struct GENOIDLabeler { } GENOID; struct MONGO_CLIENT_API GENOIDLabeler { };
extern MONGO_CLIENT_API GENOIDLabeler GENOID;
/* Utility class to add a Date element with the current time /* Utility class to add a Date element with the current time
Example: Example:
std::cout << BSON( "created" << DATENOW ); // { created : "2009-10 -09 11:41:42" } std::cout << BSON( "created" << DATENOW ); // { created : "2009-10 -09 11:41:42" }
*/ */
extern struct DateNowLabeler { } DATENOW; struct MONGO_CLIENT_API DateNowLabeler { };
extern MONGO_CLIENT_API DateNowLabeler DATENOW;
/* Utility class to assign a NULL value to a given attribute /* Utility class to assign a NULL value to a given attribute
Example: Example:
std::cout << BSON( "a" << BSONNULL ); // { a : null } std::cout << BSON( "a" << BSONNULL ); // { a : null }
*/ */
extern struct NullLabeler { } BSONNULL; struct MONGO_CLIENT_API NullLabeler { };
extern MONGO_CLIENT_API NullLabeler BSONNULL;
/* Utility class to assign an Undefined value to a given attribute /* Utility class to assign an Undefined value to a given attribute
Example: Example:
std::cout << BSON( "a" << BSONUndefined ); // { a : undefined } std::cout << BSON( "a" << BSONUndefined ); // { a : undefined }
*/ */
extern struct UndefinedLabeler { } BSONUndefined; struct MONGO_CLIENT_API UndefinedLabeler { };
extern MONGO_CLIENT_API UndefinedLabeler BSONUndefined;
/* Utility class to add the minKey (minus infinity) to a given attribut e /* Utility class to add the minKey (minus infinity) to a given attribut e
Example: Example:
std::cout << BSON( "a" << MINKEY ); // { "a" : { "$minKey" : 1 } } std::cout << BSON( "a" << MINKEY ); // { "a" : { "$minKey" : 1 } }
*/ */
extern struct MinKeyLabeler { } MINKEY; struct MONGO_CLIENT_API MinKeyLabeler { };
extern struct MaxKeyLabeler { } MAXKEY; extern MONGO_CLIENT_API MinKeyLabeler MINKEY;
struct MONGO_CLIENT_API MaxKeyLabeler { };
extern MONGO_CLIENT_API MaxKeyLabeler MAXKEY;
// Utility class to implement GT, GTE, etc as described above. // Utility class to implement GT, GTE, etc as described above.
class Labeler { class Labeler {
public: public:
struct Label { struct Label {
explicit Label( const char *l ) : l_( l ) {} explicit Label( const char *l ) : l_( l ) {}
const char *l_; const char *l_;
}; };
Labeler( const Label &l, BSONObjBuilderValueStream *s ) : l_( l ), s_( s ) {} Labeler( const Label &l, BSONObjBuilderValueStream *s ) : l_( l ), s_( s ) {}
template<class T> template<class T>
skipping to change at line 175 skipping to change at line 182
BinDataType type; BinDataType type;
}; };
// Utility class to allow adding deprecated DBRef type to BSON // Utility class to allow adding deprecated DBRef type to BSON
struct BSONDBRef { struct BSONDBRef {
BSONDBRef(const StringData& nameSpace, const OID& o) :ns(nameSpace) , oid(o) {} BSONDBRef(const StringData& nameSpace, const OID& o) :ns(nameSpace) , oid(o) {}
StringData ns; StringData ns;
OID oid; OID oid;
}; };
extern Labeler::Label GT; extern MONGO_CLIENT_API Labeler::Label GT;
extern Labeler::Label GTE; extern MONGO_CLIENT_API Labeler::Label GTE;
extern Labeler::Label LT; extern MONGO_CLIENT_API Labeler::Label LT;
extern Labeler::Label LTE; extern MONGO_CLIENT_API Labeler::Label LTE;
extern Labeler::Label NE; extern MONGO_CLIENT_API Labeler::Label NE;
extern Labeler::Label NIN; extern MONGO_CLIENT_API Labeler::Label NIN;
extern Labeler::Label BSIZE; extern MONGO_CLIENT_API Labeler::Label BSIZE;
// $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6)); // $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
// becomes : {$or: [{x: {$gt: 7}}, {y: {$lt: 6}}]} // becomes : {$or: [{x: {$gt: 7}}, {y: {$lt: 6}}]}
inline BSONObj OR(const BSONObj& a, const BSONObj& b); inline BSONObj OR(const BSONObj& a, const BSONObj& b);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c) ; inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c) ;
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d); inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e); inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e);
inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f); inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f);
// definitions in bsonobjbuilder.h b/c of incomplete types // definitions in bsonobjbuilder.h b/c of incomplete types
// Utility class to implement BSON( key << val ) as described above. // Utility class to implement BSON( key << val ) as described above.
class BSONObjBuilderValueStream : public boost::noncopyable { class MONGO_CLIENT_API BSONObjBuilderValueStream : public boost::noncop yable {
public: public:
friend class Labeler; friend class Labeler;
BSONObjBuilderValueStream( BSONObjBuilder * builder ); BSONObjBuilderValueStream( BSONObjBuilder * builder );
BSONObjBuilder& operator<<( const BSONElement& e ); BSONObjBuilder& operator<<( const BSONElement& e );
template<class T> template<class T>
BSONObjBuilder& operator<<( T value ); BSONObjBuilder& operator<<( T value );
BSONObjBuilder& operator<<(const DateNowLabeler& id); BSONObjBuilder& operator<<(const DateNowLabeler& id);
 End of changes. 8 change blocks. 
14 lines changed or deleted 21 lines changed or added


 bsonobj.h   bsonobj.h 
skipping to change at line 31 skipping to change at line 31
#include <boost/noncopyable.hpp> #include <boost/noncopyable.hpp>
#include <set> #include <set>
#include <list> #include <list>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement.h"
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/bson/util/atomic_int.h" #include "mongo/bson/util/atomic_int.h"
#include "mongo/bson/util/builder.h" #include "mongo/bson/util/builder.h"
#include "mongo/client/export_macros.h"
#include "mongo/util/bufreader.h" #include "mongo/util/bufreader.h"
namespace mongo { namespace mongo {
typedef std::set< BSONElement, BSONElementCmpWithoutField > BSONElement Set; typedef std::set< BSONElement, BSONElementCmpWithoutField > BSONElement Set;
typedef std::multiset< BSONElement, BSONElementCmpWithoutField > BSONEl ementMSet; typedef std::multiset< BSONElement, BSONElementCmpWithoutField > BSONEl ementMSet;
/** /**
C++ representation of a "BSON" object -- that is, an extended JSON-s tyle C++ representation of a "BSON" object -- that is, an extended JSON-s tyle
object in a binary representation. object in a binary representation.
skipping to change at line 76 skipping to change at line 77
Object: a nested object, leading with its entire size, which termin ates with EOO. Object: a nested object, leading with its entire size, which termin ates with EOO.
Array: same as object Array: same as object
DBRef: <strlen> <cstring ns> <oid> DBRef: <strlen> <cstring ns> <oid>
DBRef: a database reference: basically a collection name plus an O bject ID DBRef: a database reference: basically a collection name plus an O bject ID
BinData: <int len> <byte subtype> <byte[len] data> BinData: <int len> <byte subtype> <byte[len] data>
Code: a function (not a closure): same format as String. Code: a function (not a closure): same format as String.
Symbol: a language symbol (say a python symbol). same format as St ring. Symbol: a language symbol (say a python symbol). same format as St ring.
Code With Scope: <total size><String><Object> Code With Scope: <total size><String><Object>
\endcode \endcode
*/ */
class BSONObj { class MONGO_CLIENT_API BSONObj {
public: public:
/** Construct a BSONObj from data in the proper format. /** Construct a BSONObj from data in the proper format.
* Use this constructor when something else owns msgdata's buffer * Use this constructor when something else owns msgdata's buffer
*/ */
explicit BSONObj(const char *msgdata) { explicit BSONObj(const char *msgdata) {
init(msgdata); init(msgdata);
} }
/** Construct a BSONObj from data in the proper format. /** Construct a BSONObj from data in the proper format.
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 bsonobjbuilder.h   bsonobjbuilder.h 
skipping to change at line 36 skipping to change at line 36
#include <map> #include <map>
#include <cmath> #include <cmath>
#include <limits> #include <limits>
#include "mongo/base/parse_number.h" #include "mongo/base/parse_number.h"
#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bson_builder_base.h" #include "mongo/bson/bson_builder_base.h"
#include "mongo/bson/bson_field.h" #include "mongo/bson/bson_field.h"
#include "mongo/client/export_macros.h"
#if defined(_DEBUG) && defined(MONGO_EXPOSE_MACROS) #if defined(_DEBUG) && defined(MONGO_EXPOSE_MACROS)
#include "mongo/util/log.h" #include "mongo/util/log.h"
#endif #endif
namespace mongo { namespace mongo {
#if defined(_WIN32) #if defined(_WIN32)
// warning: 'this' : used in base member initializer list // warning: 'this' : used in base member initializer list
#pragma warning( disable : 4355 ) #pragma warning( disable : 4355 )
#endif #endif
/** Utility for creating a BSONObj. /** Utility for creating a BSONObj.
See also the BSON() and BSON_ARRAY() macros. See also the BSON() and BSON_ARRAY() macros.
*/ */
class BSONObjBuilder : public BSONBuilderBase, private boost::noncopyab le { class MONGO_CLIENT_API BSONObjBuilder : public BSONBuilderBase, private boost::noncopyable {
public: public:
/** @param initsize this is just a hint as to the final size of the object */ /** @param initsize this is just a hint as to the final size of the object */
BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize + sizeof (unsigned)), _offset( sizeof(unsigned) ), _s( this ) , _tracker(0) , _doneC alled(false) { BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize + sizeof (unsigned)), _offset( sizeof(unsigned) ), _s( this ) , _tracker(0) , _doneC alled(false) {
_b.appendNum((unsigned)0); // ref-count _b.appendNum((unsigned)0); // ref-count
_b.skip(4); /*leave room for size field and ref-count*/ _b.skip(4); /*leave room for size field and ref-count*/
} }
/** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder /** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder
* This is for more efficient adding of subobjects/arrays. See doc s for subobjStart for example. * This is for more efficient adding of subobjects/arrays. See doc s for subobjStart for example.
*/ */
skipping to change at line 606 skipping to change at line 607
The returned object is only valid until the next modification o r destruction of the builder. The returned object is only valid until the next modification o r destruction of the builder.
Intended use case: append a field if not already there. Intended use case: append a field if not already there.
*/ */
BSONObj asTempObj() { BSONObj asTempObj() {
BSONObj temp(_done()); BSONObj temp(_done());
_b.setlen(_b.len()-1); //next append should overwrite the EOO _b.setlen(_b.len()-1); //next append should overwrite the EOO
_doneCalled = false; _doneCalled = false;
return temp; return temp;
} }
/** Make it look as if "done" has been called, so that our destruct
or is a no-op. Do
* this if you know that you don't care about the contents of the
builder you are
* destroying.
*
* Note that it is invalid to call any method other than the destr
uctor after invoking
* this method.
*/
void abandon() {
_doneCalled = true;
}
void decouple() { void decouple() {
_b.decouple(); // post done() call version. be sure jsobj f rees... _b.decouple(); // post done() call version. be sure jsobj f rees...
} }
void appendKeys( const BSONObj& keyPattern , const BSONObj& values ); void appendKeys( const BSONObj& keyPattern , const BSONObj& values );
static std::string numStr( int i ) { static std::string numStr( int i ) {
if (i>=0 && i<100 && numStrsReady) if (i>=0 && i<100 && numStrsReady)
return numStrs[i]; return numStrs[i];
StringBuilder o; StringBuilder o;
 End of changes. 3 change blocks. 
1 lines changed or deleted 16 lines changed or added


 btree.h   btree.h 
skipping to change at line 38 skipping to change at line 38
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/dur.h" #include "mongo/db/dur.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/key.h" #include "mongo/db/storage/record.h"
#include "mongo/db/structure/btree/key.h"
namespace mongo { namespace mongo {
class IndexCatalogEntry;
class IndexDescriptor;
template< class Version > class BtreeBucket;
/** /**
* Our btree implementation generally follows the standard btree algori thm, * Our btree implementation generally follows the standard btree algori thm,
* which is described in many places. The nodes of our btree are refer red to * which is described in many places. The nodes of our btree are refer red to
* as buckets below. These buckets are of size BucketSize and their bo dy is * as buckets below. These buckets are of size BucketSize and their bo dy is
* an ordered array of <bson key, disk loc> pairs, where disk loc is th e disk * an ordered array of <bson key, disk loc> pairs, where disk loc is th e disk
* location of a document and bson key is a projection of this document into * location of a document and bson key is a projection of this document into
* the schema of the index for this btree. Ordering is determined on t he * the schema of the index for this btree. Ordering is determined on t he
* basis of bson key first and then disk loc in case of a tie. All bso n keys * basis of bson key first and then disk loc in case of a tie. All bso n keys
* for a btree have identical schemas with empty string field names and may * for a btree have identical schemas with empty string field names and may
* not have an objsize() exceeding KeyMax. The btree's buckets are * not have an objsize() exceeding KeyMax. The btree's buckets are
skipping to change at line 651 skipping to change at line 657
// make compiler happy: // make compiler happy:
typedef typename V::Key Key; typedef typename V::Key Key;
typedef typename V::KeyOwned KeyOwned; typedef typename V::KeyOwned KeyOwned;
typedef typename BucketBasics<V>::KeyNode KeyNode; typedef typename BucketBasics<V>::KeyNode KeyNode;
typedef typename BucketBasics<V>::_KeyNode _KeyNode; typedef typename BucketBasics<V>::_KeyNode _KeyNode;
typedef typename BucketBasics<V>::Loc Loc; typedef typename BucketBasics<V>::Loc Loc;
const _KeyNode& k(int i) const { return static_cast< const Buck etBasics<V> * >(this)->k(i); } const _KeyNode& k(int i) const { return static_cast< const Buck etBasics<V> * >(this)->k(i); }
protected: protected:
_KeyNode& k(int i) { return static_cast< BucketBasi cs<V> * >(this)->_k(i); } _KeyNode& k(int i) { return static_cast< BucketBasi cs<V> * >(this)->_k(i); }
public: public:
static const BtreeBucket<V>* asVersion( Record* rec );
static BtreeBucket<V>* asVersionMod( Record* rec );
const KeyNode keyNode(int i) const { return static_cast< const Buck etBasics<V> * >(this)->keyNode(i); } const KeyNode keyNode(int i) const { return static_cast< const Buck etBasics<V> * >(this)->keyNode(i); }
bool isHead() const { return this->parent.isNull(); } bool isHead() const { return this->parent.isNull(); }
void dumpTree(const DiskLoc &thisLoc, const BSONObj &order) const; void dumpTree(const DiskLoc &thisLoc, const BSONObj &order) const;
long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order , long long *unusedCount = 0, bool strict = false, unsigned depth=0) const; /* traverses everything */ long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order , long long *unusedCount = 0, bool strict = false, unsigned depth=0) const; /* traverses everything */
bool isUsed( int i ) const { return this->k(i).isUsed(); } bool isUsed( int i ) const { return this->k(i).isUsed(); }
string bucketSummary() const; string bucketSummary() const;
void dump(unsigned depth=0) const; void dump(unsigned depth=0) const;
/** /**
* @return true if key exists in index * @return true if key exists in index
* *
* @order - indicates order of keys in the index. this is basicall y the index's key pattern, e.g.: * @order - indicates order of keys in the index. this is basicall y the index's key pattern, e.g.:
* BSONObj order = ((IndexDetails&)idx).keyPattern(); * BSONObj order = ((IndexDetails&)idx).keyPattern();
* likewise below in bt_insert() etc. * likewise below in bt_insert() etc.
*/ */
private: private:
bool exists(const IndexDetails& idx, const DiskLoc &thisLoc, const Key& key, const Ordering& order) const; bool exists(const IndexCatalogEntry* btreeState, const DiskLoc &thi sLoc, const Key& key ) const;
public: public:
/** /**
* @param self - Don't complain about ourself already being in the index case. * @param self - Don't complain about ourself already being in the index case.
* @return true = There is a duplicate used key. * @return true = There is a duplicate used key.
*/ */
bool wouldCreateDup( bool wouldCreateDup(const IndexCatalogEntry* btreeState,
const IndexDetails& idx, const DiskLoc &thisLoc, const DiskLoc& thisLoc,
const Key& key, const Ordering& order, const Key& key,
const DiskLoc &self) const; const DiskLoc& self) const;
/** /**
* Preconditions: none * Preconditions: none
* Postconditions: @return a new bucket allocated from pdfile stora ge * Postconditions: @return a new bucket allocated from pdfile stora ge
* and init()-ed. This bucket is suitable to for use as a new roo t * and init()-ed. This bucket is suitable to for use as a new roo t
* or any other new node in the tree. * or any other new node in the tree.
*/ */
static DiskLoc addBucket(const IndexDetails&); static DiskLoc addBucket(IndexCatalogEntry* btreeState);
/** /**
* Preconditions: none * Preconditions: none
* Postconditions: * Postconditions:
* - Some header values in this bucket are cleared, and the bucket is * - Some header values in this bucket are cleared, and the bucket is
* deallocated from pdfile storage. * deallocated from pdfile storage.
* - The memory at thisLoc is invalidated, and 'this' is invalidat ed. * - The memory at thisLoc is invalidated, and 'this' is invalidat ed.
*/ */
void deallocBucket(const DiskLoc thisLoc, const IndexDetails &id); void deallocBucket(IndexCatalogEntry* btreeState, const DiskLoc thi sLoc );
/** /**
* Preconditions: * Preconditions:
* - 'key' has a valid schema for this index. * - 'key' has a valid schema for this index.
* - All other paramenters are valid and consistent with this inde x if applicable. * - All other paramenters are valid and consistent with this inde x if applicable.
* Postconditions: * Postconditions:
* - If key is bigger than KeyMax, @return 2 or 3 and no change. * - If key is bigger than KeyMax, @return 2 or 3 and no change.
* - If key / recordLoc exist in the btree as an unused key, set t hem * - If key / recordLoc exist in the btree as an unused key, set t hem
* as used and @return 0 * as used and @return 0
* - If key / recordLoc exist in the btree as a used key, @throw * - If key / recordLoc exist in the btree as a used key, @throw
* exception 10287 and no change. * exception 10287 and no change.
* - If key / recordLoc do not exist in the btree, they are insert ed * - If key / recordLoc do not exist in the btree, they are insert ed
* and @return 0. The root of the btree may be changed, so * and @return 0. The root of the btree may be changed, so
* 'this'/thisLoc may no longer be the root upon return. * 'this'/thisLoc may no longer be the root upon return.
*/ */
int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc, int bt_insert(IndexCatalogEntry* btreeState,
const BSONObj& key, const Ordering &order, bool dupsA const DiskLoc thisLoc,
llowed, const DiskLoc recordLoc,
IndexDetails& idx, bool toplevel = true) const; const BSONObj& key,
bool dupsallowed,
bool toplevel) const;
/** /**
* Preconditions: * Preconditions:
* - 'key' has a valid schema for this index, and may have objsize () > KeyMax. * - 'key' has a valid schema for this index, and may have objsize () > KeyMax.
* Postconditions: * Postconditions:
* - If key / recordLoc are in the btree, they are removed (possib ly * - If key / recordLoc are in the btree, they are removed (possib ly
* by being marked as an unused key), @return true, and potentia lly * by being marked as an unused key), @return true, and potentia lly
* invalidate 'this' / thisLoc and change the head. * invalidate 'this' / thisLoc and change the head.
* - If key / recordLoc are not in the btree, @return false and do nothing. * - If key / recordLoc are not in the btree, @return false and do nothing.
*/ */
bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj bool unindex(IndexCatalogEntry* btreeState,
& key, const DiskLoc recordLoc) const; const DiskLoc thisLoc,
const BSONObj& key,
const DiskLoc recordLoc) const;
/** /**
* locate may return an "unused" key that is just a marker. so be careful. * locate may return an "unused" key that is just a marker. so be careful.
* looks for a key:recordloc pair. * looks for a key:recordloc pair.
* *
* @found - returns true if exact match found. note you can get ba ck a position * @found - returns true if exact match found. note you can get ba ck a position
* result even if found is false. * result even if found is false.
*/ */
DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, co DiskLoc locate(const IndexCatalogEntry* btreeState,
nst BSONObj& key, const Ordering &order, const DiskLoc& thisLoc,
int& pos, bool& found, const DiskLoc &recordLoc, int const BSONObj& key,
direction=1) const; int& pos,
DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, co bool& found,
nst Key& key, const Ordering &order, const DiskLoc& recordLoc,
int& pos, bool& found, const DiskLoc &recordLoc, int int direction=1) const;
direction=1) const;
DiskLoc locate(const IndexCatalogEntry* btreeState,
const DiskLoc& thisLoc,
const Key& key,
int& pos,
bool& found,
const DiskLoc& recordLoc,
int direction=1) const;
/** /**
* find the first instance of the key * find the first instance of the key
* does not handle dups * does not handle dups
* WARNING: findSingle may not be compound index safe. this may ne ed to change. see notes in * WARNING: findSingle may not be compound index safe. this may ne ed to change. see notes in
* findSingle code. * findSingle code.
* @return the record location of the first match * @return the record location of the first match
*/ */
DiskLoc findSingle( const IndexDetails &indexdetails , const DiskLo DiskLoc findSingle( const IndexCatalogEntry* btreeState,
c& thisLoc, const BSONObj& key ) const; const DiskLoc& thisLoc,
const BSONObj& key ) const;
/** /**
* Advance to next or previous key in the index. * Advance to next or previous key in the index.
* @param direction to advance. * @param direction to advance.
*/ */
DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const; DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const;
/** Advance in specified direction to the specified key */ /** Advance in specified direction to the specified key */
void advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBeg void advanceTo(const IndexCatalogEntry* btreeState,
in, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &ke DiskLoc &thisLoc,
yEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int dir int &keyOfs,
ection ) const; const BSONObj &keyBegin,
int keyBeginLen,
bool afterKey,
const vector<const BSONElement*>& keyEnd,
const vector<bool>& keyEndInclusive,
int direction) const;
/** Locate a key with fields comprised of a combination of keyBegin fields and keyEnd fields. */ /** Locate a key with fields comprised of a combination of keyBegin fields and keyEnd fields. */
static void customLocate(DiskLoc &locInOut, int &keyOfs, const BSON static void customLocate(const IndexCatalogEntry* btreeState,
Obj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElem DiskLoc& locInOut,
ent * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &ord int& keyOfs,
er, int direction, pair< DiskLoc, int > &bestParent ) ; const BSONObj& keyBegin,
int keyBeginLen, bool afterVersion,
const vector<const BSONElement*>& keyEnd,
const vector<bool>& keyEndInclusive,
int direction,
pair<DiskLoc, int>& bestParent);
/** @return head of the btree by traversing from current bucket. */ /** @return head of the btree by traversing from current bucket. */
const DiskLoc getHead(const DiskLoc& thisLoc) const; const DiskLoc getHead(const DiskLoc& thisLoc) const;
/** get tree shape */ /** get tree shape */
void shape(stringstream&) const; void shape(stringstream&) const;
static void a_test(IndexDetails&);
static int getKeyMax(); static int getKeyMax();
protected: protected:
/** /**
* Preconditions: * Preconditions:
* - 0 <= firstIndex <= n * - 0 <= firstIndex <= n
* - -1 <= lastIndex <= n ( -1 is equivalent to n ) * - -1 <= lastIndex <= n ( -1 is equivalent to n )
* Postconditions: * Postconditions:
* - Any children at indexes firstIndex through lastIndex (inclusi ve) * - Any children at indexes firstIndex through lastIndex (inclusi ve)
* will have their parent pointers set to thisLoc. * will have their parent pointers set to thisLoc.
skipping to change at line 791 skipping to change at line 833
/** /**
* Preconditions: * Preconditions:
* - thisLoc is not the btree head. * - thisLoc is not the btree head.
* - n == 0 is ok * - n == 0 is ok
* Postconditions: * Postconditions:
* - All cursors pointing to this bucket will be updated. * - All cursors pointing to this bucket will be updated.
* - This bucket's parent's child pointer is set to null. * - This bucket's parent's child pointer is set to null.
* - This bucket is deallocated from pdfile storage. * - This bucket is deallocated from pdfile storage.
* - 'this' and thisLoc are invalidated. * - 'this' and thisLoc are invalidated.
*/ */
void delBucket(const DiskLoc thisLoc, const IndexDetails&); void delBucket(IndexCatalogEntry* btreeState, const DiskLoc thisLoc );
/** /**
* Preconditions: 0 <= p < n * Preconditions: 0 <= p < n
* Postconditions: * Postconditions:
* - The key at index p is removed from the btree. * - The key at index p is removed from the btree.
* - 'this' and thisLoc may be invalidated. * - 'this' and thisLoc may be invalidated.
* - The tree head may change. * - The tree head may change.
*/ */
void delKeyAtPos(const DiskLoc thisLoc, IndexDetails& id, int p, co void delKeyAtPos(IndexCatalogEntry* btreeSate,
nst Ordering &order); const DiskLoc thisLoc,
int p );
/** /**
* Preconditions: * Preconditions:
* - n == 0 is ok * - n == 0 is ok
* Postconditions: * Postconditions:
* - If thisLoc is head, or if its body has at least lowWaterMark bytes, * - If thisLoc is head, or if its body has at least lowWaterMark bytes,
* return false and do nothing. * return false and do nothing.
* - Otherwise, if thisLoc has left or right neighbors, either bal ance * - Otherwise, if thisLoc has left or right neighbors, either bal ance
* or merge with them and return true. Also, 'this' and thisLoc may * or merge with them and return true. Also, 'this' and thisLoc may
* be invalidated and the tree head may change. * be invalidated and the tree head may change.
*/ */
bool mayBalanceWithNeighbors(const DiskLoc thisLoc, IndexDetails &i d, const Ordering &order) const; bool mayBalanceWithNeighbors(IndexCatalogEntry* btreeState, const D iskLoc thisLoc);
/** /**
* Preconditions: * Preconditions:
* - 0 <= leftIndex < n * - 0 <= leftIndex < n
* - The child at leftIndex or the child at leftIndex + 1 contains * - The child at leftIndex or the child at leftIndex + 1 contains
* fewer than lowWaterMark bytes. * fewer than lowWaterMark bytes.
* Postconditions: * Postconditions:
* - If the child bucket at leftIndex can merge with the child ind ex * - If the child bucket at leftIndex can merge with the child ind ex
* at leftIndex + 1, do nothing and return false. * at leftIndex + 1, do nothing and return false.
* - Otherwise, balance keys between the leftIndex child and the * - Otherwise, balance keys between the leftIndex child and the
* leftIndex + 1 child, return true, and possibly change the tre e head. * leftIndex + 1 child, return true, and possibly change the tre e head.
*/ */
bool tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, Inde xDetails &id, const Ordering &order ) const; bool tryBalanceChildren(IndexCatalogEntry* btreeState, const DiskLo c thisLoc, int leftIndex) const;
/** /**
* Preconditions: * Preconditions:
* - All preconditions of tryBalanceChildren. * - All preconditions of tryBalanceChildren.
* - The leftIndex child and leftIndex + 1 child cannot be merged. * - The leftIndex child and leftIndex + 1 child cannot be merged.
* Postconditions: * Postconditions:
* - Keys are moved between the leftIndex child and the leftIndex + 1 * - Keys are moved between the leftIndex child and the leftIndex + 1
* child such that neither child has fewer than lowWaterMark byt es. * child such that neither child has fewer than lowWaterMark byt es.
* The tree head may change. * The tree head may change.
*/ */
void doBalanceChildren( const DiskLoc thisLoc, int leftIndex, Index Details &id, const Ordering &order ); void doBalanceChildren( IndexCatalogEntry* btreeState, const DiskLo c thisLoc, int leftIndex );
/** /**
* Preconditions: * Preconditions:
* - All preconditions of doBalanceChildren * - All preconditions of doBalanceChildren
* - The leftIndex and leftIndex + 1 children are packed. * - The leftIndex and leftIndex + 1 children are packed.
* - The leftIndex + 1 child has fewer than lowWaterMark bytes. * - The leftIndex + 1 child has fewer than lowWaterMark bytes.
* - split returned by rebalancedSeparatorPos() * - split returned by rebalancedSeparatorPos()
* Postconditions: * Postconditions:
* - The key in lchild at index split is set as thisLoc's key at i ndex * - The key in lchild at index split is set as thisLoc's key at i ndex
* leftIndex, which may trigger a split and change the tree head . * leftIndex, which may trigger a split and change the tree head .
* The previous key in thisLoc at index leftIndex and all keys w ith * The previous key in thisLoc at index leftIndex and all keys w ith
* indexes greater than split in lchild are moved to rchild. * indexes greater than split in lchild are moved to rchild.
*/ */
void doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, in void doBalanceLeftToRight( IndexCatalogEntry* btreeState,
t split, const DiskLoc thisLoc, int leftIndex, in
t split,
BtreeBucket<V> *l, const DiskLoc lchild, BtreeBucket<V> *l, const DiskLoc lchild,
BtreeBucket<V> *r, const DiskLoc rchild, BtreeBucket<V> *r, const DiskLoc rchild
IndexDetails &id, const Ordering &order );
);
/** /**
* Preconditions: * Preconditions:
* - All preconditions of doBalanceChildren * - All preconditions of doBalanceChildren
* - The leftIndex and leftIndex + 1 children are packed. * - The leftIndex and leftIndex + 1 children are packed.
* - The leftIndex child has fewer than lowWaterMark bytes. * - The leftIndex child has fewer than lowWaterMark bytes.
* - split returned by rebalancedSeparatorPos() * - split returned by rebalancedSeparatorPos()
* Postconditions: * Postconditions:
* - The key in rchild at index split - l->n - 1 is set as thisLoc 's key * - The key in rchild at index split - l->n - 1 is set as thisLoc 's key
* at index leftIndex, which may trigger a split and change the tree * at index leftIndex, which may trigger a split and change the tree
* head. The previous key in thisLoc at index leftIndex and all keys * head. The previous key in thisLoc at index leftIndex and all keys
* with indexes less than split - l->n - 1 in rchild are moved t o * with indexes less than split - l->n - 1 in rchild are moved t o
* lchild. * lchild.
*/ */
void doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, in void doBalanceRightToLeft( IndexCatalogEntry* btreeState,
t split, const DiskLoc thisLoc, int leftIndex, in
t split,
BtreeBucket<V> *l, const DiskLoc lchild, BtreeBucket<V> *l, const DiskLoc lchild,
BtreeBucket<V> *r, const DiskLoc rchild, BtreeBucket<V> *r, const DiskLoc rchild
IndexDetails &id, const Ordering &order );
);
/** /**
* Preconditions: * Preconditions:
* - 0 <= leftIndex < n * - 0 <= leftIndex < n
* - this->canMergeChildren( thisLoc, leftIndex ) == true * - this->canMergeChildren( thisLoc, leftIndex ) == true
* Postconditions: * Postconditions:
* - All of the above mentioned keys will be placed in the left ch ild. * - All of the above mentioned keys will be placed in the left ch ild.
* - The tree may be updated recursively, resulting in 'this' and * - The tree may be updated recursively, resulting in 'this' and
* thisLoc being invalidated and the tree head being changed. * thisLoc being invalidated and the tree head being changed.
*/ */
void doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDe tails &id, const Ordering &order); void doMergeChildren(IndexCatalogEntry* btreeState,const DiskLoc th isLoc, int leftIndex );
/** /**
* Preconditions: * Preconditions:
* - n == 0 * - n == 0
* - !nextChild.isNull() * - !nextChild.isNull()
* Postconditions: * Postconditions:
* - 'this' and thisLoc are deallocated (and invalidated), any cur sors * - 'this' and thisLoc are deallocated (and invalidated), any cur sors
* to them are updated, and the tree head may change. * to them are updated, and the tree head may change.
* - nextChild replaces thisLoc in the btree structure. * - nextChild replaces thisLoc in the btree structure.
*/ */
void replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ); void replaceWithNextChild( IndexCatalogEntry* btreeState, const Dis kLoc thisLoc );
/** /**
* @return true iff the leftIndex and leftIndex + 1 children both e xist, * @return true iff the leftIndex and leftIndex + 1 children both e xist,
* and if their body sizes when packed and the thisLoc key at left Index * and if their body sizes when packed and the thisLoc key at left Index
* would fit in a single bucket body. * would fit in a single bucket body.
*/ */
bool canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) cons t; bool canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) cons t;
/** /**
* Preconditions: * Preconditions:
skipping to change at line 939 skipping to change at line 983
* - Cannot add a key of size KeyMax to this bucket. * - Cannot add a key of size KeyMax to this bucket.
* - 0 <= keypos <= n is the position of a new key that will be in serted * - 0 <= keypos <= n is the position of a new key that will be in serted
* - lchild is equal to the existing child at index keypos. * - lchild is equal to the existing child at index keypos.
* Postconditions: * Postconditions:
* - The thisLoc bucket is split into two packed buckets, possibly * - The thisLoc bucket is split into two packed buckets, possibly
* invalidating the initial position of keypos, with a split key * invalidating the initial position of keypos, with a split key
* promoted to the parent. The new key key/recordLoc will be in serted * promoted to the parent. The new key key/recordLoc will be in serted
* into one of the split buckets, and lchild/rchild set appropri ately. * into one of the split buckets, and lchild/rchild set appropri ately.
* Splitting may occur recursively, possibly changing the tree h ead. * Splitting may occur recursively, possibly changing the tree h ead.
*/ */
void split(const DiskLoc thisLoc, int keypos, void split(IndexCatalogEntry* btreeState,
const DiskLoc thisLoc, int keypos,
const DiskLoc recordLoc, const Key& key, const DiskLoc recordLoc, const Key& key,
const Ordering& order, const DiskLoc lchild, const DiskL oc rchild, IndexDetails& idx); const DiskLoc lchild, const DiskLoc rchild);
/** /**
* Preconditions: * Preconditions:
* - 0 <= keypos <= n * - 0 <= keypos <= n
* - If key / recordLoc are inserted at position keypos, with prov ided * - If key / recordLoc are inserted at position keypos, with prov ided
* lchild and rchild, the btree ordering requirements will be * lchild and rchild, the btree ordering requirements will be
* maintained. * maintained.
* - lchild is equal to the existing child at index keypos. * - lchild is equal to the existing child at index keypos.
* - n == 0 is ok. * - n == 0 is ok.
* Postconditions: * Postconditions:
* - The key / recordLoc are inserted at position keypos, and the * - The key / recordLoc are inserted at position keypos, and the
* bucket is split if necessary, which may change the tree head. * bucket is split if necessary, which may change the tree head.
* - The bucket may be packed or split, invalidating the specified value * - The bucket may be packed or split, invalidating the specified value
* of keypos. * of keypos.
* This function will always modify thisLoc, but it's marked const because * This function will always modify thisLoc, but it's marked const because
* it commonly relies on the specialized writ]e intent mechanism of basicInsert(). * it commonly relies on the specialized writ]e intent mechanism of basicInsert().
*/ */
void insertHere(const DiskLoc thisLoc, int keypos, void insertHere(IndexCatalogEntry* btreeState,
const DiskLoc recordLoc, const Key& key, const Orde const DiskLoc thisLoc, int keypos,
ring &order, const DiskLoc recordLoc, const Key& key,
const DiskLoc lchild, const DiskLoc rchild, IndexDe const DiskLoc lchild, const DiskLoc rchild ) const;
tails &idx) const;
/** bt_insert() is basically just a wrapper around this. */ /** bt_insert() is basically just a wrapper around this. */
int _insert(const DiskLoc thisLoc, const DiskLoc recordLoc, int _insert(IndexCatalogEntry* btreeState,
const Key& key, const Ordering &order, bool dupsAllowed const DiskLoc thisLoc, const DiskLoc recordLoc,
, const Key& key, bool dupsallowed,
const DiskLoc lChild, const DiskLoc rChild, IndexDetail const DiskLoc lChild, const DiskLoc rChild ) const;
s &idx) const;
bool find(const IndexCatalogEntry* btreeState,
const Key& key,
const DiskLoc &recordLoc,
int& pos,
bool assertIfDup) const;
bool find(const IndexDetails& idx, const Key& key, const DiskLoc &r ecordLoc, const Ordering &order, int& pos, bool assertIfDup) const;
static bool customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, co nst vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) ; static bool customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, co nst vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) ;
static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largest Loc, int& largestKey); static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largest Loc, int& largestKey);
static int customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction ); static int customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction );
/** If child is non null, set its parent to thisLoc */ /** If child is non null, set its parent to thisLoc */
static void fix(const DiskLoc thisLoc, const DiskLoc child); static void fix(const DiskLoc thisLoc, const DiskLoc child);
/** /**
* Preconditions: * Preconditions:
* - 0 <= keypos < n * - 0 <= keypos < n
skipping to change at line 990 skipping to change at line 1042
* and lchild and rchild are set, the btree ordering properties will * and lchild and rchild are set, the btree ordering properties will
* be maintained. * be maintained.
* - rchild == childForPos( keypos + 1 ) * - rchild == childForPos( keypos + 1 )
* - childForPos( keypos ) is referenced elsewhere if nonnull. * - childForPos( keypos ) is referenced elsewhere if nonnull.
* Postconditions: * Postconditions:
* - The key at keypos will be replaced with the specified key and * - The key at keypos will be replaced with the specified key and
* lchild, potentially splitting this bucket and changing the tr ee * lchild, potentially splitting this bucket and changing the tr ee
* head. * head.
* - childForPos( keypos ) will be orphaned. * - childForPos( keypos ) will be orphaned.
*/ */
void setInternalKey( const DiskLoc thisLoc, int keypos, void setInternalKey( IndexCatalogEntry* btreeState,
const DiskLoc recordLoc, const Key &key, const const DiskLoc thisLoc,
Ordering &order, int keypos,
const DiskLoc lchild, const DiskLoc rchild, In const DiskLoc recordLoc,
dexDetails &idx); const Key &key,
const DiskLoc lchild,
const DiskLoc rchild );
/** /**
* Preconditions: * Preconditions:
* - 0 <= keypos < n * - 0 <= keypos < n
* - The keypos or keypos+1 indexed child is non null. * - The keypos or keypos+1 indexed child is non null.
* Postconditions: * Postconditions:
* - The specified key is deleted by replacing it with another key if * - The specified key is deleted by replacing it with another key if
* possible. This replacement may cause a split and change the tree * possible. This replacement may cause a split and change the tree
* head. The replacement key will be deleted from its original * head. The replacement key will be deleted from its original
* location, potentially causing merges and splits that may inva lidate * location, potentially causing merges and splits that may inva lidate
* 'this' and thisLoc and change the tree head. * 'this' and thisLoc and change the tree head.
* - If the key cannot be replaced, it will be marked as unused. This * - If the key cannot be replaced, it will be marked as unused. This
* is only expected in legacy btrees. * is only expected in legacy btrees.
*/ */
void deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDet ails &id, const Ordering &order ); void deleteInternalKey( IndexCatalogEntry* btreeState, const DiskLo c thisLoc, int keypos );
public: public:
/** simply builds and returns a dup key error message string */ /** simply builds and returns a dup key error message string */
static string dupKeyError( const IndexDetails& idx , const Key& key ); static string dupKeyError( const IndexDescriptor* idx , const Key& key );
}; };
#pragma pack() #pragma pack()
template< class V >
BucketBasics<V>::KeyNode::KeyNode(const BucketBasics<V>& bb, const _Key
Node &k) :
prevChildBucket(k.prevChildBucket),
recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
{ }
template< class V >
const BtreeBucket<V> * DiskLoc::btree() const {
verify( _a != -1 );
Record *r = rec();
return (const BtreeBucket<V> *) r->data();
}
/** /**
* give us a writable version of the btree bucket (declares write inten t). * give us a writable version of the btree bucket (declares write inten t).
* note it is likely more efficient to declare write intent on somethin g smaller when you can. * note it is likely more efficient to declare write intent on somethin g smaller when you can.
*/ */
template< class V > template< class V >
BtreeBucket<V> * DiskLoc::btreemod() const { BtreeBucket<V> * DiskLoc::btreemod() const {
verify( _a != -1 ); verify( _a != -1 );
BtreeBucket<V> *b = const_cast< BtreeBucket<V> * >( btree<V>() ); BtreeBucket<V> *b = const_cast< BtreeBucket<V> * >( btree<V>() );
return static_cast< BtreeBucket<V>* >( getDur().writingPtr( b, V::B ucketSize ) ); return static_cast< BtreeBucket<V>* >( getDur().writingPtr( b, V::B ucketSize ) );
} }
template< class V >
BucketBasics<V>::KeyNode::KeyNode(const BucketBasics<V>& bb, const _Key
Node &k) :
prevChildBucket(k.prevChildBucket),
recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
{ }
} // namespace mongo; } // namespace mongo;
 End of changes. 35 change blocks. 
80 lines changed or deleted 123 lines changed or added


 btree_access_method.h   btree_access_method.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/btree.h" #include "mongo/db/structure/btree/btree.h"
#include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/btree_key_generator.h" #include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/index/btree_access_method_internal.h" #include "mongo/db/index/btree_based_access_method.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class BtreeInterface; class BtreeInterface;
class IndexCursor; class IndexCursor;
class IndexDescriptor; class IndexDescriptor;
/** /**
* The IndexAccessMethod for a Btree index. * The IndexAccessMethod for a Btree index.
* Any index created with {field: 1} or {field: -1} uses this. * Any index created with {field: 1} or {field: -1} uses this.
*/ */
class BtreeAccessMethod : public BtreeBasedAccessMethod { class BtreeAccessMethod : public BtreeBasedAccessMethod {
public: public:
// Every Btree-based index needs these. We put them in the BtreeBa sedAccessMethod // Every Btree-based index needs these. We put them in the BtreeBa sedAccessMethod
// superclass and subclasses (like this) can use them. // superclass and subclasses (like this) can use them.
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
using BtreeBasedAccessMethod::_interface; using BtreeBasedAccessMethod::_interface;
using BtreeBasedAccessMethod::_ordering;
BtreeAccessMethod(IndexDescriptor* descriptor); BtreeAccessMethod(IndexCatalogEntry* btreeState );
virtual ~BtreeAccessMethod() { } virtual ~BtreeAccessMethod() { }
virtual Status newCursor(IndexCursor** out);
private: private:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// Our keys differ for V0 and V1. // Our keys differ for V0 and V1.
scoped_ptr<BtreeKeyGenerator> _keyGenerator; scoped_ptr<BtreeKeyGenerator> _keyGenerator;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
6 lines changed or deleted 3 lines changed or added


 btree_index_cursor.h   btree_index_cursor.h 
skipping to change at line 48 skipping to change at line 48
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
namespace mongo { namespace mongo {
class BtreeIndexCursor : public IndexCursor { class BtreeIndexCursor : public IndexCursor {
public: public:
virtual ~BtreeIndexCursor(); virtual ~BtreeIndexCursor();
bool isEOF() const; bool isEOF() const;
// See nasty comment in .cpp
virtual DiskLoc getBucket() const;
virtual int getKeyOfs() const;
/** /**
* Called from btree.cpp when we're about to delete a Btree bucket. * Called from btree.cpp when we're about to delete a Btree bucket.
*/ */
static void aboutToDeleteBucket(const DiskLoc& bucket); static void aboutToDeleteBucket(const DiskLoc& bucket);
virtual Status setOptions(const CursorOptions& options); virtual Status setOptions(const CursorOptions& options);
virtual Status seek(const BSONObj& position); virtual Status seek(const BSONObj& position);
// Btree-specific seeking functions. // Btree-specific seeking functions.
Status seek(const vector<const BSONElement*>& position, Status seek(const vector<const BSONElement*>& position,
const vector<bool>& inclusive); const vector<bool>& inclusive);
/**
* Seek to the key 'position'. If 'afterKey' is true, seeks to the
first
* key that is oriented after 'position'.
*
* Btree-specific.
*/
void seek(const BSONObj& position, bool afterKey);
Status skip(const BSONObj &keyBegin, int keyBeginLen, bool afterKey , Status skip(const BSONObj &keyBegin, int keyBeginLen, bool afterKey ,
const vector<const BSONElement*>& keyEnd, const vector<const BSONElement*>& keyEnd,
const vector<bool>& keyEndInclusive); const vector<bool>& keyEndInclusive);
virtual BSONObj getKey() const; virtual BSONObj getKey() const;
virtual DiskLoc getValue() const; virtual DiskLoc getValue() const;
virtual void next(); virtual void next();
/**
* BtreeIndexCursor-only.
* Returns true if 'this' points at the same exact key as 'other'.
* Returns false otherwise.
*/
bool pointsAt(const BtreeIndexCursor& other);
virtual Status savePosition(); virtual Status savePosition();
virtual Status restorePosition(); virtual Status restorePosition();
virtual string toString(); virtual string toString();
private: private:
// We keep the constructor private and only allow the AM to create us. // We keep the constructor private and only allow the AM to create us.
friend class BtreeAccessMethod; friend class BtreeBasedAccessMethod;
// For handling bucket deletion. // For handling bucket deletion.
static unordered_set<BtreeIndexCursor*> _activeCursors; static unordered_set<BtreeIndexCursor*> _activeCursors;
static SimpleMutex _activeCursorsMutex; static SimpleMutex _activeCursorsMutex;
// Go forward by default. // Go forward by default.
BtreeIndexCursor(IndexDescriptor *descriptor, Ordering ordering, Bt reeInterface *interface); BtreeIndexCursor(const IndexCatalogEntry* btreeState, BtreeInterfac e *interface);
void skipUnusedKeys(); void skipUnusedKeys();
bool isSavedPositionValid(); bool isSavedPositionValid();
// Move to the next/prev. key. Used by normal getNext and also ski pping unused keys. // Move to the next/prev. key. Used by normal getNext and also ski pping unused keys.
void advance(const char* caller); void advance(const char* caller);
// For saving/restoring position. // For saving/restoring position.
BSONObj _savedKey; BSONObj _savedKey;
DiskLoc _savedLoc; DiskLoc _savedLoc;
BSONObj _emptyObj; BSONObj _emptyObj;
int _direction; int _direction;
IndexDescriptor* _descriptor; const IndexCatalogEntry* _btreeState; // not-owned
Ordering _ordering;
BtreeInterface* _interface; BtreeInterface* _interface;
// What are we looking at RIGHT NOW? We look at a bucket. // What are we looking at RIGHT NOW? We look at a bucket.
DiskLoc _bucket; DiskLoc _bucket;
// And we look at an offset in the bucket. // And we look at an offset in the bucket.
int _keyOffset; int _keyOffset;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
8 lines changed or deleted 19 lines changed or added


 btree_interface.h   btree_interface.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/btree.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class IndexCatalogEntry;
/** /**
* We have two Btree on-disk formats which support identical operations . We hide this as much * We have two Btree on-disk formats which support identical operations . We hide this as much
* as possible by having one implementation of this interface per forma t. * as possible by having one implementation of this interface per forma t.
* *
* For documentation on all of the methods here, look at mongo/db/btree .h * For documentation on all of the methods here, look at mongo/db/struc ture/btree/btree.h
*/ */
class BtreeInterface { class BtreeInterface {
public: public:
virtual ~BtreeInterface() { } virtual ~BtreeInterface() { }
static BtreeInterface *interfaces[]; static BtreeInterface *interfaces[];
// This is the # of the exception that is thrown if we're trying to access a bucket that // This is the # of the exception that is thrown if we're trying to access a bucket that
// was deleted. Calling code needs to be able to recognize this an d possibly ignore it. // was deleted. Calling code needs to be able to recognize this an d possibly ignore it.
static const int deletedBucketCode = 16738; static const int deletedBucketCode = 16738;
virtual int bt_insert(const DiskLoc thisLoc, virtual int bt_insert(IndexCatalogEntry* btreeState,
const DiskLoc thisLoc,
const DiskLoc recordLoc, const DiskLoc recordLoc,
const BSONObj& key, const BSONObj& key,
const Ordering &order, bool dupsallowed,
bool dupsAllowed, bool toplevel = true) = 0;
IndexDetails& idx,
bool toplevel = true) const = 0;
virtual bool unindex(const DiskLoc thisLoc, virtual bool unindex(IndexCatalogEntry* btreeState,
IndexDetails& id, const DiskLoc thisLoc,
const BSONObj& key, const BSONObj& key,
const DiskLoc recordLoc) const = 0; const DiskLoc recordLoc) = 0;
virtual DiskLoc locate(const IndexDetails& idx, virtual DiskLoc locate(const IndexCatalogEntry* btreeState,
const DiskLoc& thisLoc, const DiskLoc& thisLoc,
const BSONObj& key, const BSONObj& key,
const Ordering& order, int& pos, // out
int& pos, bool& found, // out
bool& found, const DiskLoc& recordLoc, // out
const DiskLoc& recordLoc,
int direction = 1) const = 0; int direction = 1) const = 0;
virtual bool wouldCreateDup(const IndexDetails& idx, virtual bool wouldCreateDup(const IndexCatalogEntry* btreeState,
const DiskLoc& thisLoc, const DiskLoc& thisLoc,
const BSONObj& key, const BSONObj& key,
const Ordering& order,
const DiskLoc& self) const = 0; const DiskLoc& self) const = 0;
virtual void customLocate(DiskLoc& locInOut, virtual void customLocate(const IndexCatalogEntry* btreeState,
DiskLoc& locInOut,
int& keyOfs, int& keyOfs,
const BSONObj& keyBegin, const BSONObj& keyBegin,
int keyBeginLen, bool afterKey, int keyBeginLen, bool afterKey,
const vector<const BSONElement*>& keyEnd, const vector<const BSONElement*>& keyEnd,
const vector<bool>& keyEndInclusive, const vector<bool>& keyEndInclusive,
const Ordering& order,
int direction, int direction,
pair<DiskLoc, int>& bestParent) = 0 ; pair<DiskLoc, int>& bestParent) const = 0 ;
virtual void advanceTo(DiskLoc &thisLoc, virtual void advanceTo(const IndexCatalogEntry* btreeState,
DiskLoc &thisLoc,
int &keyOfs, int &keyOfs,
const BSONObj &keyBegin, const BSONObj &keyBegin,
int keyBeginLen, int keyBeginLen,
bool afterKey, bool afterKey,
const vector<const BSONElement*>& keyEnd, const vector<const BSONElement*>& keyEnd,
const vector<bool>& keyEndInclusive, const vector<bool>& keyEndInclusive,
const Ordering& order, int direction) const = 0; int direction) const = 0;
virtual string dupKeyError(DiskLoc bucket, virtual string dupKeyError(const IndexCatalogEntry* btreeState,
const IndexDetails &idx, DiskLoc bucket,
const BSONObj& keyObj) const =0; const BSONObj& keyObj) const =0;
virtual DiskLoc advance(const DiskLoc& thisLoc, virtual DiskLoc advance(const IndexCatalogEntry* btreeState,
const DiskLoc& thisLoc,
int& keyOfs, int& keyOfs,
int direction, int direction,
const char* caller) const = 0; const char* caller) const = 0;
virtual long long fullValidate(const DiskLoc& thisLoc, const BSONOb virtual long long fullValidate(const IndexCatalogEntry* btreeState,
j& keyPattern) = 0; const DiskLoc& thisLoc,
const BSONObj& keyPattern) = 0;
/** /**
* These methods are here so that the BtreeCursor doesn't need to d o any templating for the * These methods are here so that the BtreeCursor doesn't need to d o any templating for the
* two on-disk formats. * two on-disk formats.
*/ */
/** /**
* Returns number of total keys just in provided bucket
* (not recursive)
*/
virtual int nKeys(const IndexCatalogEntry* btreeState,
DiskLoc bucket ) = 0;
/**
* Is the key at (bucket, keyOffset) being used or not? * Is the key at (bucket, keyOffset) being used or not?
* Some keys are marked as not used and skipped. * Some keys are marked as not used and skipped.
*/ */
virtual bool keyIsUsed(DiskLoc bucket, int keyOffset) const = 0; virtual bool keyIsUsed(const IndexCatalogEntry* btreeState,
DiskLoc bucket, int keyOffset) const = 0;
/** /**
* Get the BSON representation of the key at (bucket, keyOffset). * Get the BSON representation of the key at (bucket, keyOffset).
*/ */
virtual BSONObj keyAt(DiskLoc bucket, int keyOffset) const = 0; virtual BSONObj keyAt(const IndexCatalogEntry* btreeState,
DiskLoc bucket, int keyOffset) const = 0;
/** /**
* Get the DiskLoc that the key at (bucket, keyOffset) points at. * Get the DiskLoc that the key at (bucket, keyOffset) points at.
*/ */
virtual DiskLoc recordAt(DiskLoc bucket, int keyOffset) const = 0; virtual DiskLoc recordAt(const IndexCatalogEntry* btreeState,
DiskLoc bucket, int keyOffset) const = 0;
/** /**
* keyAt and recordAt at the same time. * keyAt and recordAt at the same time.
*/ */
virtual void keyAndRecordAt(DiskLoc bucket, int keyOffset, BSONObj* virtual void keyAndRecordAt(const IndexCatalogEntry* btreeState,
keyOut, DiskLoc bucket, int keyOffset, BSONObj*
keyOut,
DiskLoc* recordOut) const = 0; DiskLoc* recordOut) const = 0;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 24 change blocks. 
32 lines changed or deleted 44 lines changed or added


 btreebuilder.h   btreebuilder.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/btree.h" #include "mongo/db/structure/btree/btree.h"
namespace mongo { namespace mongo {
class IndexCatalogEntry;
/** /**
* build btree from the bottom up * build btree from the bottom up
*/ */
template< class V > template< class V >
class BtreeBuilder { class BtreeBuilder {
typedef typename V::KeyOwned KeyOwned; typedef typename V::KeyOwned KeyOwned;
typedef typename V::Key Key; typedef typename V::Key Key;
bool dupsAllowed; bool _dupsAllowed;
IndexDetails& idx; IndexCatalogEntry* _btreeState;
/** Number of keys added to btree. */ /** Number of keys added to btree. */
unsigned long long n; unsigned long long _numAdded;
/** Last key passed to addKey(). */ /** Last key passed to addKey(). */
auto_ptr< typename V::KeyOwned > keyLast; auto_ptr< typename V::KeyOwned > keyLast;
BSONObj order;
Ordering ordering;
/** true iff commit() completed successfully. */ /** true iff commit() completed successfully. */
bool committed; bool committed;
DiskLoc cur, first; DiskLoc cur, first;
BtreeBucket<V> *b; BtreeBucket<V> *b;
void newBucket(); void newBucket();
void buildNextLevel(DiskLoc loc, bool mayInterrupt); void buildNextLevel(DiskLoc loc, bool mayInterrupt);
void mayCommitProgressDurably(); void mayCommitProgressDurably();
BtreeBucket<V>* _getModifiableBucket( DiskLoc loc );
const BtreeBucket<V>* _getBucket( DiskLoc loc );
public: public:
BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx); BtreeBuilder(bool dupsAllowed, IndexCatalogEntry* idx);
/** /**
* Preconditions: 'key' is > or >= last key passed to this function (depends on _dupsAllowed) * Preconditions: 'key' is > or >= last key passed to this function (depends on _dupsAllowed)
* Postconditions: 'key' is added to intermediate storage. * Postconditions: 'key' is added to intermediate storage.
*/ */
void addKey(BSONObj& key, DiskLoc loc); void addKey(BSONObj& key, DiskLoc loc);
/** /**
* commit work. if not called, destructor will clean up partially completed work * commit work. if not called, destructor will clean up partially completed work
* (in case exception has happened). * (in case exception has happened).
*/ */
void commit(bool mayInterrupt); void commit(bool mayInterrupt);
unsigned long long getn() { return n; } unsigned long long getn() { return _numAdded; }
}; };
} }
 End of changes. 8 change blocks. 
8 lines changed or deleted 13 lines changed or added


 builder.h   builder.h 
skipping to change at line 59 skipping to change at line 59
const int BSONObjMaxUserSize = 16 * 1024 * 1024; const int BSONObjMaxUserSize = 16 * 1024 * 1024;
/* /*
Sometimes we need objects slightly larger - an object in the replica tion local.oplog Sometimes we need objects slightly larger - an object in the replica tion local.oplog
is slightly larger than a user object for example. is slightly larger than a user object for example.
*/ */
const int BSONObjMaxInternalSize = BSONObjMaxUserSize + ( 16 * 1024 ); const int BSONObjMaxInternalSize = BSONObjMaxUserSize + ( 16 * 1024 );
const int BufferMaxSize = 64 * 1024 * 1024; const int BufferMaxSize = 64 * 1024 * 1024;
void msgasserted(int msgid, const char *msg);
template <typename Allocator> template <typename Allocator>
class StringBuilderImpl; class StringBuilderImpl;
class TrivialAllocator { class TrivialAllocator {
public: public:
void* Malloc(size_t sz) { return malloc(sz); } void* Malloc(size_t sz) { return malloc(sz); }
void* Realloc(void *p, size_t sz) { return realloc(p, sz); } void* Realloc(void *p, size_t sz) { return realloc(p, sz); }
void Free(void *p) { free(p); } void Free(void *p) { free(p); }
}; };
 End of changes. 1 change blocks. 
2 lines changed or deleted 0 lines changed or added


 cached_plan_runner.h   cached_plan_runner.h 
skipping to change at line 35 skipping to change at line 35
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <string> #include <string>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/runner.h" #include "mongo/db/query/runner.h"
namespace mongo { namespace mongo {
class BSONObj; class BSONObj;
struct CachedSolution; class CachedSolution;
class CanonicalQuery; class CanonicalQuery;
class DiskLoc; class DiskLoc;
class PlanExecutor; class PlanExecutor;
class PlanStage; class PlanStage;
class TypeExplain; class TypeExplain;
struct PlanInfo;
class WorkingSet; class WorkingSet;
/** /**
* CachedPlanRunner runs a plan retrieved from the cache. * CachedPlanRunner runs a plan retrieved from the cache.
* *
* Cached plans are bundled with information describing why the plan is
in the cache.
*
* If we run a plan from the cache and behavior wildly deviates from ex pected behavior, we may * If we run a plan from the cache and behavior wildly deviates from ex pected behavior, we may
* remove the plan from the cache. See plan_cache.h. * remove the plan from the cache. See plan_cache.h.
*/ */
class CachedPlanRunner : public Runner { class CachedPlanRunner : public Runner {
public: public:
/**
/** Takes ownership of all arguments. */ * Takes ownership of all arguments.
CachedPlanRunner(CanonicalQuery* canonicalQuery, CachedSolution* ca * XXX: what args should this really take? probably a cachekey as
ched, well?
*/
CachedPlanRunner(const Collection* collection,
CanonicalQuery* canonicalQuery, QuerySolution* sol
ution,
PlanStage* root, WorkingSet* ws); PlanStage* root, WorkingSet* ws);
virtual ~CachedPlanRunner(); virtual ~CachedPlanRunner();
Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
virtual bool isEOF(); virtual bool isEOF();
virtual void saveState(); virtual void saveState();
virtual bool restoreState(); virtual bool restoreState();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual void setYieldPolicy(Runner::YieldPolicy policy); virtual void setYieldPolicy(Runner::YieldPolicy policy);
virtual const std::string& ns(); virtual const std::string& ns();
virtual void kill(); virtual void kill();
virtual const Collection* collection() { return _collection; }
/** /**
* Returns OK, allocating and filling in '*explain' with details of * Returns OK, allocating and filling in '*explain' and '*planInfo'
the cached with details of
* plan. Caller takes ownership of '*explain'. Otherwise, return a * the cached plan. Caller takes ownership of '*explain' and '*plan
status describing Info'. Otherwise,
* the error. * return a status describing the error.
*/ */
virtual Status getExplainPlan(TypeExplain** explain) const; virtual Status getInfo(TypeExplain** explain,
PlanInfo** planInfo) const;
/**
* Takes ownership of all arguments.
*/
void setBackupPlan(QuerySolution* qs, PlanStage* root, WorkingSet*
ws);
private: private:
void updateCache(); void updateCache();
const Collection* _collection;
boost::scoped_ptr<CanonicalQuery> _canonicalQuery; boost::scoped_ptr<CanonicalQuery> _canonicalQuery;
boost::scoped_ptr<CachedSolution> _cachedQuery; boost::scoped_ptr<QuerySolution> _solution;
boost::scoped_ptr<PlanExecutor> _exec; boost::scoped_ptr<PlanExecutor> _exec;
// Owned here. If non-NULL, then this plan executor is capable
// of executing a backup plan in the case of a blocking sort.
std::auto_ptr<PlanExecutor> _backupPlan;
// Owned here. If non-NULL, contains the query solution correspondi
ng
// to the backup plan.
boost::scoped_ptr<QuerySolution> _backupSolution;
// Whether the executor for the winning plan has produced results y
et.
bool _alreadyProduced;
// Have we updated the cache with our plan stats yet? // Have we updated the cache with our plan stats yet?
bool _updatedCache; bool _updatedCache;
// Has the runner been killed?
bool _killed;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 13 change blocks. 
16 lines changed or deleted 45 lines changed or added


 canonical_query.h   canonical_query.h 
skipping to change at line 36 skipping to change at line 36
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/dbmessage.h" #include "mongo/db/dbmessage.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#include "mongo/db/query/lite_parsed_query.h" #include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/query/lite_projection.h" #include "mongo/db/query/parsed_projection.h"
namespace mongo { namespace mongo {
// TODO: Is this binary data really?
typedef std::string PlanCacheKey;
class CanonicalQuery { class CanonicalQuery {
public: public:
static Status canonicalize(const QueryMessage& qm, CanonicalQuery** out); static Status canonicalize(const QueryMessage& qm, CanonicalQuery** out);
/** /**
* For testing or for internal clients to use. * For testing or for internal clients to use.
*/ */
static Status canonicalize(const string& ns, const BSONObj& query, CanonicalQuery** out); static Status canonicalize(const string& ns, const BSONObj& query, CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query, long long skip, static Status canonicalize(const string& ns, const BSONObj& query, long long skip,
long long limit, CanonicalQuery** out); long long limit, CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort, static Status canonicalize(const string& ns, const BSONObj& query, const BSONObj& sort,
const BSONObj& proj, CanonicalQuery** ou t); const BSONObj& proj, CanonicalQuery** ou t);
static Status canonicalize(const string& ns, const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip, long long limit,
CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip, long long limit,
const BSONObj& hint,
CanonicalQuery** out);
static Status canonicalize(const string& ns, const BSONObj& query,
const BSONObj& sort,
const BSONObj& proj,
long long skip, long long limit,
const BSONObj& hint,
const BSONObj& minObj, const BSONObj& ma
xObj,
bool snapshot, CanonicalQuery** out);
/**
* Returns true if "query" describes an exact-match query on _id, p
ossibly with
* the $isolated/$atomic modifier.
*/
static bool isSimpleIdQuery(const BSONObj& query);
// What namespace is this query over? // What namespace is this query over?
const string& ns() const { return _pq->ns(); } const string& ns() const { return _pq->ns(); }
// //
// Accessors for the query // Accessors for the query
// //
MatchExpression* root() const { return _root.get(); } MatchExpression* root() const { return _root.get(); }
BSONObj getQueryObj() const { return _pq->getFilter(); } BSONObj getQueryObj() const { return _pq->getFilter(); }
const LiteParsedQuery& getParsed() const { return *_pq; } const LiteParsedQuery& getParsed() const { return *_pq; }
LiteProjection* getLiteProj() const { return _liteProj.get(); } const ParsedProjection* getProj() const { return _proj.get(); }
/**
* Get the cache key for this canonical query.
*/
const PlanCacheKey& getPlanCacheKey() const;
// Debugging
string toString() const; string toString() const;
/**
* Validates match expression, checking for certain
* combinations of operators in match expression and
* query options in LiteParsedQuery.
* Since 'root' is derived from 'filter' in LiteParsedQuery,
* 'filter' is not validated.
*
* TODO: Move this to query_validator.cpp
*/
static Status isValid(MatchExpression* root, const LiteParsedQuery&
parsed);
/**
* Returns the normalized version of the subtree rooted at 'root'.
*
* Takes ownership of 'root'.
*/
static MatchExpression* normalizeTree(MatchExpression* root);
/**
* Traverses expression tree post-order.
* Sorts children at each non-leaf node by (MatchType, path(), cach
eKey)
*/
static void sortTree(MatchExpression* tree);
private: private:
// You must go through canonicalize to create a CanonicalQuery. // You must go through canonicalize to create a CanonicalQuery.
CanonicalQuery() { } CanonicalQuery() { }
/**
* Computes and stores the cache key / query shape
* for this query.
*/
void generateCacheKey(void);
// Takes ownership of lpq // Takes ownership of lpq
Status init(LiteParsedQuery* lpq); Status init(LiteParsedQuery* lpq);
scoped_ptr<LiteParsedQuery> _pq; scoped_ptr<LiteParsedQuery> _pq;
// _root points into _pq->getFilter() // _root points into _pq->getFilter()
scoped_ptr<MatchExpression> _root; scoped_ptr<MatchExpression> _root;
scoped_ptr<LiteProjection> _liteProj; scoped_ptr<ParsedProjection> _proj;
/**
* Cache key is a string-ified combination of the query and sort ob
fuscated
* for minimal user comprehension.
*/
PlanCacheKey _cacheKey;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 8 change blocks. 
3 lines changed or deleted 80 lines changed or added


 chunk.h   chunk.h 
skipping to change at line 35 skipping to change at line 35
* version of the file(s), but you are not obligated to do so. If you do not * version of the file(s), but you are not obligated to do so. If you do not
* wish to do so, delete this exception statement from your version. If y ou * wish to do so, delete this exception statement from your version. If y ou
* delete this exception statement from all source files in the program, * delete this exception statement from all source files in the program,
* then also delete it in the license file. * then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/bson/util/atomic_int.h" #include "mongo/bson/util/atomic_int.h"
#include "mongo/client/distlock.h"
#include "mongo/s/chunk_version.h" #include "mongo/s/chunk_version.h"
#include "mongo/s/distlock.h"
#include "mongo/s/shard.h" #include "mongo/s/shard.h"
#include "mongo/s/shardkey.h" #include "mongo/s/shardkey.h"
#include "mongo/util/concurrency/ticketholder.h" #include "mongo/util/concurrency/ticketholder.h"
namespace mongo { namespace mongo {
class DBConfig; class DBConfig;
class Chunk; class Chunk;
class ChunkRange; class ChunkRange;
class ChunkManager; class ChunkManager;
 End of changes. 2 change blocks. 
1 lines changed or deleted 1 lines changed or added


 chunk_manager_targeter.h   chunk_manager_targeter.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public License * You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link th
e
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do no
t
* wish to do so, delete this exception statement from your version. If you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <map> #include <map>
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/s/chunk.h" #include "mongo/s/chunk.h"
#include "mongo/s/shard.h" #include "mongo/s/shard.h"
#include "mongo/s/chunk_version.h" #include "mongo/s/chunk_version.h"
#include "mongo/s/ns_targeter.h" #include "mongo/s/ns_targeter.h"
namespace mongo { namespace mongo {
class TargeterStats; struct TargeterStats;
/** /**
* NSTargeter based on a ChunkManager implementation. Wraps all except ion codepaths and * NSTargeter based on a ChunkManager implementation. Wraps all except ion codepaths and
* returns DatabaseNotFound statuses on applicable failures. * returns DatabaseNotFound statuses on applicable failures.
* *
* Must be initialized before use, and initialization may fail. * Must be initialized before use, and initialization may fail.
*/ */
class ChunkManagerTargeter : public NSTargeter { class ChunkManagerTargeter : public NSTargeter {
public: public:
skipping to change at line 75 skipping to change at line 87
void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ); void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo );
void noteCouldNotTarget(); void noteCouldNotTarget();
/** /**
* Replaces the targeting information with the latest information f rom the cache. If this * Replaces the targeting information with the latest information f rom the cache. If this
* information is stale WRT the noted stale responses or a remote r efresh is needed due * information is stale WRT the noted stale responses or a remote r efresh is needed due
* to a targeting failure, will contact the config servers to reloa d the metadata. * to a targeting failure, will contact the config servers to reloa d the metadata.
* *
* Reports wasChanged = true if the metadata is different after thi
s reload.
*
* Also see NSTargeter::refreshIfNeeded(). * Also see NSTargeter::refreshIfNeeded().
*/ */
Status refreshIfNeeded(); Status refreshIfNeeded( bool* wasChanged );
/** /**
* Returns the stats. Note that the returned stats object is still owned by this targeter. * Returns the stats. Note that the returned stats object is still owned by this targeter.
*/ */
const TargeterStats* getStats() const; const TargeterStats* getStats() const;
private: private:
// Different ways we can refresh metadata // Different ways we can refresh metadata
// TODO: Improve these ways. // TODO: Improve these ways.
 End of changes. 4 change blocks. 
2 lines changed or deleted 19 lines changed or added


 chunk_version.h   chunk_version.h 
skipping to change at line 191 skipping to change at line 191
if( ! hasCompatibleEpoch( otherVersion ) ) return false; if( ! hasCompatibleEpoch( otherVersion ) ) return false;
return otherVersion._major == _major; return otherVersion._major == _major;
} }
// Is this the same version? // Is this the same version?
bool isEquivalentTo( const ChunkVersion& otherVersion ) const { bool isEquivalentTo( const ChunkVersion& otherVersion ) const {
if( ! hasCompatibleEpoch( otherVersion ) ) return false; if( ! hasCompatibleEpoch( otherVersion ) ) return false;
return otherVersion._combined == _combined; return otherVersion._combined == _combined;
} }
/**
* Returns true if the otherVersion is the same as this version and
enforces strict epoch
* checking (empty epochs are not wildcards).
*/
bool isStrictlyEqualTo( const ChunkVersion& otherVersion ) const {
if ( otherVersion._epoch != _epoch )
return false;
return otherVersion._combined == _combined;
}
/**
* Returns true if this version is (strictly) in the same epoch as
the other version and
* this version is older. Returns false if we're not sure because
the epochs are different
* or if this version is newer.
*/
bool isOlderThan( const ChunkVersion& otherVersion ) const {
if ( otherVersion._epoch != _epoch )
return false;
if ( _major != otherVersion._major )
return _major < otherVersion._major;
return _minor < otherVersion._minor;
}
// Is this in the same epoch? // Is this in the same epoch?
bool hasCompatibleEpoch( const ChunkVersion& otherVersion ) const { bool hasCompatibleEpoch( const ChunkVersion& otherVersion ) const {
return hasCompatibleEpoch( otherVersion._epoch ); return hasCompatibleEpoch( otherVersion._epoch );
} }
bool hasCompatibleEpoch( const OID& otherEpoch ) const { bool hasCompatibleEpoch( const OID& otherEpoch ) const {
// TODO : Change logic from eras are not-unequal to eras are eq ual // TODO : Change logic from eras are not-unequal to eras are eq ual
if( otherEpoch.isSet() && _epoch.isSet() && otherEpoch != _epoc h ) return false; if( otherEpoch.isSet() && _epoch.isSet() && otherEpoch != _epoc h ) return false;
return true; return true;
} }
 End of changes. 1 change blocks. 
0 lines changed or deleted 28 lines changed or added


 clientOnly-private.h   clientOnly-private.h 
skipping to change at line 18 skipping to change at line 18
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
class mutex; class mutex;
namespace shell_utils { namespace shell_utils {
extern mongo::mutex &mongoProgramOutputMutex; extern MONGO_CLIENT_API mongo::mutex &mongoProgramOutputMutex;
} }
} }
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 client_info.h   client_info.h 
skipping to change at line 34 skipping to change at line 34
* file(s) with this exception, you may extend this exception to your * file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do not * version of the file(s), but you are not obligated to do so. If you do not
* wish to do so, delete this exception statement from your version. If you * wish to do so, delete this exception statement from your version. If you
* delete this exception statement from all source files in the program, * delete this exception statement from all source files in the program,
* then also delete it in the license file. * then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include <map>
#include <set>
#include <vector>
#include "mongo/db/client_basic.h" #include "mongo/db/client_basic.h"
#include "mongo/s/chunk.h" #include "mongo/s/chunk.h"
#include "mongo/s/writeback_listener.h" #include "mongo/s/writeback_listener.h"
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/util/net/hostandport.h" #include "mongo/util/net/hostandport.h"
namespace mongo { namespace mongo {
class AbstractMessagingPort; class AbstractMessagingPort;
/** /**
* holds information about a client connected to a mongos * holds information about a client connected to a mongos
* 1 per client socket * 1 per client socket
* currently implemented with a thread local * currently implemented with a thread local
skipping to change at line 73 skipping to change at line 78
/** /**
* @return remote socket address of the client * @return remote socket address of the client
*/ */
HostAndPort getRemote() const { return _remote; } HostAndPort getRemote() const { return _remote; }
/** /**
* notes that this client use this shard * notes that this client use this shard
* keeps track of all shards accessed this request * keeps track of all shards accessed this request
*/ */
void addShard( const string& shard ); void addShardHost( const string& shardHost );
/**
* Notes that this client wrote to these particular hosts with writ
e commands.
*/
void addHostOpTime(ConnectionString connstr, HostOpTime stat);
void addHostOpTimes( const HostOpTimeMap& hostOpTimes );
/** /**
* gets shards used on the previous request * gets shards used on the previous request
*/ */
set<string> * getPrev() const { return _prev; }; set<string>* getPrevShardHosts() const { return &_prev->shardHostsW
ritten; }
/**
* Gets the shards, hosts, and opTimes the client last wrote to wit
h write commands.
*/
const HostOpTimeMap& getPrevHostOpTimes() const {
return _prev->hostOpTimes;
}
/** /**
* gets all shards we've accessed since the last time we called cle arSinceLastGetError * gets all shards we've accessed since the last time we called cle arSinceLastGetError
*/ */
const set<string>& sinceLastGetError() const { return _sinceLastGet Error; } const set<string>& sinceLastGetError() const { return _sinceLastGet Error; }
/** /**
* clears list of shards we've talked to * clears list of shards we've talked to
*/ */
void clearSinceLastGetError() { _sinceLastGetError.clear(); } void clearSinceLastGetError() { _sinceLastGetError.clear(); }
/** /**
* resets the list of shards using to process the current request * resets the information stored for the current request
*/ */
void clearCurrentShards(){ _cur->clear(); } void clearRequestInfo(){ _cur->clear(); }
void disableForCommand(); void disableForCommand();
/**
* calls getLastError
* resets shards since get last error
* @return if the command was ok or if there was an error
*/
bool getLastError( const string& dbName,
const BSONObj& options ,
BSONObjBuilder& result ,
string& errmsg,
bool fromWriteBackListener = false );
/** @return if its ok to auto split from this client */ /** @return if its ok to auto split from this client */
bool autoSplitOk() const { return _autoSplitOk && Chunk::ShouldAuto Split; } bool autoSplitOk() const { return _autoSplitOk && Chunk::ShouldAuto Split; }
void noAutoSplit() { _autoSplitOk = false; } void noAutoSplit() { _autoSplitOk = false; }
// Returns whether or not a ClientInfo for this thread has already been created and stored // Returns whether or not a ClientInfo for this thread has already been created and stored
// in _tlInfo. // in _tlInfo.
static bool exists(); static bool exists();
// Gets the ClientInfo object for this thread from _tlInfo. If no C lientInfo object exists // Gets the ClientInfo object for this thread from _tlInfo. If no C lientInfo object exists
// yet for this thread, it creates one. // yet for this thread, it creates one.
static ClientInfo * get(AbstractMessagingPort* messagingPort = NULL ); static ClientInfo * get(AbstractMessagingPort* messagingPort = NULL );
// Creates a ClientInfo and stores it in _tlInfo // Creates a ClientInfo and stores it in _tlInfo
static ClientInfo* create(AbstractMessagingPort* messagingPort); static ClientInfo* create(AbstractMessagingPort* messagingPort);
private: private:
struct WBInfo {
WBInfo( const WriteBackListener::ConnectionIdent& c, OID o, boo
l fromLastOperation )
: ident( c ), id( o ), fromLastOperation( fromLastOperation
) {}
WriteBackListener::ConnectionIdent ident;
OID id;
bool fromLastOperation;
};
// for getLastError
void _addWriteBack( vector<WBInfo>& all , const BSONObj& o, bool fr
omLastOperation );
vector<BSONObj> _handleWriteBacks( const vector<WBInfo>& all , bool
fromWriteBackListener );
int _id; // unique client id int _id; // unique client id
HostAndPort _remote; // server:port of remote socket end HostAndPort _remote; // server:port of remote socket end
// we use _a and _b to store shards we've talked to on the current struct RequestInfo {
request and the previous
void clear() {
shardHostsWritten.clear();
hostOpTimes.clear();
}
std::set<string> shardHostsWritten;
HostOpTimeMap hostOpTimes;
};
// we use _a and _b to store info from the current request and the
previous request
// we use 2 so we can flip for getLastError type operations // we use 2 so we can flip for getLastError type operations
set<string> _a; // actual set for _cur or _prev RequestInfo _a; // actual set for _cur or _prev
set<string> _b; // " RequestInfo _b; // "
set<string> * _cur; // pointer to _a or _b depending on state RequestInfo* _cur; // pointer to _a or _b depending on state
set<string> * _prev; // "" RequestInfo* _prev; // ""
set<string> _sinceLastGetError; // all shards accessed since last g etLastError std::set<string> _sinceLastGetError; // all shards accessed since l ast getLastError
int _lastAccess; int _lastAccess;
bool _autoSplitOk; bool _autoSplitOk;
static boost::thread_specific_ptr<ClientInfo> _tlInfo; static boost::thread_specific_ptr<ClientInfo> _tlInfo;
}; };
/* Look for $gleStats in a command response, and fill in ClientInfo wit
h the data,
* if found.
* This data will be used by subsequent GLE calls, to ensure we look fo
r the correct
* write on the correct PRIMARY.
* result: the result from calling runCommand
* conn: the string name of the hostAndPort where the command ran. This
can be a replica set
* seed list.
*/
void saveGLEStats(const BSONObj& result, const std::string& conn);
} }
 End of changes. 13 change blocks. 
37 lines changed or deleted 55 lines changed or added


 clientcursor.h   clientcursor.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h"
#include <boost/thread/recursive_mutex.hpp> #include <boost/thread/recursive_mutex.hpp>
#include "mongo/db/cc_by_loc.h"
#include "mongo/db/cursor.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/keypattern.h" #include "mongo/db/keypattern.h"
#include "mongo/db/matcher.h"
#include "mongo/db/projection.h"
#include "mongo/db/query/runner.h" #include "mongo/db/query/runner.h"
#include "mongo/s/collection_metadata.h" #include "mongo/s/collection_metadata.h"
#include "mongo/util/net/message.h"
#include "mongo/util/background.h" #include "mongo/util/background.h"
#include "mongo/util/elapsed_tracker.h" #include "mongo/util/net/message.h"
namespace mongo { namespace mongo {
typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock; typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock;
class ClientCursor; class ClientCursor;
class Collection;
class CurOp;
class Database;
class NamespaceDetails;
class ParsedQuery; class ParsedQuery;
typedef long long CursorId; /* passed to the client so it can send back
on getMore */
static const CursorId INVALID_CURSOR_ID = -1; // But see SERVER-5726.
/** /**
* ClientCursor is a wrapper that represents a cursorid from our databa se application's * ClientCursor is a wrapper that represents a cursorid from our databa se application's
* perspective. * perspective.
*/ */
class ClientCursor : private boost::noncopyable { class ClientCursor : private boost::noncopyable {
public: public:
ClientCursor(int qopts, const shared_ptr<Cursor>& c, const StringDa ClientCursor(const Collection* collection, Runner* runner,
ta& ns, int qopts = 0, const BSONObj query = BSONObj());
BSONObj query = BSONObj());
ClientCursor(Runner* runner, int qopts = 0, const BSONObj query = B
SONObj());
ClientCursor(const string& ns); ClientCursor(const Collection* collection);
~ClientCursor(); ~ClientCursor();
/**
* Assert that there are no open cursors.
* Called from DatabaseHolder::closeAll.
*/
static void assertNoCursors();
// //
// Basic accessors // Basic accessors
// //
CursorId cursorid() const { return _cursorid; } CursorId cursorid() const { return _cursorid; }
string ns() const { return _ns; } string ns() const { return _ns; }
Database * db() const { return _db; } const Collection* collection() const { return _collection; }
//
// Invalidation of DiskLocs and dropping of namespaces
//
/** /**
* Get rid of cursors for namespaces 'ns'. When dropping a db, ns i * This is called when someone is dropping a collection or somethin
s "dbname." Used by drop, g else that
* dropIndexes, dropDatabase. * goes through killing cursors.
* It removes the responsiilibty of de-registering from ClientCurso
r.
* Responsibility for deleting the ClientCursor doesn't change from
this call
* see Runner::kill.
*/ */
static void invalidate(const StringData& ns); void kill();
/**
* Called when the provided DiskLoc is about to change state via a
deletion or an update.
* All runners/cursors that might be using that DiskLoc must adapt.
*/
static void aboutToDelete(const StringData& ns,
const NamespaceDetails* nsd,
const DiskLoc& dl);
/**
* Register a runner so that it can be notified of deletion/invalid
ation during yields.
* Must be called before a runner yields. If a runner is cached (i
nside a ClientCursor) it
* MUST NOT be registered; the two are mutually exclusive.
*/
static void registerRunner(Runner* runner);
/**
* Remove a runner from the runner registry.
*/
static void deregisterRunner(Runner* runner);
// //
// Yielding. // Yielding.
// //
static void staticYield(int micros, const StringData& ns, Record* r static void staticYield(int micros, const StringData& ns, const Rec
ec); ord* rec);
static int suggestYieldMicros();
//
// Static methods about all ClientCursors TODO: Document.
//
static void appendStats( BSONObjBuilder& result );
//
// ClientCursor creation/deletion.
//
static unsigned numCursors() { return clientCursorsById.size(); }
static void find( const string& ns , set<CursorId>& all );
static ClientCursor* find(CursorId id, bool warn = true);
// Same as erase but checks to make sure this thread has read permi
ssion on the cursor's
// namespace. This should be called when receiving killCursors fro
m a client. This should
// not be called when ccmutex is held.
static int eraseIfAuthorized(int n, long long* ids);
static bool eraseIfAuthorized(CursorId id);
/**
* @return number of cursors found
*/
static int erase(int n, long long* ids);
/**
* Deletes the cursor with the provided @param 'id' if one exists.
* @throw if the cursor with the provided id is pinned.
* This does not do any auth checking and should be used only when
erasing cursors as part
* of cleaning up internal operations.
*/
static bool erase(CursorId id);
// //
// Timing and timeouts // Timing and timeouts
// //
/** /**
* called every 4 seconds. millis is amount of idle time passed si
nce the last call --
* could be zero
*/
static void idleTimeReport(unsigned millis);
/**
* @param millis amount of idle passed time since last call * @param millis amount of idle passed time since last call
* note called outside of locks (other than ccmutex) so care must b e exercised * note called outside of locks (other than ccmutex) so care must b e exercised
*/ */
bool shouldTimeout( unsigned millis ); bool shouldTimeout( unsigned millis );
void setIdleTime( unsigned millis );
unsigned idleTime() const { return _idleAgeMillis; } unsigned idleTime() const { return _idleAgeMillis; }
uint64_t getLeftoverMaxTimeMicros() const { return _leftoverMaxTime Micros; } uint64_t getLeftoverMaxTimeMicros() const { return _leftoverMaxTime Micros; }
void setLeftoverMaxTimeMicros( uint64_t leftoverMaxTimeMicros ) { void setLeftoverMaxTimeMicros( uint64_t leftoverMaxTimeMicros ) {
_leftoverMaxTimeMicros = leftoverMaxTimeMicros; _leftoverMaxTimeMicros = leftoverMaxTimeMicros;
} }
// //
// Sharding-specific data. TODO: Document. // Sharding-specific data. TODO: Document.
// //
// future getMore.
void setCollMetadata( CollectionMetadataPtr metadata ){ _collMetada ta = metadata; } void setCollMetadata( CollectionMetadataPtr metadata ){ _collMetada ta = metadata; }
CollectionMetadataPtr getCollMetadata(){ return _collMetadata; } CollectionMetadataPtr getCollMetadata(){ return _collMetadata; }
// //
// Replication-related stuff. TODO: Document and clean. // Replication-related stuff. TODO: Document and clean.
// //
void updateSlaveLocation( CurOp& curop ); void updateSlaveLocation( CurOp& curop );
void slaveReadTill( const OpTime& t ) { _slaveReadTill = t; } void slaveReadTill( const OpTime& t ) { _slaveReadTill = t; }
/** Just for testing. */ /** Just for testing. */
skipping to change at line 204 skipping to change at line 136
// //
Runner* getRunner() const { return _runner.get(); } Runner* getRunner() const { return _runner.get(); }
int queryOptions() const { return _queryOptions; } int queryOptions() const { return _queryOptions; }
// Used by ops/query.cpp to stash how many results have been return ed by a query. // Used by ops/query.cpp to stash how many results have been return ed by a query.
int pos() const { return _pos; } int pos() const { return _pos; }
void incPos(int n) { _pos += n; } void incPos(int n) { _pos += n; }
void setPos(int n) { _pos = n; } void setPos(int n) { _pos = n; }
//
// Yielding that is DEPRECATED. Will be removed when we use runner
s and they yield
// internally.
//
/**
* DEPRECATED
* @param microsToSleep -1 : ask client
* 0 : pthread_yield or equivilant
* >0 : sleep for that amount
* @param recordToLoad after yielding lock, load this record with o
nly mmutex
* do a dbtemprelease
* note: caller should check matcher.docMatcher().atomic() first an
d not yield if atomic -
* we don't do herein as this->matcher (above) is only initia
lized for true queries/getmore.
* (ie not set for remote/update)
* @return if the cursor is still valid.
* if false is returned, then this ClientCursor should be c
onsidered deleted -
* in fact, the whole database could be gone.
*/
bool yield( int microsToSleep = -1, Record * recordToLoad = 0 );
enum RecordNeeds {
DontNeed = -1 , MaybeCovered = 0 , WillNeed = 100
};
/** /**
* @param needRecord whether or not the next record has to be read * Is this ClientCursor backed by an aggregation pipeline. Defaults
from disk for sure to false.
* if this is true, will yield of next record isn *
't in memory * Agg Runners differ from others in that they manage their own loc
* @param yielded true if a yield occurred, and potentially if a yi king internally and
eld did not occur * should not be killed or destroyed when the underlying collection
* @return same as yield() is deleted.
*
* Note: This should *not* be set for the internal cursor used as i
nput to an aggregation.
*/ */
bool yieldSometimes( RecordNeeds need, bool *yielded = 0 ); bool isAggCursor;
struct YieldData { CursorId _id; bool _doingDeletes; };
bool prepareToYield( YieldData &data );
static bool recoverFromYield( const YieldData &data );
static int suggestYieldMicros();
//
// Cursor-only DEPRECATED methods.
//
void storeOpForSlave( DiskLoc last );
// Only used by ops/query.cpp, which will stop using them when quer unsigned pinValue() const { return _pinValue; }
ies are answered only by
// a runner.
const BSONObj& query() const { return _query; }
shared_ptr<ParsedQuery> pq;
// This one is used also by pipeline/document_source_cursor.cpp
shared_ptr<Projection> fields; // which fields query wants returned
DiskLoc lastLoc() const { return _lastLoc; }
Cursor* c() const { return _c.get(); }
bool ok() { return _c->ok(); }
bool advance() { return _c->advance(); }
BSONObj current() { return _c->current(); }
DiskLoc currLoc() { return _c->currLoc(); }
BSONObj currKey() const { return _c->currKey(); }
bool currentIsDup() { return _c->getsetdup( _c->currLoc() ); }
bool currentMatches() {
if ( ! _c->matcher() )
return true;
return _c->matcher()->matchesCurrent( _c.get() );
}
void setDoingDeletes( bool doingDeletes ) {_doingDeletes = doingDel etes; } static long long totalOpen();
private: private:
friend class ClientCursorHolder; friend class ClientCursorMonitor;
friend class ClientCursorPin; friend class ClientCursorPin;
friend struct ClientCursorYieldLock;
friend class CmdCursorInfo; friend class CmdCursorInfo;
// A map from the CursorId to the ClientCursor behind it.
// TODO: Consider making this per-connection.
typedef map<CursorId, ClientCursor*> CCById;
static CCById clientCursorsById;
// A list of NON-CACHED runners. Any runner that yields must be pu
t into this map before
// yielding in order to be notified of invalidation and namespace d
eletion. Before the
// runner is deleted, it must be removed from this map.
//
// TODO: This is temporary and as such is highly NOT optimized.
static set<Runner*> nonCachedRunners;
// How many cursors have timed out?
static long long numberTimedOut;
// This must be held when modifying any static member.
static boost::recursive_mutex& ccmutex;
/** /**
* Initialization common between Cursor and Runner. * Initialization common between both constructors for the ClientCu
* TODO: Remove when we're all-runner. rsor.
*/ */
void init(); void init();
/**
* Allocates a new CursorId.
* Called from init(...). Assumes ccmutex held.
*/
static CursorId allocCursorId_inlock();
/**
* Find the ClientCursor with the provided ID. Optionally warn if
it's not found.
* Assumes ccmutex is held.
*/
static ClientCursor* find_inlock(CursorId id, bool warn = true);
/**
* Delete the ClientCursor with the provided ID. masserts if the c
ursor is pinned.
*/
static void _erase_inlock(ClientCursor* cursor);
// //
// ClientCursor-specific data, independent of the underlying execut ion type. // ClientCursor-specific data, independent of the underlying execut ion type.
// //
// The ID of the ClientCursor. // The ID of the ClientCursor.
CursorId _cursorid; CursorId _cursorid;
// A variable indicating the state of the ClientCursor. Possible v alues: // A variable indicating the state of the ClientCursor. Possible v alues:
// 0: Normal behavior. May time out. // 0: Normal behavior. May time out.
// 1: No timing out of this ClientCursor. // 1: No timing out of this ClientCursor.
// 100: Currently in use (via ClientCursorPin). // 100: Currently in use (via ClientCursorPin).
unsigned _pinValue; unsigned _pinValue;
// The namespace we're operating on. // The namespace we're operating on.
string _ns; string _ns;
// The database we're operating on. const Collection* _collection;
Database* _db;
// if we've added it to the total open counter yet
bool _countedYet;
// How many objects have been returned by the find() so far? // How many objects have been returned by the find() so far?
int _pos; int _pos;
// The query that prompted this ClientCursor. Only used for debugg ing. // The query that prompted this ClientCursor. Only used for debugg ing.
BSONObj _query; BSONObj _query;
// See the QueryOptions enum in dbclient.h // See the QueryOptions enum in dbclient.h
int _queryOptions; int _queryOptions;
skipping to change at line 366 skipping to change at line 209
// For chunks that are being migrated, there is a period of time wh en that chunks data is in // For chunks that are being migrated, there is a period of time wh en that chunks data is in
// two shards, the donor and the receiver one. That data is picked up by a cursor on the // two shards, the donor and the receiver one. That data is picked up by a cursor on the
// receiver side, even before the migration was decided. The Colle ctionMetadata allow one // receiver side, even before the migration was decided. The Colle ctionMetadata allow one
// to inquiry if any given document of the collection belongs indee d to this shard or if it // to inquiry if any given document of the collection belongs indee d to this shard or if it
// is coming from (or a vestige of) an ongoing migration. // is coming from (or a vestige of) an ongoing migration.
CollectionMetadataPtr _collMetadata; CollectionMetadataPtr _collMetadata;
// //
// The underlying execution machinery. // The underlying execution machinery.
// //
// The new world: a runner.
scoped_ptr<Runner> _runner; scoped_ptr<Runner> _runner;
//
// Cursor-only private data and methods. DEPRECATED.
//
// The old world: a cursor. DEPRECATED.
const shared_ptr<Cursor> _c;
/**
* call when cursor's location changes so that we can update the cu
rsorsbylocation map. if
* you are locked and internally iterating, only need to call when
you are ready to
* "unlock".
*/
void updateLocation();
void setLastLoc_inlock(DiskLoc);
Record* _recordForYield( RecordNeeds need );
DiskLoc _lastLoc; // use getter and setter n
ot this (important)
bool _doingDeletes; // when true we are the delete and aboutToDelet
e shouldn't manipulate us
// TODO: This will be moved into the runner.
ElapsedTracker _yieldSometimesTracker;
}; };
/** /**
* use this to assure we don't in the background time out cursor while it is under use. if you * use this to assure we don't in the background time out cursor while it is under use. if you
* are using noTimeout() already, there is no risk anyway. Further, th is mechanism guards * are using noTimeout() already, there is no risk anyway. Further, th is mechanism guards
* against two getMore requests on the same cursor executing at the sam e time - which might be * against two getMore requests on the same cursor executing at the sam e time - which might be
* bad. That should never happen, but if a client driver had a bug, it could (or perhaps some * bad. That should never happen, but if a client driver had a bug, it could (or perhaps some
* sort of attack situation). * sort of attack situation).
* Must have a read lock on the collection already
*/ */
class ClientCursorPin : boost::noncopyable { class ClientCursorPin : boost::noncopyable {
public: public:
ClientCursorPin( long long cursorid ); ClientCursorPin( const Collection* collection, long long cursorid ) ;
~ClientCursorPin(); ~ClientCursorPin();
// This just releases the pin, does not delete the underlying. // This just releases the pin, does not delete the underlying
// unless ownership has passed to us after kill
void release(); void release();
// Call this to delete the underlying ClientCursor. // Call this to delete the underlying ClientCursor.
void deleteUnderlying(); void deleteUnderlying();
ClientCursor *c() const; ClientCursor *c() const;
private: private:
CursorId _cursorid; ClientCursor* _cursor;
};
/** Assures safe and reliable cleanup of a ClientCursor. */
class ClientCursorHolder : boost::noncopyable {
public:
ClientCursorHolder( ClientCursor *c = 0 );
~ClientCursorHolder();
void reset( ClientCursor *c = 0 );
ClientCursor* get();
operator bool() { return _c; }
ClientCursor * operator-> ();
const ClientCursor * operator-> () const;
/** Release ownership of the ClientCursor. */
void release();
private:
ClientCursor *_c;
CursorId _id;
}; };
/** thread for timing out old cursors */ /** thread for timing out old cursors */
class ClientCursorMonitor : public BackgroundJob { class ClientCursorMonitor : public BackgroundJob {
public: public:
string name() const { return "ClientCursorMonitor"; } string name() const { return "ClientCursorMonitor"; }
void run(); void run();
}; };
struct ClientCursorYieldLock : boost::noncopyable {
explicit ClientCursorYieldLock( ptr<ClientCursor> cc );
~ClientCursorYieldLock();
/**
* @return if the cursor is still ok
* if it is, we also relock
*/
bool stillOk();
void relock();
private:
const bool _canYield;
ClientCursor::YieldData _data;
scoped_ptr<dbtempreleasecond> _unlock;
};
} // namespace mongo } // namespace mongo
// ClientCursor should only be used with auto_ptr because it needs to be
// release()ed after a yield if stillOk() returns false and these pointer t
ypes
// do not support releasing. This will prevent them from being used acciden
tally
// Instead of auto_ptr<>, which still requires some degree of manual manage
ment
// of this, consider using ClientCursor::Holder which handles ClientCursor'
s
// unusual self-deletion mechanics.
namespace boost{
template<> class scoped_ptr<mongo::ClientCursor> {};
template<> class shared_ptr<mongo::ClientCursor> {};
}
 End of changes. 37 change blocks. 
282 lines changed or deleted 51 lines changed or added


 cloner.h   cloner.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/client.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
struct CloneOptions; struct CloneOptions;
class DBClientBase; class DBClientBase;
class DBClientCursor; class DBClientCursor;
class Query; class Query;
class Cloner: boost::noncopyable { class Cloner: boost::noncopyable {
skipping to change at line 55 skipping to change at line 56
/** /**
* slaveOk - if true it is ok if the source of the data is !is master. * slaveOk - if true it is ok if the source of the data is !is master.
* useReplAuth - use the credentials we normally use as a replicat ion slave for the cloning * useReplAuth - use the credentials we normally use as a replicat ion slave for the cloning
* snapshot - use $snapshot mode for copying collections. note this should not be used * snapshot - use $snapshot mode for copying collections. note this should not be used
* when it isn't required, as it will be slower. fo r example, * when it isn't required, as it will be slower. fo r example,
* repairDatabase need not use it. * repairDatabase need not use it.
*/ */
void setConnection( DBClientBase *c ) { _conn.reset( c ); } void setConnection( DBClientBase *c ) { _conn.reset( c ); }
/** copy the entire database */ /** copy the entire database */
bool go(const char *masterHost, string& errmsg, const string& fromd bool go(Client::Context& ctx,
b, bool logForRepl, const string& masterHost, const CloneOptions& opts,
bool slaveOk, bool useReplAuth, bool snapshot, bool mayYiel set<string>* clonedColls,
d,
bool mayBeInterrupted, int *errCode = 0);
bool go(const char *masterHost, const CloneOptions& opts, set<strin
g>& clonedColls,
string& errmsg, int *errCode = 0); string& errmsg, int *errCode = 0);
bool go(const char *masterHost, const CloneOptions& opts, string& e
rrmsg, int *errCode = 0);
bool copyCollection(const string& ns, const BSONObj& query, string& errmsg, bool copyCollection(const string& ns, const BSONObj& query, string& errmsg,
bool mayYield, bool mayBeInterrupted, bool copy Indexes = true, bool mayYield, bool mayBeInterrupted, bool copy Indexes = true,
bool logForRepl = true ); bool logForRepl = true );
/** /**
* validate the cloner query was successful * validate the cloner query was successful
* @param cur Cursor the query was executed on * @param cur Cursor the query was executed on
* @param errCode out Error code encountered during the query * @param errCode out Error code encountered during the query
* @param errmsg out Error message encountered during the query * @param errmsg out Error message encountered during the query
*/ */
static bool validateQueryResults(const auto_ptr<DBClientCursor>& cu r, int32_t* errCode, static bool validateQueryResults(const auto_ptr<DBClientCursor>& cu r, int32_t* errCode,
string& errmsg); string& errmsg);
/** /**
* @param errmsg out - Error message (if encountered). * @param errmsg out - Error message (if encountered).
* @param slaveOk - if true it is ok if the source of the data
is !ismaster.
* @param useReplAuth - use the credentials we normally use as a re
plication slave for the
* cloning.
* @param snapshot - use $snapshot mode for copying collections.
note this should not be
* used when it isn't required, as it will be
slower. for example
* repairDatabase need not use it.
* @param errCode out - If provided, this will be set on error to t he server's error code. * @param errCode out - If provided, this will be set on error to t he server's error code.
* Currently this will only be set if there is an error in the initial * Currently this will only be set if there is an error in the initial
* system.namespaces query. * system.namespaces query.
*/ */
static bool cloneFrom(const char *masterHost, string& errmsg, const static bool cloneFrom(Client::Context& context,
string& fromdb, const string& masterHost, const CloneOptions&
bool logForReplication, bool slaveOk, bool us options,
eReplAuth,
bool snapshot, bool mayYield, bool mayBeInter
rupted,
int *errCode = 0);
static bool cloneFrom(const string& masterHost, const CloneOptions&
options,
string& errmsg, int* errCode = 0, string& errmsg, int* errCode = 0,
set<string>* clonedCollections = 0); set<string>* clonedCollections = 0);
/** /**
* Copy a collection (and indexes) from a remote host * Copy a collection (and indexes) from a remote host
*/ */
static bool copyCollectionFromRemote(const string& host, const stri ng& ns, string& errmsg); static bool copyCollectionFromRemote(const string& host, const stri ng& ns, string& errmsg);
private: private:
void copy(const char *from_ns, const char *to_ns, bool isindex, boo void copy(Client::Context& ctx,
l logForRepl, const char *from_ns, const char *to_ns, bool isindex, boo
l logForRepl,
bool masterSameProcess, bool slaveOk, bool mayYield, bool mayBeInterrupted, bool masterSameProcess, bool slaveOk, bool mayYield, bool mayBeInterrupted,
Query q); Query q);
struct Fun; struct Fun;
auto_ptr<DBClientBase> _conn; auto_ptr<DBClientBase> _conn;
}; };
struct CloneOptions { struct CloneOptions {
CloneOptions() { CloneOptions() {
 End of changes. 6 change blocks. 
33 lines changed or deleted 10 lines changed or added


 cluster_write.h   cluster_write.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/s/write_ops/batched_command_response.h"
namespace mongo { namespace mongo {
class ClusterWriterStats;
class BatchWriteExecStats;
class ClusterWriter {
public:
ClusterWriter( bool autoSplit, int timeoutMillis );
void write( const BatchedCommandRequest& request, BatchedCommandRes
ponse* response );
const ClusterWriterStats& getStats();
private:
void configWrite( const BatchedCommandRequest& request,
BatchedCommandResponse* response,
bool fsyncCheck );
void shardWrite( const BatchedCommandRequest& request,
BatchedCommandResponse* response );
bool _autoSplit;
int _timeoutMillis;
scoped_ptr<ClusterWriterStats> _stats;
};
class ClusterWriterStats {
public:
// Transfers ownership to the cluster write stats
void setShardStats( BatchWriteExecStats* _shardStats );
bool hasShardStats() const;
const BatchWriteExecStats& getShardStats() const;
// TODO: When we have ConfigCoordinator stats, put these here too.
private:
scoped_ptr<BatchWriteExecStats> _shardStats;
};
/**
* Note: response can NEVER be NULL.
*/
void clusterWrite( const BatchedCommandRequest& request, void clusterWrite( const BatchedCommandRequest& request,
BatchedCommandResponse* response, BatchedCommandResponse* response,
bool autoSplit ); bool autoSplit );
/**
* Note: response can be NULL if you don't care about the write statist
ics.
*/
Status clusterInsert( const std::string& ns,
const BSONObj& doc,
const BSONObj& writeConcern,
BatchedCommandResponse* response );
/**
* Note: response can be NULL if you don't care about the write statist
ics.
*/
Status clusterUpdate( const std::string& ns,
const BSONObj& query,
const BSONObj& update,
bool upsert,
bool multi,
const BSONObj& writeConcern,
BatchedCommandResponse* response );
/**
* Note: response can be NULL if you don't care about the write statist
ics.
*/
Status clusterDelete( const std::string& ns,
const BSONObj& query,
int limit,
const BSONObj& writeConcern,
BatchedCommandResponse* response );
/**
* Note: response can be NULL if you don't care about the write statist
ics.
*/
Status clusterCreateIndex( const std::string& ns,
BSONObj keys,
bool unique,
const BSONObj& writeConcern,
BatchedCommandResponse* response );
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
0 lines changed or deleted 90 lines changed or added


 collection.h   collection.h 
skipping to change at line 36 skipping to change at line 36
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/catalog/collection_cursor_cache.h"
#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/collection_scan_common.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/db/storage/record_store.h" #include "mongo/db/structure/record_store.h"
#include "mongo/db/structure/collection_info_cache.h" #include "mongo/db/catalog/collection_info_cache.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
namespace mongo { namespace mongo {
class Database; class Database;
class ExtentManager; class ExtentManager;
class NamespaceDetails; class NamespaceDetails;
class IndexCatalog; class IndexCatalog;
class CollectionIterator; class CollectionIterator;
class FlatIterator; class FlatIterator;
class CappedIterator; class CappedIterator;
class OpDebug; class OpDebug;
class DocWriter {
public:
virtual ~DocWriter() {}
virtual void writeDocument( char* buf ) const = 0;
virtual size_t documentSize() const = 0;
virtual bool addPadding() const { return true; }
};
struct CompactOptions {
CompactOptions() {
paddingMode = NONE;
validateDocuments = true;
paddingFactor = 1;
paddingBytes = 0;
}
// padding
enum PaddingMode {
PRESERVE, NONE, MANUAL
} paddingMode;
// only used if _paddingMode == MANUAL
double paddingFactor; // what to multiple document size by
int paddingBytes; // what to add to ducment size after multiplicati
on
unsigned computeRecordSize( unsigned recordSize ) const {
recordSize = static_cast<unsigned>( paddingFactor * recordSize
);
recordSize += paddingBytes;
return recordSize;
}
// other
bool validateDocuments;
std::string toString() const;
};
struct CompactStats {
CompactStats() {
corruptDocuments = 0;
}
long long corruptDocuments;
};
/** /**
* this is NOT safe through a yield right now * this is NOT safe through a yield right now
* not sure if it will be, or what yet * not sure if it will be, or what yet
*/ */
class Collection { class Collection {
public: public:
Collection( const StringData& fullNS, Collection( const StringData& fullNS,
NamespaceDetails* details, NamespaceDetails* details,
Database* database ); Database* database );
skipping to change at line 82 skipping to change at line 128
const NamespaceDetails* details() const { return _details; } const NamespaceDetails* details() const { return _details; }
CollectionInfoCache* infoCache() { return &_infoCache; } CollectionInfoCache* infoCache() { return &_infoCache; }
const CollectionInfoCache* infoCache() const { return &_infoCache; } const CollectionInfoCache* infoCache() const { return &_infoCache; }
const NamespaceString& ns() const { return _ns; } const NamespaceString& ns() const { return _ns; }
const IndexCatalog* getIndexCatalog() const { return &_indexCatalog ; } const IndexCatalog* getIndexCatalog() const { return &_indexCatalog ; }
IndexCatalog* getIndexCatalog() { return &_indexCatalog; } IndexCatalog* getIndexCatalog() { return &_indexCatalog; }
CollectionCursorCache* cursorCache() const { return &_cursorCache;
}
bool requiresIdIndex() const; bool requiresIdIndex() const;
BSONObj docFor( const DiskLoc& loc ); BSONObj docFor( const DiskLoc& loc );
// ---- things that should move to a CollectionAccessMethod like th ing // ---- things that should move to a CollectionAccessMethod like th ing
/**
* canonical to get all would be
* getIterator( DiskLoc(), false, CollectionScanParams::FORWARD )
*/
CollectionIterator* getIterator( const DiskLoc& start, bool tailabl e, CollectionIterator* getIterator( const DiskLoc& start, bool tailabl e,
const CollectionScanParams::Direct ion& dir) const; const CollectionScanParams::Direct ion& dir) const;
/**
* does a table scan to do a count
* this should only be used at a very low level
* does no yielding, indexes, etc...
*/
int64_t countTableScan( const MatchExpression* expression );
void deleteDocument( const DiskLoc& loc, void deleteDocument( const DiskLoc& loc,
bool cappedOK = false, bool cappedOK = false,
bool noWarn = false, bool noWarn = false,
BSONObj* deletedId = 0 ); BSONObj* deletedId = 0 );
/** /**
* this does NOT modify the doc before inserting * this does NOT modify the doc before inserting
* i.e. will not add an _id field for documents that are missing it * i.e. will not add an _id field for documents that are missing it
*/ */
StatusWith<DiskLoc> insertDocument( const BSONObj& doc, bool enforc eQuota ); StatusWith<DiskLoc> insertDocument( const BSONObj& doc, bool enforc eQuota );
StatusWith<DiskLoc> insertDocument( const DocWriter* doc, bool enfo
rceQuota );
/** /**
* updates the document @ oldLocation with newDoc * updates the document @ oldLocation with newDoc
* if the document fits in the old space, it is put there * if the document fits in the old space, it is put there
* if not, it is moved * if not, it is moved
* @return the post update location of the doc (may or may not be t he same as oldLocation) * @return the post update location of the doc (may or may not be t he same as oldLocation)
*/ */
StatusWith<DiskLoc> updateDocument( const DiskLoc& oldLocation, StatusWith<DiskLoc> updateDocument( const DiskLoc& oldLocation,
const BSONObj& newDoc, const BSONObj& newDoc,
bool enforceQuota, bool enforceQuota,
OpDebug* debug ); OpDebug* debug );
int64_t storageSize( int* numExtents = NULL, BSONArrayBuilder* exte ntInfo = NULL ) const; int64_t storageSize( int* numExtents = NULL, BSONArrayBuilder* exte ntInfo = NULL ) const;
// ----------- // -----------
StatusWith<CompactStats> compact( const CompactOptions* options );
// -----------
// this is temporary, moving up from DB for now // this is temporary, moving up from DB for now
// this will add a new extent the collection // this will add a new extent the collection
// the new extent will be returned // the new extent will be returned
// it will have been added to the linked list already // it will have been added to the linked list already
Extent* increaseStorageSize( int size, bool enforceQuota ); Extent* increaseStorageSize( int size, bool enforceQuota );
// //
// Stats // Stats
// //
bool isCapped() const;
uint64_t numRecords() const; uint64_t numRecords() const;
uint64_t dataSize() const; uint64_t dataSize() const;
int averageObjectSize() const { int averageObjectSize() const {
uint64_t n = numRecords(); uint64_t n = numRecords();
if ( n == 0 ) if ( n == 0 )
return 5; return 5;
return static_cast<int>( dataSize() / n ); return static_cast<int>( dataSize() / n );
} }
private: private:
/**
* same semantics as insertDocument, but doesn't do:
* - some user error checks
* - adjust padding
*/
StatusWith<DiskLoc> _insertDocument( const BSONObj& doc, bool enfor
ceQuota );
void _compactExtent(const DiskLoc diskloc, int extentNumber,
vector<IndexAccessMethod*>& indexesToInsertTo,
const CompactOptions* compactOptions, CompactSt
ats* stats );
// @return 0 for inf., otherwise a number of files // @return 0 for inf., otherwise a number of files
int largestFileNumberInQuota() const; int largestFileNumberInQuota() const;
ExtentManager* getExtentManager(); ExtentManager* getExtentManager();
const ExtentManager* getExtentManager() const; const ExtentManager* getExtentManager() const;
int _magic; int _magic;
NamespaceString _ns; NamespaceString _ns;
NamespaceDetails* _details; NamespaceDetails* _details;
Database* _database; Database* _database;
RecordStore _recordStore; scoped_ptr<RecordStore> _recordStore;
CollectionInfoCache _infoCache; CollectionInfoCache _infoCache;
IndexCatalog _indexCatalog; IndexCatalog _indexCatalog;
// this is mutable because read only users of the Collection class
// use it keep state. This seems valid as const correctness of Col
lection
// should be about the data.
mutable CollectionCursorCache _cursorCache;
friend class Database; friend class Database;
friend class FlatIterator; friend class FlatIterator;
friend class CappedIterator; friend class CappedIterator;
friend class IndexCatalog; friend class IndexCatalog;
}; };
} }
 End of changes. 12 change blocks. 
4 lines changed or deleted 92 lines changed or added


 collection_info_cache.h   collection_info_cache.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp>
#include "mongo/db/index_set.h" #include "mongo/db/index_set.h"
#include "mongo/db/querypattern.h" #include "mongo/db/query/query_settings.h"
#include "mongo/db/query/plan_cache.h"
namespace mongo { namespace mongo {
class Collection; class Collection;
/** /**
* this is for storing things that you want to cache about a single col lection * this is for storing things that you want to cache about a single col lection
* life cycle is managed for you from inside Collection * life cycle is managed for you from inside Collection
*/ */
class CollectionInfoCache { class CollectionInfoCache {
public: public:
CollectionInfoCache( Collection* collection ); CollectionInfoCache( Collection* collection );
/* /*
* resets entire cache state * resets entire cache state
*/ */
void reset(); void reset();
//
// New Query Execution
//
/**
* Get the PlanCache for this collection.
*/
PlanCache* getPlanCache() const;
/**
* Get the QuerySettings for this collection.
*/
QuerySettings* getQuerySettings() const;
// ------------------- // -------------------
/* get set of index keys for this namespace. handy to quickly chec k if a given /* get set of index keys for this namespace. handy to quickly chec k if a given
field is indexed (Note it might be a secondary component of a co mpound index.) field is indexed (Note it might be a secondary component of a co mpound index.)
*/ */
const IndexPathSet& indexKeys() { const IndexPathSet& indexKeys() {
if ( !_keysComputed ) if ( !_keysComputed )
computeIndexKeys(); computeIndexKeys();
return _indexedPaths; return _indexedPaths;
} }
// --------------------- // ---------------------
/**
* Called when an index is added to this collection.
*/
void addedIndex() { reset(); } void addedIndex() { reset(); }
void clearQueryCache(); void clearQueryCache();
/* you must notify the cache if you are doing writes, as query plan utility will change */ /* you must notify the cache if you are doing writes, as query plan utility will change */
void notifyOfWriteOp(); void notifyOfWriteOp();
CachedQueryPlan cachedQueryPlanForPattern( const QueryPattern &patt
ern );
void registerCachedQueryPlanForPattern( const QueryPattern &pattern
,
const CachedQueryPlan &cach
edQueryPlan );
private: private:
Collection* _collection; // not owned Collection* _collection; // not owned
// --- index keys cache // --- index keys cache
bool _keysComputed; bool _keysComputed;
IndexPathSet _indexedPaths; IndexPathSet _indexedPaths;
void computeIndexKeys(); // A cache for query plans.
boost::scoped_ptr<PlanCache> _planCache;
// --- for old query optimizer
void _clearQueryCache_inlock();
mutex _qcCacheMutex; // Query settings.
int _qcWriteCount; // Includes index filters.
std::map<QueryPattern,CachedQueryPlan> _qcCache; boost::scoped_ptr<QuerySettings> _querySettings;
void computeIndexKeys();
}; };
} } // namespace mongo
 End of changes. 9 change blocks. 
17 lines changed or deleted 27 lines changed or added


 collection_scan.h   collection_scan.h 
skipping to change at line 49 skipping to change at line 49
class WorkingSet; class WorkingSet;
/** /**
* Scans over a collection, starting at the DiskLoc provided in params and continuing until * Scans over a collection, starting at the DiskLoc provided in params and continuing until
* there are no more records in the collection. * there are no more records in the collection.
* *
* Preconditions: Valid DiskLoc. * Preconditions: Valid DiskLoc.
*/ */
class CollectionScan : public PlanStage { class CollectionScan : public PlanStage {
public: public:
CollectionScan(const CollectionScanParams& params, WorkingSet* work CollectionScan(const CollectionScanParams& params,
ingSet, WorkingSet* workingSet,
const MatchExpression* filter); const MatchExpression* filter);
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
// WorkingSet is not owned by us. // WorkingSet is not owned by us.
WorkingSet* _workingSet; WorkingSet* _workingSet;
// The filter is not owned by us. // The filter is not owned by us.
 End of changes. 2 change blocks. 
3 lines changed or deleted 3 lines changed or added


 collection_scan_common.h   collection_scan_common.h 
skipping to change at line 43 skipping to change at line 43
namespace mongo { namespace mongo {
struct CollectionScanParams { struct CollectionScanParams {
enum Direction { enum Direction {
FORWARD = 1, FORWARD = 1,
BACKWARD = -1, BACKWARD = -1,
}; };
CollectionScanParams() : start(DiskLoc()), CollectionScanParams() : start(DiskLoc()),
direction(FORWARD), direction(FORWARD),
tailable(false) { } tailable(false),
maxScan(0) { }
// What collection? // What collection?
string ns; string ns;
// isNull by default. If you specify any value for this, you're re sponsible for the DiskLoc // isNull by default. If you specify any value for this, you're re sponsible for the DiskLoc
// not being invalidated before the first call to work(...). // not being invalidated before the first call to work(...).
DiskLoc start; DiskLoc start;
Direction direction; Direction direction;
// Do we want the scan to be 'tailable'? Only meaningful if the co llection is capped. // Do we want the scan to be 'tailable'? Only meaningful if the co llection is capped.
bool tailable; bool tailable;
// If non-zero, how many documents will we look at?
size_t maxScan;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 5 lines changed or added


 commands.h   commands.h 
skipping to change at line 230 skipping to change at line 230
BSONObj& cmdObj, BSONObj& cmdObj,
BSONObjBuilder& result, BSONObjBuilder& result,
bool fromRepl ); bool fromRepl );
// Helper for setting errmsg and ok field in command result object. // Helper for setting errmsg and ok field in command result object.
static void appendCommandStatus(BSONObjBuilder& result, bool ok, co nst std::string& errmsg); static void appendCommandStatus(BSONObjBuilder& result, bool ok, co nst std::string& errmsg);
// @return s.isOK() // @return s.isOK()
static bool appendCommandStatus(BSONObjBuilder& result, const Statu s& status); static bool appendCommandStatus(BSONObjBuilder& result, const Statu s& status);
// Converts "result" into a Status object. The input is expected t
o be the object returned
// by running a command. Returns ErrorCodes::CommandResultSchemaVi
olation if "result" does
// not look like the result of a command.
static Status getStatusFromCommandResult(const BSONObj& result);
// Set by command line. Controls whether or not testing-only comma nds should be available. // Set by command line. Controls whether or not testing-only comma nds should be available.
static int testCommandsEnabled; static int testCommandsEnabled;
private: private:
/** /**
* Checks to see if the client is authorized to run the given comma nd with the given * Checks to see if the client is authorized to run the given comma nd with the given
* parameters on the given named database. * parameters on the given named database.
* *
* fromRepl is true if this command is running as part of oplog app lication, which for * fromRepl is true if this command is running as part of oplog app lication, which for
* historic reasons has slightly different authorization semantics. TODO(schwerin): Check * historic reasons has slightly different authorization semantics. TODO(schwerin): Check
 End of changes. 1 change blocks. 
0 lines changed or deleted 7 lines changed or added


 compiler.h   compiler.h 
// @file mongo/platform/compiler.h // Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* #ifndef V8_COMPILER_H_
* Copyright 2012 10gen Inc. #define V8_COMPILER_H_
*
* Licensed under the Apache License, Version 2.0 (the "License"); #include "allocation.h"
* you may not use this file except in compliance with the License. #include "ast.h"
* You may obtain a copy of the License at #include "zone.h"
*
* http://www.apache.org/licenses/LICENSE-2.0 namespace v8 {
* namespace internal {
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, class ScriptDataImpl;
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and // CompilationInfo encapsulates some information known at compile time. It
* limitations under the License. // is constructed based on the resources available at compile-time.
*/ class CompilationInfo {
public:
#pragma once CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
/** CompilationInfo(Handle<JSFunction> closure, Zone* zone);
* Include "mongo/platform/compiler.h" to get compiler-specific macro defin
itions and utilities. virtual ~CompilationInfo();
*/
Isolate* isolate() {
#if defined(_MSC_VER) ASSERT(Isolate::Current() == isolate_);
#include "mongo/platform/compiler_msvc.h" return isolate_;
#elif defined(__GNUC__) }
#include "mongo/platform/compiler_gcc.h" Zone* zone() {
#else return zone_;
#error "Unsupported compiler family" }
bool is_lazy() const { return IsLazy::decode(flags_); }
bool is_eval() const { return IsEval::decode(flags_); }
bool is_global() const { return IsGlobal::decode(flags_); }
bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
bool is_extended_mode() const { return language_mode() == EXTENDED_MODE;
}
LanguageMode language_mode() const {
return LanguageModeField::decode(flags_);
}
bool is_in_loop() const { return IsInLoop::decode(flags_); }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
Scope* global_scope() const { return global_scope_; }
Handle<Code> code() const { return code_; }
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> calling_context() const { return calling_context_; }
int osr_ast_id() const { return osr_ast_id_; }
void MarkAsEval() {
ASSERT(!is_lazy());
flags_ |= IsEval::encode(true);
}
void MarkAsGlobal() {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
void SetLanguageMode(LanguageMode language_mode) {
ASSERT(this->language_mode() == CLASSIC_MODE ||
this->language_mode() == language_mode ||
language_mode == EXTENDED_MODE);
flags_ = LanguageModeField::update(flags_, language_mode);
}
void MarkAsInLoop() {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
}
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
bool is_native() const {
return IsNative::decode(flags_);
}
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
}
void SetScope(Scope* scope) {
ASSERT(scope_ == NULL);
scope_ = scope;
}
void SetGlobalScope(Scope* global_scope) {
ASSERT(global_scope_ == NULL);
global_scope_ = global_scope;
}
void SetCode(Handle<Code> code) { code_ = code; }
void SetExtension(v8::Extension* extension) {
ASSERT(!is_lazy());
extension_ = extension;
}
void SetPreParseData(ScriptDataImpl* pre_parse_data) {
ASSERT(!is_lazy());
pre_parse_data_ = pre_parse_data;
}
void SetCallingContext(Handle<Context> context) {
ASSERT(is_eval());
calling_context_ = context;
}
void SetOsrAstId(int osr_ast_id) {
ASSERT(IsOptimizing());
osr_ast_id_ = osr_ast_id;
}
void MarkCompilingForDebugging(Handle<Code> current_code) {
ASSERT(mode_ != OPTIMIZE);
ASSERT(current_code->kind() == Code::FUNCTION);
flags_ |= IsCompilingForDebugging::encode(true);
if (current_code->is_compiled_optimizable()) {
EnableDeoptimizationSupport();
} else {
mode_ = CompilationInfo::NONOPT;
}
}
bool IsCompilingForDebugging() {
return IsCompilingForDebugging::decode(flags_);
}
bool has_global_object() const {
return !closure().is_null() && (closure()->context()->global() != NULL)
;
}
GlobalObject* global_object() const {
return has_global_object() ? closure()->context()->global() : NULL;
}
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
void SetOptimizing(int osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
}
void DisableOptimization();
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return SupportsDeoptimization::decode(flags_);
}
void EnableDeoptimizationSupport() {
ASSERT(IsOptimizable());
flags_ |= SupportsDeoptimization::encode(true);
}
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
// Disable all optimization attempts of this info for the rest of the
// current compilation pipeline.
void AbortOptimization();
void set_deferred_handles(DeferredHandles* deferred_handles) {
ASSERT(deferred_handles_ == NULL);
deferred_handles_ = deferred_handles;
}
void SaveHandles() {
SaveHandle(&closure_);
SaveHandle(&shared_info_);
SaveHandle(&calling_context_);
SaveHandle(&script_);
}
private:
Isolate* isolate_;
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailout
s.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
// NONOPT is generated by the full codegen and is not prepared for
// recompilation/bailouts. These functions are never recompiled.
enum Mode {
BASE,
OPTIMIZE,
NONOPT
};
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
ASSERT(!script_.is_null());
if (script_->type()->value() == Script::TYPE_NATIVE) {
MarkAsNative();
}
if (!shared_info_.is_null()) {
ASSERT(language_mode() == CLASSIC_MODE);
SetLanguageMode(shared_info_->language_mode());
}
}
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
mode_ = mode;
}
// Flags using template class BitField<type, start, length>. All are
// false by default.
//
// Compilation is either eager or lazy.
class IsLazy: public BitField<bool, 0, 1> {};
// Flags that can be set for eager compilation.
class IsEval: public BitField<bool, 1, 1> {};
class IsGlobal: public BitField<bool, 2, 1> {};
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
// Is this a function from our natives.
class IsNative: public BitField<bool, 6, 1> {};
// Is this code being compiled with support for deoptimization..
class SupportsDeoptimization: public BitField<bool, 7, 1> {};
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
unsigned flags_;
// Fields filled in by the compilation pipeline.
// AST filled in by the parser.
FunctionLiteral* function_;
// The scope of the function literal as a convenience. Set to indicate
// that scopes have been analyzed.
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
// The compiled code.
Handle<Code> code_;
// Possible initial inputs to the compilation process.
Handle<JSFunction> closure_;
Handle<SharedFunctionInfo> shared_info_;
Handle<Script> script_;
// Fields possibly needed for eager compilation, NULL by default.
v8::Extension* extension_;
ScriptDataImpl* pre_parse_data_;
// The context of the caller is needed for eval code, and will be a null
// handle otherwise.
Handle<Context> calling_context_;
// Compilation mode flag and whether deoptimization is allowed.
Mode mode_;
int osr_ast_id_;
// The zone from which the compilation pipeline working on this
// CompilationInfo allocates.
Zone* zone_;
DeferredHandles* deferred_handles_;
template<typename T>
void SaveHandle(Handle<T> *object) {
if (!object->is_null()) {
Handle<T> handle(*(*object));
*object = handle;
}
}
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
// Exactly like a CompilationInfo, except also creates and enters a
// Zone on construction and deallocates it on exit.
class CompilationInfoWithZone: public CompilationInfo {
public:
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_),
zone_(script->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
: CompilationInfo(shared_info, &zone_),
zone_(shared_info->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
explicit CompilationInfoWithZone(Handle<JSFunction> closure)
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
private:
Zone zone_;
ZoneScope zone_scope_;
};
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
class CompilationHandleScope BASE_EMBEDDED {
public:
explicit CompilationHandleScope(CompilationInfo* info)
: deferred_(info->isolate()), info_(info) {}
~CompilationHandleScope() {
info_->set_deferred_handles(deferred_.Detach());
}
private:
DeferredHandleScope deferred_;
CompilationInfo* info_;
};
class HGraph;
class HGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
// Crankshaft and keeps track of its state. The three phases
// CreateGraph, OptimizeGraph and GenerateAndInstallCode can either
// fail, bail-out to the full code generator or succeed. Apart from
// their return value, the status of the phase last run can be checked
// using last_status().
class OptimizingCompiler: public ZoneObject {
public:
explicit OptimizingCompiler(CompilationInfo* info)
: info_(info),
oracle_(NULL),
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
time_taken_to_create_graph_(0),
time_taken_to_optimize_(0),
time_taken_to_codegen_(0),
last_status_(FAILED) { }
enum Status {
FAILED, BAILED_OUT, SUCCEEDED
};
MUST_USE_RESULT Status CreateGraph();
MUST_USE_RESULT Status OptimizeGraph();
MUST_USE_RESULT Status GenerateAndInstallCode();
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
MUST_USE_RESULT Status AbortOptimization() {
info_->AbortOptimization();
info_->shared_info()->DisableOptimization();
return SetLastStatus(BAILED_OUT);
}
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
HGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
int64_t time_taken_to_optimize_;
int64_t time_taken_to_codegen_;
Status last_status_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
last_status_ = status;
return last_status_;
}
void RecordOptimizationStats();
struct Timer {
Timer(OptimizingCompiler* compiler, int64_t* location)
: compiler_(compiler),
start_(OS::Ticks()),
location_(location) { }
~Timer() {
*location_ += (OS::Ticks() - start_);
}
OptimizingCompiler* compiler_;
int64_t start_;
int64_t* location_;
};
};
// The V8 compiler
//
// General strategy: Source code is translated into an anonymous function w
/o
// parameters which then can be executed. If the source code contains other
// functions, they will be compiled and allocated as part of the compilatio
n
// of the source code.
// Please note this interface returns shared function infos. This means yo
u
// need to call Factory::NewFunctionFromSharedFunctionInfo before you have
a
// real function with a context.
class Compiler : public AllStatic {
public:
// Default maximum number of function optimization attempts before we
// give up.
static const int kDefaultMaxOptCount = 10;
static const int kMaxInliningLevels = 3;
// Call count before primitive functions trigger their own optimization.
static const int kCallsUntilPrimitiveOpt = 200;
// All routines return a SharedFunctionInfo.
// If an error occurs an exception is raised and the return handle
// contains NULL.
// Compile a String source within a context.
static Handle<SharedFunctionInfo> Compile(Handle<String> source,
Handle<Object> script_name,
int line_offset,
int column_offset,
v8::Extension* extension,
ScriptDataImpl* pre_data,
Handle<Object> script_data,
NativesFlag is_natives_code);
// Compile a String source within a context for Eval.
static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
LanguageMode language_mode,
int scope_position);
// Compile from function info (used for lazy compilation). Returns true o
n
// success and false if the compilation resulted in a stack overflow.
static bool CompileLazy(CompilationInfo* info);
static void RecompileParallel(Handle<JSFunction> function);
// Compile a shared function info object (the function is possibly lazily
// compiled).
static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node
,
Handle<Script> script
);
// Set the function info for a newly compiled function.
static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
FunctionLiteral* lit,
bool is_toplevel,
Handle<Script> script);
static void InstallOptimizedCode(OptimizingCompiler* info);
#ifdef ENABLE_DEBUGGER_SUPPORT
static bool MakeCodeForLiveEdit(CompilationInfo* info);
#endif #endif
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CompilationInfo* info,
Handle<SharedFunctionInfo> shared);
};
} } // namespace v8::internal
#endif // V8_COMPILER_H_
 End of changes. 3 change blocks. 
30 lines changed or deleted 471 lines changed or added


 compiler_gcc.h   compiler_gcc.h 
// @file mongo/platform/compiler_gcc.h
/* /*
* Copyright 2012 10gen Inc. * Copyright 2012 10gen Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once
/** /**
* Compiler-specific implementations for gcc. * Compiler-specific implementations for gcc.
*/
/**
* Use this to decorate the declaration of functions that will not return.
* *
* Correct: * Refer to mongo/platform/compiler.h for usage documentation.
* MONGO_COMPILER_NORETURN void myAbortFunction();
*/ */
#pragma once
#define MONGO_COMPILER_NORETURN __attribute__((__noreturn__)) #define MONGO_COMPILER_NORETURN __attribute__((__noreturn__))
/**
* Use this to decorate unused variable declarations.
*/
#define MONGO_COMPILER_VARIABLE_UNUSED __attribute__((__unused__)) #define MONGO_COMPILER_VARIABLE_UNUSED __attribute__((__unused__))
/**
* Use this on a type declaration to specify its minimum alignment.
*
* Alignments should probably always be powers of two. Also, note that mos
t allocators will not be
* able to guarantee better than 16- or 32-byte alignment.
*
* Correct:
* class MONGO_COMPILER_ALIGN_TYPE(16) MyClass {...};
*
* Incorrect:
* MONGO_COMPILER_ALIGN_TYPE(16) class MyClass {...};
* class MyClass{...} MONGO_COMPILER_ALIGN_TYPE(16);
*/
#define MONGO_COMPILER_ALIGN_TYPE(ALIGNMENT) __attribute__(( __aligned__(AL IGNMENT) )) #define MONGO_COMPILER_ALIGN_TYPE(ALIGNMENT) __attribute__(( __aligned__(AL IGNMENT) ))
/**
* Use this on a global variable or structure field declaration to specify
that it must be allocated
* at a location that is aligned to a multiple of "ALIGNMENT" bytes.
*
* Note that most allocators will not allow heap allocated alignments that
are better than 16- or
* 32-byte aligned. Stack allocators may only guarantee up to the natural
word length worth of
* alignment.
*
* Correct:
* class MyClass {
* MONGO_COMPILER_ALIGN_VARIABLE(8) char a;
* };
*
* MONGO_COMPILER_ALIGN_VARIABLE(8) class MyClass {...} singletonInstanc
e;
*
* Incorrect:
* int MONGO_COMPILER_ALIGN_VARIABLE(16) a, b;
*/
#define MONGO_COMPILER_ALIGN_VARIABLE(ALIGNMENT) __attribute__(( __aligned_ _(ALIGNMENT) )) #define MONGO_COMPILER_ALIGN_VARIABLE(ALIGNMENT) __attribute__(( __aligned_ _(ALIGNMENT) ))
// NOTE(schwerin): These visibility and calling-convention macro definition
s assume we're not using
// GCC/CLANG to target native Windows. If/when we decide to do such targeti
ng, we'll need to change
// compiler flags on Windows to make sure we use an appropriate calling con
vention, and configure
// MONGO_COMPILER_API_EXPORT, MONGO_COMPILER_API_IMPORT and MONGO_COMPILER_
API_CALLING_CONVENTION
// correctly. I believe "correctly" is the following:
//
// #ifdef _WIN32
// #define MONGO_COMIPLER_API_EXPORT __attribute__(( __dllexport__ ))
// #define MONGO_COMPILER_API_IMPORT __attribute__(( __dllimport__ ))
// #ifdef _M_IX86
// #define MONGO_COMPILER_API_CALLING_CONVENTION __attribute__((__cdecl__))
// #else
// #define MONGO_COMPILER_API_CALLING_CONVENTION
// #endif
// #else ... fall through to the definitions below.
#define MONGO_COMPILER_API_EXPORT __attribute__(( __visibility__("default")
))
#define MONGO_COMPILER_API_IMPORT
#define MONGO_COMPILER_API_CALLING_CONVENTION
 End of changes. 9 change blocks. 
49 lines changed or deleted 4 lines changed or added


 compiler_msvc.h   compiler_msvc.h 
// @file mongo/platform/compiler_msvc.h
/* /*
* Copyright 2012 10gen Inc. * Copyright 2012 10gen Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * You may obtain a copy of the License at
* *
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once
/** /**
* Compiler-specific implementations for MSVC. * Compiler-specific implementations for MSVC.
*/
/**
* Use this to decorate the declaration of functions that will not return.
* *
* Correct: * Refer to mongo/platform/compiler.h for usage documentation.
* MONGO_COMPILER_NORETURN void myAbortFunction();
*/ */
#pragma once
#define MONGO_COMPILER_NORETURN __declspec(noreturn) #define MONGO_COMPILER_NORETURN __declspec(noreturn)
/**
* Use this to decorate unused variable declarations.
*/
#define MONGO_COMPILER_VARIABLE_UNUSED #define MONGO_COMPILER_VARIABLE_UNUSED
/**
* Use this on a type declaration to specify its minimum alignment.
*
* Alignments should probably always be powers of two. Also, note that mos
t allocators will not be
* able to guarantee better than 16- or 32-byte alignment.
*
* Correct:
* class MONGO_COMPILER_ALIGN_TYPE(16) MyClass {...};
*
* Incorrect:
* MONGO_COMPILER_ALIGN_TYPE(16) class MyClass {...};
* class MyClass{...} MONGO_COMPILER_ALIGN_TYPE(16);
*/
#define MONGO_COMPILER_ALIGN_TYPE(ALIGNMENT) __declspec( align( ALIGNMENT ) ) #define MONGO_COMPILER_ALIGN_TYPE(ALIGNMENT) __declspec( align( ALIGNMENT ) )
/**
* Use this on a global variable or structure field declaration to specify
that it must be allocated
* at a location that is aligned to a multiple of "ALIGNMENT" bytes.
*
* Note that most allocators will not allow heap allocated alignments that
are better than 16- or
* 32-byte aligned. Stack allocators may only guarantee up to the natural
word length worth of
* alignment.
*
* Correct:
* class MyClass {
* MONGO_COMPILER_ALIGN_VARIABLE(8) char a;
* };
*
* MONGO_COMPILER_ALIGN_VARIABLE class MyClass {...} singletonInstance;
*
* Incorrect:
* int MONGO_COMPILER_ALIGN_VARIABLE(16) a, b;
*/
#define MONGO_COMPILER_ALIGN_VARIABLE(ALIGNMENT) __declspec( align( ALIGNME NT ) ) #define MONGO_COMPILER_ALIGN_VARIABLE(ALIGNMENT) __declspec( align( ALIGNME NT ) )
#define MONGO_COMPILER_API_EXPORT __declspec(dllexport)
#define MONGO_COMPILER_API_IMPORT __declspec(dllimport)
#ifdef _M_IX86
// 32-bit x86 supports multiple of calling conventions. We build supportin
g the cdecl convention
// (most common). By labeling our exported and imported functions as such,
we do a small favor to
// 32-bit Windows developers.
#define MONGO_COMPILER_API_CALLING_CONVENTION __cdecl
#else
#define MONGO_COMPILER_API_CALLING_CONVENTION
#endif
 End of changes. 9 change blocks. 
48 lines changed or deleted 4 lines changed or added


 config.h   config.h 
skipping to change at line 86 skipping to change at line 86
verify(_cm); // this has to be already sharded verify(_cm); // this has to be already sharded
_cm.reset( cm ); _cm.reset( cm );
} }
void shard( ChunkManager* cm ); void shard( ChunkManager* cm );
void unshard(); void unshard();
bool isDirty() const { return _dirty; } bool isDirty() const { return _dirty; }
bool wasDropped() const { return _dropped; } bool wasDropped() const { return _dropped; }
void save( const string& ns , DBClientBase* conn ); void save( const string& ns );
bool unique() const { return _unqiue; } bool unique() const { return _unqiue; }
BSONObj key() const { return _key; } BSONObj key() const { return _key; }
private: private:
BSONObj _key; BSONObj _key;
bool _unqiue; bool _unqiue;
ChunkManagerPtr _cm; ChunkManagerPtr _cm;
bool _dirty; bool _dirty;
bool _dropped; bool _dropped;
skipping to change at line 261 skipping to change at line 261
* @param msg additional info about the metadata change * @param msg additional info about the metadata change
* *
* This call is guaranteed never to throw. * This call is guaranteed never to throw.
*/ */
void logChange( const string& what , const string& ns , const BSONO bj& detail = BSONObj() ); void logChange( const string& what , const string& ns , const BSONO bj& detail = BSONObj() );
ConnectionString getConnectionString() const { ConnectionString getConnectionString() const {
return ConnectionString( _primary.getConnString() , ConnectionS tring::SYNC ); return ConnectionString( _primary.getConnString() , ConnectionS tring::SYNC );
} }
void replicaSetChange( const ReplicaSetMonitor * monitor ); void replicaSetChange(const string& setName, const string& newConne ctionString);
static int VERSION; static int VERSION;
/** /**
* check to see if all config servers have the same state * check to see if all config servers have the same state
* will try tries time to make sure not catching in a bad state * will try tries time to make sure not catching in a bad state
*/ */
bool checkConfigServersConsistent( string& errmsg , int tries = 4 ) const; bool checkConfigServersConsistent( string& errmsg , int tries = 4 ) const;
private: private:
 End of changes. 2 change blocks. 
2 lines changed or deleted 2 lines changed or added


 config_server_fixture.h   config_server_fixture.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/instance.h" #include "mongo/db/instance.h"
#include "mongo/db/wire_version.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/unittest/unittest.h" #include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h" #include "mongo/util/assert_util.h"
namespace mongo { namespace mongo {
class CustomDirectClient: public DBDirectClient { class CustomDirectClient: public DBDirectClient {
public: public:
CustomDirectClient() {
setWireVersions(minWireVersion, maxWireVersion);
}
virtual ConnectionString::ConnectionType type() const { virtual ConnectionString::ConnectionType type() const {
return ConnectionString::CUSTOM; return ConnectionString::CUSTOM;
} }
virtual bool recv( Message& m ) {
// This is tailored to act as a dummy response for write comman
ds.
BufBuilder bb;
bb.skip(sizeof(QueryResult));
BSONObj cmdResult(BSON("ok" << 1));
bb.appendBuf(cmdResult.objdata(), cmdResult.objsize());
QueryResult* qr = reinterpret_cast<QueryResult*>(bb.buf());
bb.decouple();
qr->setResultFlagsToOk();
qr->len = bb.len();
qr->setOperation(opReply);
qr->cursorId = 0;
qr->startingFrom = 0;
qr->nReturned = 1;
m.setData(qr, true);
return true;
}
}; };
class CustomConnectHook: public ConnectionString::ConnectionHook { class CustomConnectHook: public ConnectionString::ConnectionHook {
public: public:
virtual DBClientBase* connect(const ConnectionString& connStr, virtual DBClientBase* connect(const ConnectionString& connStr,
string& errmsg, string& errmsg,
double socketTimeout) double socketTimeout)
{ {
// Note - must be new, since it gets owned elsewhere // Note - must be new, since it gets owned elsewhere
return new CustomDirectClient(); return new CustomDirectClient();
 End of changes. 4 change blocks. 
0 lines changed or deleted 48 lines changed or added


 config_upgrade_helpers.h   config_upgrade_helpers.h 
skipping to change at line 65 skipping to change at line 65
* Verifies that two collections hash to the same values. * Verifies that two collections hash to the same values.
* *
* @return OK if they do, RemoteValidationError if they do not, and an error Status if * @return OK if they do, RemoteValidationError if they do not, and an error Status if
* anything else went wrong. * anything else went wrong.
*/ */
Status checkHashesTheSame(const ConnectionString& configLoc, Status checkHashesTheSame(const ConnectionString& configLoc,
const std::string& nsA, const std::string& nsA,
const std::string& nsB); const std::string& nsB);
/** /**
* Copies a collection (which must not change during this call) to anot
her namespace. All
* indexes will also be copied and constructed prior to the data being
loaded.
*
* @return OK if copy was successful, RemoteValidationError if document
s changed during the
* copy and an error Status if anything else went wrong.
*/
Status copyFrozenCollection(const ConnectionString& configLoc,
const std::string& fromNS,
const std::string& toNS);
/**
* Atomically overwrites a collection with another collection (only ato mic if configLoc is a * Atomically overwrites a collection with another collection (only ato mic if configLoc is a
* single server). * single server).
* *
* @return OK if overwrite was successful, and an error Status if anyth ing else went wrong. * @return OK if overwrite was successful, and an error Status if anyth ing else went wrong.
*/ */
Status overwriteCollection(const ConnectionString& configLoc, Status overwriteCollection(const ConnectionString& configLoc,
const std::string& fromNS, const std::string& fromNS,
const std::string& overwriteNS); const std::string& overwriteNS);
/** /**
 End of changes. 1 change blocks. 
14 lines changed or deleted 0 lines changed or added


 connpool.h   connpool.h 
skipping to change at line 22 skipping to change at line 22
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <stack> #include <stack>
#include "mongo/util/background.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
#include "mongo/util/background.h"
namespace mongo { namespace mongo {
class Shard; class Shard;
class DBConnectionPool; class DBConnectionPool;
/** /**
* not thread safe * not thread safe
* thread safety is handled by DBConnectionPool * thread safety is handled by DBConnectionPool
*/ */
class PoolForHost { class MONGO_CLIENT_API PoolForHost {
public: public:
PoolForHost() PoolForHost()
: _created(0), _minValidCreationTimeMicroSec(0) {} : _created(0), _minValidCreationTimeMicroSec(0) {}
PoolForHost( const PoolForHost& other ) { PoolForHost( const PoolForHost& other ) {
verify(other._pool.size() == 0); verify(other._pool.size() == 0);
_created = other._created; _created = other._created;
_minValidCreationTimeMicroSec = other._minValidCreationTimeMicr oSec; _minValidCreationTimeMicroSec = other._minValidCreationTimeMicr oSec;
verify( _created == 0 ); verify( _created == 0 );
} }
skipping to change at line 133 skipping to change at line 134
Support for authenticated connections requires some adjustments: pl ease Support for authenticated connections requires some adjustments: pl ease
request... request...
Usage: Usage:
{ {
ScopedDbConnection c("myserver"); ScopedDbConnection c("myserver");
c.conn()... c.conn()...
} }
*/ */
class DBConnectionPool : public PeriodicTask { class MONGO_CLIENT_API DBConnectionPool : public PeriodicTask {
public: public:
DBConnectionPool(); DBConnectionPool();
~DBConnectionPool(); ~DBConnectionPool();
/** right now just controls some asserts. defaults to "dbconnectio npool" */ /** right now just controls some asserts. defaults to "dbconnectio npool" */
void setName( const string& name ) { _name = name; } void setName( const string& name ) { _name = name; }
void onCreate( DBClientBase * conn ); void onCreate( DBClientBase * conn );
skipping to change at line 214 skipping to change at line 215
string _name; string _name;
PoolMap _pools; PoolMap _pools;
// pointers owned by me, right now they leak on shutdown // pointers owned by me, right now they leak on shutdown
// _hooks itself also leaks because it creates a shutdown race cond ition // _hooks itself also leaks because it creates a shutdown race cond ition
list<DBConnectionHook*> * _hooks; list<DBConnectionHook*> * _hooks;
}; };
extern DBConnectionPool pool; extern MONGO_CLIENT_API DBConnectionPool pool;
class AScopedConnection : boost::noncopyable { class MONGO_CLIENT_API AScopedConnection : boost::noncopyable {
public: public:
AScopedConnection() { _numConnections++; } AScopedConnection() { _numConnections++; }
virtual ~AScopedConnection() { _numConnections--; } virtual ~AScopedConnection() { _numConnections--; }
virtual DBClientBase* get() = 0; virtual DBClientBase* get() = 0;
virtual void done() = 0; virtual void done() = 0;
virtual string getHost() const = 0; virtual string getHost() const = 0;
/** /**
* @return true iff this has a connection to the db * @return true iff this has a connection to the db
skipping to change at line 243 skipping to change at line 244
static int getNumConnections() { return _numConnections; } static int getNumConnections() { return _numConnections; }
private: private:
static AtomicUInt _numConnections; static AtomicUInt _numConnections;
}; };
/** Use to get a connection from the pool. On exceptions things /** Use to get a connection from the pool. On exceptions things
clean up nicely (i.e. the socket gets closed automatically when the clean up nicely (i.e. the socket gets closed automatically when the
scopeddbconnection goes out of scope). scopeddbconnection goes out of scope).
*/ */
class ScopedDbConnection : public AScopedConnection { class MONGO_CLIENT_API ScopedDbConnection : public AScopedConnection {
public: public:
/** the main constructor you want to use /** the main constructor you want to use
throws UserException if can't connect throws UserException if can't connect
*/ */
explicit ScopedDbConnection(const string& host, double socketTimeou t = 0) : _host(host), _conn( pool.get(host, socketTimeout) ), _socketTimeou t( socketTimeout ) { explicit ScopedDbConnection(const string& host, double socketTimeou t = 0) : _host(host), _conn( pool.get(host, socketTimeout) ), _socketTimeou t( socketTimeout ) {
_setSocketTimeout(); _setSocketTimeout();
} }
explicit ScopedDbConnection(const ConnectionString& host, double so cketTimeout = 0) : _host(host.toString()), _conn( pool.get(host, socketTime out) ), _socketTimeout( socketTimeout ) { explicit ScopedDbConnection(const ConnectionString& host, double so cketTimeout = 0) : _host(host.toString()), _conn( pool.get(host, socketTime out) ), _socketTimeout( socketTimeout ) {
_setSocketTimeout(); _setSocketTimeout();
 End of changes. 8 change blocks. 
6 lines changed or deleted 7 lines changed or added


 const_element-inl.h   const_element-inl.h 
skipping to change at line 36 skipping to change at line 36
} }
inline ConstElement ConstElement::rightChild() const { inline ConstElement ConstElement::rightChild() const {
return _basis.rightChild(); return _basis.rightChild();
} }
inline bool ConstElement::hasChildren() const { inline bool ConstElement::hasChildren() const {
return _basis.hasChildren(); return _basis.hasChildren();
} }
inline ConstElement ConstElement::leftSibling() const { inline ConstElement ConstElement::leftSibling(size_t distance) const {
return _basis.leftSibling(); return _basis.leftSibling(distance);
} }
inline ConstElement ConstElement::rightSibling() const { inline ConstElement ConstElement::rightSibling(size_t distance) const {
return _basis.rightSibling(); return _basis.rightSibling(distance);
} }
inline ConstElement ConstElement::parent() const { inline ConstElement ConstElement::parent() const {
return _basis.parent(); return _basis.parent();
} }
inline ConstElement ConstElement::findNthChild(size_t n) const {
return _basis.findNthChild(n);
}
inline ConstElement ConstElement::operator[](size_t n) const { inline ConstElement ConstElement::operator[](size_t n) const {
return _basis[n]; return _basis[n];
} }
inline ConstElement ConstElement::findFirstChildNamed(const StringData&
name) const {
return _basis.findFirstChildNamed(name);
}
inline ConstElement ConstElement::operator[](const StringData& name) co nst { inline ConstElement ConstElement::operator[](const StringData& name) co nst {
return _basis[name]; return _basis[name];
} }
inline ConstElement ConstElement::findElementNamed(const StringData& na
me) const {
return _basis.findElementNamed(name);
}
inline size_t ConstElement::countSiblingsLeft() const {
return _basis.countSiblingsLeft();
}
inline size_t ConstElement::countSiblingsRight() const {
return _basis.countSiblingsRight();
}
inline size_t ConstElement::countChildren() const {
return _basis.countChildren();
}
inline bool ConstElement::hasValue() const { inline bool ConstElement::hasValue() const {
return _basis.hasValue(); return _basis.hasValue();
} }
inline const BSONElement ConstElement::getValue() const { inline const BSONElement ConstElement::getValue() const {
return _basis.getValue(); return _basis.getValue();
} }
inline double ConstElement::getValueDouble() const { inline double ConstElement::getValueDouble() const {
return _basis.getValueDouble(); return _basis.getValueDouble();
 End of changes. 5 change blocks. 
4 lines changed or deleted 30 lines changed or added


 const_element.h   const_element.h 
skipping to change at line 46 skipping to change at line 46
class ConstElement { class ConstElement {
public: public:
// This one argument constructor is intentionally not explicit, sin ce we want to be // This one argument constructor is intentionally not explicit, sin ce we want to be
// able to pass Elements to functions taking ConstElements without complaint. // able to pass Elements to functions taking ConstElements without complaint.
inline ConstElement(const Element& basis); inline ConstElement(const Element& basis);
inline ConstElement leftChild() const; inline ConstElement leftChild() const;
inline ConstElement rightChild() const; inline ConstElement rightChild() const;
inline bool hasChildren() const; inline bool hasChildren() const;
inline ConstElement leftSibling() const; inline ConstElement leftSibling(size_t distance = 1) const;
inline ConstElement rightSibling() const; inline ConstElement rightSibling(size_t distance = 1) const;
inline ConstElement parent() const; inline ConstElement parent() const;
inline ConstElement findNthChild(size_t n) const;
inline ConstElement operator[](size_t n) const; inline ConstElement operator[](size_t n) const;
inline ConstElement findFirstChildNamed(const StringData& name) con st;
inline ConstElement operator[](const StringData& n) const; inline ConstElement operator[](const StringData& n) const;
inline ConstElement findElementNamed(const StringData& name) const;
inline size_t countSiblingsLeft() const;
inline size_t countSiblingsRight() const;
inline size_t countChildren() const;
inline bool hasValue() const; inline bool hasValue() const;
inline const BSONElement getValue() const; inline const BSONElement getValue() const;
inline double getValueDouble() const; inline double getValueDouble() const;
inline StringData getValueString() const; inline StringData getValueString() const;
inline BSONObj getValueObject() const; inline BSONObj getValueObject() const;
inline BSONArray getValueArray() const; inline BSONArray getValueArray() const;
inline bool isValueUndefined() const; inline bool isValueUndefined() const;
inline OID getValueOID() const; inline OID getValueOID() const;
 End of changes. 4 change blocks. 
2 lines changed or deleted 9 lines changed or added


 constraints.h   constraints.h 
skipping to change at line 89 skipping to change at line 89
public: public:
ImmutableKeyConstraint(const Key& k) : KeyConstraint(k) ImmutableKeyConstraint(const Key& k) : KeyConstraint(k)
{ } { }
virtual ~ImmutableKeyConstraint() {} virtual ~ImmutableKeyConstraint() {}
private: private:
virtual Status check(const Environment& env); virtual Status check(const Environment& env);
Value _value; Value _value;
}; };
/** Implementation of a Constraint that makes two keys mutually exclusi
ve. Fails if both keys
* are set.
*/
class MutuallyExclusiveKeyConstraint : public KeyConstraint {
public:
MutuallyExclusiveKeyConstraint(const Key& key, const Key& otherKey)
: KeyConstraint(key),
_otherKey(otherKey)
{ }
virtual ~MutuallyExclusiveKeyConstraint() {}
private:
virtual Status check(const Environment& env);
Key _otherKey;
};
/** Implementation of a Constraint that makes one key require another.
Fails if the first key
* is set but the other is not.
*/
class RequiresOtherKeyConstraint : public KeyConstraint {
public:
RequiresOtherKeyConstraint(const Key& key, const Key& otherKey) : K
eyConstraint(key),
_
otherKey(otherKey)
{ }
virtual ~RequiresOtherKeyConstraint() {}
private:
virtual Status check(const Environment& env);
Key _otherKey;
};
/** Implementation of a Constraint that enforces a specific format on a
string value. Fails if
* the value of the key is not a string or does not match the given re
gex.
*/
class StringFormatKeyConstraint : public KeyConstraint {
public:
StringFormatKeyConstraint(const Key& key,
const std::string& regexFormat,
const std::string& displayFormat) : KeyCo
nstraint(key),
_rege
xFormat(regexFormat),
_disp
layFormat(displayFormat)
{ }
virtual ~StringFormatKeyConstraint() {}
private:
virtual Status check(const Environment& env);
std::string _regexFormat;
std::string _displayFormat;
};
/** Implementation of a Constraint on the type of a Value. Fails if we cannot extract the given /** Implementation of a Constraint on the type of a Value. Fails if we cannot extract the given
* type from our Value, which means the implementation of the access f unctions of Value * type from our Value, which means the implementation of the access f unctions of Value
* controls which types are "compatible" * controls which types are "compatible"
*/ */
template <typename T> template <typename T>
class TypeKeyConstraint : public KeyConstraint { class TypeKeyConstraint : public KeyConstraint {
public: public:
TypeKeyConstraint(const Key& k) : TypeKeyConstraint(const Key& k) :
KeyConstraint(k) KeyConstraint(k)
{ } { }
 End of changes. 1 change blocks. 
0 lines changed or deleted 57 lines changed or added


 core.h   core.h 
skipping to change at line 49 skipping to change at line 49
inline double deg2rad(const double deg) { return deg * (M_PI / 180.0); } inline double deg2rad(const double deg) { return deg * (M_PI / 180.0); }
inline double rad2deg(const double rad) { return rad * (180.0 / M_PI); } inline double rad2deg(const double rad) { return rad * (180.0 / M_PI); }
inline double computeXScanDistance(double y, double maxDistDegrees) { inline double computeXScanDistance(double y, double maxDistDegrees) {
// TODO: this overestimates for large maxDistDegrees far from the e quator // TODO: this overestimates for large maxDistDegrees far from the e quator
return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegre es))), return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegre es))),
cos(deg2rad(max(-89.0, y - maxDistDegre es)))); cos(deg2rad(max(-89.0, y - maxDistDegre es))));
} }
inline bool twoDWontWrap(double x, double y, double radius) {
// XXX XXX XXX SERVER-11387
// The 0.001 for error is totally bogus and must depend on the bits
used.
double yscandist = rad2deg(radius) + 0.001;
double xscandist = computeXScanDistance(y, yscandist);
bool ret = x + xscandist < 180
&& x - xscandist > -180
&& y + yscandist < 90
&& y - yscandist > -90;
return ret;
}
} }
 End of changes. 1 change blocks. 
13 lines changed or deleted 0 lines changed or added


 count.h   count.h 
// count.h // count.h
/** /**
* Copyright (C) 2008 10gen Inc. * Copyright (C) 2013 MongoDB Inc.
* *
* This program is free software: you can redistribute it and/or modify * This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
skipping to change at line 31 skipping to change at line 31
* linked combinations including the program with the OpenSSL library. Y ou * linked combinations including the program with the OpenSSL library. Y ou
* must comply with the GNU Affero General Public License in all respect s for * must comply with the GNU Affero General Public License in all respect s for
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
/** /**
* 'ns' is the namespace we're counting on.
*
* { count: "collectionname"[, query: <query>] } * { count: "collectionname"[, query: <query>] }
* @return -1 on ns does not exist error and other errors, 0 on other e *
rrors, otherwise the match count. * @return -1 on ns does not exist error and other errors, 0 on other e
rrors, otherwise the
* match count.
*/ */
long long runCount(const char *ns, const BSONObj& cmd, string& err, int & errCode ); long long runCount(const std::string& ns, const BSONObj& cmd, string& e rr, int& errCode );
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
5 lines changed or deleted 8 lines changed or added


 counter.h   counter.h 
skipping to change at line 17 skipping to change at line 17
* it under the terms of the GNU Affero General Public License, version 3 , * it under the terms of the GNU Affero General Public License, version 3 ,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Licen se * You should have received a copy of the GNU Affero General Public Licen se
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certai
n
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Yo
u
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If y
ou
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/platform/atomic_word.h" #include "mongo/platform/atomic_word.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
namespace mongo { namespace mongo {
/** /**
* A 64bit (atomic) counter. * A 64bit (atomic) counter.
 End of changes. 1 change blocks. 
0 lines changed or deleted 17 lines changed or added


 counters.h   counters.h 
skipping to change at line 33 skipping to change at line 33
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "../jsobj.h" #include "mongo/db/jsobj.h"
#include "../../util/net/message.h" #include "mongo/util/net/message.h"
#include "../../util/processinfo.h" #include "mongo/util/processinfo.h"
#include "../../util/concurrency/spin_lock.h" #include "mongo/util/concurrency/spin_lock.h"
#include "mongo/db/pdfile.h" #include "mongo/db/pdfile.h"
namespace mongo { namespace mongo {
/** /**
* for storing operation counters * for storing operation counters
* note: not thread safe. ok with that for speed * note: not thread safe. ok with that for speed
*/ */
class OpCounters { class OpCounters {
public: public:
 End of changes. 1 change blocks. 
4 lines changed or deleted 4 lines changed or added


 curop.h   curop.h 
skipping to change at line 37 skipping to change at line 37
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <vector>
#include "mongo/bson/util/atomic_int.h" #include "mongo/bson/util/atomic_int.h"
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/catalog/ondisk/namespace.h" #include "mongo/db/structure/catalog/namespace.h"
#include "mongo/util/concurrency/spin_lock.h" #include "mongo/util/concurrency/spin_lock.h"
#include "mongo/util/net/hostandport.h" #include "mongo/util/net/hostandport.h"
#include "mongo/util/progress_meter.h" #include "mongo/util/progress_meter.h"
#include "mongo/util/time_support.h" #include "mongo/util/time_support.h"
namespace mongo { namespace mongo {
class CurOp; class CurOp;
/* lifespan is different than CurOp because of recursives with DBDirect Client */ /* lifespan is different than CurOp because of recursives with DBDirect Client */
skipping to change at line 95 skipping to change at line 95
long long cursorid; long long cursorid;
int ntoreturn; int ntoreturn;
int ntoskip; int ntoskip;
bool exhaust; bool exhaust;
// debugging/profile info // debugging/profile info
long long nscanned; long long nscanned;
bool idhack; // indicates short circuited code path on an u pdate to make the update faster bool idhack; // indicates short circuited code path on an u pdate to make the update faster
bool scanAndOrder; // scanandorder query plan aspect was used bool scanAndOrder; // scanandorder query plan aspect was used
long long nupdated; // number of records updated (including no-ops ) long long nupdated; // number of records updated (including no-ops )
long long nupdateNoops; // number of records updated which were no ops long long nModified; // number of records written (no no-ops)
long long nmoved; // updates resulted in a move (moves are expen sive) long long nmoved; // updates resulted in a move (moves are expen sive)
long long ninserted; long long ninserted;
long long ndeleted; long long ndeleted;
bool fastmod; bool fastmod;
bool fastmodinsert; // upsert of an $operation. builds a default o bject bool fastmodinsert; // upsert of an $operation. builds a default o bject
bool upsert; // true if the update actually did an insert bool upsert; // true if the update actually did an insert
int keyUpdates; int keyUpdates;
std::string planSummary; // a brief string describing the query sol
ution
// New Query Framework debugging/profiling info
// XXX: should this really be an opaque BSONObj? Not sure.
BSONObj execStats;
// error handling // error handling
ExceptionInfo exceptionInfo; ExceptionInfo exceptionInfo;
// response info // response info
int executionTime; int executionTime;
int nreturned; int nreturned;
int responseLength; int responseLength;
}; };
skipping to change at line 194 skipping to change at line 199
~CurOp(); ~CurOp();
bool haveQuery() const { return _query.have(); } bool haveQuery() const { return _query.have(); }
BSONObj query() const { return _query.get(); } BSONObj query() const { return _query.get(); }
void appendQuery( BSONObjBuilder& b , const StringData& name ) cons t { _query.append( b , name ); } void appendQuery( BSONObjBuilder& b , const StringData& name ) cons t { _query.append( b , name ); }
void enter( Client::Context * context ); void enter( Client::Context * context );
void leave( Client::Context * context ); void leave( Client::Context * context );
void reset(); void reset();
void reset( const HostAndPort& remote, int op ); void reset( const HostAndPort& remote, int op );
void markCommand() { _command = true; } void markCommand() { _isCommand = true; }
OpDebug& debug() { return _debug; } OpDebug& debug() { return _debug; }
int profileLevel() const { return _dbprofile; } int profileLevel() const { return _dbprofile; }
const char * getNS() const { return _ns; } const char * getNS() const { return _ns; }
bool shouldDBProfile( int ms ) const { bool shouldDBProfile( int ms ) const {
if ( _dbprofile <= 0 ) if ( _dbprofile <= 0 )
return false; return false;
return _dbprofile >= 2 || ms >= serverGlobalParams.slowMS; return _dbprofile >= 2 || ms >= serverGlobalParams.slowMS;
} }
skipping to change at line 273 skipping to change at line 278
return curTimeMicros64() - startTime(); return curTimeMicros64() - startTime();
} }
int elapsedMillis() { int elapsedMillis() {
return (int) (elapsedMicros() / 1000); return (int) (elapsedMicros() / 1000);
} }
int elapsedSeconds() { return elapsedMillis() / 1000; } int elapsedSeconds() { return elapsedMillis() / 1000; }
void setQuery(const BSONObj& query) { _query.set( query ); } void setQuery(const BSONObj& query) { _query.set( query ); }
Client * getClient() const { return _client; } Client * getClient() const { return _client; }
Command * getCommand() const { return _command; }
void setCommand(Command* command) { _command = command; }
BSONObj info(); BSONObj info();
// Fetches less information than "info()"; used to search for ops w ith certain criteria // Fetches less information than "info()"; used to search for ops w ith certain criteria
BSONObj description(); BSONObj description();
string getRemoteString( bool includePort = true ) { return _remote. toString(includePort); } string getRemoteString( bool includePort = true ) { return _remote. toString(includePort); }
ProgressMeter& setMessage(const char * msg, ProgressMeter& setMessage(const char * msg,
std::string name = "Progress", std::string name = "Progress",
unsigned long long progressMeterTotal = 0 , unsigned long long progressMeterTotal = 0 ,
int secondsBetween = 3); int secondsBetween = 3);
skipping to change at line 325 skipping to change at line 333
* @return a pointer to a matching op or NULL if no ops match * @return a pointer to a matching op or NULL if no ops match
*/ */
static CurOp* getOp(const BSONObj& criteria); static CurOp* getOp(const BSONObj& criteria);
private: private:
friend class Client; friend class Client;
void _reset(); void _reset();
static AtomicUInt _nextOpNum; static AtomicUInt _nextOpNum;
Client * _client; Client * _client;
CurOp * _wrapped; CurOp * _wrapped;
Command * _command;
unsigned long long _start; unsigned long long _start;
unsigned long long _end; unsigned long long _end;
bool _active; bool _active;
bool _suppressFromCurop; // unless $all is set bool _suppressFromCurop; // unless $all is set
int _op; int _op;
bool _command; bool _isCommand;
int _dbprofile; // 0=off, 1=slow, 2=all int _dbprofile; // 0=off, 1=slow, 2=all
AtomicUInt _opNum; // todo: simple being "unsigned" m ay make more sense here AtomicUInt _opNum; // todo: simple being "unsigned" m ay make more sense here
char _ns[Namespace::MaxNsLen+2]; char _ns[Namespace::MaxNsLen+2];
HostAndPort _remote; // CAREFUL here with thread safety HostAndPort _remote; // CAREFUL here with thread safety
CachedBSONObj _query; // CachedBSONObj is thread safe CachedBSONObj _query; // CachedBSONObj is thread safe
OpDebug _debug; OpDebug _debug;
ThreadSafeString _message; ThreadSafeString _message;
ProgressMeter _progressMeter; ProgressMeter _progressMeter;
AtomicInt32 _killPending; AtomicInt32 _killPending;
int _numYields; int _numYields;
 End of changes. 7 change blocks. 
4 lines changed or deleted 14 lines changed or added


 cursors.h   cursors.h 
skipping to change at line 46 skipping to change at line 46
#include "mongo/client/parallel.h" #include "mongo/client/parallel.h"
#include "mongo/db/dbmessage.h" #include "mongo/db/dbmessage.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/platform/random.h" #include "mongo/platform/random.h"
#include "mongo/s/request.h" #include "mongo/s/request.h"
namespace mongo { namespace mongo {
class ShardedClientCursor : boost::noncopyable { class ShardedClientCursor : boost::noncopyable {
public: public:
ShardedClientCursor( QueryMessage& q , ClusteredCursor * cursor ); ShardedClientCursor( QueryMessage& q , ParallelSortClusteredCursor * cursor );
virtual ~ShardedClientCursor(); virtual ~ShardedClientCursor();
long long getId(); long long getId();
/** /**
* @return the cumulative number of documents seen by this cursor. * @return the cumulative number of documents seen by this cursor.
*/ */
int getTotalSent() const; int getTotalSent() const;
/** /**
skipping to change at line 88 skipping to change at line 88
/** @return idle time in ms */ /** @return idle time in ms */
long long idleTime( long long now ); long long idleTime( long long now );
std::string getNS() { return _cursor->getNS(); } std::string getNS() { return _cursor->getNS(); }
// The default initial buffer size for sending responses. // The default initial buffer size for sending responses.
static const int INIT_REPLY_BUFFER_SIZE; static const int INIT_REPLY_BUFFER_SIZE;
protected: protected:
ClusteredCursor * _cursor; ParallelSortClusteredCursor * _cursor;
int _skip; int _skip;
int _ntoreturn; int _ntoreturn;
int _totalSent; int _totalSent;
bool _done; bool _done;
long long _id; long long _id;
long long _lastAccessMillis; // 0 means no timeout long long _lastAccessMillis; // 0 means no timeout
 End of changes. 2 change blocks. 
2 lines changed or deleted 2 lines changed or added


 d_concurrency.h   d_concurrency.h 
skipping to change at line 250 skipping to change at line 250
}; };
class writelocktry : boost::noncopyable { class writelocktry : boost::noncopyable {
bool _got; bool _got;
scoped_ptr<Lock::GlobalWrite> _dbwlock; scoped_ptr<Lock::GlobalWrite> _dbwlock;
public: public:
writelocktry( int tryms ); writelocktry( int tryms );
~writelocktry(); ~writelocktry();
bool got() const { return _got; } bool got() const { return _got; }
}; };
/** a mutex, but reported in curop() - thus a "high level" (HL) one
some overhead so we don't use this for everything. the externalobj
sort mutex
uses this, as it can be held for eons. implementation still needed.
*/
class HLMutex : public SimpleMutex {
LockStat ls;
public:
HLMutex(const char *name);
};
} }
 End of changes. 1 change blocks. 
12 lines changed or deleted 0 lines changed or added


 d_logic.h   d_logic.h 
skipping to change at line 373 skipping to change at line 373
* if it's relevant. The entries saved here are later transferred to th e receiving side of * if it's relevant. The entries saved here are later transferred to th e receiving side of
* the migration. A relevant entry is an insertion, a deletion, or an u pdate. * the migration. A relevant entry is an insertion, a deletion, or an u pdate.
*/ */
void logOpForSharding( const char * opstr, void logOpForSharding( const char * opstr,
const char * ns, const char * ns,
const BSONObj& obj, const BSONObj& obj,
BSONObj * patt, BSONObj * patt,
const BSONObj* fullObj, const BSONObj* fullObj,
bool forMigrateCleanup ); bool forMigrateCleanup );
void aboutToDeleteForSharding( const StringData& ns, const Database* db
, const DiskLoc& dl );
} }
 End of changes. 1 change blocks. 
3 lines changed or deleted 0 lines changed or added


 data_file.h   data_file.h 
skipping to change at line 85 skipping to change at line 85
void init(int fileno, int filelength, const char* filename); void init(int fileno, int filelength, const char* filename);
bool isEmpty() const { bool isEmpty() const {
return uninitialized() || ( unusedLength == fileLength - Header Size - 16 ); return uninitialized() || ( unusedLength == fileLength - Header Size - 16 );
} }
}; };
#pragma pack() #pragma pack()
class DataFile { class DataFile {
friend class DataFileMgr;
friend class BasicCursor; friend class BasicCursor;
friend class ExtentManager; friend class ExtentManager;
public: public:
DataFile(int fn) : _mb(0), fileNo(fn) { } DataFile(int fn) : _mb(0), fileNo(fn) { }
/** @return true if found and opened. if uninitialized (prealloc on ly) does not open. */ /** @return true if found and opened. if uninitialized (prealloc on ly) does not open. */
Status openExisting( const char *filename ); Status openExisting( const char *filename );
/** creates if DNE */ /** creates if DNE */
void open(const char *filename, int requestedDataSize = 0, bool pre allocateOnly = false); void open(const char *filename, int requestedDataSize = 0, bool pre allocateOnly = false);
 End of changes. 1 change blocks. 
1 lines changed or deleted 0 lines changed or added


 database.h   database.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/cc_by_loc.h" #include "mongo/db/structure/catalog/namespace_details.h"
#include "mongo/db/namespace_details.h"
#include "mongo/db/storage/extent_manager.h" #include "mongo/db/storage/extent_manager.h"
#include "mongo/db/storage/record.h" #include "mongo/db/storage/record.h"
#include "mongo/db/storage_options.h" #include "mongo/db/storage_options.h"
#include "mongo/util/string_map.h"
namespace mongo { namespace mongo {
class Collection; class Collection;
class Extent; class Extent;
class DataFile; class DataFile;
class IndexCatalog; class IndexCatalog;
class IndexDetails; class IndexDetails;
/** /**
skipping to change at line 124 skipping to change at line 124
return false; return false;
return ns[_name.size()] == '.'; return ns[_name.size()] == '.';
} }
const RecordStats& recordStats() const { return _recordStats; } const RecordStats& recordStats() const { return _recordStats; }
RecordStats& recordStats() { return _recordStats; } RecordStats& recordStats() { return _recordStats; }
int getProfilingLevel() const { return _profile; } int getProfilingLevel() const { return _profile; }
const char* getProfilingNS() const { return _profileName.c_str(); } const char* getProfilingNS() const { return _profileName.c_str(); }
CCByLoc& ccByLoc() { return _ccByLoc; }
const NamespaceIndex& namespaceIndex() const { return _namespaceInd ex; } const NamespaceIndex& namespaceIndex() const { return _namespaceInd ex; }
NamespaceIndex& namespaceIndex() { return _namespaceIndex; } NamespaceIndex& namespaceIndex() { return _namespaceIndex; }
// TODO: do not think this method should exist, so should try and e ncapsulate better // TODO: do not think this method should exist, so should try and e ncapsulate better
ExtentManager& getExtentManager() { return _extentManager; } ExtentManager& getExtentManager() { return _extentManager; }
const ExtentManager& getExtentManager() const { return _extentManag er; } const ExtentManager& getExtentManager() const { return _extentManag er; }
Status dropCollection( const StringData& fullns ); Status dropCollection( const StringData& fullns );
Collection* createCollection( const StringData& ns, Collection* createCollection( const StringData& ns,
bool capped = false, bool capped = false,
const BSONObj* options = NULL, const BSONObj* options = NULL,
bool allocateDefaultSpace = true ); bool allocateDefaultSpace = true );
/** /**
* @param ns - this is fully qualified, which is maybe not ideal ?? ? * @param ns - this is fully qualified, which is maybe not ideal ?? ?
*/ */
Collection* getCollection( const StringData& ns ); Collection* getCollection( const StringData& ns );
Collection* getCollection( const NamespaceString& ns ) { return get
Collection( ns.ns() ); }
Collection* getOrCreateCollection( const StringData& ns );
Status renameCollection( const StringData& fromNS, const StringData & toNS, bool stayTemp ); Status renameCollection( const StringData& fromNS, const StringData & toNS, bool stayTemp );
/** /**
* @return name of an existing database with same text name but dif ferent * @return name of an existing database with same text name but dif ferent
* casing, if one exists. Otherwise the empty string is returned. If * casing, if one exists. Otherwise the empty string is returned. If
* 'duplicates' is specified, it is filled with all duplicate names . * 'duplicates' is specified, it is filled with all duplicate names .
*/ */
static string duplicateUncasedName( bool inholderlockalready, const string &name, const string &path, set< string > *duplicates = 0 ); static string duplicateUncasedName( bool inholderlockalready, const string &name, const string &path, set< string > *duplicates = 0 );
static Status validateDBName( const StringData& dbname ); static Status validateDBName( const StringData& dbname );
skipping to change at line 210 skipping to change at line 212
const string _path; // "/data/db" const string _path; // "/data/db"
NamespaceIndex _namespaceIndex; NamespaceIndex _namespaceIndex;
ExtentManager _extentManager; ExtentManager _extentManager;
const string _profileName; // "alleyinsider.system.profile" const string _profileName; // "alleyinsider.system.profile"
const string _namespacesName; // "alleyinsider.system.namespaces" const string _namespacesName; // "alleyinsider.system.namespaces"
const string _indexesName; // "alleyinsider.system.indexes" const string _indexesName; // "alleyinsider.system.indexes"
const string _extentFreelistName; const string _extentFreelistName;
CCByLoc _ccByLoc; // use by ClientCursor
RecordStats _recordStats; RecordStats _recordStats;
int _profile; // 0=off. int _profile; // 0=off.
int _magic; // used for making sure the object is still loaded in m emory int _magic; // used for making sure the object is still loaded in m emory
// TODO: probably shouldn't be a std::map
// TODO: make sure deletes go through // TODO: make sure deletes go through
// this in some ways is a dupe of _namespaceIndex // this in some ways is a dupe of _namespaceIndex
// but it points to a much more useful data structure // but it points to a much more useful data structure
typedef std::map< std::string, Collection* > CollectionMap; typedef StringMap< Collection* > CollectionMap;
CollectionMap _collections; CollectionMap _collections;
mutex _collectionLock; mutex _collectionLock;
friend class Collection; friend class Collection;
friend class NamespaceDetails; friend class NamespaceDetails;
friend class IndexDetails; friend class IndexDetails;
friend class IndexCatalog; friend class IndexCatalog;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 7 change blocks. 
8 lines changed or deleted 8 lines changed or added


 database_holder.h   database_holder.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/database.h" #include "mongo/db/catalog/database.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
namespace mongo { namespace mongo {
/** /**
* path + dbname -> Database * path + dbname -> Database
*/ */
class DatabaseHolder { class DatabaseHolder {
typedef map<string,Database*> DBs; typedef map<string,Database*> DBs;
typedef map<string,DBs> Paths; typedef map<string,DBs> Paths;
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 db.h   db.h 
skipping to change at line 35 skipping to change at line 35
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/curop.h" #include "mongo/db/curop.h"
#include "mongo/db/database_holder.h" #include "mongo/db/catalog/database_holder.h"
#include "mongo/db/pdfile.h" #include "mongo/db/pdfile.h"
#include "mongo/util/net/message.h" #include "mongo/util/net/message.h"
namespace mongo { namespace mongo {
// todo: relocked is being called when there was no unlock below. // todo: relocked is being called when there was no unlock below.
// that is weird. // that is weird.
/**
* Releases the current lock for the duration of its lifetime.
*
* WARNING: do not put in a smart pointer or any other class. If you ab
solutely must, you need
* to add the throw(DBException) annotation to it's destructor.
*/
struct dbtemprelease { struct dbtemprelease {
Client::Context * _context; Client::Context * _context;
scoped_ptr<Lock::TempRelease> tr; scoped_ptr<Lock::TempRelease> tr;
dbtemprelease() { dbtemprelease() {
const Client& c = cc(); const Client& c = cc();
_context = c.getContext(); _context = c.getContext();
verify( Lock::isLocked() ); verify( Lock::isLocked() );
if( Lock::nested() ) { if( Lock::nested() ) {
massert(10298 , "can't temprelease nested lock", false); massert(10298 , "can't temprelease nested lock", false);
} }
if ( _context ) { if ( _context ) {
_context->unlocked(); _context->unlocked();
} }
tr.reset(new Lock::TempRelease); tr.reset(new Lock::TempRelease);
verify( c.curop() ); verify( c.curop() );
c.curop()->yielded(); c.curop()->yielded();
} }
~dbtemprelease() { ~dbtemprelease() throw(DBException) {
tr.reset();
if ( _context )
_context->relocked();
}
};
/** must be write locked
no verify(and no release) if nested write lock
a lot like dbtempreleasecond, eliminate?
*/
struct dbtempreleasewritelock {
Client::Context * _context;
int _locktype;
scoped_ptr<Lock::TempRelease> tr;
dbtempreleasewritelock() {
const Client& c = cc();
_context = c.getContext();
verify( Lock::isW() );
if( Lock::nested() )
return;
if ( _context )
_context->unlocked();
tr.reset(new Lock::TempRelease);
verify( c.curop() );
c.curop()->yielded();
}
~dbtempreleasewritelock() {
tr.reset(); tr.reset();
if ( _context ) if ( _context )
_context->relocked(); _context->relocked();
} }
}; };
/** /**
only does a temp release if we're not nested and have a lock * only does a temp release if we're not nested and have a lock
*
* WARNING: do not put in a smart pointer or any other class. If you ab
solutely must, you need
* to add the throw(DBException) annotation to it's destructor.
*/ */
class dbtempreleasecond : boost::noncopyable { class dbtempreleasecond : boost::noncopyable {
dbtemprelease * real; dbtemprelease * real;
public: public:
dbtempreleasecond() { dbtempreleasecond() {
real = 0; real = 0;
if( Lock::isLocked() ) { if( Lock::isLocked() ) {
// if nested don't temprelease, and we don't complain eithe r for this class // if nested don't temprelease, and we don't complain eithe r for this class
if( !Lock::nested() ) { if( !Lock::nested() ) {
real = new dbtemprelease(); real = new dbtemprelease();
} }
} }
} }
~dbtempreleasecond() { ~dbtempreleasecond() throw(DBException) {
if ( real ) { if ( real ) {
delete real; delete real;
real = 0; real = 0;
} }
} }
bool unlocked() const { return real != 0; } bool unlocked() const { return real != 0; }
}; };
extern void (*snmpInit)(); extern void (*snmpInit)();
 End of changes. 5 change blocks. 
31 lines changed or deleted 15 lines changed or added


 dbclient.h   dbclient.h 
skipping to change at line 28 skipping to change at line 28
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#ifdef MONGO_EXPOSE_MACROS #ifdef MONGO_EXPOSE_MACROS
#error dbclient.h is for C++ driver consumer use only #error dbclient.h is for C++ driver consumer use only
#endif #endif
#define LIBMONGOCLIENT_CONSUMER
#include "mongo/client/redef_macros.h" #include "mongo/client/redef_macros.h"
#include "mongo/pch.h" #include "mongo/pch.h"
#include "mongo/client/connpool.h" #include "mongo/client/connpool.h"
#include "mongo/client/dbclient_rs.h" #include "mongo/client/dbclient_rs.h"
#include "mongo/client/dbclientcursor.h" #include "mongo/client/dbclientcursor.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/gridfs.h" #include "mongo/client/gridfs.h"
#include "mongo/client/init.h" #include "mongo/client/init.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 dbclient_multi_command.h   dbclient_multi_command.h 
skipping to change at line 47 skipping to change at line 47
/** /**
* A DBClientMultiCommand uses the client driver (DBClientConnections) to send and recv * A DBClientMultiCommand uses the client driver (DBClientConnections) to send and recv
* commands to different hosts in parallel. * commands to different hosts in parallel.
* *
* See MultiCommandDispatch for more details. * See MultiCommandDispatch for more details.
*/ */
class DBClientMultiCommand : public MultiCommandDispatch { class DBClientMultiCommand : public MultiCommandDispatch {
public: public:
DBClientMultiCommand() : _timeoutMillis( 0 ) {}
~DBClientMultiCommand(); ~DBClientMultiCommand();
void addCommand( const ConnectionString& endpoint, void addCommand( const ConnectionString& endpoint,
const StringData& dbName, const StringData& dbName,
const BSONSerializable& request ); const BSONSerializable& request );
void sendAll(); void sendAll();
int numPending() const; int numPending() const;
Status recvAny( ConnectionString* endpoint, BSONSerializable* respo nse ); Status recvAny( ConnectionString* endpoint, BSONSerializable* respo nse );
void setTimeoutMillis( int milliSecs );
private: private:
// All info associated with an pre- or in-flight command // All info associated with an pre- or in-flight command
struct PendingCommand { struct PendingCommand {
PendingCommand( const ConnectionString& endpoint, PendingCommand( const ConnectionString& endpoint,
const StringData& dbName, const StringData& dbName,
const BSONObj& cmdObj ); const BSONObj& cmdObj );
// What to send // What to send
skipping to change at line 82 skipping to change at line 86
// Where to send it // Where to send it
DBClientBase* conn; DBClientBase* conn;
// If anything goes wrong // If anything goes wrong
Status status; Status status;
}; };
typedef std::deque<PendingCommand*> PendingQueue; typedef std::deque<PendingCommand*> PendingQueue;
PendingQueue _pendingCommands; PendingQueue _pendingCommands;
int _timeoutMillis;
}; };
} }
 End of changes. 3 change blocks. 
0 lines changed or deleted 5 lines changed or added


 dbclient_rs.h   dbclient_rs.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/pch.h"
#include <boost/function.hpp>
#include <boost/shared_ptr.hpp> #include <boost/shared_ptr.hpp>
#include <set>
#include <utility> #include <utility>
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
#include "mongo/util/net/hostandport.h" #include "mongo/util/net/hostandport.h"
namespace mongo { namespace mongo {
class ReplicaSetMonitor; class ReplicaSetMonitor;
class TagSet; class TagSet;
struct ReadPreferenceSetting; struct ReadPreferenceSetting;
typedef shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorPtr; typedef shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorPtr;
typedef pair<set<string>,set<int> > NodeDiff;
/**
* manages state about a replica set for client
* keeps tabs on whose master and what slaves are up
* can hand a slave to someone for SLAVE_OK
* one instance per process per replica set
* TODO: we might be able to use a regular Node * to avoid _lock
*/
class ReplicaSetMonitor {
public:
typedef boost::function1<void,const ReplicaSetMonitor*> ConfigChang
eHook;
/**
* Data structure for keeping track of the states of individual rep
lica
* members. This class is not thread-safe so proper measures should
be taken
* when sharing this object across multiple threads.
*
* Note: these get copied around in the nodes vector so be sure to
maintain
* copyable semantics here
*/
struct Node {
Node( const HostAndPort& a , DBClientConnection* c ) :
addr( a ),
conn(c),
ok( c != NULL ),
ismaster(false),
secondary( false ),
hidden( false ),
pingTimeMillis( 0 ) {
}
bool okForSecondaryQueries() const {
return ok && secondary && ! hidden;
}
/**
* Checks if the given tag matches the tag attached to this nod
e.
*
* Example:
*
* Tag of this node: { "dc": "nyc", "region": "na", "rack": "4"
}
*
* match: {}
* match: { "dc": "nyc", "rack": 4 }
* match: { "region": "na", "dc": "nyc" }
* not match: { "dc: "nyc", "rack": 2 }
* not match: { "dc": "sf" }
*
* @param tag the tag to use to compare. Should not contain any
* embedded documents.
*
* @return true if the given tag matches the this node's tag
* specification
*/
bool matchesTag(const BSONObj& tag) const;
/**
* @param threshold max ping time (in ms) to be considered lo
cal
* @return true if node is a local secondary, and can handle qu
eries
**/
bool isLocalSecondary( const int threshold ) const {
return pingTimeMillis < threshold;
}
/**
* Checks whether this nodes is compatible with the given readP
reference and
* tag. Compatibility check is strict in the sense that seconda
ry preferred
* is treated like secondary only and primary preferred is trea
ted like
* primary only.
*
* @return true if this node is compatible with the read prefer
ence and tags.
*/
bool isCompatible(ReadPreference readPreference, const TagSet*
tag) const;
BSONObj toBSON() const;
string toString() const {
return toBSON().toString();
}
HostAndPort addr;
boost::shared_ptr<DBClientConnection> conn;
// if this node is in a failure state
// used for slave routing
// this is too simple, should make it better
bool ok;
// as reported by ismaster
BSONObj lastIsMaster;
bool ismaster;
bool secondary;
bool hidden;
int pingTimeMillis;
};
static const double SOCKET_TIMEOUT_SECS;
/**
* Selects the right node given the nodes to pick from and the pref
erence.
*
* @param nodes the nodes to select from
* @param preference the read mode to use
* @param tags the tags used for filtering nodes
* @param localThresholdMillis the exclusive upper bound of ping ti
me to be
* considered as a local node. Local nodes are favored over non
-local
* nodes if multiple nodes matches the other criteria.
* @param lastHost the host used in the last successful request. Th
is is used for
* selecting a different node as much as possible, by doing a s
imple round
* robin, starting from the node next to this lastHost. This wi
ll be overwritten
* with the newly chosen host if not empty, not primary and whe
n preference
* is not Nearest.
* @param isPrimarySelected out parameter that is set to true if th
e returned host
* is a primary. Cannot be NULL and valid only if returned host
is not empty.
*
* @return the host object of the node selected. If none of the nod
es are
* eligible, returns an empty host.
*/
static HostAndPort selectNode(const std::vector<Node>& nodes,
ReadPreference preference,
TagSet* tags,
int localThresholdMillis,
HostAndPort* lastHost,
bool* isPrimarySelected);
/**
* Selects the right node given the nodes to pick from and the pref
erence. This
* will also attempt to refresh the local view of the replica set c
onfiguration
* if the primary node needs to be returned but is not currently av
ailable (except
* for ReadPrefrence_Nearest).
*
* @param preference the read mode to use.
* @param tags the tags used for filtering nodes.
* @param isPrimarySelected out parameter that is set to true if th
e returned host
* is a primary. Cannot be NULL and valid only if returned host
is not empty.
*
* @return the host object of the node selected. If none of the nod
es are
* eligible, returns an empty host.
*/
HostAndPort selectAndCheckNode(ReadPreference preference,
TagSet* tags,
bool* isPrimarySelected);
/**
* Creates a new ReplicaSetMonitor, if it doesn't already exist.
*/
static void createIfNeeded( const string& name , const vector<HostA
ndPort>& servers );
/**
* gets a cached Monitor per name. If the monitor is not found and
createFromSeed is false,
* it will return none. If createFromSeed is true, it will try to l
ook up the last known
* servers list for this set and will create a new monitor using th
at as the seed list.
*/
static ReplicaSetMonitorPtr get( const string& name, const bool cre
ateFromSeed = false );
/**
* Populates activeSets with all the currently tracked replica set
names.
*/
static void getAllTrackedSets(set<string>* activeSets);
/**
* checks all sets for current master and new secondaries
* usually only called from a BackgroundJob
*/
static void checkAll();
/**
* Removes the ReplicaSetMonitor for the given set name from _sets,
which will delete it.
* If clearSeedCache is true, then the cached seed string for this
Replica Set will be removed
* from _seedServers.
*/
static void remove( const string& name, bool clearSeedCache = false
);
static int getMaxFailedChecks() { return _maxFailedChecks; };
static void setMaxFailedChecks(int numChecks) { _maxFailedChecks =
numChecks; };
/**
* this is called whenever the config of any replica set changes
* currently only 1 globally
* asserts if one already exists
* ownership passes to ReplicaSetMonitor and the hook will actually
never be deleted
*/
static void setConfigChangeHook( ConfigChangeHook hook );
/**
* Permanently stops all monitoring on replica sets and clears all
cached information
* as well. As a consequence, NEVER call this if you have other thr
eads that have a
* DBClientReplicaSet instance.
*/
static void cleanup();
~ReplicaSetMonitor();
/** @return HostAndPort or throws an exception */
HostAndPort getMaster();
/**
* notify the monitor that server has faild
*/
void notifyFailure( const HostAndPort& server );
/**
* @deprecated use #getCandidateNode instead
* @return prev if its still ok, and if not returns a random slave
that is ok for reads
*/
HostAndPort getSlave( const HostAndPort& prev );
/**
* @param preferLocal Prefer a local secondary, otherwise pick an
y
* secondary, or fall back to primary
* @return a random slave that is ok for reads
*/
HostAndPort getSlave( bool preferLocal = true );
/**
* notify the monitor that server has faild
*/
void notifySlaveFailure( const HostAndPort& server );
/**
* checks for current master and new secondaries
*/
void check();
string getName() const { return _name; }
string getServerAddress() const;
bool contains( const string& server ) const;
void appendInfo( BSONObjBuilder& b ) const;
/**
* Set the threshold value (in ms) for a node to be considered loca
l.
* NOTE: This function acquires the _lock mutex.
**/
void setLocalThresholdMillis( const int millis );
/**
* @return true if the host is compatible with the given readPrefer
ence and tag set.
*/
bool isHostCompatible(const HostAndPort& host, ReadPreference readP
reference,
const TagSet* tagSet) const;
/**
* Performs a quick check if at least one node is up based on the c
ached
* view of the set.
*
* @return true if any node is ok
*/
bool isAnyNodeOk() const;
private:
/**
* This populates a list of hosts from the list of seeds (discardin
g the
* seed list). Should only be called from within _setsLock.
* @param name set name
* @param servers seeds
*/
ReplicaSetMonitor( const string& name , const vector<HostAndPort>&
servers );
static void _remove_inlock( const string& name, bool clearSeedCache
= false );
/**
* Checks all connections from the host list and sets the current
* master.
*/
void _check();
/**
* Add array of hosts to host list. Doesn't do anything if hosts ar
e
* already in host list.
* @param hostList the list of hosts to add
* @param changed if new hosts were added
*/
void _checkHosts(const BSONObj& hostList, bool& changed);
/**
* Updates host list.
* Invariant: if nodesOffset is >= 0, _nodes[nodesOffset].conn shou
ld be
* equal to conn.
*
* @param conn the connection to check
* @param maybePrimary OUT
* @param verbose
* @param nodesOffset - offset into _nodes array, -1 for not in it
*
* @return true if the connection is good or false if invariant
* is broken
*/
bool _checkConnection( DBClientConnection* conn, string& maybePrima
ry,
bool verbose, int nodesOffset );
/**
* Save the seed list for the current set into the _seedServers map
* Should only be called if you're already holding _setsLock and th
is
* monitor's _lock.
*/
void _cacheServerAddresses_inlock();
string _getServerAddress_inlock() const;
NodeDiff _getHostDiff_inlock( const BSONObj& hostList );
bool _shouldChangeHosts( const BSONObj& hostList, bool inlock );
/**
* @return the index to _nodes corresponding to the server address.
*/
int _find( const string& server ) const ;
int _find_inlock( const string& server ) const ;
/**
* Checks whether the given connection matches the connection store
d in _nodes.
* Mainly used for sanity checking to confirm that nodeOffset still
* refers to the right connection after releasing and reacquiring
* a mutex.
*/
bool _checkConnMatch_inlock( DBClientConnection* conn, size_t nodeO
ffset ) const;
/**
* Populates the local view of the set using the list of servers.
*
* Invariants:
* 1. Should be called while holding _setsLock and while not holdin
g _lock since
* this calls #_checkConnection, which locks _checkConnectionLoc
k
* 2. _nodes should be empty before this is called
*/
void _populateHosts_inSetsLock(const std::vector<HostAndPort>& seed
List);
// protects _localThresholdMillis, _nodes and refs to _nodes
// (eg. _master & _lastReadPrefHost)
mutable mongo::mutex _lock;
/**
* "Synchronizes" the _checkConnection method. Should ideally be on
e mutex per
* connection object being used. The purpose of this lock is to mak
e sure that
* the reply from the connection the lock holder got is the actual
response
* to what it sent.
*
* Deadlock WARNING: never acquire this while holding _lock
*/
mutable mongo::mutex _checkConnectionLock;
string _name;
/**
* Host list.
*/
std::vector<Node> _nodes;
int _master; // which node is the current master. -1 means no mast
er is known
int _nextSlave; // which node is the current slave, only used by th
e deprecated getSlave
// last host returned by _selectNode, used for round robin selectio
n
HostAndPort _lastReadPrefHost;
// The number of consecutive times the set has been checked and eve
ry member in the set was down.
int _failedChecks;
static mongo::mutex _setsLock; // protects _seedServers and _sets
// set name to seed list.
// Used to rebuild the monitor if it is cleaned up but then the set
is accessed again.
static map<string, vector<HostAndPort> > _seedServers;
static map<string, ReplicaSetMonitorPtr> _sets; // set name to Moni
tor
static ConfigChangeHook _hook;
int _localThresholdMillis; // local ping latency threshold (protect
ed by _lock)
static int _maxFailedChecks;
};
/** Use this class to connect to a replica set of servers. The class w ill manage /** Use this class to connect to a replica set of servers. The class w ill manage
checking for which server in a replica set is master, and do failove r automatically. checking for which server in a replica set is master, and do failove r automatically.
This can also be used to connect to replica pairs since pairs are a subset of sets This can also be used to connect to replica pairs since pairs are a subset of sets
On a failover situation, expect at least one operation to return an error (throw On a failover situation, expect at least one operation to return an error (throw
an exception) before the failover is complete. Operations are not r etried. an exception) before the failover is complete. Operations are not r etried.
*/ */
class DBClientReplicaSet : public DBClientBase { class MONGO_CLIENT_API DBClientReplicaSet : public DBClientBase {
public: public:
using DBClientBase::query; using DBClientBase::query;
using DBClientBase::update; using DBClientBase::update;
using DBClientBase::remove; using DBClientBase::remove;
/** Call connect() after constructing. autoReconnect is always on f or DBClientReplicaSet connections. */ /** Call connect() after constructing. autoReconnect is always on f or DBClientReplicaSet connections. */
DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout=0 ); DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout=0 );
virtual ~DBClientReplicaSet(); virtual ~DBClientReplicaSet();
/** /**
skipping to change at line 508 skipping to change at line 131
// ----- status ------ // ----- status ------
virtual bool isFailed() const { return ! _master || _master->isFail ed(); } virtual bool isFailed() const { return ! _master || _master->isFail ed(); }
bool isStillConnected(); bool isStillConnected();
// ----- informational ---- // ----- informational ----
double getSoTimeout() const { return _so_timeout; } double getSoTimeout() const { return _so_timeout; }
string toString() { return getServerAddress(); } string toString() const { return getServerAddress(); }
string getServerAddress() const; string getServerAddress() const;
virtual ConnectionString::ConnectionType type() const { return Conn ectionString::SET; } virtual ConnectionString::ConnectionType type() const { return Conn ectionString::SET; }
virtual bool lazySupported() const { return true; } virtual bool lazySupported() const { return true; }
// ---- low level ------ // ---- low level ------
virtual bool call( Message &toSend, Message &response, bool assertO k=true , string * actualServer = 0 ); virtual bool call( Message &toSend, Message &response, bool assertO k=true , string * actualServer = 0 );
virtual bool callRead( Message& toSend , Message& response ) { retu rn checkMaster()->callRead( toSend , response ); } virtual bool callRead( Message& toSend , Message& response ) { retu rn checkMaster()->callRead( toSend , response ); }
/**
* Returns whether a query or command can be sent to secondaries ba
sed on the query object
* and options.
*
* @param ns the namespace of the query.
* @param queryObj the query object to check.
* @param queryOptions the query options
*
* @return true if the query/cmd could potentially be sent to a sec
ondary, false otherwise
*/
static bool isSecondaryQuery( const string& ns,
const BSONObj& queryObj,
int queryOptions );
virtual void setRunCommandHook(DBClientWithCommands::RunCommandHook
Func func);
virtual void setPostRunCommandHook(DBClientWithCommands::PostRunCom
mandHookFunc func);
protected: protected:
/** Authorize. Authorizes all nodes as needed /** Authorize. Authorizes all nodes as needed
*/ */
virtual void _auth(const BSONObj& params); virtual void _auth(const BSONObj& params);
virtual void sayPiggyBack( Message &toSend ) { checkMaster()->say( toSend ); } virtual void sayPiggyBack( Message &toSend ) { checkMaster()->say( toSend ); }
private: private:
/** /**
* Used to simplify slave-handling logic on errors * Used to simplify slave-handling logic on errors
skipping to change at line 607 skipping to change at line 247
// not sure if/how we should handle // not sure if/how we should handle
std::map<string, BSONObj> _auths; // dbName -> auth parameters std::map<string, BSONObj> _auths; // dbName -> auth parameters
protected: protected:
/** /**
* for storing (non-threadsafe) information between lazy calls * for storing (non-threadsafe) information between lazy calls
*/ */
class LazyState { class LazyState {
public: public:
LazyState() : _lastClient( NULL ), _lastOp( -1 ), _slaveOk( fal LazyState() :
se ), _retries( 0 ) {} _lastClient( NULL ), _lastOp( -1 ), _secondaryQueryOk( fals
e ), _retries( 0 ) {}
DBClientConnection* _lastClient; DBClientConnection* _lastClient;
int _lastOp; int _lastOp;
bool _slaveOk; bool _secondaryQueryOk;
int _retries; int _retries;
} _lazyState; } _lazyState;
}; };
/** /**
* A simple object for representing the list of tags. The initial state * A simple object for representing the list of tags requested by a $re
will adPreference.
* have a valid current tag as long as the list is not empty.
*/ */
class TagSet { class MONGO_CLIENT_API TagSet {
public: public:
/** /**
* Creates an empty tag list that is initially exhausted. * Creates a TagSet that matches any nodes.
*
* Do not call during static init.
*/ */
TagSet(); TagSet();
/** /**
* Creates a copy of the given TagSet. The new copy will have the * Creates a TagSet from a BSONArray of tags.
* iterator pointing at the initial position.
*/
explicit TagSet(const TagSet& other);
/**
* Creates a tag set object that lazily iterates over the tag list.
* *
* @param tags the list of tags associated with this option. This o bject * @param tags the list of tags associated with this option. This o bject
* will get a shared copy of the list. Therefore, it is importa nt * will get a shared copy of the list. Therefore, it is importa nt
* for the the given tag to live longer than the created tag se t. * for the the given tag to live longer than the created tag se t.
*/ */
explicit TagSet(const BSONArray& tags); explicit TagSet(const BSONArray& tags) : _tags(tags) {}
/**
* Advance to the next tag.
*
* @throws AssertionException if iterator is exhausted before this
is called.
*/
void next();
/**
* Rests the iterator to point to the first element (if there is a
tag).
*/
void reset();
//
// Getters
//
/**
* @return the current tag. Returned tag is invalid if isExhausted
is true.
*/
const BSONObj& getCurrentTag() const;
/**
* @return true if the iterator has been exhausted.
*/
bool isExhausted() const;
/** /**
* @return an unordered iterator to the tag list. The caller is res * Returns the BSONArray listing all tags that should be accepted.
ponsible for
* destroying the returned iterator.
*/ */
BSONObjIterator* getIterator() const; const BSONArray& getTagBSON() const { return _tags; }
/** bool operator==(const TagSet& other) const { return _tags == other.
* @returns true if the other TagSet has the same tag set specifica _tags; }
tion with
* this tag set, disregarding where the current iterator is poi
nting to.
*/
bool equals(const TagSet& other) const;
const BSONArray& getTagBSON() const;
private: private:
/**
* This is purposely undefined as the semantics for assignment can
be
* confusing. This is because BSONArrayIteratorSorted shouldn't be
* copied (because of how it manages internal buffer).
*/
TagSet& operator=(const TagSet& other);
BSONObj _currentTag;
bool _isExhausted;
// Important: do not re-order _tags & _tagIterator
BSONArray _tags; BSONArray _tags;
scoped_ptr<BSONArrayIteratorSorted> _tagIterator;
}; };
struct ReadPreferenceSetting { struct MONGO_CLIENT_API ReadPreferenceSetting {
/** /**
* @parm pref the read preference mode. * @parm pref the read preference mode.
* @param tag the tag set. Note that this object will have the * @param tag the tag set. Note that this object will have the
* tag set will have this in a reset state (meaning, this * tag set will have this in a reset state (meaning, this
* object's copy of tag will have the iterator in the initial * object's copy of tag will have the iterator in the initial
* position). * position).
*/ */
ReadPreferenceSetting(ReadPreference pref, const TagSet& tag): ReadPreferenceSetting(ReadPreference pref, const TagSet& tag):
pref(pref), tags(tag) { pref(pref), tags(tag) {
} }
inline bool equals(const ReadPreferenceSetting& other) const { inline bool equals(const ReadPreferenceSetting& other) const {
return pref == other.pref && tags.equals(other.tags); return pref == other.pref && tags == other.tags;
} }
BSONObj toBSON() const; BSONObj toBSON() const;
const ReadPreference pref; const ReadPreference pref;
TagSet tags; TagSet tags;
}; };
} }
 End of changes. 21 change blocks. 
522 lines changed or deleted 42 lines changed or added


 dbclient_safe_writer.h   dbclient_safe_writer.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <string>
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/db/lasterror.h" #include "mongo/db/jsobj.h"
#include "mongo/s/write_ops/batch_downconvert.h" #include "mongo/s/write_ops/batch_downconvert.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
namespace mongo { namespace mongo {
/** /**
* Executes a batch write operation using safe writes and DBClientConne ctions. * Executes a batch write operation using safe writes and DBClientConne ctions.
* TODO: Remove post-2.6 * TODO: Remove post-2.6
*/ */
class DBClientSafeWriter : public SafeWriter { class DBClientSafeWriter : public SafeWriter {
public: public:
virtual ~DBClientSafeWriter() { virtual ~DBClientSafeWriter() {
} }
void safeWrite( DBClientBase* conn, const BatchItemRef& batchItem, Status safeWrite( DBClientBase* conn,
LastError* error ); const BatchItemRef& batchItem,
const BSONObj& writeConcern,
BSONObj* gleResponse );
Status enforceWriteConcern( DBClientBase* conn,
const StringData& dbName,
const BSONObj& writeConcern,
BSONObj* gleResponse );
}; };
} }
 End of changes. 3 change blocks. 
4 lines changed or deleted 11 lines changed or added


 dbclient_shard_resolver.h   dbclient_shard_resolver.h 
skipping to change at line 62 skipping to change at line 62
* *
* Note: Does *not* trigger a refresh of either the shard or replic a set monitor caches, * Note: Does *not* trigger a refresh of either the shard or replic a set monitor caches,
* though refreshes may happen unexpectedly between calls. * though refreshes may happen unexpectedly between calls.
* *
* Returns ShardNotFound if the shard name is unknown * Returns ShardNotFound if the shard name is unknown
* Returns ReplicaSetNotFound if the replica set is not being track ed * Returns ReplicaSetNotFound if the replica set is not being track ed
* Returns !OK with message if the shard host could not be found fo r other reasons. * Returns !OK with message if the shard host could not be found fo r other reasons.
*/ */
Status chooseWriteHost( const std::string& shardName, ConnectionStr ing* shardHost ) const; Status chooseWriteHost( const std::string& shardName, ConnectionStr ing* shardHost ) const;
/**
* Resolves a replica set connection string to a master, if possib
le.
* Returns HostNotFound if the master is not reachable
* Returns ReplicaSetNotFound if the replica set is not being track
ed
*/
static Status findMaster( const std::string connString, ConnectionS
tring* resolvedHost );
}; };
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
0 lines changed or deleted 10 lines changed or added


 dbclientcursor.h   dbclientcursor.h 
skipping to change at line 25 skipping to change at line 25
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include <stack> #include <stack>
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/json.h" #include "mongo/db/json.h"
#include "mongo/util/net/message.h" #include "mongo/util/net/message.h"
namespace mongo { namespace mongo {
class AScopedConnection; class AScopedConnection;
/** for mock purposes only -- do not create variants of DBClientCursor, nor hang code here /** for mock purposes only -- do not create variants of DBClientCursor, nor hang code here
@see DBClientMockCursor @see DBClientMockCursor
*/ */
class DBClientCursorInterface : boost::noncopyable { class MONGO_CLIENT_API DBClientCursorInterface : boost::noncopyable {
public: public:
virtual ~DBClientCursorInterface() {} virtual ~DBClientCursorInterface() {}
virtual bool more() = 0; virtual bool more() = 0;
virtual BSONObj next() = 0; virtual BSONObj next() = 0;
// TODO bring more of the DBClientCursor interface to here // TODO bring more of the DBClientCursor interface to here
protected: protected:
DBClientCursorInterface() {} DBClientCursorInterface() {}
}; };
/** Queries return a cursor object */ /** Queries return a cursor object */
class DBClientCursor : public DBClientCursorInterface { class MONGO_CLIENT_API DBClientCursor : public DBClientCursorInterface {
public: public:
/** If true, safe to call next(). Requests more from server if nec essary. */ /** If true, safe to call next(). Requests more from server if nec essary. */
bool more(); bool more();
/** If true, there is more in our local buffers to be fetched via n ext(). Returns /** If true, there is more in our local buffers to be fetched via n ext(). Returns
false when a getMore request back to server would be required. You can use this false when a getMore request back to server would be required. You can use this
if you want to exhaust whatever data has been fetched to the cl ient already but if you want to exhaust whatever data has been fetched to the cl ient already but
then perhaps stop. then perhaps stop.
*/ */
int objsLeftInBatch() const { _assertIfNull(); return _putBack.size () + batch.nReturned - batch.pos; } int objsLeftInBatch() const { _assertIfNull(); return _putBack.size () + batch.nReturned - batch.pos; }
skipping to change at line 258 skipping to change at line 259
// non-copyable , non-assignable // non-copyable , non-assignable
DBClientCursor( const DBClientCursor& ); DBClientCursor( const DBClientCursor& );
DBClientCursor& operator=( const DBClientCursor& ); DBClientCursor& operator=( const DBClientCursor& );
// init pieces // init pieces
void _assembleInit( Message& toSend ); void _assembleInit( Message& toSend );
}; };
/** iterate over objects in current batch only - will not cause a netwo rk call /** iterate over objects in current batch only - will not cause a netwo rk call
*/ */
class DBClientCursorBatchIterator { class MONGO_CLIENT_API DBClientCursorBatchIterator {
public: public:
DBClientCursorBatchIterator( DBClientCursor &c ) : _c( c ), _n() {} DBClientCursorBatchIterator( DBClientCursor &c ) : _c( c ), _n() {}
bool moreInCurrentBatch() { return _c.moreInCurrentBatch(); } bool moreInCurrentBatch() { return _c.moreInCurrentBatch(); }
BSONObj nextSafe() { BSONObj nextSafe() {
massert( 13383, "BatchIterator empty", moreInCurrentBatch() ); massert( 13383, "BatchIterator empty", moreInCurrentBatch() );
++_n; ++_n;
return _c.nextSafe(); return _c.nextSafe();
} }
int n() const { return _n; } int n() const { return _n; }
private: private:
 End of changes. 4 change blocks. 
3 lines changed or deleted 4 lines changed or added


 dbclientinterface.h   dbclientinterface.h 
skipping to change at line 27 skipping to change at line 27
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include <boost/function.hpp> #include <boost/function.hpp>
#include "mongo/client/export_macros.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/logger/log_severity.h" #include "mongo/logger/log_severity.h"
#include "mongo/platform/atomic_word.h" #include "mongo/platform/atomic_word.h"
#include "mongo/util/net/message.h" #include "mongo/util/net/message.h"
#include "mongo/util/net/message_port.h" #include "mongo/util/net/message_port.h"
namespace mongo { namespace mongo {
/** the query field 'options' can have these bits set: */ /** the query field 'options' can have these bits set: */
enum QueryOptions { enum MONGO_CLIENT_API QueryOptions {
/** Tailable means cursor is not closed when the last data is retri eved. rather, the cursor marks /** Tailable means cursor is not closed when the last data is retri eved. rather, the cursor marks
the final object's position. you can resume using the cursor la ter, from where it was located, the final object's position. you can resume using the cursor la ter, from where it was located,
if more data were received. Set on dbQuery and dbGetMore. if more data were received. Set on dbQuery and dbGetMore.
like any "latent cursor", the cursor may become invalid at some point -- for example if that like any "latent cursor", the cursor may become invalid at some point -- for example if that
final object it references were deleted. Thus, you should be pr epared to requery if you get back final object it references were deleted. Thus, you should be pr epared to requery if you get back
ResultFlag_CursorNotFound. ResultFlag_CursorNotFound.
*/ */
QueryOption_CursorTailable = 1 << 1, QueryOption_CursorTailable = 1 << 1,
skipping to change at line 90 skipping to change at line 91
/** When sharded, this means its ok to return partial results /** When sharded, this means its ok to return partial results
Usually we will fail a query if all required shards aren't up Usually we will fail a query if all required shards aren't up
If this is set, it'll be a partial result set If this is set, it'll be a partial result set
*/ */
QueryOption_PartialResults = 1 << 7 , QueryOption_PartialResults = 1 << 7 ,
QueryOption_AllSupported = QueryOption_CursorTailable | QueryOption _SlaveOk | QueryOption_OplogReplay | QueryOption_NoCursorTimeout | QueryOpt ion_AwaitData | QueryOption_Exhaust | QueryOption_PartialResults QueryOption_AllSupported = QueryOption_CursorTailable | QueryOption _SlaveOk | QueryOption_OplogReplay | QueryOption_NoCursorTimeout | QueryOpt ion_AwaitData | QueryOption_Exhaust | QueryOption_PartialResults
}; };
enum UpdateOptions { enum MONGO_CLIENT_API UpdateOptions {
/** Upsert - that is, insert the item if no matching item is found. */ /** Upsert - that is, insert the item if no matching item is found. */
UpdateOption_Upsert = 1 << 0, UpdateOption_Upsert = 1 << 0,
/** Update multiple documents (if multiple documents match query ex pression). /** Update multiple documents (if multiple documents match query ex pression).
(Default is update a single document and stop.) */ (Default is update a single document and stop.) */
UpdateOption_Multi = 1 << 1, UpdateOption_Multi = 1 << 1,
/** flag from mongo saying this update went everywhere */ /** flag from mongo saying this update went everywhere */
UpdateOption_Broadcast = 1 << 2 UpdateOption_Broadcast = 1 << 2
}; };
enum RemoveOptions { enum MONGO_CLIENT_API RemoveOptions {
/** only delete one option */ /** only delete one option */
RemoveOption_JustOne = 1 << 0, RemoveOption_JustOne = 1 << 0,
/** flag from mongo saying this update went everywhere */ /** flag from mongo saying this update went everywhere */
RemoveOption_Broadcast = 1 << 1 RemoveOption_Broadcast = 1 << 1
}; };
/** /**
* need to put in DbMesssage::ReservedOptions as well * need to put in DbMesssage::ReservedOptions as well
*/ */
enum InsertOptions { enum MONGO_CLIENT_API InsertOptions {
/** With muli-insert keep processing inserts if one fails */ /** With muli-insert keep processing inserts if one fails */
InsertOption_ContinueOnError = 1 << 0 InsertOption_ContinueOnError = 1 << 0
}; };
/** /**
* Start from *top* of bits, these are generic write options that apply to all * Start from *top* of bits, these are generic write options that apply to all
*/ */
enum WriteOptions { enum MONGO_CLIENT_API WriteOptions {
/** logical writeback option */ /** logical writeback option */
WriteOption_FromWriteback = 1 << 31 WriteOption_FromWriteback = 1 << 31
}; };
// //
// For legacy reasons, the reserved field pre-namespace of certain type s of messages is used // For legacy reasons, the reserved field pre-namespace of certain type s of messages is used
// to store options as opposed to the flags after the namespace. This should be transparent to // to store options as opposed to the flags after the namespace. This should be transparent to
// the api user, but we need these constants to disassemble/reassemble the messages correctly. // the api user, but we need these constants to disassemble/reassemble the messages correctly.
// //
enum ReservedOptions { enum MONGO_CLIENT_API ReservedOptions {
Reserved_InsertOption_ContinueOnError = 1 << 0 , Reserved_InsertOption_ContinueOnError = 1 << 0 ,
Reserved_FromWriteback = 1 << 1 Reserved_FromWriteback = 1 << 1
}; };
enum ReadPreference { enum MONGO_CLIENT_API ReadPreference {
/** /**
* Read from primary only. All operations produce an error (throw a n * Read from primary only. All operations produce an error (throw a n
* exception where applicable) if primary is unavailable. Cannot be * exception where applicable) if primary is unavailable. Cannot be
* combined with tags. * combined with tags.
*/ */
ReadPreference_PrimaryOnly = 0, ReadPreference_PrimaryOnly = 0,
/** /**
* Read from primary if available, otherwise a secondary. Tags will * Read from primary if available, otherwise a secondary. Tags will
* only be applied in the event that the primary is unavailable and * only be applied in the event that the primary is unavailable and
skipping to change at line 169 skipping to change at line 170
* Read from a secondary if available, otherwise read from the prim ary. * Read from a secondary if available, otherwise read from the prim ary.
*/ */
ReadPreference_SecondaryPreferred, ReadPreference_SecondaryPreferred,
/** /**
* Read from any member. * Read from any member.
*/ */
ReadPreference_Nearest, ReadPreference_Nearest,
}; };
class DBClientBase; class MONGO_CLIENT_API DBClientBase;
class DBClientConnection; class MONGO_CLIENT_API DBClientConnection;
/** /**
* ConnectionString handles parsing different ways to connect to mongo and determining method * ConnectionString handles parsing different ways to connect to mongo and determining method
* samples: * samples:
* server * server
* server:port * server:port
* foo/server:port,server:port SET * foo/server:port,server:port SET
* server,server,server SYNC * server,server,server SYNC
* Warning - you usually don't want "SYNC", it's used * Warning - you usually don't want "SYNC", it's used
* for some special things such as s harding config servers. * for some special things such as s harding config servers.
* See syncclusterconnection.h for m ore info. * See syncclusterconnection.h for m ore info.
* *
* tyipcal use * tyipcal use
* string errmsg, * string errmsg,
* ConnectionString cs = ConnectionString::parse( url , errmsg ); * ConnectionString cs = ConnectionString::parse( url , errmsg );
* if ( ! cs.isValid() ) throw "bad: " + errmsg; * if ( ! cs.isValid() ) throw "bad: " + errmsg;
* DBClientBase * conn = cs.connect( errmsg ); * DBClientBase * conn = cs.connect( errmsg );
*/ */
class ConnectionString { class MONGO_CLIENT_API ConnectionString {
public: public:
enum ConnectionType { INVALID , MASTER , PAIR , SET , SYNC, CUSTOM }; enum ConnectionType { INVALID , MASTER , PAIR , SET , SYNC, CUSTOM };
ConnectionString() { ConnectionString() {
_type = INVALID; _type = INVALID;
} }
// Note: This should only be used for direct connections to a singl e server. For replica // Note: This should only be used for direct connections to a singl e server. For replica
// set and SyncClusterConnections, use ConnectionString::parse. // set and SyncClusterConnections, use ConnectionString::parse.
ConnectionString( const HostAndPort& server ) { ConnectionString( const HostAndPort& server ) {
skipping to change at line 295 skipping to change at line 296
static void setConnectionHook( ConnectionHook* hook ){ static void setConnectionHook( ConnectionHook* hook ){
scoped_lock lk( _connectHookMutex ); scoped_lock lk( _connectHookMutex );
_connectHook = hook; _connectHook = hook;
} }
static ConnectionHook* getConnectionHook() { static ConnectionHook* getConnectionHook() {
scoped_lock lk( _connectHookMutex ); scoped_lock lk( _connectHookMutex );
return _connectHook; return _connectHook;
} }
//
// FOR TESTING ONLY - useful to be able to directly mock a connecti
on string without
// including the entire client library.
//
static ConnectionString mock( const HostAndPort& server ) {
ConnectionString connStr;
connStr._servers.push_back( server );
connStr._string = server.toString( true );
return connStr;
}
private: private:
void _fillServers( string s ); void _fillServers( string s );
void _finishInit(); void _finishInit();
ConnectionType _type; ConnectionType _type;
vector<HostAndPort> _servers; vector<HostAndPort> _servers;
string _string; string _string;
string _setName; string _setName;
static mutex _connectHookMutex; static mutex _connectHookMutex;
static ConnectionHook* _connectHook; static ConnectionHook* _connectHook;
}; };
/** /**
* controls how much a clients cares about writes * controls how much a clients cares about writes
* default is NORMAL * default is NORMAL
*/ */
enum WriteConcern { enum MONGO_CLIENT_API WriteConcern {
W_NONE = 0 , // TODO: not every connection type fully supports this W_NONE = 0 , // TODO: not every connection type fully supports this
W_NORMAL = 1 W_NORMAL = 1
// TODO SAFE = 2 // TODO SAFE = 2
}; };
class BSONObj; class BSONObj;
class ScopedDbConnection; class ScopedDbConnection;
class DBClientCursor; class DBClientCursor;
class DBClientCursorBatchIterator; class DBClientCursorBatchIterator;
/** Represents a Mongo query expression. Typically one uses the QUERY( ...) macro to construct a Query object. /** Represents a Mongo query expression. Typically one uses the QUERY( ...) macro to construct a Query object.
Examples: Examples:
QUERY( "age" << 33 << "school" << "UCLA" ).sort("name") QUERY( "age" << 33 << "school" << "UCLA" ).sort("name")
QUERY( "age" << GT << 30 << LT << 50 ) QUERY( "age" << GT << 30 << LT << 50 )
*/ */
class Query { class MONGO_CLIENT_API Query {
public: public:
static const BSONField<BSONObj> ReadPrefField; static const BSONField<BSONObj> ReadPrefField;
static const BSONField<std::string> ReadPrefModeField; static const BSONField<std::string> ReadPrefModeField;
static const BSONField<BSONArray> ReadPrefTagsField; static const BSONField<BSONArray> ReadPrefTagsField;
BSONObj obj; BSONObj obj;
Query() : obj(BSONObj()) { } Query() : obj(BSONObj()) { }
Query(const BSONObj& b) : obj(b) { } Query(const BSONObj& b) : obj(b) { }
Query(const string &json); Query(const string &json);
Query(const char * json); Query(const char * json);
skipping to change at line 451 skipping to change at line 464
b.appendElements(obj); b.appendElements(obj);
b.append(fieldName, val); b.append(fieldName, val);
obj = b.obj(); obj = b.obj();
} }
}; };
/** /**
* Represents a full query description, including all options required for the query to be passed on * Represents a full query description, including all options required for the query to be passed on
* to other hosts * to other hosts
*/ */
class QuerySpec { class MONGO_CLIENT_API QuerySpec {
string _ns; string _ns;
int _ntoskip; int _ntoskip;
int _ntoreturn; int _ntoreturn;
int _options; int _options;
BSONObj _query; BSONObj _query;
BSONObj _fields; BSONObj _fields;
Query _queryObj; Query _queryObj;
public: public:
skipping to change at line 508 skipping to change at line 521
}; };
/** Typically one uses the QUERY(...) macro to construct a Query object . /** Typically one uses the QUERY(...) macro to construct a Query object .
Example: QUERY( "age" << 33 << "school" << "UCLA" ) Example: QUERY( "age" << 33 << "school" << "UCLA" )
*/ */
#define QUERY(x) ::mongo::Query( BSON(x) ) #define QUERY(x) ::mongo::Query( BSON(x) )
// Useful utilities for namespaces // Useful utilities for namespaces
/** @return the database name portion of an ns string */ /** @return the database name portion of an ns string */
string nsGetDB( const string &ns ); MONGO_CLIENT_API string nsGetDB( const string &ns );
/** @return the collection name portion of an ns string */ /** @return the collection name portion of an ns string */
string nsGetCollection( const string &ns ); MONGO_CLIENT_API string nsGetCollection( const string &ns );
/** /**
interface that handles communication with the db interface that handles communication with the db
*/ */
class DBConnector { class MONGO_CLIENT_API DBConnector {
public: public:
virtual ~DBConnector() {} virtual ~DBConnector() {}
/** actualServer is set to the actual server where they call went i f there was a choice (SlaveOk) */ /** actualServer is set to the actual server where they call went i f there was a choice (SlaveOk) */
virtual bool call( Message &toSend, Message &response, bool assertO k=true , string * actualServer = 0 ) = 0; virtual bool call( Message &toSend, Message &response, bool assertO k=true , string * actualServer = 0 ) = 0;
virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 ) = 0; virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 ) = 0;
virtual void sayPiggyBack( Message &toSend ) = 0; virtual void sayPiggyBack( Message &toSend ) = 0;
/* used by QueryOption_Exhaust. To use that your subclass must imp lement this. */ /* used by QueryOption_Exhaust. To use that your subclass must imp lement this. */
virtual bool recv( Message& m ) { verify(false); return false; } virtual bool recv( Message& m ) { verify(false); return false; }
// In general, for lazy queries, we'll need to say, recv, then chec kResponse // In general, for lazy queries, we'll need to say, recv, then chec kResponse
virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL ) { virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL ) {
if( retry ) *retry = false; if( targetHost ) *targetHost = ""; if( retry ) *retry = false; if( targetHost ) *targetHost = "";
} }
virtual bool lazySupported() const = 0; virtual bool lazySupported() const = 0;
}; };
/** /**
The interface that any db connection should implement The interface that any db connection should implement
*/ */
class DBClientInterface : boost::noncopyable { class MONGO_CLIENT_API DBClientInterface : boost::noncopyable {
public: public:
virtual auto_ptr<DBClientCursor> query(const string &ns, Query quer y, int nToReturn = 0, int nToSkip = 0, virtual auto_ptr<DBClientCursor> query(const string &ns, Query quer y, int nToReturn = 0, int nToSkip = 0,
const BSONObj *fieldsToRetur n = 0, int queryOptions = 0 , int batchSize = 0 ) = 0; const BSONObj *fieldsToRetur n = 0, int queryOptions = 0 , int batchSize = 0 ) = 0;
virtual void insert( const string &ns, BSONObj obj , int flags=0) = 0; virtual void insert( const string &ns, BSONObj obj , int flags=0) = 0;
virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0) = 0; virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0) = 0;
virtual void remove( const string &ns , Query query, bool justOne = 0 ) = 0; virtual void remove( const string &ns , Query query, bool justOne = 0 ) = 0;
skipping to change at line 578 skipping to change at line 591
virtual string getServerAddress() const = 0; virtual string getServerAddress() const = 0;
/** don't use this - called automatically by DBClientCursor for you */ /** don't use this - called automatically by DBClientCursor for you */
virtual auto_ptr<DBClientCursor> getMore( const string &ns, long lo ng cursorId, int nToReturn = 0, int options = 0 ) = 0; virtual auto_ptr<DBClientCursor> getMore( const string &ns, long lo ng cursorId, int nToReturn = 0, int options = 0 ) = 0;
}; };
/** /**
DB "commands" DB "commands"
Basically just invocations of connection.$cmd.findOne({...}); Basically just invocations of connection.$cmd.findOne({...});
*/ */
class DBClientWithCommands : public DBClientInterface { class MONGO_CLIENT_API DBClientWithCommands : public DBClientInterface {
set<string> _seenIndexes; set<string> _seenIndexes;
public: public:
/** controls how chatty the client is about network errors & such. See log.h */ /** controls how chatty the client is about network errors & such. See log.h */
logger::LogSeverity _logLevel; logger::LogSeverity _logLevel;
DBClientWithCommands() : _logLevel(logger::LogSeverity::Log()), DBClientWithCommands() : _logLevel(logger::LogSeverity::Log()),
_cachedAvailableOptions( (enum QueryOptions)0 ), _cachedAvailableOptions( (enum QueryOptions)0 ),
_haveCachedAvailableOptions(false) { } _haveCachedAvailableOptions(false) { }
/** helper function. run a simple command where the command expres sion is simply /** helper function. run a simple command where the command expres sion is simply
skipping to change at line 955 skipping to change at line 968
string genIndexName( const BSONObj& keys ); string genIndexName( const BSONObj& keys );
/** Erase / drop an entire database */ /** Erase / drop an entire database */
virtual bool dropDatabase(const string &dbname, BSONObj *info = 0) { virtual bool dropDatabase(const string &dbname, BSONObj *info = 0) {
bool ret = simpleCommand(dbname, info, "dropDatabase"); bool ret = simpleCommand(dbname, info, "dropDatabase");
resetIndexCache(); resetIndexCache();
return ret; return ret;
} }
virtual string toString() = 0; virtual string toString() const = 0;
/**
* A function type for runCommand hooking; the function takes a poi
nter
* to a BSONObjBuilder and returns nothing. The builder contains a
* runCommand BSON object.
* Once such a function is set as the runCommand hook, every time t
he DBClient
* processes a runCommand, the hook will be called just prior to se
nding it to the server.
*/
typedef boost::function<void(BSONObjBuilder*)> RunCommandHookFunc;
virtual void setRunCommandHook(RunCommandHookFunc func);
RunCommandHookFunc getRunCommandHook() const {
return _runCommandHook;
}
/**
* Similar to above, but for running a function on a command respon
se after a command
* has been run.
*/
typedef boost::function<void(const BSONObj&, const std::string&)> P
ostRunCommandHookFunc;
virtual void setPostRunCommandHook(PostRunCommandHookFunc func);
PostRunCommandHookFunc getPostRunCommandHook() const {
return _postRunCommandHook;
}
protected: protected:
/** if the result of a command is ok*/ /** if the result of a command is ok*/
bool isOk(const BSONObj&); bool isOk(const BSONObj&);
/** if the element contains a not master error */ /** if the element contains a not master error */
bool isNotMasterErrorString( const BSONElement& e ); bool isNotMasterErrorString( const BSONElement& e );
BSONObj _countCmd(const string &ns, const BSONObj& query, int optio ns, int limit, int skip ); BSONObj _countCmd(const string &ns, const BSONObj& query, int optio ns, int limit, int skip );
skipping to change at line 996 skipping to change at line 1032
/** /**
* Use the MONGODB-X509 protocol to authenticate as "username. The certificate details * Use the MONGODB-X509 protocol to authenticate as "username. The certificate details
* has already been communicated automatically as part of the conne ct call. * has already been communicated automatically as part of the conne ct call.
* Returns false on failure and set "errmsg". * Returns false on failure and set "errmsg".
*/ */
bool _authX509(const string&dbname, bool _authX509(const string&dbname,
const string &username, const string &username,
BSONObj *info); BSONObj *info);
/**
* These functions will be executed by the driver on runCommand cal
ls.
*/
RunCommandHookFunc _runCommandHook;
PostRunCommandHookFunc _postRunCommandHook;
private: private:
enum QueryOptions _cachedAvailableOptions; enum QueryOptions _cachedAvailableOptions;
bool _haveCachedAvailableOptions; bool _haveCachedAvailableOptions;
}; };
/** /**
abstract class that implements the core db operations abstract class that implements the core db operations
*/ */
class DBClientBase : public DBClientWithCommands, public DBConnector { class MONGO_CLIENT_API DBClientBase : public DBClientWithCommands, publ ic DBConnector {
protected: protected:
static AtomicInt64 ConnectionIdSequence; static AtomicInt64 ConnectionIdSequence;
long long _connectionId; // unique connection id for this connectio n long long _connectionId; // unique connection id for this connectio n
WriteConcern _writeConcern; WriteConcern _writeConcern;
int _minWireVersion; int _minWireVersion;
int _maxWireVersion; int _maxWireVersion;
public: public:
static const uint64_t INVALID_SOCK_CREATION_TIME; static const uint64_t INVALID_SOCK_CREATION_TIME;
DBClientBase() { DBClientBase() {
skipping to change at line 1129 skipping to change at line 1171
virtual double getSoTimeout() const = 0; virtual double getSoTimeout() const = 0;
virtual uint64_t getSockCreationMicroSec() const { virtual uint64_t getSockCreationMicroSec() const {
return INVALID_SOCK_CREATION_TIME; return INVALID_SOCK_CREATION_TIME;
} }
}; // DBClientBase }; // DBClientBase
class DBClientReplicaSet; class DBClientReplicaSet;
class ConnectException : public UserException { class MONGO_CLIENT_API ConnectException : public UserException {
public: public:
ConnectException(string msg) : UserException(9000,msg) { } ConnectException(string msg) : UserException(9000,msg) { }
}; };
/** /**
A basic connection to the database. A basic connection to the database.
This is the main entry point for talking to a simple Mongo setup This is the main entry point for talking to a simple Mongo setup
*/ */
class DBClientConnection : public DBClientBase { class MONGO_CLIENT_API DBClientConnection : public DBClientBase {
public: public:
using DBClientBase::query; using DBClientBase::query;
/** /**
@param _autoReconnect if true, automatically reconnect on a conn ection failure @param _autoReconnect if true, automatically reconnect on a conn ection failure
@param cp used by DBClientReplicaSet. You do not need to specif y this parameter @param cp used by DBClientReplicaSet. You do not need to specif y this parameter
@param timeout tcp timeout in seconds - this is for read/write, not connect. @param timeout tcp timeout in seconds - this is for read/write, not connect.
Connect timeout is fixed, but short, at 5 seconds. Connect timeout is fixed, but short, at 5 seconds.
*/ */
DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* c p=0, double so_timeout=0) : DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* c p=0, double so_timeout=0) :
skipping to change at line 1235 skipping to change at line 1277
/** /**
@return true if this connection is currently in a failed state. When autoreconnect is on, @return true if this connection is currently in a failed state. When autoreconnect is on,
a connection will transition back to an ok state after r econnecting. a connection will transition back to an ok state after r econnecting.
*/ */
bool isFailed() const { return _failed; } bool isFailed() const { return _failed; }
bool isStillConnected() { return p ? p->isStillConnected() : true; } bool isStillConnected() { return p ? p->isStillConnected() : true; }
MessagingPort& port() { verify(p); return *p; } MessagingPort& port() { verify(p); return *p; }
string toStringLong() const { string toString() const {
stringstream ss; stringstream ss;
ss << _serverString; ss << _serverString;
if ( !_serverAddrString.empty() ) ss << " (" << _serverAddrStri ng << ")";
if ( _failed ) ss << " failed"; if ( _failed ) ss << " failed";
return ss.str(); return ss.str();
} }
/** Returns the address of the server */
string toString() { return _serverString; }
string getServerAddress() const { return _serverString; } string getServerAddress() const { return _serverString; }
virtual void killCursor( long long cursorID ); virtual void killCursor( long long cursorID );
virtual bool callRead( Message& toSend , Message& response ) { retu rn call( toSend , response ); } virtual bool callRead( Message& toSend , Message& response ) { retu rn call( toSend , response ); }
virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 ); virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 );
virtual bool recv( Message& m ); virtual bool recv( Message& m );
virtual void checkResponse( const char *data, int nReturned, bool* retry = NULL, string* host = NULL ); virtual void checkResponse( const char *data, int nReturned, bool* retry = NULL, string* host = NULL );
virtual bool call( Message &toSend, Message &response, bool assertO k = true , string * actualServer = 0 ); virtual bool call( Message &toSend, Message &response, bool assertO k = true , string * actualServer = 0 );
virtual ConnectionString::ConnectionType type() const { return Conn ectionString::MASTER; } virtual ConnectionString::ConnectionType type() const { return Conn ectionString::MASTER; }
void setSoTimeout(double timeout); void setSoTimeout(double timeout);
skipping to change at line 1292 skipping to change at line 1332
virtual void _auth(const BSONObj& params); virtual void _auth(const BSONObj& params);
virtual void sayPiggyBack( Message &toSend ); virtual void sayPiggyBack( Message &toSend );
DBClientReplicaSet *clientSet; DBClientReplicaSet *clientSet;
boost::scoped_ptr<MessagingPort> p; boost::scoped_ptr<MessagingPort> p;
boost::scoped_ptr<SockAddr> server; boost::scoped_ptr<SockAddr> server;
bool _failed; bool _failed;
const bool autoReconnect; const bool autoReconnect;
Backoff autoReconnectBackoff; Backoff autoReconnectBackoff;
HostAndPort _server; // remember for reconnects HostAndPort _server; // remember for reconnects
string _serverString; string _serverString; // server host and port
string _serverAddrString; // resolved ip of server
void _checkConnection(); void _checkConnection();
// throws SocketException if in failed state and not reconnecting o r if waiting to reconnect // throws SocketException if in failed state and not reconnecting o r if waiting to reconnect
void checkConnection() { if( _failed ) _checkConnection(); } void checkConnection() { if( _failed ) _checkConnection(); }
map<string, BSONObj> authCache; map<string, BSONObj> authCache;
double _so_timeout; double _so_timeout;
bool _connect( string& errmsg ); bool _connect( string& errmsg );
static AtomicUInt _numConnections; static AtomicUInt _numConnections;
static bool _lazyKillCursor; // lazy means we piggy back kill curso rs on next op static bool _lazyKillCursor; // lazy means we piggy back kill curso rs on next op
#ifdef MONGO_SSL #ifdef MONGO_SSL
SSLManagerInterface* sslManager(); SSLManagerInterface* sslManager();
#endif #endif
}; };
/** pings server to check if it's up /** pings server to check if it's up
*/ */
bool serverAlive( const string &uri ); MONGO_CLIENT_API bool serverAlive( const string &uri );
DBClientBase * createDirectClient(); MONGO_CLIENT_API DBClientBase * createDirectClient();
BSONElement getErrField( const BSONObj& result ); MONGO_CLIENT_API BSONElement getErrField( const BSONObj& result );
bool hasErrField( const BSONObj& result ); MONGO_CLIENT_API bool hasErrField( const BSONObj& result );
inline std::ostream& operator<<( std::ostream &s, const Query &q ) { MONGO_CLIENT_API inline std::ostream& operator<<( std::ostream &s, cons t Query &q ) {
return s << q.toString(); return s << q.toString();
} }
} // namespace mongo } // namespace mongo
#include "mongo/client/dbclientcursor.h" #include "mongo/client/dbclientcursor.h"
 End of changes. 32 change blocks. 
32 lines changed or deleted 80 lines changed or added


 dbclientmockcursor.h   dbclientmockcursor.h 
skipping to change at line 21 skipping to change at line 21
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/client/dbclientcursor.h" #include "mongo/client/dbclientcursor.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
class DBClientMockCursor : public DBClientCursorInterface { class MONGO_CLIENT_API DBClientMockCursor : public DBClientCursorInterf ace {
public: public:
DBClientMockCursor( const BSONArray& mockCollection ) : _iter( mock Collection ) {} DBClientMockCursor( const BSONArray& mockCollection ) : _iter( mock Collection ) {}
virtual ~DBClientMockCursor() {} virtual ~DBClientMockCursor() {}
bool more() { return _iter.more(); } bool more() { return _iter.more(); }
BSONObj next() { return _iter.next().Obj(); } BSONObj next() { return _iter.next().Obj(); }
private: private:
BSONObjIterator _iter; BSONObjIterator _iter;
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 dbhelpers.h   dbhelpers.h 
skipping to change at line 43 skipping to change at line 43
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/db.h" #include "mongo/db/db.h"
#include "mongo/db/keypattern.h" #include "mongo/db/keypattern.h"
#include "mongo/s/range_arithmetic.h" #include "mongo/s/range_arithmetic.h"
namespace mongo { namespace mongo {
extern const BSONObj reverseNaturalObj; // {"$natural": -1 } extern const BSONObj reverseNaturalObj; // {"$natural": -1 }
class Collection;
class Cursor; class Cursor;
class CoveredIndexMatcher;
/** /**
* db helpers are helper functions and classes that let us easily manip ulate the local * db helpers are helper functions and classes that let us easily manip ulate the local
* database instance in-proc. * database instance in-proc.
* *
* all helpers assume locking is handled above them * all helpers assume locking is handled above them
*/ */
struct Helpers { struct Helpers {
class RemoveSaver; class RemoveSaver;
skipping to change at line 94 skipping to change at line 94
*/ */
static vector<BSONObj> findAll( const string& ns , const BSONObj& q uery ); static vector<BSONObj> findAll( const string& ns , const BSONObj& q uery );
/** /**
* @param foundIndex if passed in will be set to 1 if ns and index found * @param foundIndex if passed in will be set to 1 if ns and index found
* @return true if object found * @return true if object found
*/ */
static bool findById(Client&, const char *ns, BSONObj query, BSONOb j& result , static bool findById(Client&, const char *ns, BSONObj query, BSONOb j& result ,
bool * nsFound = 0 , bool * indexFound = 0 ); bool * nsFound = 0 , bool * indexFound = 0 );
/* uasserts if no _id index. /* TODO: should this move into Collection?
@return null loc if not found */ * uasserts if no _id index.
static DiskLoc findById(NamespaceDetails *d, BSONObj query); * @return null loc if not found */
static DiskLoc findById(Collection* collection, const BSONObj& quer
y);
/** Get/put the first (or last) object from a collection. Generall y only useful if the collection /** Get/put the first (or last) object from a collection. Generall y only useful if the collection
only ever has a single object -- which is a "singleton collecti on". only ever has a single object -- which is a "singleton collecti on".
You do not need to set the database (Context) before calling. You do not need to set the database (Context) before calling.
@return true if object exists. @return true if object exists.
*/ */
static bool getSingleton(const char *ns, BSONObj& result); static bool getSingleton(const char *ns, BSONObj& result);
static void putSingleton(const char *ns, BSONObj obj); static void putSingleton(const char *ns, BSONObj obj);
 End of changes. 3 change blocks. 
4 lines changed or deleted 6 lines changed or added


 dbtests.h   dbtests.h 
skipping to change at line 18 skipping to change at line 18
* it under the terms of the GNU Affero General Public License, version 3 , * it under the terms of the GNU Affero General Public License, version 3 ,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Licen se * You should have received a copy of the GNU Affero General Public Licen se
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certai
n
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Yo
u
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If y
ou
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/instance.h" #include "mongo/db/instance.h"
#include "mongo/unittest/unittest.h" #include "mongo/unittest/unittest.h"
using namespace mongo; using namespace mongo;
using namespace mongo::unittest; using namespace mongo::unittest;
using boost::shared_ptr; using boost::shared_ptr;
 End of changes. 1 change blocks. 
0 lines changed or deleted 17 lines changed or added


 delete.h   delete.h 
// delete.h // delete.h
/** /**
* Copyright (C) 2008 10gen Inc. * Copyright (C) 2008 10gen Inc.
* *
* This program is free software: you can redistribute it and/or modify * This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3 * it under the terms of the GNU Affero General Public License, version
, 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Licen * You should have received a copy of the GNU Affero General Public Lice
se nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
* *
* As a special exception, the copyright holders give permission to link * As a special exception, the copyright holders give permission to link
the the
* code of portions of this program with the OpenSSL library under certai * code of portions of this program with the OpenSSL library under certa
n in
* conditions as described in each individual source file and distribute * conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Yo * linked combinations including the program with the OpenSSL library. Y
u ou
* must comply with the GNU Affero General Public License in all respects * must comply with the GNU Affero General Public License in all respect
for s for
* all of the code used other than as permitted herein. If you modify fil * all of the code used other than as permitted herein. If you modify fi
e(s) le(s)
* with this exception, you may extend this exception to your version of * with this exception, you may extend this exception to your version of
the the
* file(s), but you are not obligated to do so. If you do not wish to do * file(s), but you are not obligated to do so. If you do not wish to do
so, so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de * exception statement from all source files in the program, then also d
lete elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
// If justOne is true, deletedId is set to the id of the deleted object . // If justOne is true, deletedId is set to the id of the deleted object .
long long deleteObjects(const StringData& ns, long long deleteObjects(const StringData& ns,
BSONObj pattern, BSONObj pattern,
bool justOne, bool justOne,
bool logop = false, bool logop = false,
bool god=false); bool god = false);
} }
 End of changes. 2 change blocks. 
37 lines changed or deleted 37 lines changed or added


 diskloc.h   diskloc.h 
skipping to change at line 167 skipping to change at line 167
* @returns a non const reference to this disk loc * @returns a non const reference to this disk loc
* This function explicitly signals we are writing and casts away c onst * This function explicitly signals we are writing and casts away c onst
*/ */
DiskLoc& writing() const; // see dur.h DiskLoc& writing() const; // see dur.h
/* Get the "thing" associated with this disk location. /* Get the "thing" associated with this disk location.
it is assumed the object is what you say it is -- you must assur e that it is assumed the object is what you say it is -- you must assur e that
(think of this as an unchecked type cast) (think of this as an unchecked type cast)
Note: set your Context first so that the database to which the d iskloc applies is known. Note: set your Context first so that the database to which the d iskloc applies is known.
*/ */
BSONObj obj() const; BSONObj obj() const; // TODO(ERH): remove
Record* rec() const; Record* rec() const; // TODO(ERH): remove
DeletedRecord* drec() const; DeletedRecord* drec() const; // TODO(ERH): remove
Extent* ext() const; Extent* ext() const; // TODO(ERH): remove
template< class V > template< class V >
const BtreeBucket<V> * btree() const; const BtreeBucket<V> * btree() const; // TODO(ERH): remove
// Explicitly signals we are writing and casts away const // Explicitly signals we are writing and casts away const
template< class V > template< class V >
BtreeBucket<V> * btreemod() const; BtreeBucket<V> * btreemod() const; // TODO(ERH): remove
/*DataFile& pdf() const;*/
/// members for Sorter /// members for Sorter
struct SorterDeserializeSettings {}; // unused struct SorterDeserializeSettings {}; // unused
void serializeForSorter(BufBuilder& buf) const { buf.appendStruct(* this); } void serializeForSorter(BufBuilder& buf) const { buf.appendStruct(* this); }
static DiskLoc deserializeForSorter(BufReader& buf, const SorterDes erializeSettings&) { static DiskLoc deserializeForSorter(BufReader& buf, const SorterDes erializeSettings&) {
return buf.read<DiskLoc>(); return buf.read<DiskLoc>();
} }
int memUsageForSorter() const { return sizeof(DiskLoc); } int memUsageForSorter() const { return sizeof(DiskLoc); }
DiskLoc getOwned() const { return *this; } DiskLoc getOwned() const { return *this; }
}; };
 End of changes. 3 change blocks. 
8 lines changed or deleted 6 lines changed or added


 distlock.h   distlock.h 
skipping to change at line 22 skipping to change at line 22
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "mongo/client/connpool.h" #include "mongo/client/connpool.h"
#include "mongo/client/export_macros.h"
#include "mongo/client/syncclusterconnection.h" #include "mongo/client/syncclusterconnection.h"
#define LOCK_TIMEOUT (15 * 60 * 1000) #define LOCK_TIMEOUT (15 * 60 * 1000)
#define LOCK_SKEW_FACTOR (30) #define LOCK_SKEW_FACTOR (30)
#define LOCK_PING (LOCK_TIMEOUT / LOCK_SKEW_FACTOR) #define LOCK_PING (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
#define MAX_LOCK_NET_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR) #define MAX_LOCK_NET_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
#define MAX_LOCK_CLOCK_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR) #define MAX_LOCK_CLOCK_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
#define NUM_LOCK_SKEW_CHECKS (3) #define NUM_LOCK_SKEW_CHECKS (3)
// The maximum clock skew we need to handle between config servers is // The maximum clock skew we need to handle between config servers is
// 2 * MAX_LOCK_NET_SKEW + MAX_LOCK_CLOCK_SKEW. // 2 * MAX_LOCK_NET_SKEW + MAX_LOCK_CLOCK_SKEW.
// Net effect of *this* clock being slow is effectively a multiplier on the max net skew // Net effect of *this* clock being slow is effectively a multiplier on the max net skew
// and a linear increase or decrease of the max clock skew. // and a linear increase or decrease of the max clock skew.
namespace mongo { namespace mongo {
/** /**
* Exception class to encapsulate exceptions while managing distributed locks * Exception class to encapsulate exceptions while managing distributed locks
*/ */
class LockException : public DBException { class MONGO_CLIENT_API LockException : public DBException {
public: public:
LockException( const char * msg , int code ) : DBException( msg, cod e ) {} LockException( const char * msg , int code ) : DBException( msg, cod e ) {}
LockException( const string& msg, int code ) : DBException( msg, cod e ) {} LockException( const string& msg, int code ) : DBException( msg, cod e ) {}
virtual ~LockException() throw() { } virtual ~LockException() throw() { }
}; };
/** /**
* Indicates an error in retrieving time values from remote servers. * Indicates an error in retrieving time values from remote servers.
*/ */
class TimeNotFoundException : public LockException { class MONGO_CLIENT_API TimeNotFoundException : public LockException {
public: public:
TimeNotFoundException( const char * msg , int code ) : LockExceptio n( msg, code ) {} TimeNotFoundException( const char * msg , int code ) : LockExceptio n( msg, code ) {}
TimeNotFoundException( const string& msg, int code ) : LockExceptio n( msg, code ) {} TimeNotFoundException( const string& msg, int code ) : LockExceptio n( msg, code ) {}
virtual ~TimeNotFoundException() throw() { } virtual ~TimeNotFoundException() throw() { }
}; };
/** /**
* The distributed lock is a configdb backed way of synchronizing syste m-wide tasks. A task must be identified by a * The distributed lock is a configdb backed way of synchronizing syste m-wide tasks. A task must be identified by a
* unique name across the system (e.g., "balancer"). A lock is taken by writing a document in the configdb's locks * unique name across the system (e.g., "balancer"). A lock is taken by writing a document in the configdb's locks
* collection with that name. * collection with that name.
* *
* To be maintained, each taken lock needs to be revalidated ("pinged") within a pre-established amount of time. This * To be maintained, each taken lock needs to be revalidated ("pinged") within a pre-established amount of time. This
* class does this maintenance automatically once a DistributedLock obj ect was constructed. * class does this maintenance automatically once a DistributedLock obj ect was constructed.
*/ */
class DistributedLock { class MONGO_CLIENT_API DistributedLock {
public: public:
static LabeledLevel logLvl; static LabeledLevel logLvl;
struct PingData { struct PingData {
PingData( const string& _id , Date_t _lastPing , Date_t _remote , OID _ts ) PingData( const string& _id , Date_t _lastPing , Date_t _remote , OID _ts )
: id(_id), lastPing(_lastPing), remote(_remote), ts(_ts){ : id(_id), lastPing(_lastPing), remote(_remote), ts(_ts){
} }
skipping to change at line 195 skipping to change at line 196
PingData getLastPing(){ return lastPings.getLastPing( _conn, _name ); } PingData getLastPing(){ return lastPings.getLastPing( _conn, _name ); }
// May or may not exist, depending on startup // May or may not exist, depending on startup
mongo::mutex _mutex; mongo::mutex _mutex;
string _threadId; string _threadId;
}; };
// Helper functions for tests, allows us to turn the creation of a lock pinger on and off. // Helper functions for tests, allows us to turn the creation of a lock pinger on and off.
// *NOT* thread-safe // *NOT* thread-safe
bool isLockPingerEnabled(); bool MONGO_CLIENT_API isLockPingerEnabled();
void setLockPingerEnabled(bool enabled); void MONGO_CLIENT_API setLockPingerEnabled(bool enabled);
class dist_lock_try { class MONGO_CLIENT_API dist_lock_try {
public: public:
dist_lock_try() : _lock(NULL), _got(false) {} dist_lock_try() : _lock(NULL), _got(false) {}
dist_lock_try( const dist_lock_try& that ) : _lock(that._lock), _got (that._got), _other(that._other) { dist_lock_try( const dist_lock_try& that ) : _lock(that._lock), _got (that._got), _other(that._other) {
_other.getOwned(); _other.getOwned();
// Make sure the lock ownership passes to this object, // Make sure the lock ownership passes to this object,
// so we only unlock once. // so we only unlock once.
((dist_lock_try&) that)._got = false; ((dist_lock_try&) that)._got = false;
skipping to change at line 279 skipping to change at line 280
bool _got; bool _got;
BSONObj _other; BSONObj _other;
string _why; string _why;
}; };
/** /**
* Scoped wrapper for a distributed lock acquisition attempt. One or m ore attempts to acquire * Scoped wrapper for a distributed lock acquisition attempt. One or m ore attempts to acquire
* the distributed lock are managed by this class, and the distributed lock is unlocked if * the distributed lock are managed by this class, and the distributed lock is unlocked if
* successfully acquired on object destruction. * successfully acquired on object destruction.
*/ */
class ScopedDistributedLock { class MONGO_CLIENT_API ScopedDistributedLock {
public: public:
ScopedDistributedLock(const ConnectionString& conn, const string& n ame); ScopedDistributedLock(const ConnectionString& conn, const string& n ame);
virtual ~ScopedDistributedLock(); virtual ~ScopedDistributedLock();
/** /**
* Tries once to obtain a lock, and can fail with an error message. * Tries once to obtain a lock, and can fail with an error message.
* *
* Subclasses of this lock can override this method (and are also r equired to call the base * Subclasses of this lock can override this method (and are also r equired to call the base
 End of changes. 7 change blocks. 
7 lines changed or deleted 8 lines changed or added


 document.h   document.h 
skipping to change at line 118 skipping to change at line 118
*/ */
size_t getApproximateSize() const; size_t getApproximateSize() const;
/** Compare two documents. /** Compare two documents.
* *
* BSON document field order is significant, so this just goes thr ough * BSON document field order is significant, so this just goes thr ough
* the fields in order. The comparison is done in roughly the sam e way * the fields in order. The comparison is done in roughly the sam e way
* as strings are compared, but comparing one field at a time inst ead * as strings are compared, but comparing one field at a time inst ead
* of one character at a time. * of one character at a time.
* *
* Note: This does not consider metadata when comparing documents.
*
* @returns an integer less than zero, zero, or an integer greater than * @returns an integer less than zero, zero, or an integer greater than
* zero, depending on whether lhs < rhs, lhs == rhs, or l hs > rhs * zero, depending on whether lhs < rhs, lhs == rhs, or l hs > rhs
* Warning: may return values other than -1, 0, or 1 * Warning: may return values other than -1, 0, or 1
*/ */
static int compare(const Document& lhs, const Document& rhs); static int compare(const Document& lhs, const Document& rhs);
string toString() const; string toString() const;
friend friend
ostream& operator << (ostream& out, const Document& doc) { return o ut << doc.toString(); } ostream& operator << (ostream& out, const Document& doc) { return o ut << doc.toString(); }
/** Calculate a hash value. /** Calculate a hash value.
* *
* Meant to be used to create composite hashes suitable for * Meant to be used to create composite hashes suitable for
* hashed container classes such as unordered_map. * hashed container classes such as unordered_map.
*/ */
void hash_combine(size_t &seed) const; void hash_combine(size_t &seed) const;
/// Add this document to the BSONObj under construction with the gi /**
ven BSONObjBuilder. * Add this document to the BSONObj under construction with the giv
en BSONObjBuilder.
* Does not include metadata.
*/
void toBson(BSONObjBuilder *pBsonObjBuilder) const; void toBson(BSONObjBuilder *pBsonObjBuilder) const;
BSONObj toBson() const; BSONObj toBson() const;
/**
* Like toBson, but includes metadata at the top-level.
* Output is parseable by fromBsonWithMetaData
*/
BSONObj toBsonWithMetaData() const;
/**
* Like Document(BSONObj) but treats top-level fields with special
names as metadata.
* Special field names are available as static constants on this cl
ass with names starting
* with metaField.
*/
static Document fromBsonWithMetaData(const BSONObj& bson);
// Support BSONObjBuilder and BSONArrayBuilder "stream" API // Support BSONObjBuilder and BSONArrayBuilder "stream" API
friend BSONObjBuilder& operator << (BSONObjBuilderValueStream& buil der, const Document& d); friend BSONObjBuilder& operator << (BSONObjBuilderValueStream& buil der, const Document& d);
/** Return the abstract Position of a field, suitable to pass to op erator[] or getField(). /** Return the abstract Position of a field, suitable to pass to op erator[] or getField().
* This can potentially save time if you need to refer to a field multiple times. * This can potentially save time if you need to refer to a field multiple times.
*/ */
Position positionOf(StringData fieldName) const { return storage(). findField(fieldName); } Position positionOf(StringData fieldName) const { return storage(). findField(fieldName); }
/** Clone a document. /** Clone a document.
* *
* This should only be called by MutableDocument and tests * This should only be called by MutableDocument and tests
* *
* The new document shares all the fields' values with the origina l. * The new document shares all the fields' values with the origina l.
* This is not a deep copy. Only the fields on the top-level docu ment * This is not a deep copy. Only the fields on the top-level docu ment
* are cloned. * are cloned.
*/ */
Document clone() const { return Document(storage().clone().get()); } Document clone() const { return Document(storage().clone().get()); }
static const StringData metaFieldTextScore; // "$textScore"
bool hasTextScore() const { return storage().hasTextScore(); }
double getTextScore() const { return storage().getTextScore(); }
/// members for Sorter /// members for Sorter
struct SorterDeserializeSettings {}; // unused struct SorterDeserializeSettings {}; // unused
void serializeForSorter(BufBuilder& buf) const; void serializeForSorter(BufBuilder& buf) const;
static Document deserializeForSorter(BufReader& buf, const SorterDe serializeSettings&); static Document deserializeForSorter(BufReader& buf, const SorterDe serializeSettings&);
int memUsageForSorter() const { return getApproximateSize(); } int memUsageForSorter() const { return getApproximateSize(); }
Document getOwned() const { return *this; } Document getOwned() const { return *this; }
/// only for testing /// only for testing
const void* getPtr() const { return _storage.get(); } const void* getPtr() const { return _storage.get(); }
skipping to change at line 341 skipping to change at line 363
void setNestedField(const FieldPath& dottedField, const Value& val) { void setNestedField(const FieldPath& dottedField, const Value& val) {
getNestedField(dottedField) = val; getNestedField(dottedField) = val;
} }
/// Takes positions vector from Document::getNestedField. All field s in path must exist. /// Takes positions vector from Document::getNestedField. All field s in path must exist.
MutableValue getNestedField(const vector<Position>& positions); MutableValue getNestedField(const vector<Position>& positions);
void setNestedField(const vector<Position>& positions, const Value& val) { void setNestedField(const vector<Position>& positions, const Value& val) {
getNestedField(positions) = val; getNestedField(positions) = val;
} }
/**
* Copies all metadata from source if it has any.
* Note: does not clear metadata from this.
*/
void copyMetaDataFrom(const Document& source) {
storage().copyMetaDataFrom(source.storage());
}
void setTextScore(double score) { storage().setTextScore(score); }
/** Convert to a read-only document and release reference. /** Convert to a read-only document and release reference.
* *
* Call this to indicate that you are done with this Document and will * Call this to indicate that you are done with this Document and will
* not be making further changes from this MutableDocument. * not be making further changes from this MutableDocument.
* *
* TODO: there are some optimizations that may make sense at freez e time. * TODO: there are some optimizations that may make sense at freez e time.
*/ */
Document freeze() { Document freeze() {
// This essentially moves _storage into a new Document by way o f temp. // This essentially moves _storage into a new Document by way o f temp.
Document ret; Document ret;
 End of changes. 5 change blocks. 
2 lines changed or deleted 36 lines changed or added


 document_internal.h   document_internal.h 
skipping to change at line 168 skipping to change at line 168
/// Storage class used by both Document and MutableDocument /// Storage class used by both Document and MutableDocument
class DocumentStorage : public RefCountable { class DocumentStorage : public RefCountable {
public: public:
// Note: default constructor should zero-init to support emptyDoc() // Note: default constructor should zero-init to support emptyDoc()
DocumentStorage() : _buffer(NULL) DocumentStorage() : _buffer(NULL)
, _bufferEnd(NULL) , _bufferEnd(NULL)
, _usedBytes(0) , _usedBytes(0)
, _numFields(0) , _numFields(0)
, _hashTabMask(0) , _hashTabMask(0)
, _hasTextScore(false)
, _textScore(0)
{} {}
~DocumentStorage(); ~DocumentStorage();
static const DocumentStorage& emptyDoc() { static const DocumentStorage& emptyDoc() {
static const char emptyBytes[sizeof(DocumentStorage)] = {0}; static const char emptyBytes[sizeof(DocumentStorage)] = {0};
return *reinterpret_cast<const DocumentStorage*>(emptyBytes); return *reinterpret_cast<const DocumentStorage*>(emptyBytes);
} }
size_t size() const { size_t size() const {
// can't use _numFields because it includes removed Fields // can't use _numFields because it includes removed Fields
skipping to change at line 239 skipping to change at line 241
return DocumentStorageIterator(_firstElement, end(), true); return DocumentStorageIterator(_firstElement, end(), true);
} }
/// Shallow copy of this. Caller owns memory. /// Shallow copy of this. Caller owns memory.
intrusive_ptr<DocumentStorage> clone() const; intrusive_ptr<DocumentStorage> clone() const;
size_t allocatedBytes() const { size_t allocatedBytes() const {
return !_buffer ? 0 : (_bufferEnd - _buffer + hashTabBytes()); return !_buffer ? 0 : (_bufferEnd - _buffer + hashTabBytes());
} }
/**
* Copies all metadata from source if it has any.
* Note: does not clear metadata from this.
*/
void copyMetaDataFrom(const DocumentStorage& source) {
if (source.hasTextScore()) {
setTextScore(source.getTextScore());
}
}
bool hasTextScore() const { return _hasTextScore; }
double getTextScore() const { return _textScore; }
void setTextScore(double score) {
_hasTextScore = true;
_textScore = score;
}
private: private:
/// Same as lastElement->next() or firstElement() if empty. /// Same as lastElement->next() or firstElement() if empty.
const ValueElement* end() const { return _firstElement->plusBytes(_ usedBytes); } const ValueElement* end() const { return _firstElement->plusBytes(_ usedBytes); }
/// Allocates space in _buffer. Copies existing data if there is an y. /// Allocates space in _buffer. Copies existing data if there is an y.
void alloc(unsigned newSize); void alloc(unsigned newSize);
/// Call after adding field to _buffer and increasing _numFields /// Call after adding field to _buffer and increasing _numFields
void addFieldToHashTable(Position pos); void addFieldToHashTable(Position pos);
skipping to change at line 307 skipping to change at line 326
union { union {
// pointer to "end" of _buffer element space and start of hash table (same position) // pointer to "end" of _buffer element space and start of hash table (same position)
char* _bufferEnd; char* _bufferEnd;
Position* _hashTab; // table lazily initialized once _numFields == HASH_TAB_MIN Position* _hashTab; // table lazily initialized once _numFields == HASH_TAB_MIN
}; };
unsigned _usedBytes; // position where next field would start unsigned _usedBytes; // position where next field would start
unsigned _numFields; // this includes removed fields unsigned _numFields; // this includes removed fields
unsigned _hashTabMask; // equal to hashTabBuckets()-1 but used more often unsigned _hashTabMask; // equal to hashTabBuckets()-1 but used more often
bool _hasTextScore; // When adding more metadata fields, this shoul
d become a bitvector
double _textScore;
// When adding a field, make sure to update clone() method // When adding a field, make sure to update clone() method
}; };
} }
 End of changes. 3 change blocks. 
0 lines changed or deleted 23 lines changed or added


 document_source.h   document_source.h 
skipping to change at line 41 skipping to change at line 41
#include "mongo/pch.h" #include "mongo/pch.h"
#include <boost/optional.hpp> #include <boost/optional.hpp>
#include <boost/unordered_map.hpp> #include <boost/unordered_map.hpp>
#include <deque> #include <deque>
#include "mongo/db/clientcursor.h" #include "mongo/db/clientcursor.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/matcher.h" #include "mongo/db/matcher.h"
#include "mongo/db/pipeline/document.h" #include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/dependencies.h"
#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression.h"
#include "mongo/db/pipeline/value.h" #include "mongo/db/pipeline/value.h"
#include "mongo/db/projection.h" #include "mongo/db/projection.h"
#include "mongo/db/sorter/sorter.h" #include "mongo/db/sorter/sorter.h"
#include "mongo/s/shard.h" #include "mongo/s/shard.h"
#include "mongo/s/strategy.h" #include "mongo/s/strategy.h"
#include "mongo/util/intrusive_counter.h" #include "mongo/util/intrusive_counter.h"
namespace mongo { namespace mongo {
skipping to change at line 78 skipping to change at line 79
/** /**
* Inform the source that it is no longer needed and may release it s resources. After * Inform the source that it is no longer needed and may release it s resources. After
* dispose() is called the source must still be able to handle iter ation requests, but may * dispose() is called the source must still be able to handle iter ation requests, but may
* become eof(). * become eof().
* NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will * NOTE: For proper mutex yielding, dispose() must be called on any DocumentSource that will
* not be advanced until eof(), see SERVER-6123. * not be advanced until eof(), see SERVER-6123.
*/ */
virtual void dispose(); virtual void dispose();
/** /**
* See ClientCursor::kill()
*/
virtual void kill();
/**
Get the source's name. Get the source's name.
@returns the string name of the source as a constant string; @returns the string name of the source as a constant string;
this is static, and there's no need to worry about adopting it this is static, and there's no need to worry about adopting it
*/ */
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
/** /**
Set the underlying source this source should use to get Documents Set the underlying source this source should use to get Documents
from. from.
skipping to change at line 131 skipping to change at line 137
results, first coalesce compatible sources using coalesce(). results, first coalesce compatible sources using coalesce().
This is intended for any operations that include expressions, and This is intended for any operations that include expressions, and
provides a hook for those to optimize those operations. provides a hook for those to optimize those operations.
The default implementation is to do nothing. The default implementation is to do nothing.
*/ */
virtual void optimize(); virtual void optimize();
enum GetDepsReturn { enum GetDepsReturn {
NOT_SUPPORTED, // This means the set should be ignored and the NOT_SUPPORTED = 0x0, // The full object and all metadata may be
full object is required. required
EXHAUSTIVE, // This means that everything needed should be in t SEE_NEXT = 0x1, // Later stages could need either fields or met
he set adata
SEE_NEXT, // Add the next Source's deps to the set EXHAUSTIVE_FIELDS = 0x2, // Later stages won't need more fields
from input
EXHAUSTIVE_META = 0x4, // Later stages won't need more metadata
from input
EXHAUSTIVE_ALL = EXHAUSTIVE_FIELDS | EXHAUSTIVE_META, // Later
stages won't need either
}; };
/** Get the fields this operation needs to do its job. /**
* Deps should be in "a.b.c" notation * Get the dependencies this operation needs to do its job.
* An empty string in deps means the whole document is needed.
*
* @param deps results are added here. NOT CLEARED
*/ */
virtual GetDepsReturn getDependencies(set<string>& deps) const { virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
return NOT_SUPPORTED; return NOT_SUPPORTED;
} }
/** This takes dependencies from getDependencies and
* returns a projection that includes all of them
*/
static BSONObj depsToProjection(const set<string>& deps);
/** These functions take the same input as depsToProjection but are
able to
* produce a Document from a BSONObj with the needed fields much f
aster.
*/
typedef Document ParsedDeps; // See implementation for structure
static ParsedDeps parseDeps(const set<string>& deps);
static Document documentFromBsonWithDeps(const BSONObj& object, con
st ParsedDeps& deps);
/** /**
* In the default case, serializes the DocumentSource and adds it t o the vector<Value>. * In the default case, serializes the DocumentSource and adds it t o the vector<Value>.
* *
* A subclass may choose to overwrite this, rather than serialize, * A subclass may choose to overwrite this, rather than serialize,
* if it should output multiple stages (eg, $sort sometimes also ou tputs a $limit). * if it should output multiple stages (eg, $sort sometimes also ou tputs a $limit).
*/ */
virtual void serializeToArray(vector<Value>& array, bool explain = false) const; virtual void serializeToArray(vector<Value>& array, bool explain = false) const;
/// Returns true if doesn't require an input source (most DocumentS ources do). /// Returns true if doesn't require an input source (most DocumentS ources do).
skipping to change at line 355 skipping to change at line 348
public DocumentSource { public DocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual ~DocumentSourceCursor(); virtual ~DocumentSourceCursor();
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual void setSource(DocumentSource *pSource); virtual void setSource(DocumentSource *pSource);
virtual bool coalesce(const intrusive_ptr<DocumentSource>& nextSour ce); virtual bool coalesce(const intrusive_ptr<DocumentSource>& nextSour ce);
virtual bool isValidInitialSource() const { return true; } virtual bool isValidInitialSource() const { return true; }
/**
* Release the Cursor and the read lock it requires, but without ch
anging the other data.
* Releasing the lock is required for proper concurrency, see SERVE
R-6123. This
* functionality is also used by the explain version of pipeline ex
ecution.
*/
virtual void dispose(); virtual void dispose();
virtual void kill();
/** /**
* Create a document source based on a passed-in cursor. * Create a document source based on a passed-in cursor.
* *
* This is usually put at the beginning of a chain of document sour ces * This is usually put at the beginning of a chain of document sour ces
* in order to fetch data from the database. * in order to fetch data from the database.
* *
* The DocumentSource takes ownership of the cursor and will destro y it * The DocumentSource takes ownership of the cursor and will destro y it
* when the DocumentSource is finished with the cursor, if it hasn' t * when the DocumentSource is finished with the cursor, if it hasn' t
* already been destroyed. * already been destroyed.
skipping to change at line 383 skipping to change at line 371
* @param ns the namespace the cursor is over * @param ns the namespace the cursor is over
* @param cursorId the id of the cursor to use * @param cursorId the id of the cursor to use
* @param pExpCtx the expression context for the pipeline * @param pExpCtx the expression context for the pipeline
*/ */
static intrusive_ptr<DocumentSourceCursor> create( static intrusive_ptr<DocumentSourceCursor> create(
const string& ns, const string& ns,
CursorId cursorId, CursorId cursorId,
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
/* /*
Record the namespace. Required for explain.
@param namespace the namespace
*/
/*
Record the query that was specified for the cursor this wraps, if Record the query that was specified for the cursor this wraps, if
any. any.
This should be captured after any optimizations are applied to This should be captured after any optimizations are applied to
the pipeline so that it reflects what is really used. the pipeline so that it reflects what is really used.
This gets used for explain output. This gets used for explain output.
@param pBsonObj the query to record @param pBsonObj the query to record
*/ */
skipping to change at line 414 skipping to change at line 396
This should be captured after any optimizations are applied to This should be captured after any optimizations are applied to
the pipeline so that it reflects what is really used. the pipeline so that it reflects what is really used.
This gets used for explain output. This gets used for explain output.
@param pBsonObj the sort to record @param pBsonObj the sort to record
*/ */
void setSort(const BSONObj& sort) { _sort = sort; } void setSort(const BSONObj& sort) { _sort = sort; }
void setProjection(const BSONObj& projection, const ParsedDeps& dep /**
s); * Informs this object of projection and dependency information.
*
* @param projection A projection specification describing the fiel
ds needed by the rest of
* the pipeline.
* @param deps The output of DepsTracker::toParsedDeps
*/
void setProjection(const BSONObj& projection, const boost::optional
<ParsedDeps>& deps);
/// returns -1 for no limit /// returns -1 for no limit
long long getLimit() const; long long getLimit() const;
private: private:
DocumentSourceCursor( DocumentSourceCursor(
const string& ns, const string& ns,
CursorId cursorId, CursorId cursorId,
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
void loadBatch(); void loadBatch();
std::deque<Document> _currentBatch; std::deque<Document> _currentBatch;
// BSONObj members must outlive _projection and cursor. // BSONObj members must outlive _projection and cursor.
BSONObj _query; BSONObj _query;
BSONObj _sort; BSONObj _sort;
shared_ptr<Projection> _projection; // shared with pClientCursor BSONObj _projection;
ParsedDeps _dependencies; boost::optional<ParsedDeps> _dependencies;
intrusive_ptr<DocumentSourceLimit> _limit; intrusive_ptr<DocumentSourceLimit> _limit;
long long _docsAddedToBatches; // for _limit enforcement long long _docsAddedToBatches; // for _limit enforcement
string ns; // namespace string _ns; // namespace
CursorId _cursorId; CursorId _cursorId;
CollectionMetadataPtr _collMetadata; bool _killed;
bool canUseCoveredIndex(ClientCursor* cursor) const;
/*
Yield the cursor sometimes.
If the state of the world changed during the yield such that we
are unable to continue execution of the query, this will release
the
client cursor, and throw an error. NOTE This differs from the
behavior of most other operations, see SERVER-2454.
*/
void yieldSometimes(ClientCursor* cursor);
}; };
class DocumentSourceGroup : public DocumentSource class DocumentSourceGroup : public DocumentSource
, public SplittableDocumentSource { , public SplittableDocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual void optimize(); virtual void optimize();
virtual GetDepsReturn getDependencies(set<string>& deps) const; virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
virtual void dispose(); virtual void dispose();
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
/** /**
Create a new grouping DocumentSource. Create a new grouping DocumentSource.
@param pExpCtx the expression context for the pipeline @param pExpCtx the expression context for the pipeline
@returns the DocumentSource @returns the DocumentSource
*/ */
static intrusive_ptr<DocumentSourceGroup> create( static intrusive_ptr<DocumentSourceGroup> create(
skipping to change at line 590 skipping to change at line 567
Accumulators _currentAccumulators; Accumulators _currentAccumulators;
}; };
class DocumentSourceMatch : public DocumentSource { class DocumentSourceMatch : public DocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual bool coalesce(const intrusive_ptr<DocumentSource>& nextSour ce); virtual bool coalesce(const intrusive_ptr<DocumentSource>& nextSour ce);
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual void setSource(DocumentSource* Source);
/** /**
Create a filter. Create a filter.
@param pBsonElement the raw BSON specification for the filter @param pBsonElement the raw BSON specification for the filter
@returns the filter @returns the filter
*/ */
static intrusive_ptr<DocumentSource> createFromBson( static intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, BSONElement elem,
const intrusive_ptr<ExpressionContext> &pCtx); const intrusive_ptr<ExpressionContext> &pCtx);
skipping to change at line 618 skipping to change at line 596
* *
* To be safe to promote, removing a field from a document to be m atched must not cause * To be safe to promote, removing a field from a document to be m atched must not cause
* that document to be accepted when it would otherwise be rejecte d. As an example, * that document to be accepted when it would otherwise be rejecte d. As an example,
* {name: {$ne: "bob smith"}} accepts documents without a name fie ld, which means that * {name: {$ne: "bob smith"}} accepts documents without a name fie ld, which means that
* running this filter before a redact that would remove the name field would leak * running this filter before a redact that would remove the name field would leak
* information. On the other hand, {age: {$gt:5}} is ok because it doesn't accept documents * information. On the other hand, {age: {$gt:5}} is ok because it doesn't accept documents
* that have had their age field removed. * that have had their age field removed.
*/ */
BSONObj redactSafePortion() const; BSONObj redactSafePortion() const;
static bool isTextQuery(const BSONObj& query);
bool isTextQuery() const { return _isTextQuery; }
private: private:
DocumentSourceMatch(const BSONObj &query, DocumentSourceMatch(const BSONObj &query,
const intrusive_ptr<ExpressionContext> &pExpCtx); const intrusive_ptr<ExpressionContext> &pExpCtx);
scoped_ptr<Matcher> matcher; scoped_ptr<Matcher> matcher;
bool _isTextQuery;
}; };
class DocumentSourceMergeCursors : class DocumentSourceMergeCursors :
public DocumentSource { public DocumentSource {
public: public:
typedef vector<pair<ConnectionString, CursorId> > CursorIds; typedef vector<pair<ConnectionString, CursorId> > CursorIds;
// virtuals from DocumentSource // virtuals from DocumentSource
boost::optional<Document> getNext(); boost::optional<Document> getNext();
virtual void setSource(DocumentSource *pSource); virtual void setSource(DocumentSource *pSource);
skipping to change at line 691 skipping to change at line 673
class DocumentSourceOut : public DocumentSource class DocumentSourceOut : public DocumentSource
, public SplittableDocumentSource , public SplittableDocumentSource
, public DocumentSourceNeedsMongod { , public DocumentSourceNeedsMongod {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual ~DocumentSourceOut(); virtual ~DocumentSourceOut();
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
// Virtuals for SplittableDocumentSource // Virtuals for SplittableDocumentSource
virtual intrusive_ptr<DocumentSource> getShardSource() { return NUL L; } virtual intrusive_ptr<DocumentSource> getShardSource() { return NUL L; }
virtual intrusive_ptr<DocumentSource> getMergeSource() { return thi s; } virtual intrusive_ptr<DocumentSource> getMergeSource() { return thi s; }
const NamespaceString& getOutputNs() const { return _outputNs; } const NamespaceString& getOutputNs() const { return _outputNs; }
/** /**
Create a document source for output and pass-through. Create a document source for output and pass-through.
skipping to change at line 738 skipping to change at line 721
class DocumentSourceProject : class DocumentSourceProject :
public DocumentSource { public DocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual void optimize(); virtual void optimize();
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual GetDepsReturn getDependencies(set<string>& deps) const; virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
/** /**
Create a new projection DocumentSource from BSON. Create a new projection DocumentSource from BSON.
This is a convenience for directly handling BSON, and relies on t he This is a convenience for directly handling BSON, and relies on t he
above methods. above methods.
@param pBsonElement the BSONElement with an object named $project @param pBsonElement the BSONElement with an object named $project
@param pExpCtx the expression context for the pipeline @param pExpCtx the expression context for the pipeline
@returns the created projection @returns the created projection
skipping to change at line 812 skipping to change at line 795
class DocumentSourceSort : public DocumentSource class DocumentSourceSort : public DocumentSource
, public SplittableDocumentSource { , public SplittableDocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual void serializeToArray(vector<Value>& array, bool explain = false) const; virtual void serializeToArray(vector<Value>& array, bool explain = false) const;
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce);
virtual void dispose(); virtual void dispose();
virtual GetDepsReturn getDependencies(set<string>& deps) const; virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
virtual intrusive_ptr<DocumentSource> getShardSource(); virtual intrusive_ptr<DocumentSource> getShardSource();
virtual intrusive_ptr<DocumentSource> getMergeSource(); virtual intrusive_ptr<DocumentSource> getMergeSource();
/** /**
Add sort key field. Add sort key field.
Adds a sort key field to the key being built up. A concatenated Adds a sort key field to the key being built up. A concatenated
key is built up by calling this repeatedly. key is built up by calling this repeatedly.
@param fieldPath the field path to the key component @param fieldPath the field path to the key component
@param ascending if true, use the key for an ascending sort, @param ascending if true, use the key for an ascending sort,
otherwise, use it for descending otherwise, use it for descending
*/ */
void addKey(const string &fieldPath, bool ascending); void addKey(const string &fieldPath, bool ascending);
/// Write out a Document whose contents are the sort key. /// Write out a Document whose contents are the sort key.
Document serializeSortKey() const; Document serializeSortKey(bool explain) const;
/** /**
Create a sorting DocumentSource from BSON. Create a sorting DocumentSource from BSON.
This is a convenience method that uses the above, and operates on This is a convenience method that uses the above, and operates on
a BSONElement that has been deteremined to be an Object with an a BSONElement that has been deteremined to be an Object with an
element named $group. element named $group.
@param pBsonElement the BSONELement that defines the group @param pBsonElement the BSONELement that defines the group
@param pExpCtx the expression context for the pipeline @param pExpCtx the expression context for the pipeline
skipping to change at line 887 skipping to change at line 870
// These are used to merge pre-sorted results from a DocumentSource MergeCursors or a // These are used to merge pre-sorted results from a DocumentSource MergeCursors or a
// DocumentSourceCommandShards depending on whether we have finishe d upgrading to 2.6 or // DocumentSourceCommandShards depending on whether we have finishe d upgrading to 2.6 or
// not. // not.
class IteratorFromCursor; class IteratorFromCursor;
class IteratorFromBsonArray; class IteratorFromBsonArray;
void populateFromCursors(const vector<DBClientCursor*>& cursors); void populateFromCursors(const vector<DBClientCursor*>& cursors);
void populateFromBsonArrays(const vector<BSONArray>& arrays); void populateFromBsonArrays(const vector<BSONArray>& arrays);
/* these two parallel each other */ /* these two parallel each other */
typedef vector<intrusive_ptr<ExpressionFieldPath> > SortPaths; typedef vector<intrusive_ptr<Expression> > SortKey;
SortPaths vSortKey; SortKey vSortKey;
vector<char> vAscending; // used like vector<bool> but without spec ialization vector<char> vAscending; // used like vector<bool> but without spec ialization
/// Extracts the fields in vSortKey from the Document; /// Extracts the fields in vSortKey from the Document;
Value extractKey(const Document& d) const; Value extractKey(const Document& d) const;
/// Compare two Values according to the specified sort key. /// Compare two Values according to the specified sort key.
int compare(const Value& lhs, const Value& rhs) const; int compare(const Value& lhs, const Value& rhs) const;
typedef Sorter<Value, Document> MySorter; typedef Sorter<Value, Document> MySorter;
skipping to change at line 926 skipping to change at line 909
class DocumentSourceLimit : public DocumentSource class DocumentSourceLimit : public DocumentSource
, public SplittableDocumentSource { , public SplittableDocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce);
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual GetDepsReturn getDependencies(set<string>& deps) const { virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
return SEE_NEXT; // This doesn't affect needed fields return SEE_NEXT; // This doesn't affect needed fields
} }
/** /**
Create a new limiting DocumentSource. Create a new limiting DocumentSource.
@param pExpCtx the expression context for the pipeline @param pExpCtx the expression context for the pipeline
@returns the DocumentSource @returns the DocumentSource
*/ */
static intrusive_ptr<DocumentSourceLimit> create( static intrusive_ptr<DocumentSourceLimit> create(
skipping to change at line 982 skipping to change at line 965
class DocumentSourceSkip : public DocumentSource class DocumentSourceSkip : public DocumentSource
, public SplittableDocumentSource { , public SplittableDocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce);
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual GetDepsReturn getDependencies(set<string>& deps) const { virtual GetDepsReturn getDependencies(DepsTracker* deps) const {
return SEE_NEXT; // This doesn't affect needed fields return SEE_NEXT; // This doesn't affect needed fields
} }
/** /**
Create a new skipping DocumentSource. Create a new skipping DocumentSource.
@param pExpCtx the expression context @param pExpCtx the expression context
@returns the DocumentSource @returns the DocumentSource
*/ */
static intrusive_ptr<DocumentSourceSkip> create( static intrusive_ptr<DocumentSourceSkip> create(
skipping to change at line 1035 skipping to change at line 1018
}; };
class DocumentSourceUnwind : class DocumentSourceUnwind :
public DocumentSource { public DocumentSource {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
virtual GetDepsReturn getDependencies(set<string>& deps) const; virtual GetDepsReturn getDependencies(DepsTracker* deps) const;
/** /**
Create a new projection DocumentSource from BSON. Create a new projection DocumentSource from BSON.
This is a convenience for directly handling BSON, and relies on t he This is a convenience for directly handling BSON, and relies on t he
above methods. above methods.
@param pBsonElement the BSONElement with an object named $project @param pBsonElement the BSONElement with an object named $project
@param pExpCtx the expression context for the pipeline @param pExpCtx the expression context for the pipeline
@returns the created projection @returns the created projection
skipping to change at line 1074 skipping to change at line 1057
scoped_ptr<Unwinder> _unwinder; scoped_ptr<Unwinder> _unwinder;
}; };
class DocumentSourceGeoNear : public DocumentSource class DocumentSourceGeoNear : public DocumentSource
, public SplittableDocumentSource , public SplittableDocumentSource
, public DocumentSourceNeedsMongod { , public DocumentSourceNeedsMongod {
public: public:
// virtuals from DocumentSource // virtuals from DocumentSource
virtual boost::optional<Document> getNext(); virtual boost::optional<Document> getNext();
virtual const char *getSourceName() const; virtual const char *getSourceName() const;
virtual void setSource(DocumentSource *pSource); // errors out sinc e this must be first virtual void setSource(DocumentSource *pSource);
virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce); virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSou rce);
virtual bool isValidInitialSource() const { return true; } virtual bool isValidInitialSource() const { return true; }
virtual Value serialize(bool explain = false) const; virtual Value serialize(bool explain = false) const;
// Virtuals for SplittableDocumentSource // Virtuals for SplittableDocumentSource
virtual intrusive_ptr<DocumentSource> getShardSource(); virtual intrusive_ptr<DocumentSource> getShardSource();
virtual intrusive_ptr<DocumentSource> getMergeSource(); virtual intrusive_ptr<DocumentSource> getMergeSource();
static intrusive_ptr<DocumentSource> createFromBson( static intrusive_ptr<DocumentSource> createFromBson(
BSONElement elem, BSONElement elem,
 End of changes. 26 change blocks. 
70 lines changed or deleted 50 lines changed or added


 dur.h   dur.h 
skipping to change at line 172 skipping to change at line 172
/** Commits pending changes, flushes all changes to main data /** Commits pending changes, flushes all changes to main data
files, then removes the journal. files, then removes the journal.
This is useful as a "barrier" to ensure that writes before this This is useful as a "barrier" to ensure that writes before this
call will never go through recovery and be applied to files call will never go through recovery and be applied to files
that have had changes made after this call applied. that have had changes made after this call applied.
*/ */
virtual void syncDataAndTruncateJournal() = 0; virtual void syncDataAndTruncateJournal() = 0;
virtual bool isDurable() const = 0;
static DurableInterface& getDur() { return *_impl; } static DurableInterface& getDur() { return *_impl; }
private: private:
/** Intentionally unimplemented method. /** Intentionally unimplemented method.
It's very easy to manipulate Record::data open ended. Thus a call to writing(Record*) is suspect. It's very easy to manipulate Record::data open ended. Thus a call to writing(Record*) is suspect.
This will override the templated version and yield an unresolv ed external. This will override the templated version and yield an unresolv ed external.
*/ */
Record* writing(Record* r); Record* writing(Record* r);
/** Intentionally unimplemented method. BtreeBuckets are alloca ted in buffers larger than sizeof( BtreeBucket ). */ /** Intentionally unimplemented method. BtreeBuckets are alloca ted in buffers larger than sizeof( BtreeBucket ). */
// BtreeBucket* writing( BtreeBucket* ); // BtreeBucket* writing( BtreeBucket* );
skipping to change at line 205 skipping to change at line 207
void* writingPtr(void *x, unsigned len); void* writingPtr(void *x, unsigned len);
void* writingAtOffset(void *buf, unsigned ofs, unsigned len) { return buf; } void* writingAtOffset(void *buf, unsigned ofs, unsigned len) { return buf; }
void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges) { return buf; } void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges) { return buf; }
void declareWriteIntent(void *, unsigned); void declareWriteIntent(void *, unsigned);
void createdFile(const std::string& filename, unsigned long lon g len) { } void createdFile(const std::string& filename, unsigned long lon g len) { }
bool awaitCommit() { return false; } bool awaitCommit() { return false; }
bool commitNow() { return false; } bool commitNow() { return false; }
bool commitIfNeeded(bool) { return false; } bool commitIfNeeded(bool) { return false; }
bool aCommitIsNeeded() const { return false; } bool aCommitIsNeeded() const { return false; }
void syncDataAndTruncateJournal() {} void syncDataAndTruncateJournal() {}
bool isDurable() const { return false; }
}; };
class DurableImpl : public DurableInterface { class DurableImpl : public DurableInterface {
bool _aCommitIsNeeded(); bool _aCommitIsNeeded();
void* writingPtr(void *x, unsigned len); void* writingPtr(void *x, unsigned len);
void* writingAtOffset(void *buf, unsigned ofs, unsigned len); void* writingAtOffset(void *buf, unsigned ofs, unsigned len);
void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges); void* writingRangesAtOffsets(void *buf, const vector< pair< lon g long, unsigned > > &ranges);
void declareWriteIntent(void *, unsigned); void declareWriteIntent(void *, unsigned);
void createdFile(const std::string& filename, unsigned long lon g len); void createdFile(const std::string& filename, unsigned long lon g len);
bool awaitCommit(); bool awaitCommit();
bool commitNow(); bool commitNow();
bool aCommitIsNeeded() const; bool aCommitIsNeeded() const;
bool commitIfNeeded(bool); bool commitIfNeeded(bool);
void syncDataAndTruncateJournal(); void syncDataAndTruncateJournal();
bool isDurable() const { return true; }
}; };
} // namespace dur } // namespace dur
inline dur::DurableInterface& getDur() { return dur::DurableInterface:: getDur(); } inline dur::DurableInterface& getDur() { return dur::DurableInterface:: getDur(); }
/** declare that we are modifying a diskloc and this is a datafile writ e. */ /** declare that we are modifying a diskloc and this is a datafile writ e. */
inline DiskLoc& DiskLoc::writing() const { return getDur().writingDiskL oc(*const_cast< DiskLoc * >( this )); } inline DiskLoc& DiskLoc::writing() const { return getDur().writingDiskL oc(*const_cast< DiskLoc * >( this )); }
} }
 End of changes. 3 change blocks. 
0 lines changed or deleted 4 lines changed or added


 elapsed_tracker.h   elapsed_tracker.h 
skipping to change at line 54 skipping to change at line 54
* @return true if one of the triggers has gone off. * @return true if one of the triggers has gone off.
*/ */
bool intervalHasElapsed(); bool intervalHasElapsed();
void resetLastTime(); void resetLastTime();
private: private:
const int32_t _hitsBetweenMarks; const int32_t _hitsBetweenMarks;
const int32_t _msBetweenMarks; const int32_t _msBetweenMarks;
uint64_t _pings; int32_t _pings;
int64_t _last; int64_t _last;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 element-inl.h   element-inl.h 
skipping to change at line 21 skipping to change at line 21
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
namespace mongo { namespace mongo {
namespace mutablebson { namespace mutablebson {
inline Element Element::operator[](size_t n) const {
return findNthChild(n);
}
inline Element Element::operator[](const StringData& name) const {
return findFirstChildNamed(name);
}
inline double Element::getValueDouble() const { inline double Element::getValueDouble() const {
dassert(hasValue() && isType(mongo::NumberDouble)); dassert(hasValue() && isType(mongo::NumberDouble));
return getValue()._numberDouble(); return getValue()._numberDouble();
} }
inline StringData Element::getValueString() const { inline StringData Element::getValueString() const {
dassert(hasValue() && isType(mongo::String)); dassert(hasValue() && isType(mongo::String));
return getValueStringOrSymbol(); return getValueStringOrSymbol();
} }
 End of changes. 1 change blocks. 
0 lines changed or deleted 8 lines changed or added


 element.h   element.h 
skipping to change at line 176 skipping to change at line 176
* exists. Note that obtaining the right child may require realizi ng all immediate * exists. Note that obtaining the right child may require realizi ng all immediate
* child nodes of a document that is being consumed lazily. * child nodes of a document that is being consumed lazily.
*/ */
Element rightChild() const; Element rightChild() const;
/** Returns true if this element has children. Always returns false if this Element is /** Returns true if this element has children. Always returns false if this Element is
* not an Object or Array. * not an Object or Array.
*/ */
bool hasChildren() const; bool hasChildren() const;
/** Returns either this Element's left sibling, or a non-ok Element /** Returns either this Element's sibling 'distance' elements to th
if no left sibling e left, or a non-ok
* exists. * Element if no such left sibling exists.
*/ */
Element leftSibling() const; Element leftSibling(size_t distance = 1) const;
/** Returns either this Element's right sibling, or a non-ok Elemen /** Returns either this Element's sibling 'distance' Elements to th
t if no right e right, or a non-ok
* sibling exists. * Element if no such right sibling exists.
*/ */
Element rightSibling() const; Element rightSibling(size_t distance = 1) const;
/** Returns this Element's parent, or a non-ok Element if this Elem ent has no parent /** Returns this Element's parent, or a non-ok Element if this Elem ent has no parent
* (is a root). * (is a root).
*/ */
Element parent() const; Element parent() const;
/** Returns the nth child, if any, of this Element. If no such elem ent exists, a non-ok /** Returns the nth child, if any, of this Element. If no such elem ent exists, a non-ok
* Element is returned. This is not a constant time operation. Thi * Element is returned. This is not a constant time operation. Thi
s is purely s method is also
* syntactic sugar for calling getNthChild from algorithm.h * available as operator[] taking a size_t for convenience.
*/ */
Element operator[](size_t n) const; Element findNthChild(size_t n) const;
inline Element operator[](size_t n) const;
/** Returns the first child, if any, of this Element named 'name'. If no such Element /** Returns the first child, if any, of this Element named 'name'. If no such Element
* exists, a non-ok Element is returned. This is not a constant ti me operation. This * exists, a non-ok Element is returned. This is not a constant ti me operation. This
* is purely syntactic sugar for calling findFirstChildNamed from algorithm.h. * method is also available as operator[] taking a StringData for convenience.
*/ */
Element operator[](const StringData& name) const; Element findFirstChildNamed(const StringData& name) const;
inline Element operator[](const StringData& name) const;
/** Returns the first element found named 'name', starting the sear
ch at the current
* Element, and walking right. If no such Element exists, a non-ok
Element is
* returned. This is not a constant time operation. This implement
ation is used in the
* specialized implementation of findElement<ElementType, FieldNam
eEquals>.
*/
Element findElementNamed(const StringData& name) const;
//
// Counting API.
//
/** Returns the number of valid siblings to the left of this Elemen
t. */
size_t countSiblingsLeft() const;
/** Returns the number of valid siblings to the right of this Eleme
nt. */
size_t countSiblingsRight() const;
/** Return the number of children of this Element. */
size_t countChildren() const;
// //
// Value access API. // Value access API.
// //
// We only provide accessors for BSONElement and for simple types. For more complex // We only provide accessors for BSONElement and for simple types. For more complex
// types like regex you should obtain the BSONElement and use that API to extract the // types like regex you should obtain the BSONElement and use that API to extract the
// components. // components.
// //
// Note that the getValueX methods are *unchecked* in release build s: You are // Note that the getValueX methods are *unchecked* in release build s: You are
// responsible for calling hasValue() to ensure that this element h as a value // responsible for calling hasValue() to ensure that this element h as a value
 End of changes. 8 change blocks. 
14 lines changed or deleted 42 lines changed or added


 engine_v8.h   engine_v8.h 
skipping to change at line 341 skipping to change at line 341
StringData sd (str, StringData::LiteralTag()); StringData sd (str, StringData::LiteralTag());
v8::Handle<v8::String> v8Str = v8StringData(sd); v8::Handle<v8::String> v8Str = v8StringData(sd);
// We never need to Dispose since this should last as long as V 8Scope exists // We never need to Dispose since this should last as long as V 8Scope exists
_strLitMap[str] = v8::Persistent<v8::String>::New(v8Str); _strLitMap[str] = v8::Persistent<v8::String>::New(v8Str);
return v8Str; return v8Str;
} }
private: private:
/**
* Recursion limit when converting from JS objects to BSON.
*/
static const int objectDepthLimit = 500;
/** /**
* Attach data to obj such that the data has the same lifetime as t he Object obj points to. * Attach data to obj such that the data has the same lifetime as t he Object obj points to.
* obj must have been created by either LazyBsonFT or ROBsonFT. * obj must have been created by either LazyBsonFT or ROBsonFT.
*/ */
void wrapBSONObject(v8::Handle<v8::Object> obj, BSONObj data, bool readOnly); void wrapBSONObject(v8::Handle<v8::Object> obj, BSONObj data, bool readOnly);
/** /**
* Trampoline to call a c++ function with a specific signature (V8S cope*, v8::Arguments&). * Trampoline to call a c++ function with a specific signature (V8S cope*, v8::Arguments&).
* Handles interruption, exceptions, etc. * Handles interruption, exceptions, etc.
 End of changes. 1 change blocks. 
0 lines changed or deleted 4 lines changed or added


 eof_runner.h   eof_runner.h 
skipping to change at line 43 skipping to change at line 43
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/query/runner.h" #include "mongo/db/query/runner.h"
namespace mongo { namespace mongo {
class BSONObj; class BSONObj;
class CanonicalQuery; class CanonicalQuery;
class DiskLoc; class DiskLoc;
class TypeExplain; class TypeExplain;
struct PlanInfo;
/** /**
* EOFRunner is EOF immediately and doesn't do anything except return E OF and possibly die * EOFRunner is EOF immediately and doesn't do anything except return E OF and possibly die
* during a yield. * during a yield.
*/ */
class EOFRunner : public Runner { class EOFRunner : public Runner {
public: public:
/* Takes onwership */ /* Takes ownership */
EOFRunner(CanonicalQuery* cq, const std::string& ns); EOFRunner(CanonicalQuery* cq, const std::string& ns);
virtual ~EOFRunner(); virtual ~EOFRunner();
virtual Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut ); virtual Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut );
virtual bool isEOF(); virtual bool isEOF();
virtual void saveState(); virtual void saveState();
virtual bool restoreState(); virtual bool restoreState();
virtual void setYieldPolicy(Runner::YieldPolicy policy); virtual void setYieldPolicy(Runner::YieldPolicy policy);
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual const std::string& ns(); virtual const std::string& ns();
virtual void kill(); virtual void kill();
// this can return NULL since we never yield or anything over it
virtual const Collection* collection() { return NULL; }
/** /**
* Always returns OK, allocating and filling in '*explain' with a f ake ("zeroed") * Always returns OK, allocating and filling in '*explain' with a f ake ("zeroed")
* collection scan plan. Caller owns '*explain', though. * collection scan plan. Fills in '*planInfo' with information indi
cating an
* EOF runner. Caller owns '*explain', though.
*/ */
virtual Status getExplainPlan(TypeExplain** explain) const; virtual Status getInfo(TypeExplain** explain,
PlanInfo** planInfo) const;
private: private:
boost::scoped_ptr<CanonicalQuery> _cq; boost::scoped_ptr<CanonicalQuery> _cq;
std::string _ns; std::string _ns;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
4 lines changed or deleted 11 lines changed or added


 error_codes.h   error_codes.h 
skipping to change at line 21 skipping to change at line 21
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* This is a generated class containing a table of error codes and thei r corresponding error * This is a generated class containing a table of error codes and thei r corresponding error
* strings. The class is derived from the definitions in src/mongo/base /error_codes.err file. * strings. The class is derived from the definitions in src/mongo/base /error_codes.err file.
* *
* Do not update this file directly. Update src/mongo/base/error_codes. err instead. * Do not update this file directly. Update src/mongo/base/error_codes. err instead.
*/ */
class ErrorCodes { class MONGO_CLIENT_API ErrorCodes {
public: public:
enum Error { enum Error {
OK = 0, OK = 0,
InternalError = 1, InternalError = 1,
BadValue = 2, BadValue = 2,
OBSOLETE_DuplicateKey = 3, OBSOLETE_DuplicateKey = 3,
NoSuchKey = 4, NoSuchKey = 4,
GraphContainsCycle = 5, GraphContainsCycle = 5,
HostUnreachable = 6, HostUnreachable = 6,
HostNotFound = 7, HostNotFound = 7,
skipping to change at line 106 skipping to change at line 107
StaleShardVersion = 63, StaleShardVersion = 63,
WriteConcernFailed = 64, WriteConcernFailed = 64,
MultipleErrorsOccurred = 65, MultipleErrorsOccurred = 65,
ImmutableField = 66, ImmutableField = 66,
CannotCreateIndex = 67, CannotCreateIndex = 67,
IndexAlreadyExists = 68, IndexAlreadyExists = 68,
AuthSchemaIncompatible = 69, AuthSchemaIncompatible = 69,
ShardNotFound = 70, ShardNotFound = 70,
ReplicaSetNotFound = 71, ReplicaSetNotFound = 71,
InvalidOptions = 72, InvalidOptions = 72,
InvalidNamespace = 73,
NodeNotFound = 74,
WriteConcernLegacyOK = 75,
NoReplicationEnabled = 76,
OperationIncomplete = 77,
CommandResultSchemaViolation = 78,
UnknownReplWriteConcern = 79,
RoleDataInconsistent = 80,
NoClientContext = 81,
NoProgressMade = 82,
NotMaster = 10107,
DuplicateKey = 11000, DuplicateKey = 11000,
Interrupted = 11601,
MaxError MaxError
}; };
static const char* errorString(Error err); static const char* errorString(Error err);
/** /**
* Parse an Error from its "name". Returns UnknownError if "name" is unrecognized. * Parse an Error from its "name". Returns UnknownError if "name" is unrecognized.
* *
* NOTE: Also returns UnknownError for the string "UnknownError". * NOTE: Also returns UnknownError for the string "UnknownError".
*/ */
 End of changes. 4 change blocks. 
1 lines changed or deleted 14 lines changed or added


 explain_plan.h   explain_plan.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/query_solution.h"
namespace mongo { namespace mongo {
class TypeExplain; class TypeExplain;
struct PlanInfo;
/** /**
* Returns OK, allocating and filling in '*explain' describing the acce ss paths used in * Returns OK, allocating and filling in '*explainOut' describing the a ccess paths used in
* the 'stats' tree of a given query solution. The caller has the owner ship of * the 'stats' tree of a given query solution. The caller has the owner ship of
* '*explain', on success. Otherwise return and erros status describing the problem. * '*explainOut', on success. Otherwise return an error status describi ng the problem.
* *
* If 'fullDetails' was requested, the explain will return all availabl e information about * If 'fullDetails' was requested, the explain will return all availabl e information about
* the plan, otherwise, just a summary. The fields in the summary are: 'cursor', 'n', * the plan, otherwise, just a summary. The fields in the summary are: 'cursor', 'n',
* 'nscannedObjects', 'nscanned', and 'indexBounds'. The remaining fiel ds are: 'isMultKey', * 'nscannedObjects', 'nscanned', and 'indexBounds'. The remaining fiel ds are: 'isMultKey',
* 'nscannedObjectsAllPlans', 'nscannedAllPlans', 'scanAndOrder', 'inde xOnly', 'nYields', * 'nscannedObjectsAllPlans', 'nscannedAllPlans', 'scanAndOrder', 'inde xOnly', 'nYields',
* 'nChunkSkips', 'millis', 'allPlans', and 'oldPlan'. * 'nChunkSkips', 'millis', 'allPlans', and 'oldPlan'.
* *
* All these fields are documented in type_explain.h * All these fields are documented in type_explain.h
* *
* TODO: Currently, only working for single-leaf plans. * TODO: Currently, only working for single-leaf plans.
*/ */
Status explainPlan(const PlanStageStats& stats, TypeExplain** explain, Status explainPlan(const PlanStageStats& stats, TypeExplain** explainOu
bool fullDetails); t, bool fullDetails);
/**
* If the out-parameter 'info' is non-null, fills in '*infoOut' with in
formation
* from the query solution tree 'soln' that can be determined before th
e query is done
* running. Whereas 'explainPlan(...)' above is for collecting runtime
debug information,
* this function is for collecting static debug information that is kno
wn prior
* to query runtime.
*
* The caller is responsible for deleting '*infoOut'.
*/
void getPlanInfo(const QuerySolution& soln, PlanInfo** infoOut);
void statsToBSON(const PlanStageStats& stats, BSONObjBuilder* bob);
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
4 lines changed or deleted 23 lines changed or added


 expression_geo.h   expression_geo.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/geo/geonear.h"
#include "mongo/db/geo/geoquery.h" #include "mongo/db/geo/geoquery.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_leaf.h"
namespace mongo { namespace mongo {
class GeoMatchExpression : public LeafMatchExpression { class GeoMatchExpression : public LeafMatchExpression {
public: public:
GeoMatchExpression() : LeafMatchExpression( GEO ){} GeoMatchExpression() : LeafMatchExpression( GEO ){}
virtual ~GeoMatchExpression(){} virtual ~GeoMatchExpression(){}
 End of changes. 1 change blocks. 
1 lines changed or deleted 0 lines changed or added


 expression_index.h   expression_index.h 
skipping to change at line 50 skipping to change at line 50
* TODO: I think we could structure this more generally with respect to planning. * TODO: I think we could structure this more generally with respect to planning.
*/ */
class ExpressionMapping { class ExpressionMapping {
public: public:
static BSONObj hash(const BSONElement& value) { static BSONObj hash(const BSONElement& value) {
BSONObjBuilder bob; BSONObjBuilder bob;
bob.append("", BSONElementHasher::hash64(value, BSONElementHash er::DEFAULT_HASH_SEED)); bob.append("", BSONElementHasher::hash64(value, BSONElementHash er::DEFAULT_HASH_SEED));
return bob.obj(); return bob.obj();
} }
static void cover2dsphere(const S2Region& region, OrderedIntervalLi // TODO: what should we really pass in for indexInfoObj?
st* oilOut) { static void cover2dsphere(const S2Region& region,
// XXX: should grab coarsest level from the index since the use const BSONObj& indexInfoObj,
r can possibly change it. OrderedIntervalList* oilOut) {
int coarsestIndexedLevel =
S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / kRadius int coarsestIndexedLevel;
OfEarthInMeters); BSONElement ce = indexInfoObj["coarsestIndexedLevel"];
if (ce.isNumber()) {
coarsestIndexedLevel = ce.numberInt();
}
else {
coarsestIndexedLevel =
S2::kAvgEdge.GetClosestLevel(100 * 1000.0 / kRadiusOfEa
rthInMeters);
}
// The min level of our covering is the level whose cells are t he closest match to the // The min level of our covering is the level whose cells are t he closest match to the
// *area* of the region (or the max indexed level, whichever is smaller) The max level // *area* of the region (or the max indexed level, whichever is smaller) The max level
// is 4 sizes larger. // is 4 sizes larger.
double edgeLen = sqrt(region.GetRectBound().Area()); double edgeLen = sqrt(region.GetRectBound().Area());
S2RegionCoverer coverer; S2RegionCoverer coverer;
coverer.set_min_level(min(coarsestIndexedLevel, coverer.set_min_level(min(coarsestIndexedLevel,
2 + S2::kAvgEdge.GetClosestLevel(edge Len))); 2 + S2::kAvgEdge.GetClosestLevel(edge Len)));
coverer.set_max_level(4 + coverer.min_level()); coverer.set_max_level(4 + coverer.min_level());
 End of changes. 1 change blocks. 
7 lines changed or deleted 15 lines changed or added


 expression_parser.h   expression_parser.h 
skipping to change at line 66 skipping to change at line 66
private: private:
/** /**
* 5 = false * 5 = false
{ a : 5 } = false { a : 5 } = false
{ $lt : 5 } = true { $lt : 5 } = true
{ $ref : "s" } = false { $ref : "s" } = false
*/ */
static bool _isExpressionDocument( const BSONElement& e ); static bool _isExpressionDocument( const BSONElement& e );
static bool _isDBRefDocument( const BSONObj& obj );
static StatusWithMatchExpression _parse( const BSONObj& obj, bool t opLevel ); static StatusWithMatchExpression _parse( const BSONObj& obj, bool t opLevel );
/** /**
* parses a field in a sub expression * parses a field in a sub expression
* if the query is { x : { $gt : 5, $lt : 8 } } * if the query is { x : { $gt : 5, $lt : 8 } }
* e is { $gt : 5, $lt : 8 } * e is { $gt : 5, $lt : 8 }
*/ */
static Status _parseSub( const char* name, static Status _parseSub( const char* name,
const BSONObj& obj, const BSONObj& obj,
AndMatchExpression* root ); AndMatchExpression* root );
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 extent.h   extent.h 
skipping to change at line 34 skipping to change at line 34
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/catalog/ondisk/namespace.h" #include "mongo/db/structure/catalog/namespace.h"
namespace mongo { namespace mongo {
/* extents are datafile regions where all the records within the region /* extents are datafile regions where all the records within the region
belong to the same namespace. belong to the same namespace.
(11:12:35 AM) dm10gen: when the extent is allocated, all its empty spac e is stuck into one big DeletedRecord (11:12:35 AM) dm10gen: when the extent is allocated, all its empty spac e is stuck into one big DeletedRecord
(11:12:55 AM) dm10gen: and that is placed on the free list (11:12:55 AM) dm10gen: and that is placed on the free list
*/ */
#pragma pack(1) #pragma pack(1)
skipping to change at line 90 skipping to change at line 90
void assertOk() const { verify(isOk()); } void assertOk() const { verify(isOk()); }
Record* getRecord(DiskLoc dl) { Record* getRecord(DiskLoc dl) {
verify( !dl.isNull() ); verify( !dl.isNull() );
verify( dl.sameFile(myLoc) ); verify( dl.sameFile(myLoc) );
int x = dl.getOfs() - myLoc.getOfs(); int x = dl.getOfs() - myLoc.getOfs();
verify( x > 0 ); verify( x > 0 );
return (Record *) (((char *) this) + x); return (Record *) (((char *) this) + x);
} }
DeletedRecord* getDeletedRecord(const DiskLoc& dl ) {
return reinterpret_cast<DeletedRecord*>( getRecord( dl ) );
}
static int maxSize(); static int maxSize();
static int minSize() { return 0x1000; } static int minSize() { return 0x1000; }
/** /**
* @param len lengt of record we need * @param len lengt of record we need
* @param lastRecord size of last extent which is a factor in next extent size * @param lastRecord size of last extent which is a factor in next extent size
*/ */
static int followupSize(int len, int lastExtentLen); static int followupSize(int len, int lastExtentLen);
/** get a suggested size for the first extent in a namespace /** get a suggested size for the first extent in a namespace
* @param len length of record we need to insert * @param len length of record we need to insert
 End of changes. 2 change blocks. 
1 lines changed or deleted 5 lines changed or added


 extent_manager.h   extent_manager.h 
skipping to change at line 103 skipping to change at line 103
DataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly = false ); DataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly = false );
DataFile* addAFile( int sizeNeeded, bool preallocateNextFile ); DataFile* addAFile( int sizeNeeded, bool preallocateNextFile );
void preallocateAFile() { getFile( numFiles() , 0, true ); }// XXX- ERH void preallocateAFile() { getFile( numFiles() , 0, true ); }// XXX- ERH
void flushFiles( bool sync ); void flushFiles( bool sync );
/* allocate a new Extent, does not check free list /* allocate a new Extent, does not check free list
@param capped - true if capped collection * @param maxFileNoForQuota - 0 for unlimited
*/ */
DiskLoc createExtent( int approxSize, int maxFileNoForQuota ); DiskLoc createExtent( int approxSize, int maxFileNoForQuota );
/** /**
* will return NULL if nothing suitable in free list * will return NULL if nothing suitable in free list
*/ */
DiskLoc allocFromFreeList( int approxSize, bool capped ); DiskLoc allocFromFreeList( int approxSize, bool capped );
/** /**
* @param quotaMax 0 == no limit
* TODO: this isn't quite in the right spot * TODO: this isn't quite in the right spot
* really need the concept of a NamespaceStructure in the current paradigm * really need the concept of a NamespaceStructure in the current paradigm
*/ */
Extent* increaseStorageSize( const string& ns, Extent* increaseStorageSize( const string& ns,
NamespaceDetails* details, NamespaceDetails* details,
int size, int size,
int quotaMax ); int quotaMax );
/** /**
* firstExt has to be == lastExt or a chain * firstExt has to be == lastExt or a chain
skipping to change at line 141 skipping to change at line 142
* @param loc - has to be for a specific Record * @param loc - has to be for a specific Record
*/ */
Record* recordFor( const DiskLoc& loc ) const; Record* recordFor( const DiskLoc& loc ) const;
/** /**
* @param loc - has to be for a specific Record (not an Extent) * @param loc - has to be for a specific Record (not an Extent)
*/ */
Extent* extentFor( const DiskLoc& loc ) const; Extent* extentFor( const DiskLoc& loc ) const;
/** /**
* @param loc - has to be for a specific Record (not an Extent)
*/
DiskLoc extentLocFor( const DiskLoc& loc ) const;
/**
* @param loc - has to be for a specific Extent * @param loc - has to be for a specific Extent
*/ */
Extent* getExtent( const DiskLoc& loc, bool doSanityCheck = true ) const; Extent* getExtent( const DiskLoc& loc, bool doSanityCheck = true ) const;
Extent* getNextExtent( Extent* ) const; Extent* getNextExtent( Extent* ) const;
Extent* getPrevExtent( Extent* ) const; Extent* getPrevExtent( Extent* ) const;
// get(Next|Prev)Record follows the Record linked list // get(Next|Prev)Record follows the Record linked list
// these WILL cross Extent boundaries // these WILL cross Extent boundaries
// * @param loc - has to be the DiskLoc for a Record // * @param loc - has to be the DiskLoc for a Record
 End of changes. 3 change blocks. 
1 lines changed or deleted 7 lines changed or added


 extsort.h   extsort.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h"
#include "mongo/db/storage/index_details.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/curop-inl.h" #include "mongo/db/diskloc.h"
#include "mongo/util/array.h" #include "mongo/db/sorter/sorter.h"
#define MONGO_USE_NEW_SORTER 1
#if MONGO_USE_NEW_SORTER
# include "mongo/db/sorter/sorter.h"
#endif
namespace mongo { namespace mongo {
typedef pair<BSONObj, DiskLoc> ExternalSortDatum; typedef pair<BSONObj, DiskLoc> ExternalSortDatum;
/** /**
* To external sort, you provide a pointer to an implementation of this class. * To external sort, you provide a pointer to an implementation of this class.
* The compare function follows the usual -1, 0, 1 semantics. * The compare function follows the usual -1, 0, 1 semantics.
*/ */
class ExternalSortComparison { class ExternalSortComparison {
public: public:
virtual ~ExternalSortComparison() { } virtual ~ExternalSortComparison() { }
virtual int compare(const ExternalSortDatum& l, const ExternalSortD atum& r) const = 0; virtual int compare(const ExternalSortDatum& l, const ExternalSortD atum& r) const = 0;
}; };
#if MONGO_USE_NEW_SORTER
// TODO This class will probably disappear in the future or be replaced with a typedef // TODO This class will probably disappear in the future or be replaced with a typedef
class BSONObjExternalSorter : boost::noncopyable { class BSONObjExternalSorter : boost::noncopyable {
public: public:
typedef pair<BSONObj, DiskLoc> Data; typedef pair<BSONObj, DiskLoc> Data;
typedef SortIteratorInterface<BSONObj, DiskLoc> Iterator; typedef SortIteratorInterface<BSONObj, DiskLoc> Iterator;
BSONObjExternalSorter(const ExternalSortComparison* comp, long maxF ileSize=100*1024*1024); BSONObjExternalSorter(const ExternalSortComparison* comp, long maxF ileSize=100*1024*1024);
void add( const BSONObj& o, const DiskLoc& loc, bool mayInterrupt ) { void add( const BSONObj& o, const DiskLoc& loc, bool mayInterrupt ) {
*_mayInterrupt = mayInterrupt; *_mayInterrupt = mayInterrupt;
skipping to change at line 85 skipping to change at line 75
void sort( bool mayInterrupt ) { *_mayInterrupt = mayInterrupt; } void sort( bool mayInterrupt ) { *_mayInterrupt = mayInterrupt; }
int numFiles() { return _sorter->numFiles(); } int numFiles() { return _sorter->numFiles(); }
long getCurSizeSoFar() { return _sorter->memUsed(); } long getCurSizeSoFar() { return _sorter->memUsed(); }
void hintNumObjects(long long) {} // unused void hintNumObjects(long long) {} // unused
private: private:
shared_ptr<bool> _mayInterrupt; shared_ptr<bool> _mayInterrupt;
scoped_ptr<Sorter<BSONObj, DiskLoc> > _sorter; scoped_ptr<Sorter<BSONObj, DiskLoc> > _sorter;
}; };
#else
/**
for external (disk) sorting by BSONObj and attaching a value
*/
class BSONObjExternalSorter : boost::noncopyable {
public:
BSONObjExternalSorter(const ExternalSortComparison* cmp,
long maxFileSize = 1024 * 1024 * 100 );
~BSONObjExternalSorter();
private:
static HLMutex _extSortMutex;
static int _compare(const ExternalSortComparison* cmp, const Extern
alSortDatum& l,
const ExternalSortDatum& r);
class MyCmp {
public:
MyCmp(const ExternalSortComparison* cmp) : _cmp(cmp) { }
bool operator()( const ExternalSortDatum &l, const ExternalSort
Datum &r ) const {
return _cmp->compare(l, r) < 0;
};
private:
const ExternalSortComparison* _cmp;
};
static bool extSortMayInterrupt;
static int extSortComp( const void *lv, const void *rv );
static const ExternalSortComparison* staticExtSortCmp;
class FileIterator : boost::noncopyable {
public:
FileIterator( const std::string& file );
~FileIterator();
bool more();
ExternalSortDatum next();
private:
bool _read( char* buf, long long count );
int _file;
unsigned long long _length;
unsigned long long _readSoFar;
};
public:
typedef FastArray<ExternalSortDatum> InMemory;
class Iterator : boost::noncopyable {
public:
Iterator( BSONObjExternalSorter * sorter );
~Iterator();
bool more();
ExternalSortDatum next();
private:
MyCmp _cmp;
vector<FileIterator*> _files;
vector< pair<ExternalSortDatum,bool> > _stash;
InMemory * _in;
InMemory::iterator _it;
};
void add( const BSONObj& o, const DiskLoc& loc, bool mayInterrupt )
;
/* call after adding values, and before fetching the iterator */
void sort( bool mayInterrupt );
auto_ptr<Iterator> iterator() {
uassert( 10052 , "not sorted" , _sorted );
return auto_ptr<Iterator>( new Iterator( this ) );
}
int numFiles() {
return _files.size();
}
long getCurSizeSoFar() { return _curSizeSoFar; }
void hintNumObjects( long long numObjects ) {
if ( numObjects < _arraySize )
_arraySize = (int)(numObjects + 100);
}
private:
void _sortInMem( bool mayInterrupt );
void sort( const std::string& file );
void finishMap( bool mayInterrupt );
const ExternalSortComparison* _cmp;
long _maxFilesize;
boost::filesystem::path _root;
int _arraySize;
InMemory * _cur;
long _curSizeSoFar;
list<string> _files;
bool _sorted;
static unsigned long long _compares;
static unsigned long long _uniqueNumber;
};
#endif
} }
 End of changes. 4 change blocks. 
124 lines changed or deleted 2 lines changed or added


 fetch.h   fetch.h 
skipping to change at line 56 skipping to change at line 56
class FetchStage : public PlanStage { class FetchStage : public PlanStage {
public: public:
FetchStage(WorkingSet* ws, PlanStage* child, const MatchExpression* filter); FetchStage(WorkingSet* ws, PlanStage* child, const MatchExpression* filter);
virtual ~FetchStage(); virtual ~FetchStage();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
PlanStageStats* getStats(); PlanStageStats* getStats();
private: private:
/** /**
* If the member (with id memberID) passes our filter, set *out to memberID and return that * If the member (with id memberID) passes our filter, set *out to memberID and return that
* ADVANCED. Otherwise, free memberID and return NEED_TIME. * ADVANCED. Otherwise, free memberID and return NEED_TIME.
*/ */
StageState returnIfMatches(WorkingSetMember* member, WorkingSetID m emberID, StageState returnIfMatches(WorkingSetMember* member, WorkingSetID m emberID,
WorkingSetID* out); WorkingSetID* out);
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 field_parser-inl.h   field_parser-inl.h 
skipping to change at line 37 skipping to change at line 37
*/ */
#include "mongo/db/field_parser.h" #include "mongo/db/field_parser.h"
#include "mongo/util/mongoutils/str.h" #include "mongo/util/mongoutils/str.h"
namespace mongo { namespace mongo {
using mongoutils::str::stream; using mongoutils::str::stream;
template<class T> template<class T>
void _genFieldErrMsg(const BSONObj& doc, void _genFieldErrMsg(const BSONElement& elem,
const BSONField<T>& field, const BSONField<T>& field,
const string expected, const string expected,
string* errMsg) string* errMsg)
{ {
if (!errMsg) return; if (!errMsg) return;
*errMsg = stream() << "wrong type for '" << field() << "' field, ex pected " << expected *errMsg = stream() << "wrong type for '" << field() << "' field, ex pected " << expected
<< ", found " << doc[field.name()].toString(); << ", found " << elem.toString();
} }
template<typename T> template<typename T>
FieldParser::FieldState FieldParser::extract(BSONObj doc, FieldParser::FieldState FieldParser::extract(BSONObj doc,
const BSONField<T>& field, const BSONField<T>& field,
T* out, T* out,
string* errMsg) string* errMsg)
{ {
BSONElement elem = doc[field.name()]; BSONElement elem = doc[field.name()];
if (elem.eoo()) { if (elem.eoo()) {
if (field.hasDefault()) { if (field.hasDefault()) {
field.getDefault().cloneTo(out); field.getDefault().cloneTo(out);
return FIELD_DEFAULT; return FIELD_DEFAULT;
} }
else { else {
return FIELD_NONE; return FIELD_NONE;
} }
} }
if (elem.type() != Object && elem.type() != Array) { if (elem.type() != Object && elem.type() != Array) {
_genFieldErrMsg(doc, field, "Object/Array", errMsg); _genFieldErrMsg(elem, field, "Object/Array", errMsg);
return FIELD_INVALID; return FIELD_INVALID;
} }
if (!out->parseBSON(elem.embeddedObject(), errMsg)) { if (!out->parseBSON(elem.embeddedObject(), errMsg)) {
return FIELD_INVALID; return FIELD_INVALID;
} }
return FIELD_SET; return FIELD_SET;
} }
skipping to change at line 97 skipping to change at line 97
*out = temp.release(); *out = temp.release();
return FIELD_DEFAULT; return FIELD_DEFAULT;
} }
else { else {
return FIELD_NONE; return FIELD_NONE;
} }
} }
if (elem.type() != Object && elem.type() != Array) { if (elem.type() != Object && elem.type() != Array) {
_genFieldErrMsg(doc, field, "Object/Array", errMsg); _genFieldErrMsg(elem, field, "Object/Array", errMsg);
return FIELD_INVALID; return FIELD_INVALID;
} }
auto_ptr<T> temp(new T); auto_ptr<T> temp(new T);
if (!temp->parseBSON(elem.embeddedObject(), errMsg)) { if (!temp->parseBSON(elem.embeddedObject(), errMsg)) {
return FIELD_INVALID; return FIELD_INVALID;
} }
*out = temp.release(); *out = temp.release();
return FIELD_SET; return FIELD_SET;
skipping to change at line 148 skipping to change at line 148
if (!temp->parseBSON(elem.embeddedObject(), errMsg)) { if (!temp->parseBSON(elem.embeddedObject(), errMsg)) {
return FIELD_INVALID; return FIELD_INVALID;
} }
*out = temp.release(); *out = temp.release();
return FIELD_SET; return FIELD_SET;
} }
// Extracts an array into a vector // Extracts an array into a vector
template<typename T> template<typename T>
FieldParser::FieldState FieldParser::extract(BSONObj doc, FieldParser::FieldState FieldParser::extract( BSONObj doc,
const BSONField<vector<T> >& field, const BSONField<vector<T>
vector<T>* out, >& field,
string* errMsg) vector<T>* out,
string* errMsg ) {
return extract( doc[field.name()], field, out, errMsg );
}
template<typename T>
FieldParser::FieldState FieldParser::extract( BSONElement elem,
const BSONField<vector<T>
>& field,
vector<T>* out,
string* errMsg )
{ {
BSONElement elem = doc[field.name()];
if (elem.eoo()) { if (elem.eoo()) {
if (field.hasDefault()) { if (field.hasDefault()) {
*out = field.getDefault(); *out = field.getDefault();
return FIELD_DEFAULT; return FIELD_DEFAULT;
} }
else { else {
return FIELD_NONE; return FIELD_NONE;
} }
} }
skipping to change at line 178 skipping to change at line 185
// Append all the new elements to the end of the vector // Append all the new elements to the end of the vector
size_t initialSize = out->size(); size_t initialSize = out->size();
out->resize(initialSize + arr.nFields()); out->resize(initialSize + arr.nFields());
int i = 0; int i = 0;
BSONObjIterator objIt(arr); BSONObjIterator objIt(arr);
while (objIt.more()) { while (objIt.more()) {
BSONElement next = objIt.next(); BSONElement next = objIt.next();
BSONField<T> fieldFor(next.fieldName(), out->at(initialSize + i)); BSONField<T> fieldFor(next.fieldName(), out->at(initialSize + i));
if (!FieldParser::extract(arr, if (!FieldParser::extract(next,
fieldFor, fieldFor,
&out->at(initialSize + i), &out->at(initialSize + i),
&elErrMsg)) &elErrMsg))
{ {
if (errMsg) { if (errMsg) {
*errMsg = stream() << "error parsing element " << i << " of field " *errMsg = stream() << "error parsing element " << i << " of field "
<< field() << causedBy(elErrMsg) ; << field() << causedBy(elErrMsg) ;
} }
return FIELD_INVALID; return FIELD_INVALID;
} }
i++; i++;
} }
return FIELD_SET; return FIELD_SET;
} }
if (errMsg) { if (errMsg) {
*errMsg = stream() << "wrong type for '" << field() << "' field , expected " *errMsg = stream() << "wrong type for '" << field() << "' field , expected "
<< "vector array" << ", found " << doc[field .name()].toString(); << "vector array" << ", found " << elem.toSt ring();
} }
return FIELD_INVALID; return FIELD_INVALID;
} }
template<typename T> template<typename T>
FieldParser::FieldState FieldParser::extract(BSONObj doc, FieldParser::FieldState FieldParser::extract(BSONObj doc,
const BSONField<vector<T*> >& field, const BSONField<vector<T*> >& field,
vector<T*>* out, vector<T*>* out,
string* errMsg) { string* errMsg) {
dassert(!field.hasDefault()); dassert(!field.hasDefault());
skipping to change at line 308 skipping to change at line 315
tempVector->push_back(toInsert.release()); tempVector->push_back(toInsert.release());
} }
*out = tempVector.release(); *out = tempVector.release();
return FIELD_SET; return FIELD_SET;
} }
// Extracts an object into a map // Extracts an object into a map
template<typename K, typename T> template<typename K, typename T>
FieldParser::FieldState FieldParser::extract(BSONObj doc, FieldParser::FieldState FieldParser::extract( BSONObj doc,
const BSONField<map<K, T> >& field, const BSONField<map<K, T>
map<K, T>* out, >& field,
string* errMsg) map<K, T>* out,
string* errMsg ) {
return extract( doc[field.name()], field, out, errMsg );
}
template<typename K, typename T>
FieldParser::FieldState FieldParser::extract( BSONElement elem,
const BSONField<map<K, T>
>& field,
map<K, T>* out,
string* errMsg )
{ {
BSONElement elem = doc[field.name()];
if (elem.eoo()) { if (elem.eoo()) {
if (field.hasDefault()) { if (field.hasDefault()) {
*out = field.getDefault(); *out = field.getDefault();
return FIELD_DEFAULT; return FIELD_DEFAULT;
} }
else { else {
return FIELD_NONE; return FIELD_NONE;
} }
} }
if (elem.type() == Object) { if (elem.type() == Object) {
BSONObj obj = elem.embeddedObject(); BSONObj obj = elem.embeddedObject();
string elErrMsg; string elErrMsg;
BSONObjIterator objIt(obj); BSONObjIterator objIt(obj);
while (objIt.more()) { while (objIt.more()) {
BSONElement next = objIt.next(); BSONElement next = objIt.next();
T& value = (*out)[next.fieldName()]; T& value = (*out)[next.fieldName()];
BSONField<T> fieldFor(next.fieldName(), value); BSONField<T> fieldFor(next.fieldName(), value);
if (!FieldParser::extract(obj, fieldFor, &value, &elErrMsg) ) { if (!FieldParser::extract(next, fieldFor, &value, &elErrMsg )) {
if (errMsg) { if (errMsg) {
*errMsg = stream() << "error parsing map element " << next.fieldName() *errMsg = stream() << "error parsing map element " << next.fieldName()
<< " of field " << field() << ca usedBy(elErrMsg); << " of field " << field() << ca usedBy(elErrMsg);
} }
return FIELD_INVALID; return FIELD_INVALID;
} }
} }
return FIELD_SET; return FIELD_SET;
} }
if (errMsg) { if (errMsg) {
*errMsg = stream() << "wrong type for '" << field() << "' field , expected " *errMsg = stream() << "wrong type for '" << field() << "' field , expected "
<< "vector array" << ", found " << doc[field .name()].toString(); << "vector array" << ", found " << elem.toSt ring();
} }
return FIELD_INVALID; return FIELD_INVALID;
} }
} // namespace mongo } // namespace mongo
 End of changes. 12 change blocks. 
18 lines changed or deleted 36 lines changed or added


 field_parser.h   field_parser.h 
skipping to change at line 69 skipping to change at line 69
// The field is present and has the correct type // The field is present and has the correct type
FIELD_SET, FIELD_SET,
// The field is absent in the BSON object but set from default // The field is absent in the BSON object but set from default
FIELD_DEFAULT, FIELD_DEFAULT,
// The field is absent and no default was specified // The field is absent and no default was specified
FIELD_NONE FIELD_NONE
}; };
static FieldState extract(BSONObj doc, static FieldState extract( BSONObj doc,
const BSONField<bool>& field, const BSONField<bool>& field,
bool* out, bool* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract(BSONObj doc, static FieldState extract( BSONElement elem,
const BSONField<BSONArray>& field, const BSONField<bool>& field,
BSONArray* out, bool* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract(BSONObj doc, static FieldState extract( BSONObj doc,
const BSONField<BSONObj>& field, const BSONField<BSONArray>& field,
BSONObj* out, BSONArray* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract(BSONObj doc, static FieldState extract( BSONElement elem,
const BSONField<Date_t>& field, const BSONField<BSONArray>& field,
Date_t* out, BSONArray* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract(BSONObj doc, static FieldState extract( BSONObj doc,
const BSONField<string>& field, const BSONField<BSONObj>& field,
string* out, BSONObj* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract(BSONObj doc, static FieldState extract( BSONElement elem,
const BSONField<OID>& field, const BSONField<BSONObj>& field,
OID* out, BSONObj* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract(BSONObj doc, static FieldState extract( BSONObj doc,
const BSONField<int>& field, const BSONField<Date_t>& field,
int* out, Date_t* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract(BSONObj doc, static FieldState extract( BSONElement elem,
const BSONField<long long>& field, const BSONField<Date_t>& field,
long long* out, Date_t* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extract( BSONObj doc,
const BSONField<OpTime>& field,
OpTime* out,
string* errMsg = NULL );
static FieldState extract( BSONElement elem,
const BSONField<OpTime>& field,
OpTime* out,
string* errMsg = NULL );
static FieldState extract( BSONObj doc,
const BSONField<string>& field,
string* out,
string* errMsg = NULL );
static FieldState extract( BSONElement elem,
const BSONField<string>& field,
string* out,
string* errMsg = NULL );
static FieldState extract( BSONObj doc,
const BSONField<OID>& field,
OID* out,
string* errMsg = NULL );
static FieldState extract( BSONElement elem,
const BSONField<OID>& field,
OID* out,
string* errMsg = NULL );
static FieldState extract( BSONObj doc,
const BSONField<int>& field,
int* out,
string* errMsg = NULL );
static FieldState extract( BSONElement elem,
const BSONField<int>& field,
int* out,
string* errMsg = NULL );
static FieldState extract( BSONObj doc,
const BSONField<long long>& field,
long long* out,
string* errMsg = NULL );
static FieldState extract( BSONElement elem,
const BSONField<long long>& field,
long long* out,
string* errMsg = NULL );
/** /**
* The following extractNumber methods do implicit conversion betwe en any numeric type and * The following extractNumber methods do implicit conversion betwe en any numeric type and
* the BSONField type. This can be useful when an exact numeric ty pe is not needed, for * the BSONField type. This can be useful when an exact numeric ty pe is not needed, for
* example if the field is sometimes modified from the shell which can change the type. * example if the field is sometimes modified from the shell which can change the type.
*/ */
static FieldState extractNumber(BSONObj doc, static FieldState extractNumber( BSONObj doc,
const BSONField<int>& field, const BSONField<int>& field,
int* out, int* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extractNumber(BSONObj doc, static FieldState extractNumber( BSONElement elem,
const BSONField<long long>& field, const BSONField<int>& field,
long long* out, int* out,
string* errMsg = NULL); string* errMsg = NULL );
static FieldState extractNumber( BSONObj doc,
const BSONField<long long>& field,
long long* out,
string* errMsg = NULL );
static FieldState extractNumber( BSONElement elem,
const BSONField<long long>& field,
long long* out,
string* errMsg = NULL );
/** /**
* Extracts a document id from a particular field name, which may b e of any type but Array. * Extracts a document id from a particular field name, which may b e of any type but Array.
* Wraps the extracted id value in a BSONObj with one element and e mpty field name. * Wraps the extracted id value in a BSONObj with one element and e mpty field name.
*/ */
static FieldState extractID( BSONObj doc, static FieldState extractID( BSONObj doc,
const BSONField<BSONObj>& field, const BSONField<BSONObj>& field,
BSONObj* out, BSONObj* out,
string* errMsg = NULL ); string* errMsg = NULL );
static FieldState extractID( BSONElement elem,
const BSONField<BSONObj>& field,
BSONObj* out,
string* errMsg = NULL );
// TODO: BSONElement extraction of types below
/** /**
* Extracts a mandatory BSONSerializable structure 'field' from the object 'doc'. Write * Extracts a mandatory BSONSerializable structure 'field' from the object 'doc'. Write
* the extracted contents to '*out' if successful or fills '*errMsg ', if exising, * the extracted contents to '*out' if successful or fills '*errMsg ', if exising,
* otherwise. This variant relies on T having a parseBSON, which a ll * otherwise. This variant relies on T having a parseBSON, which a ll
* BSONSerializable's have. * BSONSerializable's have.
* *
* TODO: Tighten for BSONSerializable's only * TODO: Tighten for BSONSerializable's only
*/ */
template<typename T> template<typename T>
static FieldState extract(BSONObj doc, static FieldState extract(BSONObj doc,
skipping to change at line 211 skipping to change at line 278
/** /**
* The following extract methods are templatized to handle extracti on of vectors and * The following extract methods are templatized to handle extracti on of vectors and
* maps of sub-objects. Keys in the map should be StringData compa tible. * maps of sub-objects. Keys in the map should be StringData compa tible.
* *
* It's possible to nest extraction of vectors and maps to any dept h, i.e: * It's possible to nest extraction of vectors and maps to any dept h, i.e:
* *
* vector<map<string,vector<string> > > val; * vector<map<string,vector<string> > > val;
* FieldParser::extract(doc, field, val, &val); * FieldParser::extract(doc, field, val, &val);
*/ */
template<typename T> template<typename T>
static FieldState extract(BSONObj doc, static FieldState extract( BSONObj doc,
const BSONField<vector<T> >& field, const BSONField<vector<T> >& field,
vector<T>* out, vector<T>* out,
string* errMsg = NULL); string* errMsg = NULL );
template<typename T>
static FieldState extract( BSONElement elem,
const BSONField<vector<T> >& field,
vector<T>* out,
string* errMsg = NULL );
template<typename K, typename T> template<typename K, typename T>
static FieldState extract(BSONObj doc, static FieldState extract( BSONObj doc,
const BSONField<map<K, T> >& field, const BSONField<map<K, T> >& field,
map<K, T>* out, map<K, T>* out,
string* errMsg = NULL); string* errMsg = NULL );
template<typename K, typename T>
static FieldState extract( BSONElement elem,
const BSONField<map<K, T> >& field,
map<K, T>* out,
string* errMsg = NULL );
private: private:
template<typename T> template<typename T>
static void clearOwnedVector(vector<T*>* vec); static void clearOwnedVector(vector<T*>* vec);
}; };
} // namespace mongo } // namespace mongo
// Inline functions for templating // Inline functions for templating
#include "field_parser-inl.h" #include "field_parser-inl.h"
 End of changes. 5 change blocks. 
56 lines changed or deleted 135 lines changed or added


 field_ref.h   field_ref.h 
skipping to change at line 54 skipping to change at line 54
* "split" the dotted fields in its parts, but no validation is done. * "split" the dotted fields in its parts, but no validation is done.
* *
* Any field part may be replaced, after the "original" field reference was parsed. Any * Any field part may be replaced, after the "original" field reference was parsed. Any
* part can be accessed through a StringData object. * part can be accessed through a StringData object.
* *
* The class is not thread safe. * The class is not thread safe.
*/ */
class FieldRef { class FieldRef {
MONGO_DISALLOW_COPYING(FieldRef); MONGO_DISALLOW_COPYING(FieldRef);
public: public:
FieldRef() : _size(0) {} FieldRef();
explicit FieldRef(const StringData& path);
/** /**
* Field parts accessed through getPart() calls no longer would be valid, after the * Field parts accessed through getPart() calls no longer would be valid, after the
* destructor ran. * destructor ran.
*/ */
~FieldRef() {} ~FieldRef() {}
/** /**
* Builds a field path out of each field part in 'dottedField'. * Builds a field path out of each field part in 'dottedField'.
*/ */
skipping to change at line 129 skipping to change at line 131
bool empty() const { return numParts() == 0; } bool empty() const { return numParts() == 0; }
private: private:
// Dotted fields are most often not longer than four parts. We use a mixed structure // Dotted fields are most often not longer than four parts. We use a mixed structure
// here that will not require any extra memory allocation when that is the case. And // here that will not require any extra memory allocation when that is the case. And
// handle larger dotted fields if it is. The idea is not to penaliz e the common case // handle larger dotted fields if it is. The idea is not to penaliz e the common case
// with allocations. // with allocations.
static const size_t kReserveAhead = 4; static const size_t kReserveAhead = 4;
/**
* Parses 'path' into parts.
*/
void _parse(const StringData& path);
/** Converts the field part index to the variable part equivalent * / /** Converts the field part index to the variable part equivalent * /
size_t getIndex(size_t i) const { return i-kReserveAhead; } size_t getIndex(size_t i) const { return i-kReserveAhead; }
/** /**
* Returns the new number of parts after appending 'part' to this f ield path. It * Returns the new number of parts after appending 'part' to this f ield path. It
* assumes that 'part' is pointing to an internally allocated area. * assumes that 'part' is pointing to an internally allocated area.
*/ */
size_t appendPart(const StringData& part); size_t appendPart(const StringData& part);
/** /**
 End of changes. 2 change blocks. 
1 lines changed or deleted 8 lines changed or added


 field_ref_set.h   field_ref_set.h 
skipping to change at line 97 skipping to change at line 97
void fillFrom(const std::vector<FieldRef*>& fields); void fillFrom(const std::vector<FieldRef*>& fields);
/** /**
* Replace any existing conflicting FieldRef with the shortest (clo sest to root) one * Replace any existing conflicting FieldRef with the shortest (clo sest to root) one
*/ */
void keepShortest(const FieldRef* toInsert); void keepShortest(const FieldRef* toInsert);
/** /**
* Find all inserted fields which conflict with the FieldRef 'toChe ck' by the semantics * Find all inserted fields which conflict with the FieldRef 'toChe ck' by the semantics
* of 'insert', and add those fields to the 'conflicts' set. * of 'insert', and add those fields to the 'conflicts' set.
*
* Return true if conflicts were found.
*/ */
void getConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const; bool findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const;
void clear() { void clear() {
_fieldSet.clear(); _fieldSet.clear();
} }
/** /**
* A debug/log-able string * A debug/log-able string
*/ */
const std::string toString() const; const std::string toString() const;
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 framework.h   framework.h 
skipping to change at line 17 skipping to change at line 17
* it under the terms of the GNU Affero General Public License, version 3 , * it under the terms of the GNU Affero General Public License, version 3 ,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Licen se * You should have received a copy of the GNU Affero General Public Licen se
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certai
n
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Yo
u
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If y
ou
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
/* /*
simple portable regression system simple portable regression system
*/ */
#include <string> #include <string>
namespace mongo { namespace mongo {
 End of changes. 1 change blocks. 
0 lines changed or deleted 17 lines changed or added


 framework_options.h   framework_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
namespace mongo { namespace mongo {
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 fts_access_method.h   fts_access_method.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/fts/fts_spec.h" #include "mongo/db/fts/fts_spec.h"
#include "mongo/db/index/btree_access_method_internal.h" #include "mongo/db/index/btree_based_access_method.h"
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class FTSAccessMethod : public BtreeBasedAccessMethod { class FTSAccessMethod : public BtreeBasedAccessMethod {
public: public:
FTSAccessMethod(IndexDescriptor* descriptor); FTSAccessMethod(IndexCatalogEntry* btreeState );
virtual ~FTSAccessMethod() { } virtual ~FTSAccessMethod() { }
// Not implemented: const fts::FTSSpec& getSpec() const { return _ftsSpec; }
virtual Status newCursor(IndexCursor** out);
fts::FTSSpec& getSpec() { return _ftsSpec; }
private: private:
// Implemented: // Implemented:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
fts::FTSSpec _ftsSpec; fts::FTSSpec _ftsSpec;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
6 lines changed or deleted 3 lines changed or added


 fts_language.h   fts_language.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/fts/fts_util.h"
#include "mongo/base/status_with.h" #include "mongo/base/status_with.h"
#include <string> #include <string>
namespace mongo { namespace mongo {
namespace fts { namespace fts {
#define MONGO_FTS_LANGUAGE_DECLARE( language, name, version ) \
FTSLanguage language; \
MONGO_INITIALIZER_GENERAL( language, MONGO_NO_PREREQUISITES, \
( "FTSAllLanguagesRegistered" ) ) \
( ::mongo::InitializerContext* context
) { \
FTSLanguage::registerLanguage( name, version, &language );
\
return Status::OK(); \
}
/** /**
* A FTSLanguage is a copyable glorified enum representing a langua * A FTSLanguage represents a language for a text-indexed document
ge for a text-indexed or a text search.
* document or a text search. Example of suggested usage: * FTSLanguage objects are not copyable.
*
* Recommended usage:
* *
* StatusWithFTSLanguage swl = FTSLanguage::makeFTSLanguage( "e n" ); * StatusWithFTSLanguage swl = FTSLanguage::make( "en", TEXT_IN DEX_VERSION_2 );
* if ( !swl.getStatus().isOK() ) { * if ( !swl.getStatus().isOK() ) {
* // Error. * // Error.
* } * }
* else { * else {
* const FTSLanguage language = swl.getValue(); * const FTSLanguage* language = swl.getValue();
* // Use language. * // Use language.
* } * }
*/ */
class FTSLanguage { class FTSLanguage {
// Use make() instead of copying.
MONGO_DISALLOW_COPYING( FTSLanguage );
public: public:
/** Create an uninitialized language. */ /** Create an uninitialized language. */
FTSLanguage(); FTSLanguage();
~FTSLanguage(); /**
FTSLanguage( const FTSLanguage& ); * Returns the language as a string in canonical form (lowercas
FTSLanguage& operator=( const FTSLanguage & ); ed English name). It is
* an error to call str() on an uninitialized language.
*/
const std::string& str() const;
/** /**
* Initialize an FTSLanguage from a language string. Language * Register string 'languageName' as a new language with text i
strings are ndex version
* case-insensitive, and can be in one of the two following for * 'textIndexVersion'. Saves the resulting language to out-arg
ms: ument 'languageOut'.
* - English name, like "spanish". * Subsequent calls to FTSLanguage::make() will recognize the n
* - Two-letter code, like "es". ewly-registered language
* Returns an error Status if an invalid language string is pas * string.
sed.
*/ */
Status init( const std::string& lang ); static void registerLanguage( const StringData& languageName,
TextIndexVersion textIndexVersion
,
FTSLanguage *languageOut );
/** /**
* Returns the language as a string in canonical form (lowercas * Register 'alias' as an alias for 'language' with text index
ed English name). It is version
* an error to call str() on an uninitialized language. * 'textIndexVersion'. Subsequent calls to FTSLanguage::make()
will recognize the
* newly-registered alias.
*/ */
std::string str() const; static void registerLanguageAlias( const FTSLanguage* language,
const StringData& alias,
TextIndexVersion textIndexVe
rsion );
/** /**
* Convenience method for creating an FTSLanguage out of a lang * Return the FTSLanguage associated with the given language st
uage string. Caller ring. Returns an error
* must check getStatus().isOK() on return value. * Status if an invalid language string is passed.
*
* For textIndexVersion=TEXT_INDEX_VERSION_2, language strings
are
* case-insensitive, and need to be in one of the two following
forms:
* - English name, like "spanish".
* - Two-letter code, like "es".
*
* For textIndexVersion=TEXT_INDEX_VERSION_1, no validation or
normalization of
* language strings is performed. This is necessary to preserv
e indexing behavior for
* documents with language strings like "en": for compatibility
, text data in these
* documents needs to be processed with the English stemmer and
the empty stopword list
* (since "en" is recognized by Snowball but not the stopword p
rocessing logic).
*/ */
static StatusWith<const FTSLanguage> makeFTSLanguage( const std static StatusWith<const FTSLanguage*> make( const StringData& l
::string& lang ); angName,
TextIndexVersion te
xtIndexVersion );
private: private:
// Pointer to string representation of language. Not owned her // String representation of language in canonical form.
e. std::string _canonicalName;
StringData _lang;
}; };
typedef StatusWith<const FTSLanguage> StatusWithFTSLanguage; typedef StatusWith<const FTSLanguage*> StatusWithFTSLanguage;
extern FTSLanguage languagePorterV1;
extern FTSLanguage languageEnglishV2;
extern FTSLanguage languageFrenchV2;
} }
} }
 End of changes. 15 change blocks. 
30 lines changed or deleted 79 lines changed or added


 fts_matcher.h   fts_matcher.h 
skipping to change at line 55 skipping to change at line 55
/** /**
* @return true if obj has a negated term * @return true if obj has a negated term
*/ */
bool hasNegativeTerm(const BSONObj& obj ) const; bool hasNegativeTerm(const BSONObj& obj ) const;
/** /**
* @return true if obj is ok by all phrases * @return true if obj is ok by all phrases
* so all full phrases and no negated * so all full phrases and no negated
*/ */
bool phrasesMatch( const BSONObj& obj ) const; bool phrasesMatch( const BSONObj& obj ) const;
bool phraseMatch( const string& phrase, const BSONObj& obj ) co nst; bool phraseMatch( const string& phrase, const BSONObj& obj ) co nst;
bool matchesNonTerm( const BSONObj& obj ) const { bool matchesNonTerm( const BSONObj& obj ) const {
return !hasNegativeTerm( obj ) && phrasesMatch( obj ); return !hasNegativeTerm( obj ) && phrasesMatch( obj );
} }
private: private:
bool _hasNegativeTerm_recurse(const BSONObj& obj ) const;
/** /**
* @return true if raw has a negated term * @return true if raw has a negated term
*/ */
bool _hasNegativeTerm_string( const string& raw ) const; bool _hasNegativeTerm_string( const string& raw ) const;
bool _phraseRecurse( const string& phrase, const BSONObj& obj ) /**
const; * @return true if raw has a phrase
bool _phraseMatches( const string& phrase, const string& haysta */
ck ) const; bool _phraseMatches( const string& phrase, const string& raw )
const;
FTSQuery _query; FTSQuery _query;
FTSSpec _spec; FTSSpec _spec;
Stemmer _stemmer; Stemmer _stemmer;
}; };
} }
} }
 End of changes. 4 change blocks. 
9 lines changed or deleted 7 lines changed or added


 fts_query.h   fts_query.h 
skipping to change at line 53 skipping to change at line 53
namespace fts { namespace fts {
using std::string; using std::string;
using std::vector; using std::vector;
using std::set; using std::set;
class FTSQuery { class FTSQuery {
public: public:
Status parse(const string& query, const string& language); Status parse(const string& query, const StringData& language);
const vector<string>& getTerms() const { return _terms; } const vector<string>& getTerms() const { return _terms; }
const unordered_set<string>& getNegatedTerms() const { return _ negatedTerms; } const unordered_set<string>& getNegatedTerms() const { return _ negatedTerms; }
const vector<string>& getPhr() const { return _phrases; } const vector<string>& getPhr() const { return _phrases; }
const vector<string>& getNegatedPhr() const { return _negatedPh rases; } const vector<string>& getNegatedPhr() const { return _negatedPh rases; }
/** /**
* @return true if any negations or phrase + or - * @return true if any negations or phrase + or -
*/ */
bool hasNonTermPieces() const { bool hasNonTermPieces() const {
return return
_negatedTerms.size() > 0 || _negatedTerms.size() > 0 ||
_phrases.size() > 0 || _phrases.size() > 0 ||
_negatedPhrases.size() > 0; _negatedPhrases.size() > 0;
} }
string getSearch() const { return _search; } string getSearch() const { return _search; }
const FTSLanguage getLanguage() const { return _language; } const FTSLanguage& getLanguage() const { return *_language; }
string toString() const; string toString() const;
string debugString() const; string debugString() const;
protected: protected:
string _search; string _search;
FTSLanguage _language; const FTSLanguage* _language;
vector<string> _terms; vector<string> _terms;
unordered_set<string> _negatedTerms; unordered_set<string> _negatedTerms;
vector<string> _phrases; vector<string> _phrases;
vector<string> _negatedPhrases; vector<string> _negatedPhrases;
private: private:
void _addTerm( const StopWords* sw, Stemmer& stemmer, const str ing& term, bool negated ); void _addTerm( const StopWords* sw, Stemmer& stemmer, const str ing& term, bool negated );
}; };
} }
 End of changes. 3 change blocks. 
3 lines changed or deleted 3 lines changed or added


 fts_spec.h   fts_spec.h 
skipping to change at line 49 skipping to change at line 49
#include "mongo/db/fts/stemmer.h" #include "mongo/db/fts/stemmer.h"
#include "mongo/db/fts/stop_words.h" #include "mongo/db/fts/stop_words.h"
#include "mongo/db/fts/tokenizer.h" #include "mongo/db/fts/tokenizer.h"
#include "mongo/platform/unordered_map.h" #include "mongo/platform/unordered_map.h"
namespace mongo { namespace mongo {
namespace fts { namespace fts {
extern const double MAX_WEIGHT; extern const double MAX_WEIGHT;
extern const double MAX_WORD_WEIGHT;
extern const double DEFAULT_WEIGHT;
typedef std::map<string,double> Weights; // TODO cool map typedef std::map<string,double> Weights; // TODO cool map
typedef unordered_map<string,double> TermFrequencyMap; typedef unordered_map<string,double> TermFrequencyMap;
struct ScoreHelperStruct {
ScoreHelperStruct()
: freq(0), count(0), exp(0){
}
double freq;
double count;
double exp;
};
typedef unordered_map<string,ScoreHelperStruct> ScoreHelperMap;
class FTSSpec { class FTSSpec {
struct Tools { struct Tools {
Tools( const FTSLanguage _language, Tools( const FTSLanguage& _language,
const Stemmer* _stemmer, const Stemmer* _stemmer,
const StopWords* _stopwords ) const StopWords* _stopwords )
: language( _language ) : language( _language )
, stemmer( _stemmer ) , stemmer( _stemmer )
, stopwords( _stopwords ) {} , stopwords( _stopwords ) {}
const FTSLanguage language; const FTSLanguage& language;
const Stemmer* stemmer; const Stemmer* stemmer;
const StopWords* stopwords; const StopWords* stopwords;
}; };
public: public:
FTSSpec( const BSONObj& indexInfo ); FTSSpec( const BSONObj& indexInfo );
bool wildcard() const { return _wildcard; } bool wildcard() const { return _wildcard; }
const FTSLanguage defaultLanguage() const { return _defaultLang uage; } const FTSLanguage& defaultLanguage() const { return *_defaultLa nguage; }
const string& languageOverrideField() const { return _languageO verrideField; } const string& languageOverrideField() const { return _languageO verrideField; }
size_t numExtraBefore() const { return _extraBefore.size(); } size_t numExtraBefore() const { return _extraBefore.size(); }
const std::string& extraBefore( unsigned i ) const { return _ex traBefore[i]; } const std::string& extraBefore( unsigned i ) const { return _ex traBefore[i]; }
size_t numExtraAfter() const { return _extraAfter.size(); } size_t numExtraAfter() const { return _extraAfter.size(); }
const std::string& extraAfter( unsigned i ) const { return _ext raAfter[i]; } const std::string& extraAfter( unsigned i ) const { return _ext raAfter[i]; }
/** /**
* Calculates term/score pairs for a BSONObj as applied to this spec. * Calculates term/score pairs for a BSONObj as applied to this spec.
* - "obj": the BSONObj to traverse; can be a subdocument or ar * @arg obj document to traverse; can be a subdocument or arra
ray y
* - "parentLanguage": nearest enclosing document "language" sp * @arg term_freqs output parameter to store (term,score) resu
ec for obj lts
* - "parentPath": obj's dotted path in containing document
* - "isArray": true if obj is an array
* - "term_freqs": out-parameter to store results
*/ */
void scoreDocument( const BSONObj& obj, void scoreDocument( const BSONObj& obj, TermFrequencyMap* term_
const FTSLanguage parentLanguage, freqs ) const;
const string& parentPath,
bool isArray,
TermFrequencyMap* term_freqs ) const;
/** /**
* given a query, pulls out the pieces (in order) that go in th e index first * given a query, pulls out the pieces (in order) that go in th e index first
*/ */
Status getIndexPrefix( const BSONObj& filter, BSONObj* out ) co nst; Status getIndexPrefix( const BSONObj& filter, BSONObj* out ) co nst;
const Weights& weights() const { return _weights; } const Weights& weights() const { return _weights; }
static BSONObj fixSpec( const BSONObj& spec ); static BSONObj fixSpec( const BSONObj& spec );
private: private:
//
// Helper methods. Invoked for TEXT_INDEX_VERSION_2 spec objec
ts only.
//
/**
* Calculate the term scores for 'raw' and update 'term_freqs'
with the result. Parses
* 'raw' using 'tools', and weights term scores based on 'weigh
t'.
*/
void _scoreStringV2( const Tools& tools,
const StringData& raw,
TermFrequencyMap* term_freqs,
double weight ) const;
public:
/** /**
* Get the language override for the given BSON doc. If no lan guage override is * Get the language override for the given BSON doc. If no lan guage override is
* specified, returns currentLanguage. * specified, returns currentLanguage.
*/ */
const FTSLanguage getLanguageToUse( const BSONObj& userDoc, const FTSLanguage* _getLanguageToUseV2( const BSONObj& userDoc,
const FTSLanguage currentLa const FTSLanguage* curr
nguage ) const; entLanguage ) const;
void _scoreString( const Tools& tools, private:
const StringData& raw, //
TermFrequencyMap* term_freqs, // Deprecated helper methods. Invoked for TEXT_INDEX_VERSION_1
double weight ) const; spec objects only.
//
void _scoreStringV1( const Tools& tools,
const StringData& raw,
TermFrequencyMap* docScores,
double weight ) const;
bool _weightV1( const StringData& field, double* out ) const;
void _scoreRecurseV1( const Tools& tools,
const BSONObj& obj,
TermFrequencyMap* term_freqs ) const;
void _scoreDocumentV1( const BSONObj& obj, TermFrequencyMap* te
rm_freqs ) const;
FTSLanguage _defaultLanguage; const FTSLanguage& _getLanguageToUseV1( const BSONObj& userDoc
) const;
static BSONObj _fixSpecV1( const BSONObj& spec );
//
// Instance variables.
//
TextIndexVersion _textIndexVersion;
const FTSLanguage* _defaultLanguage;
string _languageOverrideField; string _languageOverrideField;
bool _wildcard; bool _wildcard;
// _weights stores a mapping between the fields and the value a // mapping : fieldname -> weight
s a double
// basically, how much should an occurence of (query term) in (
field) be worth
Weights _weights; Weights _weights;
// other fields to index // Prefix compound key - used to partition search index
std::vector<string> _extraBefore; std::vector<string> _extraBefore;
// Suffix compound key - used for covering index behavior
std::vector<string> _extraAfter; std::vector<string> _extraAfter;
}; };
} }
} }
 End of changes. 17 change blocks. 
30 lines changed or deleted 77 lines changed or added


 fts_util.h   fts_util.h 
skipping to change at line 47 skipping to change at line 47
#include "mongo/db/storage/record.h" #include "mongo/db/storage/record.h"
#include "mongo/util/unordered_fast_key_table.h" #include "mongo/util/unordered_fast_key_table.h"
namespace mongo { namespace mongo {
namespace fts { namespace fts {
extern const std::string WILDCARD; extern const std::string WILDCARD;
extern const std::string INDEX_NAME; extern const std::string INDEX_NAME;
enum TextIndexVersion {
TEXT_INDEX_VERSION_1 = 1, // Legacy index format. Deprecated.
TEXT_INDEX_VERSION_2 = 2 // Current index format.
};
/** /**
* destructive! * destructive!
*/ */
inline void makeLower( std::string* s ) { inline void makeLower( std::string* s ) {
std::string::size_type sz = s->size(); std::string::size_type sz = s->size();
for ( std::string::size_type i = 0; i < sz; i++ ) for ( std::string::size_type i = 0; i < sz; i++ )
(*s)[i] = (char)tolower( (int)(*s)[i] ); (*s)[i] = (char)tolower( (int)(*s)[i] );
} }
/* /*
 End of changes. 1 change blocks. 
0 lines changed or deleted 5 lines changed or added


 geoquery.h   geoquery.h 
skipping to change at line 117 skipping to change at line 117
maxDistance(std::numeric_limits<double>::max()), maxDistance(std::numeric_limits<double>::max()),
isNearSphere(false) { } isNearSphere(false) { }
NearQuery(const string& f) NearQuery(const string& f)
: field(f), : field(f),
minDistance(0), minDistance(0),
maxDistance(std::numeric_limits<double>::max()), maxDistance(std::numeric_limits<double>::max()),
isNearSphere(false) { } isNearSphere(false) { }
bool parseFrom(const BSONObj &obj); bool parseFrom(const BSONObj &obj);
bool parseFromGeoNear(const BSONObj &obj, double radius);
// The name of the field that contains the geometry. // The name of the field that contains the geometry.
string field; string field;
// The starting point of the near search. // The starting point of the near search.
PointWithCRS centroid; PointWithCRS centroid;
// Min and max distance from centroid that we're willing to search. // Min and max distance from centroid that we're willing to search.
// Distance is in whatever units the centroid's CRS implies. // Distance is in whatever units the centroid's CRS implies.
// If centroid.crs == FLAT these are radians. // If centroid.crs == FLAT these are radians.
skipping to change at line 151 skipping to change at line 150
} }
private: private:
bool parseLegacyQuery(const BSONObj &obj); bool parseLegacyQuery(const BSONObj &obj);
bool parseNewQuery(const BSONObj &obj); bool parseNewQuery(const BSONObj &obj);
}; };
// This represents either a $within or a $geoIntersects. // This represents either a $within or a $geoIntersects.
class GeoQuery { class GeoQuery {
public: public:
GeoQuery() : field(""), predicate(INVALID), _uniqueDocs(true) {} GeoQuery() : field(""), predicate(INVALID) {}
GeoQuery(const string& f) : field(f), predicate(INVALID), _uniqueDo GeoQuery(const string& f) : field(f), predicate(INVALID) {}
cs(true) {}
enum Predicate { enum Predicate {
WITHIN, WITHIN,
INTERSECT, INTERSECT,
INVALID INVALID
}; };
bool parseFrom(const BSONObj &obj); bool parseFrom(const BSONObj &obj);
bool satisfiesPredicate(const GeometryContainer &otherContainer) co nst; bool satisfiesPredicate(const GeometryContainer &otherContainer) co nst;
bool hasS2Region() const; bool hasS2Region() const;
const S2Region& getRegion() const; const S2Region& getRegion() const;
string getField() const { return field; } string getField() const { return field; }
Predicate getPred() const { return predicate; } Predicate getPred() const { return predicate; }
const GeometryContainer& getGeometry() const { return geoContainer; } const GeometryContainer& getGeometry() const { return geoContainer; }
bool uniqueDocs() const { return _uniqueDocs; }
private: private:
// Try to parse the provided object into the right place. // Try to parse the provided object into the right place.
bool parseLegacyQuery(const BSONObj &obj); bool parseLegacyQuery(const BSONObj &obj);
bool parseNewQuery(const BSONObj &obj); bool parseNewQuery(const BSONObj &obj);
// Name of the field in the query. // Name of the field in the query.
string field; string field;
GeometryContainer geoContainer; GeometryContainer geoContainer;
Predicate predicate; Predicate predicate;
bool _uniqueDocs;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
7 lines changed or deleted 2 lines changed or added


 gridfs.h   gridfs.h 
skipping to change at line 23 skipping to change at line 23
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
typedef unsigned long long gridfs_offset; typedef unsigned long long gridfs_offset;
class GridFS; class GridFS;
class GridFile; class GridFile;
class GridFSChunk { class MONGO_CLIENT_API GridFSChunk {
public: public:
GridFSChunk( BSONObj data ); GridFSChunk( BSONObj data );
GridFSChunk( BSONObj fileId , int chunkNumber , const char * data , int len ); GridFSChunk( BSONObj fileId , int chunkNumber , const char * data , int len );
int len() const { int len() const {
int len; int len;
_data["data"].binDataClean( len ); _data["data"].binDataClean( len );
return len; return len;
} }
skipping to change at line 55 skipping to change at line 56
private: private:
BSONObj _data; BSONObj _data;
friend class GridFS; friend class GridFS;
}; };
/** /**
GridFS is for storing large file-style objects in MongoDB. GridFS is for storing large file-style objects in MongoDB.
@see http://dochub.mongodb.org/core/gridfsspec @see http://dochub.mongodb.org/core/gridfsspec
*/ */
class GridFS { class MONGO_CLIENT_API GridFS {
public: public:
/** /**
* @param client - db connection * @param client - db connection
* @param dbName - root database name * @param dbName - root database name
* @param prefix - if you want your data somewhere besides <dbname> .fs * @param prefix - if you want your data somewhere besides <dbname> .fs
*/ */
GridFS( DBClientBase& client , const string& dbName , const string& prefix="fs" ); GridFS( DBClientBase& client , const string& dbName , const string& prefix="fs" );
~GridFS(); ~GridFS();
/** /**
skipping to change at line 87 skipping to change at line 88
* @param contentType optional MIME type for this object. * @param contentType optional MIME type for this object.
* (default is to omit) * (default is to omit)
* @return the file object * @return the file object
*/ */
BSONObj storeFile( const string& fileName , const string& remoteNam e="" , const string& contentType=""); BSONObj storeFile( const string& fileName , const string& remoteNam e="" , const string& contentType="");
/** /**
* puts the file represented by data into the db * puts the file represented by data into the db
* @param data pointer to buffer to store in GridFS * @param data pointer to buffer to store in GridFS
* @param length length of buffer * @param length length of buffer
* @param remoteName optional filename to use for file stored in Gr * @param remoteName filename to use for file stored in GridFS
idFS
* (default is to use fileName parameter)
* @param contentType optional MIME type for this object. * @param contentType optional MIME type for this object.
* (default is to omit) * (default is to omit)
* @return the file object * @return the file object
*/ */
BSONObj storeFile( const char* data , size_t length , const string& remoteName , const string& contentType=""); BSONObj storeFile( const char* data , size_t length , const string& remoteName , const string& contentType="");
/** /**
* removes file referenced by fileName from the db * removes file referenced by fileName from the db
* @param fileName filename (in GridFS) of the file to remove * @param fileName filename (in GridFS) of the file to remove
* @return the file object * @return the file object
skipping to change at line 139 skipping to change at line 139
// insert fileobject. All chunks must be in DB. // insert fileobject. All chunks must be in DB.
BSONObj insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType); BSONObj insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType);
friend class GridFile; friend class GridFile;
}; };
/** /**
wrapper for a file stored in the Mongo database wrapper for a file stored in the Mongo database
*/ */
class GridFile { class MONGO_CLIENT_API GridFile {
public: public:
/** /**
* @return whether or not this file exists * @return whether or not this file exists
* findFile will always return a GriFile, so need to check this * findFile will always return a GriFile, so need to check this
*/ */
bool exists() const { bool exists() const {
return ! _obj.isEmpty(); return ! _obj.isEmpty();
} }
string getFilename() const { string getFilename() const {
 End of changes. 5 change blocks. 
6 lines changed or deleted 5 lines changed or added


 hash_access_method.h   hash_access_method.h 
skipping to change at line 36 skipping to change at line 36
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/hasher.h" // For HashSeed. #include "mongo/db/hasher.h" // For HashSeed.
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/btree_access_method_internal.h" #include "mongo/db/index/btree_based_access_method.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
/** /**
* This is the access method for "hashed" indices. * This is the access method for "hashed" indices.
*/ */
class HashAccessMethod : public BtreeBasedAccessMethod { class HashAccessMethod : public BtreeBasedAccessMethod {
public: public:
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
HashAccessMethod(IndexDescriptor* descriptor); HashAccessMethod(IndexCatalogEntry* btreeState);
virtual ~HashAccessMethod() { } virtual ~HashAccessMethod() { }
virtual Status newCursor(IndexCursor** out);
// This is a NO-OP. // This is a NO-OP.
virtual Status setOptions(const CursorOptions& options) { virtual Status setOptions(const CursorOptions& options) {
return Status::OK(); return Status::OK();
} }
/** /**
* Hashing function used by both this class and the cursors we crea te. * Hashing function used by both this class and the cursors we crea te.
* Exposed for testing and so mongo/db/index_legacy.cpp can use it. * Exposed for testing and so mongo/db/index_legacy.cpp can use it.
*/ */
static long long int makeSingleKey(const BSONElement& e, HashSeed s eed, int v); static long long int makeSingleKey(const BSONElement& e, HashSeed s eed, int v);
 End of changes. 3 change blocks. 
4 lines changed or deleted 2 lines changed or added


 hasher.h   hasher.h 
skipping to change at line 102 skipping to change at line 102
* ints via truncation, so floating point values round towards 0 to the * ints via truncation, so floating point values round towards 0 to the
* nearest int representable as a 64-bit long. * nearest int representable as a 64-bit long.
* *
* This function is used in the computation of hashed indexes * This function is used in the computation of hashed indexes
* and hashed shard keys, and thus should not be changed unless * and hashed shard keys, and thus should not be changed unless
* the associated "getKeys" and "makeSingleKey" method in the * the associated "getKeys" and "makeSingleKey" method in the
* hashindex type is changed accordingly. * hashindex type is changed accordingly.
*/ */
static long long int hash64( const BSONElement& e , HashSeed seed ) ; static long long int hash64( const BSONElement& e , HashSeed seed ) ;
private:
BSONElementHasher();
/* This incrementally computes the hash of BSONElement "e" /* This incrementally computes the hash of BSONElement "e"
* using hash function "h". If "includeFieldName" is true, * using hash function "h". If "includeFieldName" is true,
* then the name of the field is hashed in between the type of * then the name of the field is hashed in between the type of
* the element and the element value. The hash function "h" * the element and the element value. The hash function "h"
* is applied recursively to any sub-elements (arrays/sub-documents ), * is applied recursively to any sub-elements (arrays/sub-documents ),
* squashing elements of the same canonical type. * squashing elements of the same canonical type.
* Used as a helper for hash64 above. * Used as a helper for hash64 above.
*/ */
static void recursiveHash( Hasher* h , const BSONElement& e , bool includeFieldName ); static void recursiveHash( Hasher* h , const BSONElement& e , bool includeFieldName );
private:
BSONElementHasher();
}; };
} }
 End of changes. 2 change blocks. 
3 lines changed or deleted 3 lines changed or added


 hashtab.h   hashtab.h 
skipping to change at line 27 skipping to change at line 27
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include <map> #include <map>
#include "../db/dur.h" #include "mongo/db/dur.h"
namespace mongo { namespace mongo {
#pragma pack(1) #pragma pack(1)
/* you should define: /* you should define:
int Key::hash() return > 0 always. int Key::hash() return > 0 always.
*/ */
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 haystack_access_method.h   haystack_access_method.h 
skipping to change at line 32 skipping to change at line 32
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/index/btree_access_method_internal.h" #include "mongo/db/index/btree_based_access_method.h"
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
/** /**
* Maps (lat, lng) to the bucketSize-sided square bucket that contains it. * Maps (lat, lng) to the bucketSize-sided square bucket that contains it.
* Examines all documents in a given radius of a given point. * Examines all documents in a given radius of a given point.
* Returns all documents that match a given search restriction. * Returns all documents that match a given search restriction.
* See http://dochub.mongodb.org/core/haystackindexes * See http://dochub.mongodb.org/core/haystackindexes
skipping to change at line 58 skipping to change at line 58
* db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 }) * db.foo.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 })
* pos is the name of the field to be indexed that has lat/lng data i n an array. * pos is the name of the field to be indexed that has lat/lng data i n an array.
* type is the name of the secondary field to be indexed. * type is the name of the secondary field to be indexed.
* bucketSize specifies the dimension of the square bucket for the da ta in pos. * bucketSize specifies the dimension of the square bucket for the da ta in pos.
* ALL fields are mandatory. * ALL fields are mandatory.
*/ */
class HaystackAccessMethod : public BtreeBasedAccessMethod { class HaystackAccessMethod : public BtreeBasedAccessMethod {
public: public:
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
HaystackAccessMethod(IndexDescriptor* descriptor); HaystackAccessMethod(IndexCatalogEntry* btreeState);
virtual ~HaystackAccessMethod() { } virtual ~HaystackAccessMethod() { }
// Not implemented.
virtual Status newCursor(IndexCursor** out);
protected: protected:
friend class GeoHaystackSearchCommand; friend class GeoHaystackSearchCommand;
void searchCommand(const BSONObj& nearObj, double maxDistance, cons t BSONObj& search, void searchCommand(const BSONObj& nearObj, double maxDistance, cons t BSONObj& search,
BSONObjBuilder* result, unsigned limit); BSONObjBuilder* result, unsigned limit);
private: private:
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// Helper methods called by getKeys: // Helper methods called by getKeys:
int hash(const BSONElement& e) const; int hash(const BSONElement& e) const;
 End of changes. 3 change blocks. 
5 lines changed or deleted 2 lines changed or added


 hostandport.h   hostandport.h 
skipping to change at line 49 skipping to change at line 49
/** @param p port number. -1 is ok to use default. */ /** @param p port number. -1 is ok to use default. */
HostAndPort(const std::string& h, int p /*= -1*/) : _host(h), _port (p) { HostAndPort(const std::string& h, int p /*= -1*/) : _host(h), _port (p) {
verify(!mongoutils::str::startsWith(h, '#')); verify(!mongoutils::str::startsWith(h, '#'));
} }
HostAndPort(const SockAddr& sock ) : _host( sock.getAddr() ) , _por t( sock.getPort() ) { } HostAndPort(const SockAddr& sock ) : _host( sock.getAddr() ) , _por t( sock.getPort() ) { }
static HostAndPort me(); static HostAndPort me();
bool operator<(const HostAndPort& r) const { bool operator<(const HostAndPort& r) const {
string h = host(); const int cmp = host().compare(r.host());
string rh = r.host(); if (cmp)
if( h < rh ) return cmp < 0;
return true; return port() < r.port();
if( h == rh )
return port() < r.port();
return false;
} }
bool operator==(const HostAndPort& r) const { bool operator==(const HostAndPort& r) const {
return host() == r.host() && port() == r.port(); return host() == r.host() && port() == r.port();
} }
bool operator!=(const HostAndPort& r) const { return !(*this == r); } bool operator!=(const HostAndPort& r) const { return !(*this == r); }
/* returns true if the host/port combo identifies this process inst ance. */ /* returns true if the host/port combo identifies this process inst ance. */
bool isSelf() const; // defined in isself.cpp bool isSelf() const; // defined in isself.cpp
skipping to change at line 81 skipping to change at line 78
*/ */
string toString( bool includePort=true ) const; string toString( bool includePort=true ) const;
operator string() const { return toString(); } operator string() const { return toString(); }
void append( StringBuilder& ss ) const; void append( StringBuilder& ss ) const;
bool empty() const { bool empty() const {
return _host.empty() && _port < 0; return _host.empty() && _port < 0;
} }
string host() const { const string& host() const {
return _host; return _host;
} }
int port() const { int port() const {
if (hasPort()) if (hasPort())
return _port; return _port;
return ServerGlobalParams::DefaultDBPort; return ServerGlobalParams::DefaultDBPort;
} }
bool hasPort() const { bool hasPort() const {
return _port >= 0; return _port >= 0;
} }
 End of changes. 2 change blocks. 
8 lines changed or deleted 5 lines changed or added


 httpclient.h   httpclient.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/client/export_macros.h"
#include "mongo/pch.h" #include "mongo/pch.h"
namespace mongo { namespace mongo {
class HttpClient : boost::noncopyable { class MONGO_CLIENT_API HttpClient : boost::noncopyable {
public: public:
typedef map<string,string> Headers; typedef map<string,string> Headers;
class Result { class MONGO_CLIENT_API Result {
public: public:
Result() {} Result() {}
const string& getEntireResponse() const { const string& getEntireResponse() const {
return _entireResponse; return _entireResponse;
} }
Headers getHeaders() const { Headers getHeaders() const {
return _headers; return _headers;
} }
 End of changes. 3 change blocks. 
2 lines changed or deleted 3 lines changed or added


 index_access_method.h   index_access_method.h 
skipping to change at line 35 skipping to change at line 35
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/index/index_cursor.h" #include "mongo/db/index/index_cursor.h"
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/namespace_details.h"
namespace mongo { namespace mongo {
class UpdateTicket; class UpdateTicket;
struct InsertDeleteOptions; struct InsertDeleteOptions;
/** /**
* An IndexAccessMethod is the interface through which all the mutation , lookup, and * An IndexAccessMethod is the interface through which all the mutation , lookup, and
* traversal of index entries is done. The class is designed so that th e underlying index * traversal of index entries is done. The class is designed so that th e underlying index
* data structure is opaque to the caller. * data structure is opaque to the caller.
skipping to change at line 112 skipping to change at line 111
* 'from' will remain. Assumes that the index has not changed sinc e validateUpdate was * 'from' will remain. Assumes that the index has not changed sinc e validateUpdate was
* called. If the index was changed, we may return an error, as ou r ticket may have been * called. If the index was changed, we may return an error, as ou r ticket may have been
* invalidated. * invalidated.
*/ */
virtual Status update(const UpdateTicket& ticket, int64_t* numUpdat ed) = 0; virtual Status update(const UpdateTicket& ticket, int64_t* numUpdat ed) = 0;
/** /**
* Fills in '*out' with an IndexCursor. Return a status indicating success or reason of * Fills in '*out' with an IndexCursor. Return a status indicating success or reason of
* failure. If the latter, '*out' contains NULL. See index_cursor. h for IndexCursor usage. * failure. If the latter, '*out' contains NULL. See index_cursor. h for IndexCursor usage.
*/ */
virtual Status newCursor(IndexCursor **out) = 0; virtual Status newCursor(IndexCursor **out) const = 0;
// ------ index level operations ------
/**
* initializes this index
* only called once for the lifetime of the index
* if called multiple times, is an error
*/
virtual Status initializeAsEmpty() = 0;
/** /**
* Try to page-in the pages that contain the keys generated from 'o bj'. * Try to page-in the pages that contain the keys generated from 'o bj'.
* This can be used to speed up future accesses to an index by tryi ng to ensure the * This can be used to speed up future accesses to an index by tryi ng to ensure the
* appropriate pages are not swapped out. * appropriate pages are not swapped out.
* See prefetch.cpp. * See prefetch.cpp.
*/ */
virtual Status touch(const BSONObj& obj) = 0; virtual Status touch(const BSONObj& obj) = 0;
/** /**
skipping to change at line 134 skipping to change at line 142
* Set numKeys to the number of keys in the index. * Set numKeys to the number of keys in the index.
* *
* Return OK if the index is valid. * Return OK if the index is valid.
* *
* Currently wasserts that the index is invalid. This could/should be changed in * Currently wasserts that the index is invalid. This could/should be changed in
* the future to return a Status. * the future to return a Status.
*/ */
virtual Status validate(int64_t* numKeys) = 0; virtual Status validate(int64_t* numKeys) = 0;
// //
// Bulk operations support (TODO) // Bulk operations support
// //
// virtual Status insertBulk(BulkDocs arg) = 0; /**
* Starts a bulk operation.
* You work on the returned IndexAccessMethod and then call commitB
ulk.
* This can return NULL, meaning bulk mode is not available.
*
* Long term, you'll eventually be able to mix/match bulk, not bulk
,
* have as many as you want, etc..
*
* For now (1/8/14) you can only do bulk when the index is empty
* it will fail if you try other times.
*/
virtual IndexAccessMethod* initiateBulk() = 0;
// virtual Status removeBulk(BulkDocs arg) = 0; /**
* Call this when you are ready to finish your bulk work.
* Pass in the IndexAccessMethod gotten from initiateBulk.
* After this method is called, the bulk index access method is inv
alid
* and should not be used.
* @param bulk - something created from initiateBulk
* @param mayInterrupt - is this commit interruptable (will cancel)
* @param dups - if NULL, error out on dups if not allowed
* if not NULL, put the bad DiskLocs there
*/
virtual Status commitBulk( IndexAccessMethod* bulk,
bool mayInterrupt,
std::set<DiskLoc>* dups ) = 0;
}; };
/** /**
* Updates are two steps: verify that it's a valid update, and perform it. * Updates are two steps: verify that it's a valid update, and perform it.
* validateUpdate fills out the UpdateStatus and update actually applie s it. * validateUpdate fills out the UpdateStatus and update actually applie s it.
*/ */
class UpdateTicket { class UpdateTicket {
public: public:
UpdateTicket() : _isValid(false) { } UpdateTicket() : _isValid(false) { }
 End of changes. 5 change blocks. 
5 lines changed or deleted 39 lines changed or added


 index_bounds.h   index_bounds.h 
skipping to change at line 85 skipping to change at line 85
bool isValidFor(const BSONObj& keyPattern, int direction); bool isValidFor(const BSONObj& keyPattern, int direction);
// Methods below used for debugging purpose only. Do not use outsid e testing code. // Methods below used for debugging purpose only. Do not use outsid e testing code.
size_t size() const; size_t size() const;
std::string getFieldName(size_t i) const; std::string getFieldName(size_t i) const;
size_t getNumIntervals(size_t i) const; size_t getNumIntervals(size_t i) const;
Interval getInterval(size_t i, size_t j) const; Interval getInterval(size_t i, size_t j) const;
std::string toString() const; std::string toString() const;
BSONObj toBSON() const; BSONObj toBSON() const;
// TODO: KILL THIS? // TODO: we use this for max/min scan. Consider migrating that.
// We need this for legacy non-index indices (2d/2dsphere) that tak
e a BSONObj and don't
// deal with the kind of absurd Btree-only behavior of IndexBoundsC
hecker.
bool isSimpleRange; bool isSimpleRange;
BSONObj startKey; BSONObj startKey;
BSONObj endKey; BSONObj endKey;
bool endKeyInclusive; bool endKeyInclusive;
}; };
/** /**
* A helper used by IndexScan to navigate an index. * A helper used by IndexScan to navigate an index.
*/ */
class IndexBoundsChecker { class IndexBoundsChecker {
 End of changes. 1 change blocks. 
5 lines changed or deleted 1 lines changed or added


 index_bounds_builder.h   index_bounds_builder.h 
skipping to change at line 35 skipping to change at line 35
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/hasher.h" #include "mongo/db/hasher.h"
#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/index_entry.h"
namespace mongo { namespace mongo {
/** /**
* Translates expressions over fields into bounds on an index. * Translates expressions over fields into bounds on an index.
*/ */
class IndexBoundsBuilder { class IndexBoundsBuilder {
public: public:
enum BoundsTightness {
// Index bounds are exact.
EXACT,
// Index bounds are inexact, and a fetch is required.
INEXACT_FETCH,
// Index bounds are inexact, but no fetch is required
INEXACT_COVERED
};
/** /**
* Populate the provided O.I.L. with one interval goes from MinKey to MaxKey (or vice-versa * Populate the provided O.I.L. with one interval goes from MinKey to MaxKey (or vice-versa
* depending on the index direction). * depending on the index direction).
*/ */
static void allValuesForField(const BSONElement& elt, OrderedInterv alList* out); static void allValuesForField(const BSONElement& elt, OrderedInterv alList* out);
/** /**
* Turn the MatchExpression in 'expr' into a set of index bounds. * Turn the MatchExpression in 'expr' into a set of index bounds.
The field that 'expr' The field that 'expr' is
* is concerned with is indexed according to the keypattern element * concerned with is indexed according to the keypattern element 'e
'elt'. lt' from index 'index'.
* *
* If 'expr' is elemMatch, the index tag is affixed to a child. * If 'expr' is elemMatch, the index tag is affixed to a child.
* *
* The expression must be a predicate over one field. That is, exp r->isLeaf() or * The expression must be a predicate over one field. That is, exp r->isLeaf() or
* expr->isArray() must be true, and expr->isLogical() must be fals e. * expr->isArray() must be true, and expr->isLogical() must be fals e.
*/ */
static void translate(const MatchExpression* expr, const BSONElemen static void translate(const MatchExpression* expr,
t& elt, const BSONElement& elt,
OrderedIntervalList* oilOut, bool* exactOut); const IndexEntry& index,
OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut);
/** /**
* Creates bounds for 'expr' (indexed according to 'elt'). Interse cts those bounds * Creates bounds for 'expr' (indexed according to 'elt'). Interse cts those bounds
* with the bounds in oilOut, which is an in/out parameter. * with the bounds in oilOut, which is an in/out parameter.
*/ */
static void translateAndIntersect(const MatchExpression* expr, cons static void translateAndIntersect(const MatchExpression* expr,
t BSONElement& elt, const BSONElement& elt,
OrderedIntervalList* oilOut, bool const IndexEntry& index,
* exactOut); OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut);
/** /**
* Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds * Creates bounds for 'expr' (indexed according to 'elt'). Unions those bounds
* with the bounds in oilOut, which is an in/out parameter. * with the bounds in oilOut, which is an in/out parameter.
*/ */
static void translateAndUnion(const MatchExpression* expr, const BS static void translateAndUnion(const MatchExpression* expr,
ONElement& elt, const BSONElement& elt,
OrderedIntervalList* oilOut, bool* ex const IndexEntry& index,
actOut); OrderedIntervalList* oilOut,
BoundsTightness* tightnessOut);
/** /**
* Make a range interval from the provided object. * Make a range interval from the provided object.
* The object must have exactly two fields. The first field is the start, the second the * The object must have exactly two fields. The first field is the start, the second the
* end. * end.
* The two inclusive flags indicate whether or not the start/end fi elds are included in the * The two inclusive flags indicate whether or not the start/end fi elds are included in the
* interval (closed interval if included, open if not). * interval (closed interval if included, open if not).
*/ */
static Interval makeRangeInterval(const BSONObj& obj, bool startInc static Interval makeRangeInterval(const BSONObj& obj,
lusive, bool startInclusive,
bool endInclusive);
static Interval makeRangeInterval(const string& start,
const string& end,
bool startInclusive,
bool endInclusive); bool endInclusive);
static Interval makeRangeInterval(const string& start, const string
& end,
bool startInclusive, bool endIncl
usive);
/** /**
* Make a point interval from the provided object. * Make a point interval from the provided object.
* The object must have exactly one field which is the value of the point interval. * The object must have exactly one field which is the value of the point interval.
*/ */
static Interval makePointInterval(const BSONObj& obj); static Interval makePointInterval(const BSONObj& obj);
static Interval makePointInterval(const string& str); static Interval makePointInterval(const string& str);
/** /**
* Since we have no BSONValue we must make an object that's a copy of a piece of another * Since we have no BSONValue we must make an object that's a copy of a piece of another
skipping to change at line 114 skipping to change at line 139
/** /**
* Copied almost verbatim from db/queryutil.cpp. * Copied almost verbatim from db/queryutil.cpp.
* *
* returns a string that when used as a matcher, would match a sup er set of regex() * returns a string that when used as a matcher, would match a sup er set of regex()
* *
* returns "" for complex regular expressions * returns "" for complex regular expressions
* *
* used to optimize queries in some simple regex cases that start with '^' * used to optimize queries in some simple regex cases that start with '^'
*/ */
static string simpleRegex(const char* regex, const char* flags, boo static string simpleRegex(const char* regex,
l* exact); const char* flags,
BoundsTightness* tightnessOut);
/**
* Returns an Interval from minKey to maxKey
*/
static Interval allValues(); static Interval allValues();
static void translateRegex(const RegexMatchExpression* rme, Ordered static void translateRegex(const RegexMatchExpression* rme,
IntervalList* oil, OrderedIntervalList* oil,
bool* exact); BoundsTightness* tightnessOut);
static void translateEquality(const BSONElement& data, bool isHashe static void translateEquality(const BSONElement& data,
d, bool isHashed,
OrderedIntervalList* oil, bool* exact OrderedIntervalList* oil,
); BoundsTightness* tightnessOut);
static void unionize(OrderedIntervalList* oilOut); static void unionize(OrderedIntervalList* oilOut);
static void intersectize(const OrderedIntervalList& arg, OrderedInt static void intersectize(const OrderedIntervalList& arg,
ervalList* oilOut); OrderedIntervalList* oilOut);
/**
* Fills out 'bounds' with the bounds for an index scan over all va
lues of the
* index described by 'keyPattern' in the default forward direction
.
*/
static void allValuesBounds(const BSONObj& keyPattern, IndexBounds*
bounds);
/**
* Assumes each OIL in 'bounds' is increasing.
*
* Aligns OILs (and bounds) according to the 'kp' direction * the s
canDir.
*/
static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int
scanDir = 1);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 12 change blocks. 
33 lines changed or deleted 72 lines changed or added


 index_builder.h   index_builder.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/db/client.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/util/background.h" #include "mongo/util/background.h"
/** /**
* Forks off a thread to build an index. * Forks off a thread to build an index.
*/ */
namespace mongo { namespace mongo {
class IndexBuilder : public BackgroundJob { class IndexBuilder : public BackgroundJob {
public: public:
IndexBuilder(const std::string ns, const BSONObj index); IndexBuilder(const BSONObj& index);
virtual ~IndexBuilder(); virtual ~IndexBuilder();
virtual void run(); virtual void run();
/**
* name of the builder, not the index
*/
virtual std::string name() const; virtual std::string name() const;
void build() const; Status build( Client::Context& context ) const;
/** /**
* Kill all in-progress indexes matching criteria and, optionally, store them in the * Kill all in-progress indexes matching criteria and, optionally, store them in the
* indexes list. * indexes list.
*/ */
static std::vector<BSONObj> killMatchingIndexBuilds(const BSONObj& criteria); static std::vector<BSONObj> killMatchingIndexBuilds(const BSONObj& criteria);
/** /**
* Retry all index builds in the list. Builds each index in a separ ate thread. If ns does * Retry all index builds in the list. Builds each index in a separ ate thread. If ns does
* not match the ns field in the indexes list, the BSONObj's ns fie ld is changed before the * not match the ns field in the indexes list, the BSONObj's ns fie ld is changed before the
* index is built (to handle rename). * index is built (to handle rename).
*/ */
static void restoreIndexes(const std::string& ns, const std::vector <BSONObj>& indexes); static void restoreIndexes(const std::vector<BSONObj>& indexes);
private: private:
const std::string _ns;
const BSONObj _index; const BSONObj _index;
std::string _name; std::string _name; // name of this builder, not related to the inde x
static AtomicUInt _indexBuildCount; static AtomicUInt _indexBuildCount;
}; };
} }
 End of changes. 7 change blocks. 
5 lines changed or deleted 9 lines changed or added


 index_catalog.h   index_catalog.h 
skipping to change at line 35 skipping to change at line 35
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <vector>
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class Collection; class Collection;
class NamespaceDetails; class NamespaceDetails;
class BtreeInMemoryState;
class IndexDescriptor; class IndexDescriptor;
class IndexDetails; class IndexDetails;
class IndexAccessMethod; class IndexAccessMethod;
class BtreeAccessMethod; class BtreeAccessMethod;
class BtreeBasedAccessMethod; class BtreeBasedAccessMethod;
/** /**
* how many: 1 per Collection * how many: 1 per Collection
* lifecycle: attached to a Collection * lifecycle: attached to a Collection
*/ */
class IndexCatalog { class IndexCatalog {
public: public:
IndexCatalog( Collection* collection, NamespaceDetails* details ); IndexCatalog( Collection* collection, NamespaceDetails* details );
~IndexCatalog(); ~IndexCatalog();
// must be called before used
Status init();
bool ok() const;
// ---- accessors ----- // ---- accessors -----
int numIndexesTotal() const; int numIndexesTotal() const;
int numIndexesReady() const; int numIndexesReady() const;
int numIndexesInProgress() const { return numIndexesTotal() - numIn dexesReady(); } int numIndexesInProgress() const { return numIndexesTotal() - numIn dexesReady(); }
/** /**
* this is in "alive" until the Collection goes away * this is in "alive" until the Collection goes away
* in which case everything from this tree has to go away * in which case everything from this tree has to go away
*/ */
IndexDescriptor* findIdIndex(); bool haveIdIndex() const;
IndexDescriptor* findIdIndex() const;
/** /**
* @return null if cannot find * @return null if cannot find
*/ */
IndexDescriptor* findIndexByName( const StringData& name, IndexDescriptor* findIndexByName( const StringData& name,
bool includeUnfinishedIndexes = f alse ); bool includeUnfinishedIndexes = f alse ) const;
/** /**
* @return null if cannot find * @return null if cannot find
*/ */
IndexDescriptor* findIndexByKeyPattern( const BSONObj& key, IndexDescriptor* findIndexByKeyPattern( const BSONObj& key,
bool includeUnfinishedIndex es = false ); bool includeUnfinishedIndex es = false ) const;
/* Returns the index entry for the first index whose prefix contain s /* Returns the index entry for the first index whose prefix contain s
* 'keyPattern'. If 'requireSingleKey' is true, skip indices that c ontain * 'keyPattern'. If 'requireSingleKey' is true, skip indices that c ontain
* array attributes. Otherwise, returns NULL. * array attributes. Otherwise, returns NULL.
*/ */
IndexDescriptor* findIndexByPrefix( const BSONObj &keyPattern, IndexDescriptor* findIndexByPrefix( const BSONObj &keyPattern,
bool requireSingleKey ); bool requireSingleKey ) const;
// throws void findIndexByType( const string& type , vector<IndexDescriptor*>
// never returns NULL & matches ) const;
IndexDescriptor* getDescriptor( int idxNo );
// never returns NULL // never returns NULL
IndexAccessMethod* getIndex( IndexDescriptor* desc ); IndexAccessMethod* getIndex( const IndexDescriptor* desc );
const IndexAccessMethod* getIndex( const IndexDescriptor* desc ) co
nst;
BtreeBasedAccessMethod* getBtreeBasedIndex( IndexDescriptor* desc ) class IndexIterator {
; public:
bool more();
IndexDescriptor* next();
IndexAccessMethod* getBtreeIndex( IndexDescriptor* desc ); // returns the access method for the last return IndexDescripto
r
IndexAccessMethod* accessMethod( IndexDescriptor* desc );
private:
IndexIterator( const IndexCatalog* cat, bool includeUnfinishedI
ndexes );
void _advance();
// TODO: add iterator, search methods bool _includeUnfinishedIndexes;
const IndexCatalog* _catalog;
IndexCatalogEntryContainer::const_iterator _iterator;
// ---- index modifiers ------ bool _start; // only true before we've called next() or more()
IndexCatalogEntry* _prev;
IndexCatalogEntry* _next;
friend class IndexCatalog;
};
IndexIterator getIndexIterator( bool includeUnfinishedIndexes ) con
st {
return IndexIterator( this, includeUnfinishedIndexes );
};
// ---- index set modifiers ------
Status ensureHaveIdIndex(); Status ensureHaveIdIndex();
Status createIndex( BSONObj spec, bool mayInterrupt ); enum ShutdownBehavior {
SHUTDOWN_CLEANUP, // fully clean up this build
SHUTDOWN_LEAVE_DIRTY // leave as if kill -9 happened, so have t
o deal with on restart
};
Status createIndex( BSONObj spec,
bool mayInterrupt,
ShutdownBehavior shutdownBehavior = SHUTDOWN_CL
EANUP );
Status okToAddIndex( const BSONObj& spec ) const; Status okToAddIndex( const BSONObj& spec ) const;
Status dropAllIndexes( bool includingIdIndex ); Status dropAllIndexes( bool includingIdIndex );
Status dropIndex( IndexDescriptor* desc ); Status dropIndex( IndexDescriptor* desc );
Status dropIndex( int idxNo );
/** /**
* drops ALL uncompleted indexes * will drop all incompleted indexes and return specs
* this is meant to only run at startup after a crash * after this, the indexes can be rebuilt
*/ */
Status blowAwayInProgressIndexEntries(); vector<BSONObj> getAndClearUnfinishedIndexes();
/** // ---- modify single index
* will drop an uncompleted index and return spec
* @return the info for a single index to retry /* Updates the expireAfterSeconds field of the given index to the v
alue in newExpireSecs.
* The specified index must already contain an expireAfterSeconds f
ield, and the value in
* that field and newExpireSecs must both be numeric.
*/ */
BSONObj prepOneUnfinishedIndex(); void updateTTLSetting( const IndexDescriptor* idx, long long newExp ireSeconds );
void markMultikey( IndexDescriptor* idx, bool isMultikey = true ); bool isMultikey( const IndexDescriptor* idex );
// --- these probably become private? // --- these probably become private?
/**
* disk creation order
* 1) system.indexes entry
* 2) collection's NamespaceDetails
* a) info + head
* b) _indexBuildsInProgress++
* 3) indexes entry in .ns file
* 4) system.namespaces entry for index ns
*/
class IndexBuildBlock { class IndexBuildBlock {
public: public:
IndexBuildBlock( IndexCatalog* catalog, const StringData& index IndexBuildBlock( Collection* collection,
Name, const DiskLoc& loc ); const BSONObj& spec );
~IndexBuildBlock(); ~IndexBuildBlock();
IndexDetails* indexDetails() { return _indexDetails; } Status init();
void success(); void success();
/**
* index build failed, clean up meta data
*/
void fail();
/**
* we're stopping the build
* do NOT cleanup, leave meta data as is
*/
void abort();
IndexCatalogEntry* getEntry() { return _entry; }
private: private:
Collection* _collection;
IndexCatalog* _catalog; IndexCatalog* _catalog;
string _ns; string _ns;
BSONObj _spec;
string _indexName; string _indexName;
string _indexNamespace;
NamespaceDetails* _nsd; IndexCatalogEntry* _entry;
IndexDetails* _indexDetails; bool _inProgress;
}; };
// ----- data modifiers ------ // ----- data modifiers ------
// this throws for now // this throws for now
void indexRecord( const BSONObj& obj, const DiskLoc &loc ); void indexRecord( const BSONObj& obj, const DiskLoc &loc );
void unindexRecord( const BSONObj& obj, const DiskLoc& loc, bool no Warn ); void unindexRecord( const BSONObj& obj, const DiskLoc& loc, bool no Warn );
/** /**
* checks all unique indexes and checks for conflicts * checks all unique indexes and checks for conflicts
* should not throw * should not throw
*/ */
Status checkNoIndexConflicts( const BSONObj& obj ); Status checkNoIndexConflicts( const BSONObj& obj );
// ------- temp internal ------- // ------- temp internal -------
int _removeFromSystemIndexes( const StringData& indexName );
string getAccessMethodName(const BSONObj& keyPattern) { string getAccessMethodName(const BSONObj& keyPattern) {
return _getAccessMethodName( keyPattern ); return _getAccessMethodName( keyPattern );
} }
// public static helpers // public static helpers
static bool validKeyPattern( const BSONObj& obj ); static bool validKeyPattern( const BSONObj& obj );
static BSONObj fixIndexSpec( const BSONObj& spec ); static BSONObj fixIndexSpec( const BSONObj& spec );
static BSONObj fixIndexKey( const BSONObj& key ); static BSONObj fixIndexKey( const BSONObj& key );
private: private:
void _deleteCacheEntry( unsigned i ); // creates a new thing, no caching
void _fixDescriptorCacheNumbers(); IndexAccessMethod* _createAccessMethod( const IndexDescriptor* desc
,
IndexCatalogEntry* entry );
Status _upgradeDatabaseMinorVersionIfNeeded( const string& newPlugi nName ); Status _upgradeDatabaseMinorVersionIfNeeded( const string& newPlugi nName );
/** int _removeFromSystemIndexes( const StringData& indexName );
* this is just an attempt to clean up old orphaned stuff on a dele
te all indexes
* call. repair database is the clean solution, but this gives one
a lighter weight
* partial option. see dropIndexes()
* @param idIndex - can be NULL
* @return how many things were deleted, should be 0
*/
int _assureSysIndexesEmptied( IndexDetails* idIndex );
bool _shouldOverridePlugin( const BSONObj& keyPattern ); bool _shouldOverridePlugin( const BSONObj& keyPattern ) const;
/** /**
* This differs from IndexNames::findPluginName in that returns the plugin name we *should* * This differs from IndexNames::findPluginName in that returns the plugin name we *should*
* use, not the plugin name inside of the provided key pattern. To understand when these * use, not the plugin name inside of the provided key pattern. To understand when these
* differ, see shouldOverridePlugin. * differ, see shouldOverridePlugin.
*/ */
string _getAccessMethodName(const BSONObj& keyPattern); string _getAccessMethodName(const BSONObj& keyPattern) const;
IndexDetails* _getIndexDetails( const IndexDescriptor* descriptor )
const;
void _checkMagic() const; void _checkMagic() const;
Status _indexRecord( int idxNo, const BSONObj& obj, const DiskLoc & // checks if there is anything in _leftOverIndexes
loc ); // meaning we shouldn't modify catalog
Status _unindexRecord( int idxNo, const BSONObj& obj, const DiskLoc Status _checkUnfinished() const;
&loc, bool logIfError );
Status _indexRecord( IndexCatalogEntry* index, const BSONObj& obj,
const DiskLoc &loc );
Status _unindexRecord( IndexCatalogEntry* index, const BSONObj& obj
, const DiskLoc &loc,
bool logIfError );
/** /**
* this does no sanity checks * this does no sanity checks
*/ */
Status _dropIndex( int idxNo ); Status _dropIndex( IndexCatalogEntry* entry );
// just does disk hanges
// doesn't change memory state, etc...
void _deleteIndexFromDisk( const string& indexName,
const string& indexNamespace,
int idxNo );
// descriptor ownership passes to _setupInMemoryStructures
IndexCatalogEntry* _setupInMemoryStructures( IndexDescriptor* descr
iptor );
int _magic; int _magic;
Collection* _collection; Collection* _collection;
NamespaceDetails* _details; NamespaceDetails* _details;
// these are caches, not source of truth IndexCatalogEntryContainer _entries;
// they should be treated as such
std::vector<IndexDescriptor*> _descriptorCache; // These are the index specs of indexes that were "leftover"
std::vector<IndexAccessMethod*> _accessMethodCache; // "Leftover" means they were unfinished when a mongod shut down
std::vector<BtreeAccessMethod*> _forcedBtreeAccessMethodCache; // Certain operations are prohibted until someone fixes
// get by calling getAndClearUnfinishedIndexes
std::vector<BSONObj> _unfinishedIndexes;
static const BSONObj _idObj; // { _id : 1 } static const BSONObj _idObj; // { _id : 1 }
}; };
} }
 End of changes. 38 change blocks. 
54 lines changed or deleted 139 lines changed or added


 index_create.h   index_create.h 
skipping to change at line 36 skipping to change at line 36
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
namespace mongo { namespace mongo {
class IndexCatalogEntry;
class Collection; class Collection;
class IndexDescriptor;
// Build an index in the foreground // Build an index in the foreground
// If background is false, uses fast index builder // If background is false, uses fast index builder
// If background is true, uses background index builder; blocks until d one. // If background is true, uses background index builder; blocks until d one.
void buildAnIndex( Collection* collection, void buildAnIndex( Collection* collection,
IndexDescriptor* idx, IndexCatalogEntry* btreeState,
bool mayInterrupt ); bool mayInterrupt );
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
2 lines changed or deleted 3 lines changed or added


 index_descriptor.h   index_descriptor.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/db/storage/index_details.h" // For IndexDetails. #include "mongo/db/structure/catalog/index_details.h" // For IndexDetails.
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/namespace_details.h" // For NamespaceDetails. #include "mongo/db/structure/catalog/namespace_details.h" // For Namespace
#include "mongo/db/structure/collection.h" Details.
#include "mongo/db/catalog/collection.h"
#include "mongo/util/stacktrace.h" #include "mongo/util/stacktrace.h"
namespace mongo { namespace mongo {
class IndexCatalog; class IndexCatalog;
class IndexCatalogEntry;
/** class IndexCatalogEntryContainer;
* OnDiskIndexData (aka IndexDetails) is memory-mapped on-disk index da
ta.
* It contains two DiskLocs:
* The first points to the head of the index. This is currently turned
into a Btree node.
* The second points to a BSONObj which describes the index.
*/
typedef IndexDetails OnDiskIndexData;
/** /**
* A cache of information computed from the memory-mapped per-index dat a (OnDiskIndexData). * A cache of information computed from the memory-mapped per-index dat a (OnDiskIndexData).
* Contains accessors for the various immutable index parameters, and a n accessor for the * Contains accessors for the various immutable index parameters, and a n accessor for the
* mutable "head" pointer which is index-specific. * mutable "head" pointer which is index-specific.
* *
* All synchronization is the responsibility of the caller. * All synchronization is the responsibility of the caller.
*/ */
class IndexDescriptor { class IndexDescriptor {
public: public:
/** /**
* OnDiskIndexData is a pointer to the memory mapped per-index data . * OnDiskIndexData is a pointer to the memory mapped per-index data .
* infoObj is a copy of the index-describing BSONObj contained in t he OnDiskIndexData. * infoObj is a copy of the index-describing BSONObj contained in t he OnDiskIndexData.
*/ */
IndexDescriptor(Collection* collection, int indexNumber, OnDiskInde IndexDescriptor(Collection* collection, BSONObj infoObj)
xData* data,
BSONObj infoObj)
: _magic(123987), : _magic(123987),
_collection(collection), _indexNumber(indexNumber), _onDiskDa ta(data), _collection(collection),
_infoObj(infoObj.getOwned()), _infoObj(infoObj.getOwned()),
_numFields(infoObj.getObjectField("key").nFields()), _numFields(infoObj.getObjectField("key").nFields()),
_keyPattern(infoObj.getObjectField("key").getOwned()), _keyPattern(infoObj.getObjectField("key").getOwned()),
_indexName(infoObj.getStringField("name")), _indexName(infoObj.getStringField("name")),
_parentNS(infoObj.getStringField("ns")), _parentNS(infoObj.getStringField("ns")),
_isIdIndex(IndexDetails::isIdIndexPattern( _keyPattern )), _isIdIndex(IndexDetails::isIdIndexPattern( _keyPattern )),
_sparse(infoObj["sparse"].trueValue()), _sparse(infoObj["sparse"].trueValue()),
_dropDups(infoObj["dropDups"].trueValue()), _dropDups(infoObj["dropDups"].trueValue()),
_unique( _isIdIndex || infoObj["unique"].trueValue() ) _unique( _isIdIndex || infoObj["unique"].trueValue() ),
_cachedEntry( NULL )
{ {
_indexNamespace = _parentNS + ".$" + _indexNamespace; _indexNamespace = _parentNS + ".$" + _indexName;
_version = 0; _version = 0;
BSONElement e = _infoObj["v"]; BSONElement e = _infoObj["v"];
if ( e.isNumber() ) { if ( e.isNumber() ) {
_version = e.numberInt(); _version = e.numberInt();
} }
} }
~IndexDescriptor() { ~IndexDescriptor() {
_magic = 555; _magic = 555;
} }
// XXX this is terrible
IndexDescriptor* clone() const {
return new IndexDescriptor(_collection, _indexNumber, _onDiskDa
ta, _infoObj);
}
// //
// Information about the key pattern. // Information about the key pattern.
// //
/** /**
* Return the user-provided index key pattern. * Return the user-provided index key pattern.
* Example: {geo: "2dsphere", nonGeo: 1} * Example: {geo: "2dsphere", nonGeo: 1}
* Example: {foo: 1, bar: -1} * Example: {foo: 1, bar: -1}
*/ */
const BSONObj& keyPattern() const { _checkOk(); return _keyPattern; } const BSONObj& keyPattern() const { _checkOk(); return _keyPattern; }
skipping to change at line 141 skipping to change at line 130
// May each key only occur once? // May each key only occur once?
bool unique() const { return _unique; } bool unique() const { return _unique; }
// Is dropDups set on this index? // Is dropDups set on this index?
bool dropDups() const { return _dropDups; } bool dropDups() const { return _dropDups; }
// Is this index sparse? // Is this index sparse?
bool isSparse() const { return _sparse; } bool isSparse() const { return _sparse; }
// Is this index multikey? // Is this index multikey?
bool isMultikey() const { _checkOk(); return _collection->details() ->isMultikey(_indexNumber); } bool isMultikey() const { _checkOk(); return _collection->getIndexC atalog()->isMultikey( this ); }
bool isIdIndex() const { _checkOk(); return _isIdIndex; } bool isIdIndex() const { _checkOk(); return _isIdIndex; }
// //
// Properties that are Index-specific. // Properties that are Index-specific.
// //
// Allow access to arbitrary fields in the per-index info object. Some indices stash // Allow access to arbitrary fields in the per-index info object. Some indices stash
// index-specific data there. // index-specific data there.
BSONElement getInfoElement(const string& name) { return _infoObj[na me]; } BSONElement getInfoElement(const string& name) const { return _info Obj[name]; }
// //
// "Internals" of accessing the index, used by IndexAccessMethod(s) . // "Internals" of accessing the index, used by IndexAccessMethod(s) .
// //
// Return the memory-mapped index data block.
OnDiskIndexData& getOnDisk() { _checkOk(); return *_onDiskData; }
// Return the mutable head of the index.
const DiskLoc& getHead() const { _checkOk(); return _onDiskData->he
ad; }
// Return a (rather compact) string representation. // Return a (rather compact) string representation.
string toString() const { _checkOk(); return _infoObj.toString(); } string toString() const { _checkOk(); return _infoObj.toString(); }
// Return the info object. // Return the info object.
const BSONObj& infoObj() const { _checkOk(); return _infoObj; } const BSONObj& infoObj() const { _checkOk(); return _infoObj; }
// Set multikey attribute. We never unset it.
void setMultikey() {
_collection->getIndexCatalog()->markMultikey( this );
}
// Is this index being created in the background?
bool isBackgroundIndex() const {
return _indexNumber >= _collection->details()->getCompletedInde
xCount();
}
// this is the collection over which the index is over
Collection* getIndexedCollection() const { return _collection; }
// this is the owner of this IndexDescriptor // this is the owner of this IndexDescriptor
IndexCatalog* getIndexCatalog() const { return _collection->getInde xCatalog(); } IndexCatalog* getIndexCatalog() const { return _collection->getInde xCatalog(); }
private: private:
void _checkOk() const { void _checkOk() const {
if ( _magic == 123987 ) if ( _magic == 123987 )
return; return;
log() << "uh oh: " << (void*)(this) << " " << _magic; log() << "uh oh: " << (void*)(this) << " " << _magic;
verify(0); verify(0);
} }
int getIndexNumber() const { return _indexNumber; }
int _magic; int _magic;
// Related catalog information of the parent collection // Related catalog information of the parent collection
Collection* _collection; Collection* _collection;
// What # index are we in the catalog represented by _namespaceDeta
ils? Needed for setting
// and getting multikey.
int _indexNumber;
OnDiskIndexData* _onDiskData;
// The BSONObj describing the index. Accessed through the various members above. // The BSONObj describing the index. Accessed through the various members above.
const BSONObj _infoObj; const BSONObj _infoObj;
// --- cached data from _infoObj // --- cached data from _infoObj
int64_t _numFields; // How many fields are indexed? int64_t _numFields; // How many fields are indexed?
BSONObj _keyPattern; BSONObj _keyPattern;
string _indexName; string _indexName;
string _parentNS; string _parentNS;
string _indexNamespace; string _indexNamespace;
bool _isIdIndex; bool _isIdIndex;
bool _sparse; bool _sparse;
bool _dropDups; bool _dropDups;
bool _unique; bool _unique;
int _version; int _version;
// only used by IndexCatalogEntryContainer to do caching for perf
// users not allowed to touch, and not part of API
IndexCatalogEntry* _cachedEntry;
friend class IndexCatalog; friend class IndexCatalog;
friend class IndexCatalogEntry;
friend class IndexCatalogEntryContainer;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 16 change blocks. 
57 lines changed or deleted 19 lines changed or added


 index_details.h   index_details.h 
skipping to change at line 39 skipping to change at line 39
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include <vector> #include <vector>
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/key.h" #include "mongo/db/structure/btree/key.h"
#include "mongo/db/catalog/ondisk/namespace.h" #include "mongo/db/structure/catalog/namespace.h"
namespace mongo { namespace mongo {
/* Details about a particular index. There is one of these effectively for each object in /* Details about a particular index. There is one of these effectively for each object in
system.namespaces (although this also includes the head pointer, whi ch is not in that system.namespaces (although this also includes the head pointer, whi ch is not in that
collection). collection).
** MemoryMapped Record ** (i.e., this is on disk data) ** MemoryMapped Record ** (i.e., this is on disk data)
*/ */
class IndexDetails { class IndexDetails {
 End of changes. 1 change blocks. 
2 lines changed or deleted 2 lines changed or added


 index_entry.h   index_entry.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <sstream>
#include <string> #include <string>
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
/** /**
* This name sucks, but every name involving 'index' is used somewhere. * This name sucks, but every name involving 'index' is used somewhere.
*/ */
struct IndexEntry { struct IndexEntry {
IndexEntry(const BSONObj& kp, bool mk, bool sp, const string& n) IndexEntry(const BSONObj& kp,
: keyPattern(kp), multikey(mk), sparse(sp), name(n) { } bool mk = false,
bool sp = false,
IndexEntry(const IndexEntry& other) { const string& n = "default_name",
keyPattern = other.keyPattern; const BSONObj& io = BSONObj())
multikey = other.multikey; : keyPattern(kp),
sparse = other.sparse; multikey(mk),
name = other.name; sparse(sp),
} name(n),
infoObj(io) { }
BSONObj keyPattern; BSONObj keyPattern;
bool multikey; bool multikey;
bool sparse; bool sparse;
string name; string name;
// Geo indices have extra parameters. We need those available to p
lan correctly.
BSONObj infoObj;
std::string toString() const { std::string toString() const {
stringstream ss; mongoutils::str::stream ss;
ss << keyPattern.toString(); ss << "kp: " << keyPattern.toString();
if (multikey) { if (multikey) {
ss << " multikey"; ss << " multikey";
} }
if (sparse) { if (sparse) {
ss << " sparse"; ss << " sparse";
} }
return ss.str();
if (!infoObj.isEmpty()) {
ss << " io: " << infoObj.toString();
}
return ss;
} }
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
13 lines changed or deleted 24 lines changed or added


 index_names.h   index_names.h 
skipping to change at line 49 skipping to change at line 49
/** /**
* We use the string representation of index names all over the place, so we declare them all * We use the string representation of index names all over the place, so we declare them all
* once here. * once here.
*/ */
class IndexNames { class IndexNames {
public: public:
static const string GEO_2D; static const string GEO_2D;
static const string GEO_HAYSTACK; static const string GEO_HAYSTACK;
static const string GEO_2DSPHERE; static const string GEO_2DSPHERE;
static const string TEXT; static const string TEXT;
static const string TEXT_INTERNAL;
static const string HASHED; static const string HASHED;
/** /**
* True if is a regular (non-plugin) index or uses a plugin that ex isted before 2.4. * True if is a regular (non-plugin) index or uses a plugin that ex isted before 2.4.
* These plugins are grandfathered in and allowed to exist in DBs w ith * These plugins are grandfathered in and allowed to exist in DBs w ith
* PDFILE_MINOR_VERSION_22_AND_OLDER * PDFILE_MINOR_VERSION_22_AND_OLDER
*/ */
static bool existedBefore24(const string& name) { static bool existedBefore24(const string& name) {
return name.empty() return name.empty()
|| name == IndexNames::GEO_2D || name == IndexNames::GEO_2D
skipping to change at line 76 skipping to change at line 75
* a field with a non-string value indicates a "special" (not strai ght Btree) index. * a field with a non-string value indicates a "special" (not strai ght Btree) index.
*/ */
static string findPluginName(const BSONObj& keyPattern); static string findPluginName(const BSONObj& keyPattern);
static bool isKnownName(const string& name) { static bool isKnownName(const string& name) {
return name.empty() return name.empty()
|| name == IndexNames::GEO_2D || name == IndexNames::GEO_2D
|| name == IndexNames::GEO_2DSPHERE || name == IndexNames::GEO_2DSPHERE
|| name == IndexNames::GEO_HAYSTACK || name == IndexNames::GEO_HAYSTACK
|| name == IndexNames::TEXT || name == IndexNames::TEXT
|| name == IndexNames::TEXT_INTERNAL
|| name == IndexNames::HASHED; || name == IndexNames::HASHED;
} }
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
2 lines changed or deleted 0 lines changed or added


 index_rebuilder.h   index_rebuilder.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/namespace_details.h" #include <list>
#include <string>
#include "mongo/util/background.h" #include "mongo/util/background.h"
namespace mongo { namespace mongo {
// This is a job that's only run at startup. It finds all incomplete in dices and // This is a job that's only run at startup. It finds all incomplete in dices and
// finishes rebuilding them. After they complete rebuilding, the thread terminates. // finishes rebuilding them. After they complete rebuilding, the thread terminates.
class IndexRebuilder : public BackgroundJob { class IndexRebuilder : public BackgroundJob {
public: public:
IndexRebuilder(); IndexRebuilder();
 End of changes. 1 change blocks. 
1 lines changed or deleted 3 lines changed or added


 index_scan.h   index_scan.h 
skipping to change at line 48 skipping to change at line 48
#include "mongo/platform/unordered_set.h" #include "mongo/platform/unordered_set.h"
namespace mongo { namespace mongo {
class IndexAccessMethod; class IndexAccessMethod;
class IndexCursor; class IndexCursor;
class IndexDescriptor; class IndexDescriptor;
class WorkingSet; class WorkingSet;
struct IndexScanParams { struct IndexScanParams {
IndexScanParams() : descriptor(NULL), direction(1), limit(0), IndexScanParams() : descriptor(NULL),
forceBtreeAccessMethod(false), doNotDedup(false direction(1),
) { } doNotDedup(false),
maxScan(0),
addKeyMetadata(false) { }
IndexDescriptor* descriptor; const IndexDescriptor* descriptor;
IndexBounds bounds; IndexBounds bounds;
int direction; int direction;
// This only matters for 2d indices and will be ignored by every ot bool doNotDedup;
her index.
int limit;
// Special indices internally open an IndexCursor over themselves b // How many keys will we look at?
ut as a straight Btree. size_t maxScan;
bool forceBtreeAccessMethod;
bool doNotDedup; // Do we want to add the key as metadata?
bool addKeyMetadata;
}; };
/** /**
* Stage scans over an index from startKey to endKey, returning results that pass the provided * Stage scans over an index from startKey to endKey, returning results that pass the provided
* filter. Internally dedups on DiskLoc. * filter. Internally dedups on DiskLoc.
* *
* XXX: we probably should split this into 2 stages: one btree-only "fa
st" ixscan and one
* that strictly talks through the index API. Need to figure out
what we really want
* to ship down through that API predicate-wise though, currently
is a BSONObj but that's
* not going to be enough. See SERVER-12397 for tracking.
*
* Sub-stage preconditions: None. Is a leaf and consumes no stage data . * Sub-stage preconditions: None. Is a leaf and consumes no stage data .
*/ */
class IndexScan : public PlanStage { class IndexScan : public PlanStage {
public: public:
IndexScan(const IndexScanParams& params, WorkingSet* workingSet, IndexScan(const IndexScanParams& params, WorkingSet* workingSet,
const MatchExpression* filter); const MatchExpression* filter);
virtual ~IndexScan() { } virtual ~IndexScan() { }
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
/**
* Initialize the underlying IndexCursor
*/
void initIndexCursor();
/** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */ /** See if the cursor is pointing at or past _endKey, if _endKey is non-empty. */
void checkEnd(); void checkEnd();
// The WorkingSet we annotate with results. Not owned by us. // The WorkingSet we annotate with results. Not owned by us.
WorkingSet* _workingSet; WorkingSet* _workingSet;
// Index access. // Index access.
IndexAccessMethod* _iam; // owned by Collection -> IndexCatalog const IndexAccessMethod* _iam; // owned by Collection -> IndexCatal og
scoped_ptr<IndexCursor> _indexCursor; scoped_ptr<IndexCursor> _indexCursor;
IndexDescriptor* _descriptor; // owned by Collection -> IndexCatalo g const IndexDescriptor* _descriptor; // owned by Collection -> Index Catalog
// Have we hit the end of the index scan? // Have we hit the end of the index scan?
bool _hitEnd; bool _hitEnd;
// Contains expressions only over fields in the index key. We assu me this is built // Contains expressions only over fields in the index key. We assu me this is built
// correctly by whomever creates this class. // correctly by whomever creates this class.
// The filter is not owned by us. // The filter is not owned by us.
const MatchExpression* _filter; const MatchExpression* _filter;
// Could our index have duplicates? If so, we use _returned to ded up. // Could our index have duplicates? If so, we use _returned to ded up.
 End of changes. 10 change blocks. 
14 lines changed or deleted 27 lines changed or added


 index_tag.h   index_tag.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <vector>
#include "mongo/bson/util/builder.h" #include "mongo/bson/util/builder.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
namespace mongo { namespace mongo {
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 indexability.h   indexability.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#pragma once #pragma once
namespace mongo { namespace mongo {
/** /**
* Logic for how indices can be used with an expression. * Logic for how indices can be used with an expression.
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 initializer.h   initializer.h 
skipping to change at line 25 skipping to change at line 25
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/base/initializer_context.h" #include "mongo/base/initializer_context.h"
#include "mongo/base/initializer_dependency_graph.h" #include "mongo/base/initializer_dependency_graph.h"
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* Class representing an initialization process. * Class representing an initialization process.
* *
* Such a process is described by a directed acyclic graph of initializ ation operations, the * Such a process is described by a directed acyclic graph of initializ ation operations, the
* InitializerDependencyGraph. One constructs an initialization proces s by adding nodes and * InitializerDependencyGraph. One constructs an initialization proces s by adding nodes and
* edges to the graph. Then, one executes the process, causing each in itialization operation to * edges to the graph. Then, one executes the process, causing each in itialization operation to
* execute in an order that respects the programmer-established prerequ istes. * execute in an order that respects the programmer-established prerequ istes.
*/ */
class Initializer { class MONGO_CLIENT_API Initializer {
MONGO_DISALLOW_COPYING(Initializer); MONGO_DISALLOW_COPYING(Initializer);
public: public:
Initializer(); Initializer();
~Initializer(); ~Initializer();
/** /**
* Get the initializer dependency graph, presumably for the purpose of adding more nodes. * Get the initializer dependency graph, presumably for the purpose of adding more nodes.
*/ */
InitializerDependencyGraph& getInitializerDependencyGraph() { retur n _graph; } InitializerDependencyGraph& getInitializerDependencyGraph() { retur n _graph; }
skipping to change at line 70 skipping to change at line 71
/** /**
* Run the global initializers. * Run the global initializers.
* *
* It's a programming error for this to fail, but if it does it will re turn a status other * It's a programming error for this to fail, but if it does it will re turn a status other
* than Status::OK. * than Status::OK.
* *
* This means that the few initializers that might want to terminate th e program by failing * This means that the few initializers that might want to terminate th e program by failing
* should probably arrange to terminate the process themselves. * should probably arrange to terminate the process themselves.
*/ */
Status runGlobalInitializers(const InitializerContext::ArgumentVector& MONGO_CLIENT_API Status runGlobalInitializers(const InitializerContext:
args, :ArgumentVector& args,
const InitializerContext::EnvironmentMap& const InitializerContext:
env); :EnvironmentMap& env);
Status runGlobalInitializers(int argc, const char* const* argv, const c MONGO_CLIENT_API Status runGlobalInitializers(
har* const* envp); int argc, const char* const* argv, const char* const* envp);
/** /**
* Same as runGlobalInitializers(), except prints a brief message to st d::cerr * Same as runGlobalInitializers(), except prints a brief message to st d::cerr
* and terminates the process on failure. * and terminates the process on failure.
*/ */
void runGlobalInitializersOrDie(int argc, const char* const* argv, cons MONGO_CLIENT_API void runGlobalInitializersOrDie(
t char* const* envp); int argc, const char* const* argv, const char* const* envp);
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
9 lines changed or deleted 10 lines changed or added


 instance.h   instance.h 
skipping to change at line 98 skipping to change at line 98
const BSONObj *fieldsToRetur n = 0, int queryOptions = 0, int batchSize = 0); const BSONObj *fieldsToRetur n = 0, int queryOptions = 0, int batchSize = 0);
virtual bool isFailed() const { virtual bool isFailed() const {
return false; return false;
} }
virtual bool isStillConnected() { virtual bool isStillConnected() {
return true; return true;
} }
virtual string toString() { virtual string toString() const {
return "DBDirectClient"; return "DBDirectClient";
} }
virtual string getServerAddress() const { virtual string getServerAddress() const {
return "localhost"; // TODO: should this have the port? return "localhost"; // TODO: should this have the port?
} }
virtual bool call( Message &toSend, Message &response, bool assertO k=true , string * actualServer = 0 ); virtual bool call( Message &toSend, Message &response, bool assertO k=true , string * actualServer = 0 );
virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 ); virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 );
virtual void sayPiggyBack( Message &toSend ) { virtual void sayPiggyBack( Message &toSend ) {
// don't need to piggy back when connected locally // don't need to piggy back when connected locally
return say( toSend ); return say( toSend );
skipping to change at line 140 skipping to change at line 140
extern int lockFile; extern int lockFile;
#ifdef _WIN32 #ifdef _WIN32
extern HANDLE lockFileHandle; extern HANDLE lockFileHandle;
#endif #endif
void acquirePathLock(bool doingRepair=false); // if doingRepair=true do n't consider unclean shutdown an error void acquirePathLock(bool doingRepair=false); // if doingRepair=true do n't consider unclean shutdown an error
void maybeCreatePidFile(); void maybeCreatePidFile();
void exitCleanly( ExitCode code ); void exitCleanly( ExitCode code );
void checkAndInsert(const char *ns, BSONObj& js);
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
3 lines changed or deleted 1 lines changed or added


 internal_plans.h   internal_plans.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/exec/collection_scan.h" #include "mongo/db/exec/collection_scan.h"
#include "mongo/db/query/eof_runner.h"
#include "mongo/db/exec/fetch.h" #include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/index_scan.h"
#include "mongo/db/index/catalog_hack.h" #include "mongo/db/query/eof_runner.h"
#include "mongo/db/query/internal_runner.h" #include "mongo/db/query/internal_runner.h"
namespace mongo { namespace mongo {
/** /**
* The internal planner is a one-stop shop for "off-the-shelf" plans. Most internal procedures * The internal planner is a one-stop shop for "off-the-shelf" plans. Most internal procedures
* that do not require advanced queries could be served by plans alread y in here. * that do not require advanced queries could be served by plans alread y in here.
*/ */
class InternalPlanner { class InternalPlanner {
public: public:
skipping to change at line 64 skipping to change at line 65
IXSCAN_DEFAULT = 0, IXSCAN_DEFAULT = 0,
// The client wants the fetched object and the DiskLoc that ref ers to it. Delegating // The client wants the fetched object and the DiskLoc that ref ers to it. Delegating
// the fetch to the runner allows fetching outside of a lock. // the fetch to the runner allows fetching outside of a lock.
IXSCAN_FETCH = 1, IXSCAN_FETCH = 1,
}; };
/** /**
* Return a collection scan. Caller owns pointer. * Return a collection scan. Caller owns pointer.
*/ */
static Runner* collectionScan(const StringData& ns, static Runner* collectionScan(const StringData& ns, // TODO: make t his a Collection*
const Direction direction = FORWARD, const Direction direction = FORWARD,
const DiskLoc startLoc = DiskLoc()) { const DiskLoc startLoc = DiskLoc()) {
NamespaceDetails* nsd = nsdetails(ns); Collection* collection = cc().database()->getCollection(ns);
if (NULL == nsd) { return new EOFRunner(NULL, ns.toString()); } if (NULL == collection) { return new EOFRunner(NULL, ns.toStrin
g()); }
CollectionScanParams params; CollectionScanParams params;
params.ns = ns.toString(); params.ns = ns.toString();
params.start = startLoc; params.start = startLoc;
if (FORWARD == direction) { if (FORWARD == direction) {
params.direction = CollectionScanParams::FORWARD; params.direction = CollectionScanParams::FORWARD;
} }
else { else {
params.direction = CollectionScanParams::BACKWARD; params.direction = CollectionScanParams::BACKWARD;
} }
WorkingSet* ws = new WorkingSet(); WorkingSet* ws = new WorkingSet();
CollectionScan* cs = new CollectionScan(params, ws, NULL); CollectionScan* cs = new CollectionScan(params, ws, NULL);
return new InternalRunner(ns.toString(), cs, ws); return new InternalRunner(collection, cs, ws);
} }
/** /**
* Return an index scan. Caller owns returned pointer. * Return an index scan. Caller owns returned pointer.
*/ */
static Runner* indexScan(IndexDescriptor* descriptor, static Runner* indexScan(const Collection* collection,
const IndexDescriptor* descriptor,
const BSONObj& startKey, const BSONObj& en dKey, const BSONObj& startKey, const BSONObj& en dKey,
bool endKeyInclusive, Direction direction = FORWARD, bool endKeyInclusive, Direction direction = FORWARD,
int options = 0) { int options = 0) {
verify(descriptor); invariant(collection);
invariant(descriptor);
const NamespaceString& ns = descriptor->getIndexedCollection()-
>ns();
IndexScanParams params; IndexScanParams params;
params.descriptor = descriptor; params.descriptor = descriptor;
params.direction = direction; params.direction = direction;
params.bounds.isSimpleRange = true; params.bounds.isSimpleRange = true;
params.bounds.startKey = startKey; params.bounds.startKey = startKey;
params.bounds.endKey = endKey; params.bounds.endKey = endKey;
params.bounds.endKeyInclusive = endKeyInclusive; params.bounds.endKeyInclusive = endKeyInclusive;
// This always as 'true' as this is the new btreecursor. Even
if the underlying index
// is 'special' (ie, expression) we treat it like a Btree.
params.forceBtreeAccessMethod = true;
WorkingSet* ws = new WorkingSet(); WorkingSet* ws = new WorkingSet();
IndexScan* ix = new IndexScan(params, ws, NULL); IndexScan* ix = new IndexScan(params, ws, NULL);
if (IXSCAN_FETCH & options) { if (IXSCAN_FETCH & options) {
return new InternalRunner(ns.toString(), new FetchStage(ws, ix, NULL), ws); return new InternalRunner(collection, new FetchStage(ws, ix , NULL), ws);
} }
else { else {
return new InternalRunner(ns.toString(), ix, ws); return new InternalRunner(collection, ix, ws);
} }
} }
}; };
} // namespace mongo } // namespace mongo
 End of changes. 11 change blocks. 
17 lines changed or deleted 14 lines changed or added


 internal_runner.h   internal_runner.h 
skipping to change at line 46 skipping to change at line 46
namespace mongo { namespace mongo {
class BSONObj; class BSONObj;
class CanonicalQuery; class CanonicalQuery;
class DiskLoc; class DiskLoc;
class PlanExecutor; class PlanExecutor;
class PlanStage; class PlanStage;
struct QuerySolution; struct QuerySolution;
class TypeExplain; class TypeExplain;
struct PlanInfo;
class WorkingSet; class WorkingSet;
/** /**
* This is a runner that was requested by an internal client of the que ry system, as opposed to * This is a runner that was requested by an internal client of the que ry system, as opposed to
* runners that are built in response to a query entering the system. I t is only used by * runners that are built in response to a query entering the system. I t is only used by
* internal clients of the query systems (e.g., chunk migration, index building, commands that * internal clients of the query systems (e.g., chunk migration, index building, commands that
* traverse data such as md5, ... ) * traverse data such as md5, ... )
* *
* The salient feature of this Runner is that it does not interact with the cache at all. * The salient feature of this Runner is that it does not interact with the cache at all.
*/ */
class InternalRunner : public Runner { class InternalRunner : public Runner {
public: public:
/** Takes ownership of all arguments. */ /** Takes ownership of root and ws. */
InternalRunner(const string& ns, PlanStage* root, WorkingSet* ws); InternalRunner(const Collection* collection, PlanStage* root, Worki
ngSet* ws);
virtual ~InternalRunner(); virtual ~InternalRunner();
Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
virtual bool isEOF(); virtual bool isEOF();
virtual void saveState(); virtual void saveState();
virtual bool restoreState(); virtual bool restoreState();
virtual const std::string& ns(); virtual const std::string& ns();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual void setYieldPolicy(Runner::YieldPolicy policy); virtual void setYieldPolicy(Runner::YieldPolicy policy);
virtual void kill(); virtual void kill();
virtual const Collection* collection() { return _collection; }
/** /**
* Returns OK, allocating and filling in '*explain' with details of the plan used by * Returns OK, allocating and filling in '*explain' with details of the plan used by
* this runner. Caller takes ownership of '*explain'. Otherwise, re * this runner. Caller takes ownership of '*explain'. Similarly fil
turn a status ls in '*planInfo',
* describing the error. * which the caller takes ownership of. Otherwise, return a status
describing the
* error.
* *
* Strictly speaking, an InternalRunner's explain is never exposed, simply because an * Strictly speaking, an InternalRunner's explain is never exposed, simply because an
* InternalRunner itself is not exposed. But we implement the expla in here anyway so * InternalRunner itself is not exposed. But we implement the expla in here anyway so
* to help in debugging situations. * to help in debugging situations.
*/ */
virtual Status getExplainPlan(TypeExplain** explain) const; virtual Status getInfo(TypeExplain** explain,
PlanInfo** planInfo) const;
private: private:
std::string _ns; const Collection* _collection;
boost::scoped_ptr<PlanExecutor> _exec; boost::scoped_ptr<PlanExecutor> _exec;
Runner::YieldPolicy _policy; Runner::YieldPolicy _policy;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 7 change blocks. 
8 lines changed or deleted 15 lines changed or added


 interval.h   interval.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo { namespace mongo {
/** A range of values for one field. */ /** A range of values for one field. */
struct Interval { struct Interval {
// No BSONValue means we have to keep a BSONObj and pointers (BSONE lement) into it. // No BSONValue means we have to keep a BSONObj and pointers (BSONE lement) into it.
// 'start' may not point at the first field in _intervalData. // 'start' may not point at the first field in _intervalData.
// 'end' may not point at the last field in _intervalData. // 'end' may not point at the last field in _intervalData.
// 'start' and 'end' may point at the same field. // 'start' and 'end' may point at the same field.
skipping to change at line 43 skipping to change at line 56
BSONElement start; BSONElement start;
bool startInclusive; bool startInclusive;
BSONElement end; BSONElement end;
bool endInclusive; bool endInclusive;
/** Creates an empty interval */ /** Creates an empty interval */
Interval(); Interval();
string toString() const { string toString() const {
stringstream ss; mongoutils::str::stream ss;
if (startInclusive) { if (startInclusive) {
ss << "["; ss << "[";
} }
else { else {
ss << "("; ss << "(";
} }
// false means omit the field name // false means omit the field name
ss << start.toString(false); ss << start.toString(false);
ss << ", "; ss << ", ";
ss << end.toString(false); ss << end.toString(false);
if (endInclusive) { if (endInclusive) {
ss << "]"; ss << "]";
} }
else { else {
ss << ")"; ss << ")";
} }
return ss.str(); return ss;
} }
/** /**
* Creates an interval that starts at the first field of 'base' and ends at the second * Creates an interval that starts at the first field of 'base' and ends at the second
* field of 'base'. (In other words, 'base' is a bsonobj with at le ast two elements, of * field of 'base'. (In other words, 'base' is a bsonobj with at le ast two elements, of
* which we don't care about field names.) * which we don't care about field names.)
* *
* The interval's extremities are closed or not depending on whethe r * The interval's extremities are closed or not depending on whethe r
* 'start'/'endIncluded' are true or not. * 'start'/'endIncluded' are true or not.
*/ */
skipping to change at line 83 skipping to change at line 96
/** Sets the current interval to the given values (see constructor) */ /** Sets the current interval to the given values (see constructor) */
void init(BSONObj base, bool startIncluded, bool endIncluded); void init(BSONObj base, bool startIncluded, bool endIncluded);
/** Returns true if an empty-constructed interval hasn't been init( )-ialized yet */ /** Returns true if an empty-constructed interval hasn't been init( )-ialized yet */
bool isEmpty() const; bool isEmpty() const;
bool isPoint() const { bool isPoint() const {
return startInclusive && endInclusive && 0 == start.woCompare(e nd, false); return startInclusive && endInclusive && 0 == start.woCompare(e nd, false);
} }
/** Returns true if start is same as end and interval is open at ei
ther end */
bool isNull() const {
return (!startInclusive || !endInclusive) && 0 == start.woCompa
re(end, false);
}
/** Returns true if 'this' is the same interval as 'other' */
bool equals(const Interval& other) const;
/** /**
* Swap start and end points of interval. * Swap start and end points of interval.
*/ */
void reverse(); void reverse();
/** Returns how 'this' compares to 'other' */ /** Returns how 'this' compares to 'other' */
enum IntervalComparison { enum IntervalComparison {
// //
// There is some intersection. // There is some intersection.
// //
 End of changes. 5 change blocks. 
2 lines changed or deleted 32 lines changed or added


 introspect.h   introspect.h 
skipping to change at line 34 skipping to change at line 34
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include <string>
#include "mongo/db/curop.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/pdfile.h"
namespace mongo { namespace mongo {
class Collection;
class Database;
/* --- profiling -------------------------------------------- /* --- profiling --------------------------------------------
do when database->profile is set do when database->profile is set
*/ */
void profile(const Client& c, int op, CurOp& currentOp); void profile(const Client& c, int op, CurOp& currentOp);
/** /**
* Get (or create) the profile collection * Get (or create) the profile collection
* *
* @param db Database in which to create the profile collection * @param db Database in which to create the profile collection
* @param force Always create the collection if it does not exist * @param force Always create the collection if it does not exist
* @return NamespaceDetails for the newly created collection, or NULL on error * @return Collection for the newly created collection, or NULL on err or
**/ **/
NamespaceDetails* getOrCreateProfileCollection(Database *db, bool force Collection* getOrCreateProfileCollection(Database *db,
= false, string* errmsg = NULL); bool force = false,
std::string* errmsg = NULL);
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
5 lines changed or deleted 10 lines changed or added


 intrusive_counter.h   intrusive_counter.h 
skipping to change at line 118 skipping to change at line 118
}; };
/// This is an immutable reference-counted string /// This is an immutable reference-counted string
class RCString : public RefCountable { class RCString : public RefCountable {
public: public:
const char* c_str() const { return reinterpret_cast<const char*>(th is) + sizeof(RCString); } const char* c_str() const { return reinterpret_cast<const char*>(th is) + sizeof(RCString); }
int size() const { return _size; } int size() const { return _size; }
StringData stringData() const { return StringData(c_str(), _size); } StringData stringData() const { return StringData(c_str(), _size); }
static intrusive_ptr<const RCString> create(StringData s); static intrusive_ptr<const RCString> create(StringData s);
// MSVC: C4291: 'declaration' : no matching operator delete found; memory w
ill not be freed if
// initialization throws an exception
// We simply rely on the default global placement delete since a local plac
ement delete would be
// ambiguous for some compilers
#pragma warning(push)
#pragma warning(disable : 4291)
void operator delete (void* ptr) { free(ptr); } void operator delete (void* ptr) { free(ptr); }
#pragma warning(pop)
private: private:
// these can only be created by calling create() // these can only be created by calling create()
RCString() {}; RCString() {};
void* operator new (size_t objSize, size_t realSize) { return mallo c(realSize); } void* operator new (size_t objSize, size_t realSize) { return mallo c(realSize); }
int _size; // does NOT include trailing NUL byte. int _size; // does NOT include trailing NUL byte.
// char[_size+1] array allocated past end of class // char[_size+1] array allocated past end of class
}; };
 End of changes. 2 change blocks. 
0 lines changed or deleted 10 lines changed or added


 is_master.h   is_master.h 
skipping to change at line 32 skipping to change at line 32
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/database.h" #include "mongo/db/catalog/database.h"
#include "mongo/db/repl/master_slave.h" // replAllDead #include "mongo/db/repl/master_slave.h" // replAllDead
#include "mongo/db/repl/replication_server_status.h" // replSettings #include "mongo/db/repl/replication_server_status.h" // replSettings
#include "mongo/db/repl/rs.h" #include "mongo/db/repl/rs.h"
#include "mongo/util/mongoutils/str.h" #include "mongo/util/mongoutils/str.h"
namespace mongo { namespace mongo {
/* note we always return true for the "local" namespace. /* note we always return true for the "local" namespace.
we should not allow most operations when not the master we should not allow most operations when not the master
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 jsobjmanipulator.h   jsobjmanipulator.h 
skipping to change at line 46 skipping to change at line 46
/** Manipulate the binary representation of a BSONElement in-place. /** Manipulate the binary representation of a BSONElement in-place.
Careful, this casts away const. Careful, this casts away const.
*/ */
class BSONElementManipulator { class BSONElementManipulator {
public: public:
BSONElementManipulator( const BSONElement &element ) : BSONElementManipulator( const BSONElement &element ) :
_element( element ) { _element( element ) {
verify( !_element.eoo() ); verify( !_element.eoo() );
} }
/** Replace a Timestamp type with a Date type initialized to
OpTime::now().asDate()
*/
void initTimestamp();
// Note the ones with a capital letter call getDur().writing and jo urnal // Note the ones with a capital letter call getDur().writing and jo urnal
/** Change the value, in place, of the number. */ /** Change the value, in place, of the number. */
void setNumber(double d) { void setNumber(double d) {
if ( _element.type() == NumberDouble ) *reinterpret_cast< doubl e * >( value() ) = d; if ( _element.type() == NumberDouble ) *reinterpret_cast< doubl e * >( value() ) = d;
else if ( _element.type() == NumberInt ) *reinterpret_cast< int * >( value() ) = (int) d; else if ( _element.type() == NumberInt ) *reinterpret_cast< int * >( value() ) = (int) d;
else verify(0); else verify(0);
} }
void SetNumber(double d); void SetNumber(double d);
skipping to change at line 78 skipping to change at line 74
} }
void SetInt(int n); void SetInt(int n);
/** Replace the type and value of the element with the type and val ue of e, /** Replace the type and value of the element with the type and val ue of e,
preserving the original fieldName */ preserving the original fieldName */
void replaceTypeAndValue( const BSONElement &e ) { void replaceTypeAndValue( const BSONElement &e ) {
*data() = e.type(); *data() = e.type();
memcpy( value(), e.value(), e.valuesize() ); memcpy( value(), e.value(), e.valuesize() );
} }
/* dur:: version */
void ReplaceTypeAndValue( const BSONElement &e );
static void lookForTimestamps( const BSONObj& obj ) {
// If have a Timestamp field as the first or second element,
// update it to a Date field set to OpTime::now().asDate(). Th
e
// replacement policy is a work in progress.
BSONObjIterator i( obj );
for( int j = 0; i.moreWithEOO() && j < 2; ++j ) {
BSONElement e = i.next();
if ( e.eoo() )
break;
if ( e.type() == Timestamp ) {
// performance note, this locks a mutex:
BSONElementManipulator( e ).initTimestamp();
break;
}
}
}
private: private:
char *data() { return nonConst( _element.rawdata() ); } char *data() { return nonConst( _element.rawdata() ); }
char *value() { return nonConst( _element.value() ); } char *value() { return nonConst( _element.value() ); }
static char *nonConst( const char *s ) { return const_cast< char * >( s ); } static char *nonConst( const char *s ) { return const_cast< char * >( s ); }
const BSONElement _element; const BSONElement _element;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
25 lines changed or deleted 0 lines changed or added


 json.h   json.h 
skipping to change at line 35 skipping to change at line 35
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* Create a BSONObj from a JSON <http://www.json.org>, * Create a BSONObj from a JSON <http://www.json.org>,
* <http://www.ietf.org/rfc/rfc4627.txt> string. In addition to the JS ON * <http://www.ietf.org/rfc/rfc4627.txt> string. In addition to the JS ON
* extensions extensions described here * extensions extensions described here
* <http://dochub.mongodb.org/core/mongodbextendedjson>, this function * <http://dochub.mongodb.org/core/mongodbextendedjson>, this function
* accepts unquoted field names and allows single quotes to optionally be * accepts unquoted field names and allows single quotes to optionally be
* used when specifying field names and string values instead of double * used when specifying field names and string values instead of double
* quotes. JSON unicode escape sequences (of the form \uXXXX) are * quotes. JSON unicode escape sequences (of the form \uXXXX) are
* converted to utf8. * converted to utf8.
* *
* @throws MsgAssertionException if parsing fails. The message include d with * @throws MsgAssertionException if parsing fails. The message include d with
* this assertion includes the character offset where parsing failed. * this assertion includes the character offset where parsing failed.
*/ */
BSONObj fromjson(const std::string& str); MONGO_CLIENT_API BSONObj fromjson(const std::string& str);
/** @param len will be size of JSON object in text chars. */ /** @param len will be size of JSON object in text chars. */
BSONObj fromjson(const char* str, int* len=NULL); MONGO_CLIENT_API BSONObj fromjson(const char* str, int* len=NULL);
/** /**
* Parser class. A BSONObj is constructed incrementally by passing a * Parser class. A BSONObj is constructed incrementally by passing a
* BSONObjBuilder to the recursive parsing methods. The grammar for th e * BSONObjBuilder to the recursive parsing methods. The grammar for th e
* element parsed is described before each function. * element parsed is described before each function.
*/ */
class JParse { class JParse {
public: public:
explicit JParse(const char*); explicit JParse(const char*);
skipping to change at line 120 skipping to change at line 121
* FIELD : VALUE * FIELD : VALUE
* *
* SPECIALOBJECT : * SPECIALOBJECT :
* OIDOBJECT * OIDOBJECT
* | BINARYOBJECT * | BINARYOBJECT
* | DATEOBJECT * | DATEOBJECT
* | TIMESTAMPOBJECT * | TIMESTAMPOBJECT
* | REGEXOBJECT * | REGEXOBJECT
* | REFOBJECT * | REFOBJECT
* | UNDEFINEDOBJECT * | UNDEFINEDOBJECT
* | NUMBERLONGOBJECT
* *
*/ */
public: public:
Status object(const StringData& fieldName, BSONObjBuilder&, boo l subObj=true); Status object(const StringData& fieldName, BSONObjBuilder&, boo l subObj=true);
private: private:
/* The following functions are called with the '{' and the firs t /* The following functions are called with the '{' and the firs t
* field already parsed since they are both implied given the * field already parsed since they are both implied given the
* context. */ * context. */
/* /*
skipping to change at line 183 skipping to change at line 185
*/ */
Status dbRefObject(const StringData& fieldName, BSONObjBuilder& ); Status dbRefObject(const StringData& fieldName, BSONObjBuilder& );
/* /*
* UNDEFINEDOBJECT : * UNDEFINEDOBJECT :
* { FIELD("$undefined") : true } * { FIELD("$undefined") : true }
*/ */
Status undefinedObject(const StringData& fieldName, BSONObjBuil der&); Status undefinedObject(const StringData& fieldName, BSONObjBuil der&);
/* /*
* NUMBERLONGOBJECT :
* { FIELD("$numberLong") : "<number>" }
*/
Status numberLongObject(const StringData& fieldName, BSONObjBui
lder&);
/*
* ARRAY : * ARRAY :
* [] * []
* | [ ELEMENTS ] * | [ ELEMENTS ]
* *
* ELEMENTS : * ELEMENTS :
* VALUE * VALUE
* | VALUE , ELEMENTS * | VALUE , ELEMENTS
*/ */
Status array(const StringData& fieldName, BSONObjBuilder&); Status array(const StringData& fieldName, BSONObjBuilder&);
 End of changes. 5 change blocks. 
2 lines changed or deleted 11 lines changed or added


 key.h   key.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
/** Key class for precomputing a small format index key that is denser than a traditional BSONObj. /** Key class for precomputing a small format index key that is denser than a traditional BSONObj.
KeyBson is a legacy wrapper implementation for old BSONObj style ke ys for v:0 indexes. KeyBson is a legacy wrapper implementation for old BSONObj style ke ys for v:0 indexes.
KeyV1 is the new implementation. KeyV1 is the new implementation.
*/ */
class KeyBson /* "KeyV0" */ { class KeyBson /* "KeyV0" */ {
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 lasterror.h   lasterror.h 
skipping to change at line 32 skipping to change at line 32
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/bson/oid.h" #include "mongo/bson/oid.h"
#include "mongo/util/log.h" #include "mongo/util/log.h"
namespace mongo { namespace mongo {
class BSONObjBuilder; class BSONObjBuilder;
class Message; class Message;
static const char kUpsertedFieldName[] = "upserted"; static const char kUpsertedFieldName[] = "upserted";
static const char kGLEStatsFieldName[] = "$gleStats";
static const char kGLEStatsLastOpTimeFieldName[] = "lastOpTime";
static const char kGLEStatsElectionIdFieldName[] = "electionId";
struct LastError { struct LastError {
int code; int code;
std::string msg; std::string msg;
enum UpdatedExistingType { NotUpdate, True, False } updatedExisting ; enum UpdatedExistingType { NotUpdate, True, False } updatedExisting ;
// _id field value from inserted doc, returned as kUpsertedFieldNam e (above) // _id field value from inserted doc, returned as kUpsertedFieldNam e (above)
BSONObj upsertedId; BSONObj upsertedId;
OID writebackId; // this shouldn't get reset so that old GLE are ha ndled OID writebackId; // this shouldn't get reset so that old GLE are ha ndled
int writebackSince; int writebackSince;
long long nObjects; long long nObjects;
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 limit.h   limit.h 
skipping to change at line 54 skipping to change at line 54
class LimitStage : public PlanStage { class LimitStage : public PlanStage {
public: public:
LimitStage(int limit, WorkingSet* ws, PlanStage* child); LimitStage(int limit, WorkingSet* ws, PlanStage* child);
virtual ~LimitStage(); virtual ~LimitStage();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
WorkingSet* _ws; WorkingSet* _ws;
scoped_ptr<PlanStage> _child; scoped_ptr<PlanStage> _child;
// We only return this many results. // We only return this many results.
int _numToReturn; int _numToReturn;
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 listen.h   listen.h 
skipping to change at line 52 skipping to change at line 52
void initAndListen(); // never returns unless error (start a thread ) void initAndListen(); // never returns unless error (start a thread )
/* spawn a thread, etc., then return */ /* spawn a thread, etc., then return */
virtual void accepted(boost::shared_ptr<Socket> psocket, long long connectionId ); virtual void accepted(boost::shared_ptr<Socket> psocket, long long connectionId );
virtual void acceptedMP(MessagingPort *mp); virtual void acceptedMP(MessagingPort *mp);
const int _port; const int _port;
/** /**
* @return a rough estimate of elapsed time since the server starte d * @return a rough estimate of elapsed time since the server starte d
todo:
1) consider adding some sort of relaxedLoad semantic to the read
ing here of
_elapsedTime
2) curTimeMillis() implementations have gotten faster. consider
eliminating
this code? would have to measure it first. if eliminated be
careful if
syscall used isn't skewable. Note also if #2 is done, listen
() doesn't
then have to keep waking up and maybe that helps on a develop
er's laptop
battery usage...
*/ */
long long getMyElapsedTimeMillis() const { return _elapsedTime; } long long getMyElapsedTimeMillis() const { return _elapsedTime; }
/** /**
* Allocate sockets for the listener and set _setupSocketsSuccessfu l to true * Allocate sockets for the listener and set _setupSocketsSuccessfu l to true
* iff the process was successful. * iff the process was successful.
*/ */
void setupSockets(); void setupSockets();
void setAsTimeTracker() { void setAsTimeTracker() {
 End of changes. 1 change blocks. 
0 lines changed or deleted 13 lines changed or added


 lite_parsed_query.h   lite_parsed_query.h 
skipping to change at line 50 skipping to change at line 50
* Fills out a LiteParsedQuery. Used for debugging and testing, wh en we don't have a * Fills out a LiteParsedQuery. Used for debugging and testing, wh en we don't have a
* QueryMessage. * QueryMessage.
*/ */
static Status make(const string& ns, static Status make(const string& ns,
int ntoskip, int ntoskip,
int ntoreturn, int ntoreturn,
int queryoptions, int queryoptions,
const BSONObj& query, const BSONObj& query,
const BSONObj& proj, const BSONObj& proj,
const BSONObj& sort, const BSONObj& sort,
const BSONObj& hint,
const BSONObj& minObj,
const BSONObj& maxObj,
bool snapshot,
LiteParsedQuery** out); LiteParsedQuery** out);
/** /**
* Helper functions to parse maxTimeMS from a command object. Retu rns the contained value, * Helper functions to parse maxTimeMS from a command object. Retu rns the contained value,
* or an error on parsing fail. When passed an EOO-type element, r eturns 0 (special value * or an error on parsing fail. When passed an EOO-type element, r eturns 0 (special value
* for "allow to run indefinitely"). * for "allow to run indefinitely").
*/ */
static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj) ; static StatusWith<int> parseMaxTimeMSCommand(const BSONObj& cmdObj) ;
/** /**
* Same as parseMaxTimeMSCommand, but for a query object. * Same as parseMaxTimeMSCommand, but for a query object.
*/ */
static StatusWith<int> parseMaxTimeMSQuery(const BSONObj& queryObj) ; static StatusWith<int> parseMaxTimeMSQuery(const BSONObj& queryObj) ;
// Name of the maxTimeMS command option. /**
static const string cmdOptionMaxTimeMS; * Helper function to identify text search sort key
* Example: {a: {$meta: "textScore"}}
*/
static bool isTextScoreMeta(BSONElement elt);
/**
* Helper function to identify diskLoc projection
* Example: {a: {$meta: "diskloc"}}.
*/
static bool isDiskLocMeta(BSONElement elt);
/**
* Helper function to validate a sort object.
* Returns true if each element satisfies one of:
* 1. a number with value 1
* 2. a number with value -1
* 3. isTextScoreMeta
*/
static bool isValidSortOrder(const BSONObj& sortObj);
// Name of the maxTimeMS query option. /**
* Helper function to create a normalized sort object.
* Each element of the object returned satisfies one of:
* 1. a number with value 1
* 2. a number with value -1
* 3. isTextScoreMeta
*/
static BSONObj normalizeSortOrder(const BSONObj& sortObj);
// Names of the maxTimeMS command and query option.
static const string cmdOptionMaxTimeMS;
static const string queryOptionMaxTimeMS; static const string queryOptionMaxTimeMS;
// Names of the $meta projection values.
static const string metaTextScore;
static const string metaGeoNearDistance;
static const string metaGeoNearPoint;
static const string metaDiskLoc;
static const string metaIndexKey;
const string& ns() const { return _ns; } const string& ns() const { return _ns; }
bool isLocalDB() const { return _ns.compare(0, 6, "local.") == 0; } bool isLocalDB() const { return _ns.compare(0, 6, "local.") == 0; }
const BSONObj& getFilter() const { return _filter; } const BSONObj& getFilter() const { return _filter; }
const BSONObj& getProj() const { return _proj; } const BSONObj& getProj() const { return _proj; }
const BSONObj& getSort() const { return _sort; } const BSONObj& getSort() const { return _sort; }
const BSONObj& getHint() const { return _hint; } const BSONObj& getHint() const { return _hint; }
int getSkip() const { return _ntoskip; } int getSkip() const { return _ntoskip; }
int getNumToReturn() const { return _ntoreturn; } int getNumToReturn() const { return _ntoreturn; }
 End of changes. 4 change blocks. 
3 lines changed or deleted 42 lines changed or added


 lockstate.h   lockstate.h 
skipping to change at line 125 skipping to change at line 125
Lock::ScopedLock* _scopedLk; Lock::ScopedLock* _scopedLk;
bool _lockPending; bool _lockPending;
bool _lockPendingParallelWriter; bool _lockPendingParallelWriter;
friend class Acquiring; friend class Acquiring;
friend class AcquiringParallelWriter; friend class AcquiringParallelWriter;
}; };
class WrapperForRWLock : boost::noncopyable { class WrapperForRWLock : boost::noncopyable {
SimpleRWLock r; SimpleRWLock rw;
SimpleMutex m;
bool sharedLatching;
public: public:
string name() const { return r.name; } string name() const { return rw.name; }
LockStat stats; LockStat stats;
WrapperForRWLock(const StringData& name) : r(name) { } WrapperForRWLock(const StringData& name)
void lock() { r.lock(); } : rw(name), m(name) {
void lock_shared() { r.lock_shared(); } // For the local datbase, all operations are short,
void unlock() { r.unlock(); } // either writing one entry, or doing a tail.
void unlock_shared() { r.unlock_shared(); } // In tests, use a SimpleMutex is much faster for the local db.
sharedLatching = name != "local";
}
void lock() { if ( sharedLatching ) { rw.lock(); } else {
m.lock(); } }
void lock_shared() { if ( sharedLatching ) { rw.lock_shared(); }
else { m.lock(); } }
void unlock() { if ( sharedLatching ) { rw.unlock(); } else
{ m.unlock(); } }
void unlock_shared() { if ( sharedLatching ) { rw.unlock_shared();
} else { m.unlock(); } }
}; };
class ScopedLock; class ScopedLock;
class Acquiring { class Acquiring {
public: public:
Acquiring( Lock::ScopedLock* lock, LockState& ls ); Acquiring( Lock::ScopedLock* lock, LockState& ls );
~Acquiring(); ~Acquiring();
private: private:
Lock::ScopedLock* _lock; Lock::ScopedLock* _lock;
 End of changes. 3 change blocks. 
7 lines changed or deleted 19 lines changed or added


 log.h   log.h 
skipping to change at line 131 skipping to change at line 131
LogIndentLevel(); LogIndentLevel();
~LogIndentLevel(); ~LogIndentLevel();
}; };
extern Tee* const warnings; // Things put here go in serverStatus extern Tee* const warnings; // Things put here go in serverStatus
extern Tee* const startupWarningsLog; // Things put here get reported i n MMS extern Tee* const startupWarningsLog; // Things put here get reported i n MMS
string errnoWithDescription(int errorcode = -1); string errnoWithDescription(int errorcode = -1);
void rawOut( const StringData &s ); void rawOut( const StringData &s );
/*
* Redirects the output of "rawOut" to stderr. The default is stdout.
*
* NOTE: This needs to be here because the tools such as mongoexport an
d mongodump sometimes
* send data to stdout and share this code, so they need to be able to
redirect output to
* stderr. Eventually rawOut should be replaced with something better
and our tools should not
* need to call internal server shutdown functions.
*
* NOTE: This function is not thread safe and should not be called from
a multithreaded context.
*/
void setRawOutToStderr();
/** /**
* Write the current context (backtrace), along with the optional "msg" . * Write the current context (backtrace), along with the optional "msg" .
*/ */
void logContext(const char *msg = NULL); void logContext(const char *msg = NULL);
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
0 lines changed or deleted 16 lines changed or added


 log_builder.h   log_builder.h 
skipping to change at line 91 skipping to change at line 91
Status addToSetsWithNewFieldName(const StringData& name, const muta blebson::Element val); Status addToSetsWithNewFieldName(const StringData& name, const muta blebson::Element val);
/** /**
* Convenience method which calls addToSets after * Convenience method which calls addToSets after
* creating a new Element to wrap the old one. * creating a new Element to wrap the old one.
* *
* If any problem occurs then the operation will stop and return th at error Status. * If any problem occurs then the operation will stop and return th at error Status.
*/ */
Status addToSetsWithNewFieldName(const StringData& name, const BSON Element& val); Status addToSetsWithNewFieldName(const StringData& name, const BSON Element& val);
/** Add the given Element as a new entry in the '$unset' section of the log. If an /** Add the given path as a new entry in the '$unset' section of th e log. If an
* '$unset' section does not yet exist, it will be created. If thi s LogBuilder is * '$unset' section does not yet exist, it will be created. If thi s LogBuilder is
* currently configured to contain an object replacement, the requ est to add to the * currently configured to contain an object replacement, the requ est to add to the
* $unset section will return an Error. * $unset section will return an Error.
*/ */
Status addToUnsets(mutablebson::Element elt); Status addToUnsets(StringData path);
/**
* Convenience method which calls addToUnsets after
* creating a new Element to wrap the old one.
*
* If any problem occurs then the operation will stop and return th
at error Status.
*/
Status addToUnsetsWithNewFieldName(const StringData& name, const mu
tablebson::Element val);
/**
* Convenience method which calls addToUnsets after
* creating a new Element to wrap the old one.
*
* If any problem occurs then the operation will stop and return th
at error Status.
*/
Status addToUnsetsWithNewFieldName(const StringData& name, const BS
ONElement& val);
/** Obtain, via the out parameter 'outElt', a pointer to the mongo: :Object type Element /** Obtain, via the out parameter 'outElt', a pointer to the mongo: :Object type Element
* to which the components of an object replacement should be reco rded. It is an error * to which the components of an object replacement should be reco rded. It is an error
* to call this if any Elements have been added by calling either addToSets or * to call this if any Elements have been added by calling either addToSets or
* addToUnsets, and attempts to do so will return a non-OK Status. Similarly, if there * addToUnsets, and attempts to do so will return a non-OK Status. Similarly, if there
* is already object replacement data recorded for this log, the c all will fail. * is already object replacement data recorded for this log, the c all will fail.
*/ */
Status getReplacementObject(mutablebson::Element* outElt); Status getReplacementObject(mutablebson::Element* outElt);
private: private:
 End of changes. 2 change blocks. 
22 lines changed or deleted 2 lines changed or added


 log_domain-impl.h   log_domain-impl.h 
skipping to change at line 18 skipping to change at line 18
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/logger/message_log_domain.h"
#include <algorithm> #include <algorithm>
#include <cstdlib>
#include "mongo/base/status.h"
#include "mongo/logger/message_log_domain.h"
/* /*
* Implementation of LogDomain<E>. Include this in cpp files to instantiat e new LogDomain types. * Implementation of LogDomain<E>. Include this in cpp files to instantiat e new LogDomain types.
* See message_log_domain.h, e.g. * See message_log_domain.h, e.g.
*/ */
namespace mongo { namespace mongo {
namespace logger { namespace logger {
template <typename E> template <typename E>
LogDomain<E>::LogDomain() : _minimumLoggedSeverity(LogSeverity::Log()) LogDomain<E>::LogDomain()
{} : _minimumLoggedSeverity(LogSeverity::Log()), _abortOnFailure(false
)
{}
template <typename E> template <typename E>
LogDomain<E>::~LogDomain() { LogDomain<E>::~LogDomain() {
clearAppenders(); clearAppenders();
} }
template <typename E> template <typename E>
void LogDomain<E>::append(const E& event) { Status LogDomain<E>::append(const E& event) {
for (typename AppenderVector::const_iterator iter = _appenders.begi n(); for (typename AppenderVector::const_iterator iter = _appenders.begi n();
iter != _appenders.end(); ++iter) { iter != _appenders.end(); ++iter) {
if (*iter) { if (*iter) {
(*iter)->append(event); Status status = (*iter)->append(event);
if (!status.isOK()) {
if (_abortOnFailure) {
::abort();
}
return status;
}
} }
} }
return Status::OK();
} }
template <typename E> template <typename E>
typename LogDomain<E>::AppenderHandle LogDomain<E>::attachAppender( typename LogDomain<E>::AppenderHandle LogDomain<E>::attachAppender(
typename LogDomain<E>::AppenderAutoPtr appender) { typename LogDomain<E>::AppenderAutoPtr appender) {
typename AppenderVector::iterator iter = std::find( typename AppenderVector::iterator iter = std::find(
_appenders.begin(), _appenders.begin(),
_appenders.end(), _appenders.end(),
static_cast<EventAppender*>(NULL)); static_cast<EventAppender*>(NULL));
 End of changes. 6 change blocks. 
6 lines changed or deleted 17 lines changed or added


 log_domain.h   log_domain.h 
skipping to change at line 80 skipping to change at line 80
// TODO(schwerin): Replace with unique_ptr in C++11. // TODO(schwerin): Replace with unique_ptr in C++11.
typedef std::auto_ptr<EventAppender> AppenderAutoPtr; typedef std::auto_ptr<EventAppender> AppenderAutoPtr;
LogDomain(); LogDomain();
~LogDomain(); ~LogDomain();
/** /**
* Receives an event for logging, calling append(event) on all atta ched appenders. * Receives an event for logging, calling append(event) on all atta ched appenders.
* *
* TODO(schwerin): Should we return failed statuses somehow? vecto * If any appender fails, the behavior is determined by the abortOn
r<AppenderHandle, Status> Failure flag:
* for failed appends, e.g.? * *If abortOnFailure is set, ::abort() is immediately called.
* *If abortOnFailure is not set, the error is returned and no furt
her appenders are called.
*/ */
void append(const Event& event); Status append(const Event& event);
/** /**
* Predicate that answers the question, "Should I, the caller, appe nd to you, the log * Predicate that answers the question, "Should I, the caller, appe nd to you, the log
* domain, messages of the given severity?" True means yes. * domain, messages of the given severity?" True means yes.
*/ */
bool shouldLog(LogSeverity severity) { return severity >= _minimumL oggedSeverity; } bool shouldLog(LogSeverity severity) { return severity >= _minimumL oggedSeverity; }
/** /**
* Gets the minimum severity of messages that should be sent to thi s LogDomain. * Gets the minimum severity of messages that should be sent to thi s LogDomain.
*/ */
LogSeverity getMinimumLogSeverity() { return _minimumLoggedSeverity ; } LogSeverity getMinimumLogSeverity() { return _minimumLoggedSeverity ; }
/** /**
* Sets the minimum severity of messages that should be sent to thi s LogDomain. * Sets the minimum severity of messages that should be sent to thi s LogDomain.
*/ */
void setMinimumLoggedSeverity(LogSeverity severity) { _minimumLogge dSeverity = severity; } void setMinimumLoggedSeverity(LogSeverity severity) { _minimumLogge dSeverity = severity; }
/**
* Gets the state of the abortOnFailure flag.
*/
bool getAbortOnFailure() const { return _abortOnFailure; }
/**
* Sets the state of the abortOnFailure flag.
*/
void setAbortOnFailure(bool abortOnFailure) { _abortOnFailure = abo
rtOnFailure; }
// //
// Configuration methods. Must be synchronized with each other and calls to "append" by the // Configuration methods. Must be synchronized with each other and calls to "append" by the
// caller. // caller.
// //
/** /**
* Attaches "appender" to this domain, taking ownership of it. Ret urns a handle that may be * Attaches "appender" to this domain, taking ownership of it. Ret urns a handle that may be
* used later to detach this appender. * used later to detach this appender.
*/ */
AppenderHandle attachAppender(AppenderAutoPtr appender); AppenderHandle attachAppender(AppenderAutoPtr appender);
skipping to change at line 129 skipping to change at line 140
/** /**
* Destroy all attached appenders, invalidating all handles. * Destroy all attached appenders, invalidating all handles.
*/ */
void clearAppenders(); void clearAppenders();
private: private:
typedef std::vector<EventAppender*> AppenderVector; typedef std::vector<EventAppender*> AppenderVector;
LogSeverity _minimumLoggedSeverity; LogSeverity _minimumLoggedSeverity;
AppenderVector _appenders; AppenderVector _appenders;
bool _abortOnFailure;
}; };
} // namespace logger } // namespace logger
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
4 lines changed or deleted 18 lines changed or added


 logfile.h   logfile.h 
skipping to change at line 33 skipping to change at line 33
* for all of the code used other than as permitted herein. If you modify * for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your * file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do not * version of the file(s), but you are not obligated to do so. If you do not
* wish to do so, delete this exception statement from your version. If y ou * wish to do so, delete this exception statement from your version. If y ou
* delete this exception statement from all source files in the program, * delete this exception statement from all source files in the program,
* then also delete it in the license file. * then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
class LogFile { class MONGO_CLIENT_API LogFile {
public: public:
/** create the file and open. must not already exist. /** create the file and open. must not already exist.
throws UserAssertion on i/o error throws UserAssertion on i/o error
*/ */
LogFile(const std::string& name, bool readwrite = false); LogFile(const std::string& name, bool readwrite = false);
/** closes */ /** closes */
~LogFile(); ~LogFile();
/** append to file. does not return until sync'd. uses direct i/o when possible. /** append to file. does not return until sync'd. uses direct i/o when possible.
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 logger.h   logger.h 
skipping to change at line 18 skipping to change at line 18
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/client/export_macros.h"
#include "mongo/logger/message_log_domain.h" #include "mongo/logger/message_log_domain.h"
#include "mongo/logger/log_manager.h" #include "mongo/logger/log_manager.h"
#include "mongo/logger/rotatable_file_manager.h" #include "mongo/logger/rotatable_file_manager.h"
namespace mongo { namespace mongo {
namespace logger { namespace logger {
/** /**
* Gets a global singleton instance of RotatableFileManager. * Gets a global singleton instance of RotatableFileManager.
*/ */
RotatableFileManager* globalRotatableFileManager(); RotatableFileManager* globalRotatableFileManager();
/** /**
* Gets a global singleton instance of LogManager. * Gets a global singleton instance of LogManager.
*/ */
LogManager* globalLogManager(); MONGO_CLIENT_API LogManager* globalLogManager();
/** /**
* Gets the global MessageLogDomain associated for the global log manag er. * Gets the global MessageLogDomain associated for the global log manag er.
*/ */
inline MessageLogDomain* globalLogDomain() { return globalLogManager()- >getGlobalDomain(); } inline MessageLogDomain* globalLogDomain() { return globalLogManager()- >getGlobalDomain(); }
} // namespace logger } // namespace logger
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 logging.h   logging.h 
skipping to change at line 17 skipping to change at line 17
// //
// Unless required by applicable law or agreed to in writing, software // Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, // distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#ifndef BASE_LOGGING_H #ifndef BASE_LOGGING_H
#define BASE_LOGGING_H #define BASE_LOGGING_H
#include <stdlib.h> #include <iosfwd>
#include <stdlib.h>
#include <iostream> #include "mongo/util/log.h"
using std::ostream;
using std::cout;
using std::endl;
#include "macros.h" #include "macros.h"
// Always-on checking // Always-on checking
#define CHECK(x) if(x){}else LogMessageFatal(__FILE__, __LINE__).stre am() << "Check failed: " #x #define CHECK(x) if(x){}else LogMessageFatal(__FILE__, __LINE__).stre am() << "Check failed: " #x
#define CHECK_LT(x, y) CHECK((x) < (y)) #define CHECK_LT(x, y) CHECK((x) < (y))
#define CHECK_GT(x, y) CHECK((x) > (y)) #define CHECK_GT(x, y) CHECK((x) > (y))
#define CHECK_LE(x, y) CHECK((x) <= (y)) #define CHECK_LE(x, y) CHECK((x) <= (y))
#define CHECK_GE(x, y) CHECK((x) >= (y)) #define CHECK_GE(x, y) CHECK((x) >= (y))
#define CHECK_EQ(x, y) CHECK((x) == (y)) #define CHECK_EQ(x, y) CHECK((x) == (y))
skipping to change at line 56 skipping to change at line 53
#define DCHECK(condition) CHECK(false) #define DCHECK(condition) CHECK(false)
#define DCHECK_EQ(val1, val2) CHECK(false) #define DCHECK_EQ(val1, val2) CHECK(false)
#define DCHECK_NE(val1, val2) CHECK(false) #define DCHECK_NE(val1, val2) CHECK(false)
#define DCHECK_LE(val1, val2) CHECK(false) #define DCHECK_LE(val1, val2) CHECK(false)
#define DCHECK_LT(val1, val2) CHECK(false) #define DCHECK_LT(val1, val2) CHECK(false)
#define DCHECK_GE(val1, val2) CHECK(false) #define DCHECK_GE(val1, val2) CHECK(false)
#define DCHECK_GT(val1, val2) CHECK(false) #define DCHECK_GT(val1, val2) CHECK(false)
#endif #endif
#include "base/port.h" #include "base/port.h"
#define INFO std::cout #define INFO mongo::log().stream()
#define FATAL std::cerr #define FATAL LogMessageFatal(__FILE__, __LINE__).stream()
#define DFATAL std::cerr #define DFATAL LogMessageFatal(__FILE__, __LINE__).stream()
#define S2LOG(x) x #define S2LOG(x) x
#define VLOG(x) if (x>0) {} else S2LOG(INFO) #define VLOG(x) if (x>0) {} else S2LOG(INFO)
namespace google_base { class LogMessageFatal {
class DateLogger {
public:
DateLogger();
char* const HumanDate();
private:
char buffer_[9];
};
} // namespace google_base
class LogMessage {
public: public:
LogMessage(const char* file, int line) { LogMessageFatal(const char* file, int line);
std::cerr << "[" << pretty_date_.HumanDate() << "] " ~LogMessageFatal();
<< file << ":" << line << ": "; std::ostream& stream() { return _lsb.stream(); }
}
~LogMessage() { std::cerr << "\n"; }
std::ostream& stream() { return std::cerr; }
private: private:
google_base::DateLogger pretty_date_; mongo::logger::LogstreamBuilder _lsb;
DISALLOW_COPY_AND_ASSIGN(LogMessage);
};
class LogMessageFatal : public LogMessage {
public:
LogMessageFatal(const char* file, int line)
: LogMessage(file, line) { }
~LogMessageFatal() {
std::cerr << "\n";
::abort();
}
private:
DISALLOW_COPY_AND_ASSIGN(LogMessageFatal); DISALLOW_COPY_AND_ASSIGN(LogMessageFatal);
}; };
#endif // BASE_LOGGING_H #endif // BASE_LOGGING_H
 End of changes. 5 change blocks. 
39 lines changed or deleted 11 lines changed or added


 logstream_builder.h   logstream_builder.h 
skipping to change at line 23 skipping to change at line 23
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <iostream> #include <iostream>
#include <sstream> #include <sstream>
#include <string> #include <string>
#include "mongo/client/export_macros.h"
#include "mongo/logger/labeled_level.h" #include "mongo/logger/labeled_level.h"
#include "mongo/logger/log_severity.h" #include "mongo/logger/log_severity.h"
#include "mongo/logger/message_log_domain.h" #include "mongo/logger/message_log_domain.h"
#include "mongo/util/exit_code.h" #include "mongo/util/exit_code.h"
namespace mongo { namespace mongo {
namespace logger { namespace logger {
class Tee; class Tee;
/** /**
* Stream-ish object used to build and append log messages. * Stream-ish object used to build and append log messages.
*/ */
class LogstreamBuilder { class MONGO_CLIENT_API LogstreamBuilder {
public: public:
static LogSeverity severityCast(int ll) { return LogSeverity::cast( ll); } static LogSeverity severityCast(int ll) { return LogSeverity::cast( ll); }
static LogSeverity severityCast(LogSeverity ls) { return ls; } static LogSeverity severityCast(LogSeverity ls) { return ls; }
static LabeledLevel severityCast(const LabeledLevel &labeled) { ret urn labeled; } static LabeledLevel severityCast(const LabeledLevel &labeled) { ret urn labeled; }
/** /**
* Construct a LogstreamBuilder that writes to "domain" on destruct ion. * Construct a LogstreamBuilder that writes to "domain" on destruct ion.
* *
* "contextName" is a short name of the thread or other context. * "contextName" is a short name of the thread or other context.
* "severity" is the logging priority/severity of the message. * "severity" is the logging priority/severity of the message.
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 master_slave.h   master_slave.h 
skipping to change at line 100 skipping to change at line 100
clone 100 databases in one pass.) clone 100 databases in one pass.)
*/ */
set<string> addDbNextPass; set<string> addDbNextPass;
set<string> incompleteCloneDbs; set<string> incompleteCloneDbs;
BSONObj _me; BSONObj _me;
ReplSource(); ReplSource();
// returns the dummy ns used to do the drop void resyncDrop( const string& db );
string resyncDrop( const char *db, const char *requester );
// call without the db mutex // call without the db mutex
void syncToTailOfRemoteLog(); void syncToTailOfRemoteLog();
string ns() const { return string( "local.oplog.$" ) + sourceName() ; } string ns() const { return string( "local.oplog.$" ) + sourceName() ; }
unsigned _sleepAdviceTime; unsigned _sleepAdviceTime;
/** /**
* If 'db' is a new database and its name would conflict with that of * If 'db' is a new database and its name would conflict with that of
* an existing database, synchronize these database names with the * an existing database, synchronize these database names with the
* master. * master.
* @return true iff an op with the specified ns may be applied. * @return true iff an op with the specified ns may be applied.
 End of changes. 1 change blocks. 
2 lines changed or deleted 1 lines changed or added


 matchable.h   matchable.h 
skipping to change at line 41 skipping to change at line 41
#pragma once #pragma once
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/db/field_ref.h" #include "mongo/db/field_ref.h"
#include "mongo/db/matcher/path.h" #include "mongo/db/matcher/path.h"
namespace mongo { namespace mongo {
class MatchableDocument { class MatchableDocument {
public: public:
virtual ~MatchableDocument(); // Inlining to allow subclasses to see that this is a no-op and avo
id a function call.
// Speeds up query execution measurably.
virtual ~MatchableDocument() {}
virtual BSONObj toBSON() const = 0; virtual BSONObj toBSON() const = 0;
/** /**
* The neewly returned ElementIterator is allowed to keep a pointer to path. * The neewly returned ElementIterator is allowed to keep a pointer to path.
* So the caller of this function should make sure path is in scope until * So the caller of this function should make sure path is in scope until
* the ElementIterator is deallocated * the ElementIterator is deallocated
*/ */
virtual ElementIterator* allocateIterator( const ElementPath* path ) const = 0; virtual ElementIterator* allocateIterator( const ElementPath* path ) const = 0;
 End of changes. 1 change blocks. 
1 lines changed or deleted 4 lines changed or added


 matcher.h   matcher.h 
skipping to change at line 40 skipping to change at line 40
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class Cursor; class Cursor;
class CoveredIndexMatcher;
class FieldRangeVector; class FieldRangeVector;
struct element_lt { struct element_lt {
bool operator()(const BSONElement& l, const BSONElement& r) const { bool operator()(const BSONElement& l, const BSONElement& r) const {
int x = (int) l.canonicalType() - (int) r.canonicalType(); int x = (int) l.canonicalType() - (int) r.canonicalType();
if ( x < 0 ) return true; if ( x < 0 ) return true;
else if ( x > 0 ) return false; else if ( x > 0 ) return false;
return compareElementValues(l,r) < 0; return compareElementValues(l,r) < 0;
} }
}; };
 End of changes. 1 change blocks. 
1 lines changed or deleted 0 lines changed or added


 merge_sort.h   merge_sort.h 
skipping to change at line 68 skipping to change at line 68
MergeSortStage(const MergeSortStageParams& params, WorkingSet* ws); MergeSortStage(const MergeSortStageParams& params, WorkingSet* ws);
virtual ~MergeSortStage(); virtual ~MergeSortStage();
void addChild(PlanStage* child); void addChild(PlanStage* child);
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
PlanStageStats* getStats(); PlanStageStats* getStats();
private: private:
// Not owned by us. // Not owned by us.
WorkingSet* _ws; WorkingSet* _ws;
// The pattern that we're sorting by. // The pattern that we're sorting by.
BSONObj _pattern; BSONObj _pattern;
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 mmap.h   mmap.h 
skipping to change at line 26 skipping to change at line 26
*/ */
#pragma once #pragma once
#include <set> #include <set>
#include <sstream> #include <sstream>
#include <vector> #include <vector>
#include <boost/thread/xtime.hpp> #include <boost/thread/xtime.hpp>
#include "mongo/client/export_macros.h"
#include "mongo/util/concurrency/rwlock.h" #include "mongo/util/concurrency/rwlock.h"
#include "mongo/util/goodies.h" #include "mongo/util/goodies.h"
namespace mongo { namespace mongo {
extern const size_t g_minOSPageSizeBytes; extern const size_t g_minOSPageSizeBytes;
void minOSPageSizeBytesTest(size_t minOSPageSizeBytes); // lame-o void minOSPageSizeBytesTest(size_t minOSPageSizeBytes); // lame-o
// call this if syncing data fails
void dataSyncFailedHandler();
class MAdvise { class MAdvise {
void *_p; MONGO_DISALLOW_COPYING(MAdvise);
unsigned _len;
public: public:
enum Advice { Sequential=1 , Random=2 }; enum Advice { Sequential=1 , Random=2 };
MAdvise(void *p, unsigned len, Advice a); MAdvise(void *p, unsigned len, Advice a);
~MAdvise(); // destructor resets the range to MADV_NORMAL ~MAdvise(); // destructor resets the range to MADV_NORMAL
private:
void *_p;
unsigned _len;
}; };
// lock order: lock dbMutex before this if you lock both // lock order: lock dbMutex before this if you lock both
class LockMongoFilesShared { class MONGO_CLIENT_API LockMongoFilesShared {
friend class LockMongoFilesExclusive; friend class LockMongoFilesExclusive;
static RWLockRecursiveNongreedy mmmutex; static RWLockRecursiveNongreedy mmmutex;
static unsigned era; static unsigned era;
RWLockRecursive::Shared lk; RWLockRecursive::Shared lk;
public: public:
LockMongoFilesShared() : lk(mmmutex) { } LockMongoFilesShared() : lk(mmmutex) { }
/** era changes anytime memory maps come and go. thus you can use this as a cheap way to check /** era changes anytime memory maps come and go. thus you can use this as a cheap way to check
if nothing has changed since the last time you locked. Of cour se you must be shared locked if nothing has changed since the last time you locked. Of cour se you must be shared locked
at the time of this call, otherwise someone could be in progres s. at the time of this call, otherwise someone could be in progres s.
This is used for yielding; see PageFaultException::touch(). This is used for yielding; see PageFaultException::touch().
*/ */
static unsigned getEra() { return era; } static unsigned getEra() { return era; }
static void assertExclusivelyLocked() { mmmutex.assertExclusivelyLo cked(); } static void assertExclusivelyLocked() { mmmutex.assertExclusivelyLo cked(); }
static void assertAtLeastReadLocked() { mmmutex.assertAtLeastReadLo cked(); } static void assertAtLeastReadLocked() { mmmutex.assertAtLeastReadLo cked(); }
}; };
class LockMongoFilesExclusive { class MONGO_CLIENT_API LockMongoFilesExclusive {
RWLockRecursive::Exclusive lk; RWLockRecursive::Exclusive lk;
public: public:
LockMongoFilesExclusive() : lk(LockMongoFilesShared::mmmutex) { LockMongoFilesExclusive() : lk(LockMongoFilesShared::mmmutex) {
LockMongoFilesShared::era++; LockMongoFilesShared::era++;
} }
}; };
/* the administrative-ish stuff here */ /* the administrative-ish stuff here */
class MongoFile : boost::noncopyable { class MongoFile : boost::noncopyable {
public: public:
skipping to change at line 112 skipping to change at line 118
static int flushAll( bool sync ); // returns n flushed static int flushAll( bool sync ); // returns n flushed
static long long totalMappedLength(); static long long totalMappedLength();
static void closeAllFiles( std::stringstream &message ); static void closeAllFiles( std::stringstream &message );
virtual bool isDurableMappedFile() { return false; } virtual bool isDurableMappedFile() { return false; }
string filename() const { return _filename; } string filename() const { return _filename; }
void setFilename(const std::string& fn); void setFilename(const std::string& fn);
virtual uint64_t getUniqueId() const = 0;
private: private:
string _filename; string _filename;
static int _flushAll( bool sync ); // returns n flushed static int _flushAll( bool sync ); // returns n flushed
protected: protected:
virtual void close() = 0; virtual void close() = 0;
virtual void flush(bool sync) = 0; virtual void flush(bool sync) = 0;
/** /**
* returns a thread safe object that you can call flush on * returns a thread safe object that you can call flush on
* Flushable has to fail nicely if the underlying object gets kille d * Flushable has to fail nicely if the underlying object gets kille d
*/ */
skipping to change at line 142 skipping to change at line 150
virtual unsigned long long length() const = 0; virtual unsigned long long length() const = 0;
}; };
/** look up a MMF by filename. scoped mutex locking convention. /** look up a MMF by filename. scoped mutex locking convention.
example: example:
MMFFinderByName finder; MMFFinderByName finder;
DurableMappedFile *a = finder.find("file_name_a"); DurableMappedFile *a = finder.find("file_name_a");
DurableMappedFile *b = finder.find("file_name_b"); DurableMappedFile *b = finder.find("file_name_b");
*/ */
class MongoFileFinder : boost::noncopyable { class MONGO_CLIENT_API MongoFileFinder : boost::noncopyable {
public: public:
/** @return The MongoFile object associated with the specified file name. If no file is open /** @return The MongoFile object associated with the specified file name. If no file is open
with the specified name, returns null. with the specified name, returns null.
*/ */
MongoFile* findByPath(const std::string& path) const; MongoFile* findByPath(const std::string& path) const;
private: private:
LockMongoFilesShared _lk; LockMongoFilesShared _lk;
}; };
skipping to change at line 209 skipping to change at line 217
void* createPrivateMap(); void* createPrivateMap();
/** make the private map range writable (necessary for our windows implementation) */ /** make the private map range writable (necessary for our windows implementation) */
static void makeWritable(void *, unsigned len) static void makeWritable(void *, unsigned len)
#if defined(_WIN32) #if defined(_WIN32)
; ;
#else #else
{ } { }
#endif #endif
virtual uint64_t getUniqueId() const { return _uniqueId; }
private: private:
static void updateLength( const char *filename, unsigned long long &length ); static void updateLength( const char *filename, unsigned long long &length );
HANDLE fd; HANDLE fd;
HANDLE maphandle; HANDLE maphandle;
std::vector<void *> views; std::vector<void *> views;
unsigned long long len; unsigned long long len;
const uint64_t _uniqueId;
#ifdef _WIN32 #ifdef _WIN32
boost::shared_ptr<mutex> _flushMutex; boost::shared_ptr<mutex> _flushMutex;
void clearWritableBits(void *privateView); void clearWritableBits(void *privateView);
public: public:
static const unsigned ChunkSize = 64 * 1024 * 1024; static const unsigned ChunkSize = 64 * 1024 * 1024;
static const unsigned NChunks = 1024 * 1024; static const unsigned NChunks = 1024 * 1024;
#else #else
void clearWritableBits(void *privateView) { } void clearWritableBits(void *privateView) { }
#endif #endif
 End of changes. 10 change blocks. 
6 lines changed or deleted 16 lines changed or added


 mock_conn_registry.h   mock_conn_registry.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/dbtests/mock/mock_remote_db_server.h"
#include "mongo/platform/unordered_map.h" #include "mongo/platform/unordered_map.h"
#include "mongo/util/concurrency/mutex.h" #include "mongo/util/concurrency/mutex.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 mock_dbclient_connection.h   mock_dbclient_connection.h 
skipping to change at line 85 skipping to change at line 85
virtual void remove(const string& ns, Query query, int flags = 0); virtual void remove(const string& ns, Query query, int flags = 0);
// //
// Getters // Getters
// //
mongo::ConnectionString::ConnectionType type() const; mongo::ConnectionString::ConnectionType type() const;
bool isFailed() const; bool isFailed() const;
double getSoTimeout() const; double getSoTimeout() const;
std::string getServerAddress() const; std::string getServerAddress() const;
std::string toString(); std::string toString() const;
// //
// Unsupported methods (defined to get rid of virtual function was hidden error) // Unsupported methods (defined to get rid of virtual function was hidden error)
// //
unsigned long long query(boost::function<void(const mongo::BSONObj& )> f, unsigned long long query(boost::function<void(const mongo::BSONObj& )> f,
const std::string& ns, mongo::Query query, const std::string& ns, mongo::Query query,
const mongo::BSONObj* fieldsToReturn = 0, int queryOptions = 0); const mongo::BSONObj* fieldsToReturn = 0, int queryOptions = 0);
unsigned long long query(boost::function<void(mongo::DBClientCursor BatchIterator&)> f, unsigned long long query(boost::function<void(mongo::DBClientCursor BatchIterator&)> f,
const std::string& ns, mongo::Query query, const std::string& ns, mongo::Query query,
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 mock_ns_targeter.h   mock_ns_targeter.h 
skipping to change at line 143 skipping to change at line 143
} }
void noteCouldNotTarget() { void noteCouldNotTarget() {
// No-op // No-op
} }
void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ) { void noteStaleResponse( const ShardEndpoint& endpoint, const BSONOb j& staleInfo ) {
// No-op // No-op
} }
Status refreshIfNeeded() { Status refreshIfNeeded( bool* wasChanged ) {
// No-op // No-op
if ( wasChanged )
*wasChanged = false;
return Status::OK(); return Status::OK();
} }
const std::vector<MockRange*>& getRanges() const { const std::vector<MockRange*>& getRanges() const {
return _mockRanges.vector(); return _mockRanges.vector();
} }
private: private:
KeyRange parseRange( const BSONObj& query ) const { KeyRange parseRange( const BSONObj& query ) const {
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 mock_stage.h   mock_stage.h 
skipping to change at line 63 skipping to change at line 63
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
// These don't really mean anything here. // These don't really mean anything here.
// Some day we could count the # of calls to the yield functions to check that other stages // Some day we could count the # of calls to the yield functions to check that other stages
// have correct yielding behavior. // have correct yielding behavior.
virtual void prepareToYield() { } virtual void prepareToYield() { }
virtual void recoverFromYield() { } virtual void recoverFromYield() { }
virtual void invalidate(const DiskLoc& dl) { } virtual void invalidate(const DiskLoc& dl, InvalidationType type) { }
virtual PlanStageStats* getStats() { return NULL; } virtual PlanStageStats* getStats() { return NULL; }
/** /**
* Add a result to the back of the queue. work() goes through the queue. * Add a result to the back of the queue. work() goes through the queue.
* Either no data is returned (just a state), or... * Either no data is returned (just a state), or...
*/ */
void pushBack(const PlanStage::StageState state); void pushBack(const PlanStage::StageState state);
/** /**
* ...data is returned (and we ADVANCED) * ...data is returned (and we ADVANCED)
*
* Allocates a new member and copies 'member' into it.
* Does not take ownership of anything in 'member'.
*/ */
void pushBack(const WorkingSetMember& member); void pushBack(const WorkingSetMember& member);
private: private:
// We don't own this. // We don't own this.
WorkingSet* _ws; WorkingSet* _ws;
// The data we return. // The data we return.
std::queue<PlanStage::StageState> _results; std::queue<PlanStage::StageState> _results;
std::queue<WorkingSetMember> _members; std::queue<WorkingSetID> _members;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
2 lines changed or deleted 5 lines changed or added


 modifier_add_to_set.h   modifier_add_to_set.h 
skipping to change at line 54 skipping to change at line 54
public: public:
ModifierAddToSet(); ModifierAddToSet();
virtual ~ModifierAddToSet(); virtual ~ModifierAddToSet();
/** Goes over the array item(s) that are going to be set- unioned a nd converts them /** Goes over the array item(s) that are going to be set- unioned a nd converts them
* internally to a mutable bson. Both single and $each forms are s upported. Returns OK * internally to a mutable bson. Both single and $each forms are s upported. Returns OK
* if the item(s) are valid otherwise returns a status describing the error. * if the item(s) are valid otherwise returns a status describing the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** Decides which portion of the array items that are going to be s et-unioned to root's /** Decides which portion of the array items that are going to be s et-unioned to root's
* document and fills in 'execInfo' accordingly. Returns OK if the document has a * document and fills in 'execInfo' accordingly. Returns OK if the document has a
* valid array to set-union to, othwise returns a status describin g the error. * valid array to set-union to, othwise returns a status describin g the error.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
/** Updates the Element used in prepare with the effects of the $ad dToSet operation. */ /** Updates the Element used in prepare with the effects of the $ad dToSet operation. */
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_bit.h   modifier_bit.h 
skipping to change at line 59 skipping to change at line 59
ModifierBit(); ModifierBit();
virtual ~ModifierBit(); virtual ~ModifierBit();
/** /**
* A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $bit mod such as * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $bit mod such as
* {$bit: {<field: { [and|or] : <value>}}. init() extracts the fiel d name, the * {$bit: {<field: { [and|or] : <value>}}. init() extracts the fiel d name, the
* operation subtype, and the value to be assigned to it from 'modE xpr'. It returns OK * operation subtype, and the value to be assigned to it from 'modE xpr'. It returns OK
* if successful or a status describing the error. * if successful or a status describing the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** Validates the potential application of the init'ed mod to the g iven Element and /** Validates the potential application of the init'ed mod to the g iven Element and
* configures the internal state of the mod as necessary. * configures the internal state of the mod as necessary.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
/** Updates the Element used in prepare with the effects of the $bi t operation */ /** Updates the Element used in prepare with the effects of the $bi t operation */
virtual Status apply() const; virtual Status apply() const;
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_compare.h   modifier_compare.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <string> #include <string>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/bson/mutable/element.h" #include "mongo/bson/mutable/element.h"
#include "mongo/db/field_ref.h" #include "mongo/db/field_ref.h"
skipping to change at line 52 skipping to change at line 64
// //
// Modifier interface implementation // Modifier interface implementation
// //
/** /**
* A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
* {$set: {<fieldname: <value>}}. init() extracts the field name an d the value to be * {$set: {<fieldname: <value>}}. init() extracts the field name an d the value to be
* assigned to it from 'modExpr'. It returns OK if successful or a status describing * assigned to it from 'modExpr'. It returns OK if successful or a status describing
* the error. * the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** /**
* Looks up the field name in the sub-tree rooted at 'root', and bi nds, if necessary, * Looks up the field name in the sub-tree rooted at 'root', and bi nds, if necessary,
* the '$' field part using the 'matchedfield' number. prepare() re turns OK and * the '$' field part using the 'matchedfield' number. prepare() re turns OK and
* fills in 'execInfo' with information of whether this mod is a no -op on 'root' and * fills in 'execInfo' with information of whether this mod is a no -op on 'root' and
* whether it is an in-place candidate. Otherwise, returns a status describing the * whether it is an in-place candidate. Otherwise, returns a status describing the
* error. * error.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
 End of changes. 2 change blocks. 
2 lines changed or deleted 22 lines changed or added


 modifier_current_date.h   modifier_current_date.h 
skipping to change at line 59 skipping to change at line 59
virtual ~ModifierCurrentDate(); virtual ~ModifierCurrentDate();
/** /**
* A 'modExpr' is a BSONElement {<fieldname>: <value>} coming * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming
* from a $currentDate mod such as * from a $currentDate mod such as
* {$currentDate: {<fieldname: true/{$type: "date/timestamp"}}. * {$currentDate: {<fieldname: true/{$type: "date/timestamp"}}.
* init() extracts the field name and the value to be * init() extracts the field name and the value to be
* assigned to it from 'modExpr'. It returns OK if successful or a status describing * assigned to it from 'modExpr'. It returns OK if successful or a status describing
* the error. * the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** Evaluates the validity of applying $currentDate. /** Evaluates the validity of applying $currentDate.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
/** Updates the node passed in prepare with the results from prepar e */ /** Updates the node passed in prepare with the results from prepar e */
virtual Status apply() const; virtual Status apply() const;
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_inc.h   modifier_inc.h 
skipping to change at line 69 skipping to change at line 69
ModifierInc(ModifierIncMode mode = MODE_INC); ModifierInc(ModifierIncMode mode = MODE_INC);
virtual ~ModifierInc(); virtual ~ModifierInc();
/** /**
* A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $inc mod such as * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $inc mod such as
* {$inc: {<fieldname: <value>}}. init() extracts the field name an d the value to be * {$inc: {<fieldname: <value>}}. init() extracts the field name an d the value to be
* assigned to it from 'modExpr'. It returns OK if successful or a status describing * assigned to it from 'modExpr'. It returns OK if successful or a status describing
* the error. * the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** Evaluates the validity of applying $inc to the identified node, and computes /** Evaluates the validity of applying $inc to the identified node, and computes
* effects, handling upcasting and overflow as necessary. * effects, handling upcasting and overflow as necessary.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
/** Updates the node passed in prepare with the results of the $inc */ /** Updates the node passed in prepare with the results of the $inc */
virtual Status apply() const; virtual Status apply() const;
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_interface.h   modifier_interface.h 
skipping to change at line 79 skipping to change at line 79
struct Options; struct Options;
/** /**
* Returns OK and extracts the parameters for this given mod from ' modExpr'. For * Returns OK and extracts the parameters for this given mod from ' modExpr'. For
* instance, for a $inc, extracts the increment value. The init() m ethod would be * instance, for a $inc, extracts the increment value. The init() m ethod would be
* called only once per operand, that is, if a { $inc: { a: 1, b: 1 } } is issued, * called only once per operand, that is, if a { $inc: { a: 1, b: 1 } } is issued,
* there would be one instance of the operator working on 'a' and o ne on 'b'. In each * there would be one instance of the operator working on 'a' and o ne on 'b'. In each
* case, init() would be called once with the respective bson eleme nt. * case, init() would be called once with the respective bson eleme nt.
* *
* If 'modExpr' is invalid, returns an error status with a reason d escription. * If 'modExpr' is invalid, returns an error status with a reason d escription.
* *
* The optional bool out parameter 'positional', if provided, will
be set to 'true' if
* the mod requires matched field details to be provided when calli
ng 'prepare'. The
* field is optional since this is a hint to the caller about what
work is needed to
* correctly invoke 'prepare'. It is always legal to provide any ma
tch details
* unconditionally. The value set in 'positional' if any, is only m
eaningful if 'init'
* returns an OK status.
*
* Note: * Note:
* *
* + An operator may assume the modExpr passed here will be uncha nged throughout all * + An operator may assume the modExpr passed here will be uncha nged throughout all
* the mod object lifetime and also that the modExrp's lifetime exceeds the life * the mod object lifetime and also that the modExrp's lifetime exceeds the life
* time of this mod. Therefore, taking references to elements i nside modExpr is * time of this mod. Therefore, taking references to elements i nside modExpr is
* valid. * valid.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
) = 0; ,
bool* positional = NULL) = 0;
/** /**
* Returns OK if it would be correct to apply this mod over the doc ument 'root' (e.g, if * Returns OK if it would be correct to apply this mod over the doc ument 'root' (e.g, if
* we're $inc-ing a field, is that field numeric in the current doc ?). * we're $inc-ing a field, is that field numeric in the current doc ?).
* *
* If the field this mod is targeted to contains a $-positional par ameter, that value * If the field this mod is targeted to contains a $-positional par ameter, that value
* can be bound with 'matchedField', passed by the caller. * can be bound with 'matchedField', passed by the caller.
* *
* In addition, the call also identifies which fields(s) of 'root' the mod is interested * In addition, the call also identifies which fields(s) of 'root' the mod is interested
* in changing (note that the modifier may want to add a field that 's not present in * in changing (note that the modifier may want to add a field that 's not present in
* the document). The call also determines whether it could modify the document in * the document). The call also determines whether it could modify the document in
* place and whether it is a no-op for the given document. All this information is in * place and whether it is a no-op for the given document. All this information is in
* the passed 'execInfo', which is filled inside the call. * the passed 'execInfo', which is filled inside the call.
* *
* If the mod cannot be applied over 'root', returns an error statu s with a reason * If the mod cannot be applied over 'root', returns an error statu s with a reason
* description. * description.
*
* Note that you must provide a meaningful 'matchedField' here, unl
ess 'init' set
* 'positional' to 'false', in which case you may pass an empty Str
ingData object.
*/ */
struct ExecInfo; struct ExecInfo;
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
/* IN-OUT */ ExecInfo* execInfo) = 0; /* IN-OUT */ ExecInfo* execInfo) = 0;
/** /**
* Returns OK and modifies (or adds) an element (or elements) from the 'root' passed on * Returns OK and modifies (or adds) an element (or elements) from the 'root' passed on
* the prepareMod call. This may act on multiple fields but should only be called once * the prepareMod call. This may act on multiple fields but should only be called once
* per operator. * per operator.
 End of changes. 3 change blocks. 
2 lines changed or deleted 20 lines changed or added


 modifier_object_replace.h   modifier_object_replace.h 
skipping to change at line 60 skipping to change at line 60
// //
virtual ~ModifierObjectReplace(); virtual ~ModifierObjectReplace();
/** /**
* Returns true and takes the embedded object contained in 'modExpr ' to be the object * Returns true and takes the embedded object contained in 'modExpr ' to be the object
* we're replacing for. The field name of 'modExpr' is ignored. If 'modExpr' is in an * we're replacing for. The field name of 'modExpr' is ignored. If 'modExpr' is in an
* unexpected format or if it can't be parsed for some reason, retu rns an error status * unexpected format or if it can't be parsed for some reason, retu rns an error status
* describing the error. * describing the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** /**
* Registers the that 'root' is in the document that we want to ful ly replace. * Registers the that 'root' is in the document that we want to ful ly replace.
* prepare() returns OK and always fills 'execInfo' with true for * prepare() returns OK and always fills 'execInfo' with true for
* noOp. * noOp.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_pop.h   modifier_pop.h 
skipping to change at line 60 skipping to change at line 60
virtual ~ModifierPop(); virtual ~ModifierPop();
/** /**
* The format of this modifier ($pop) is {<fieldname>: <value>}. * The format of this modifier ($pop) is {<fieldname>: <value>}.
* If the value is number and greater than -1 then an element is re moved from the bottom, * If the value is number and greater than -1 then an element is re moved from the bottom,
* otherwise the top. Currently the value can be any anything but w e document * otherwise the top. Currently the value can be any anything but w e document
* the use of the numbers "1, -1" only. * the use of the numbers "1, -1" only.
* *
* Ex. $pop: {'a':1} will remove the last item from this array: [1, 2,3] -> [1,2] * Ex. $pop: {'a':1} will remove the last item from this array: [1, 2,3] -> [1,2]
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
virtual Status apply() const; virtual Status apply() const;
virtual Status log(LogBuilder* logBuilder) const; virtual Status log(LogBuilder* logBuilder) const;
private: private:
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_pull.h   modifier_pull.h 
skipping to change at line 50 skipping to change at line 50
class MatchExpression; class MatchExpression;
class ModifierPull : public ModifierInterface { class ModifierPull : public ModifierInterface {
MONGO_DISALLOW_COPYING(ModifierPull); MONGO_DISALLOW_COPYING(ModifierPull);
public: public:
ModifierPull(); ModifierPull();
virtual ~ModifierPull(); virtual ~ModifierPull();
/** Evaluates the array items to be removed and the match expressio n. */ /** Evaluates the array items to be removed and the match expressio n. */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** Decides which portion of the array items will be removed from t he provided element */ /** Decides which portion of the array items will be removed from t he provided element */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
/** Updates the Element used in prepare with the effects of the $pu ll operation. */ /** Updates the Element used in prepare with the effects of the $pu ll operation. */
virtual Status apply() const; virtual Status apply() const;
/** Converts the effects of this $pull into one or more equivalent $unset operations. */ /** Converts the effects of this $pull into one or more equivalent $unset operations. */
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_pull_all.h   modifier_pull_all.h 
skipping to change at line 58 skipping to change at line 58
ModifierPullAll(); ModifierPullAll();
virtual ~ModifierPullAll(); virtual ~ModifierPullAll();
/** /**
* The modifier $pullAll takes an array of values to match literall y, and remove * The modifier $pullAll takes an array of values to match literall y, and remove
* *
* Ex. {$pullAll : {<field> : [<values>]}} * Ex. {$pullAll : {<field> : [<values>]}}
* {$pullAll :{ array : [1,2] } } will transform {array: [1,2,3]} - > {array: [3]} * {$pullAll :{ array : [1,2] } } will transform {array: [1,2,3]} - > {array: [3]}
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
virtual Status apply() const; virtual Status apply() const;
virtual Status log(LogBuilder* logBuilder) const; virtual Status log(LogBuilder* logBuilder) const;
private: private:
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_push.h   modifier_push.h 
skipping to change at line 74 skipping to change at line 74
* *
* There are currently a few restrictions concerning the clauses (b ut all can be * There are currently a few restrictions concerning the clauses (b ut all can be
* lifted): * lifted):
* + $slice can be negative only (ie, slicing from the recent end ) * + $slice can be negative only (ie, slicing from the recent end )
* + $sort requires $slice to be present * + $sort requires $slice to be present
* + $sort can only sort objects (as opposed to basic types), so it only takes * + $sort can only sort objects (as opposed to basic types), so it only takes
* object as patterns * object as patterns
* + Because of the previous, $sort requires that the array being pushed to be made * + Because of the previous, $sort requires that the array being pushed to be made
* of objects * of objects
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** /**
* Locates the array to be pushed into in the 'root', if it exists, and fills in * Locates the array to be pushed into in the 'root', if it exists, and fills in
* execInfo accordingly. Returns true if $push would succeed in 'ro ot', otherwise * execInfo accordingly. Returns true if $push would succeed in 'ro ot', otherwise
* return a status describing the error. * return a status describing the error.
* *
* Note that a $push is never in-place. The cost of checking if it is a no-op makes it * Note that a $push is never in-place. The cost of checking if it is a no-op makes it
* so that we don't do such check either. As such, execInfo is alwa ys filled with * so that we don't do such check either. As such, execInfo is alwa ys filled with
* 'false' for those two options. * 'false' for those two options.
*/ */
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_rename.h   modifier_rename.h 
skipping to change at line 62 skipping to change at line 62
MONGO_DISALLOW_COPYING(ModifierRename); MONGO_DISALLOW_COPYING(ModifierRename);
public: public:
ModifierRename(); ModifierRename();
virtual ~ModifierRename(); virtual ~ModifierRename();
/** /**
* We will check that the to/from are valid paths; in prepare more validation is done * We will check that the to/from are valid paths; in prepare more validation is done
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** /**
* In prepare we will ensure that all restrictions are met: * In prepare we will ensure that all restrictions are met:
* -- The 'from' field exists, and is valid, else it is a no-op * -- The 'from' field exists, and is valid, else it is a no-op
* -- The 'to' field is valid as a destination * -- The 'to' field is valid as a destination
* -- The 'to' field is not on the path (or the same path) as the 'from' field * -- The 'to' field is not on the path (or the same path) as the 'from' field
* -- Neither 'to' nor 'from' have an array ancestor * -- Neither 'to' nor 'from' have an array ancestor
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_set.h   modifier_set.h 
skipping to change at line 64 skipping to change at line 64
// //
virtual ~ModifierSet(); virtual ~ModifierSet();
/** /**
* A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
* {$set: {<fieldname: <value>}}. init() extracts the field name an d the value to be * {$set: {<fieldname: <value>}}. init() extracts the field name an d the value to be
* assigned to it from 'modExpr'. It returns OK if successful or a status describing * assigned to it from 'modExpr'. It returns OK if successful or a status describing
* the error. * the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** /**
* Looks up the field name in the sub-tree rooted at 'root', and bi nds, if necessary, * Looks up the field name in the sub-tree rooted at 'root', and bi nds, if necessary,
* the '$' field part using the 'matchedfield' number. prepare() re turns OK and * the '$' field part using the 'matchedfield' number. prepare() re turns OK and
* fills in 'execInfo' with information of whether this mod is a no -op on 'root' and * fills in 'execInfo' with information of whether this mod is a no -op on 'root' and
* whether it is an in-place candidate. Otherwise, returns a status describing the * whether it is an in-place candidate. Otherwise, returns a status describing the
* error. * error.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 modifier_unset.h   modifier_unset.h 
skipping to change at line 63 skipping to change at line 63
// //
virtual ~ModifierUnset(); virtual ~ModifierUnset();
/** /**
* A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as * A 'modExpr' is a BSONElement {<fieldname>: <value>} coming from a $set mod such as
* {$unset: {<fieldname: <value>}}. init() extracts the field name and the value to be * {$unset: {<fieldname: <value>}}. init() extracts the field name and the value to be
* assigned to it from 'modExpr'. It returns OK if successful or a status describing * assigned to it from 'modExpr'. It returns OK if successful or a status describing
* the error. * the error.
*/ */
virtual Status init(const BSONElement& modExpr, const Options& opts virtual Status init(const BSONElement& modExpr, const Options& opts
); ,
bool* positional = NULL);
/** /**
* Locates the field to be removed under the 'root' element, if it exist, and fills in * Locates the field to be removed under the 'root' element, if it exist, and fills in
* 'execInfo' accordingly. Return OK if successful or a status desc ribing the error. * 'execInfo' accordingly. Return OK if successful or a status desc ribing the error.
*/ */
virtual Status prepare(mutablebson::Element root, virtual Status prepare(mutablebson::Element root,
const StringData& matchedField, const StringData& matchedField,
ExecInfo* execInfo); ExecInfo* execInfo);
/** /**
 End of changes. 1 change blocks. 
2 lines changed or deleted 3 lines changed or added


 mongobridge_options.h   mongobridge_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 mongodump_options.h   mongodump_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
namespace mongo { namespace mongo {
struct MongoDumpGlobalParams { struct MongoDumpGlobalParams {
std::string outputFile; std::string outputDirectory;
std::string query; std::string query;
bool useOplog; bool useOplog;
bool repair; bool repair;
bool snapShotQuery; bool snapShotQuery;
bool dumpUsersAndRoles;
}; };
extern MongoDumpGlobalParams mongoDumpGlobalParams; extern MongoDumpGlobalParams mongoDumpGlobalParams;
Status addMongoDumpOptions(moe::OptionSection* options); Status addMongoDumpOptions(moe::OptionSection* options);
void printMongoDumpHelp(std::ostream* out); void printMongoDumpHelp(std::ostream* out);
/** /**
* Handle options that should come before validation, such as "help". * Handle options that should come before validation, such as "help".
 End of changes. 3 change blocks. 
1 lines changed or deleted 21 lines changed or added


 mongoexport_options.h   mongoexport_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
skipping to change at line 38 skipping to change at line 50
struct MongoExportGlobalParams { struct MongoExportGlobalParams {
std::string query; std::string query;
bool csv; bool csv;
std::string outputFile; std::string outputFile;
bool outputFileSpecified; bool outputFileSpecified;
bool jsonArray; bool jsonArray;
bool slaveOk; bool slaveOk;
bool snapShotQuery; bool snapShotQuery;
unsigned int skip; unsigned int skip;
unsigned int limit; unsigned int limit;
std::string sort;
}; };
extern MongoExportGlobalParams mongoExportGlobalParams; extern MongoExportGlobalParams mongoExportGlobalParams;
Status addMongoExportOptions(moe::OptionSection* options); Status addMongoExportOptions(moe::OptionSection* options);
void printMongoExportHelp(std::ostream* out); void printMongoExportHelp(std::ostream* out);
/** /**
* Handle options that should come before validation, such as "help". * Handle options that should come before validation, such as "help".
 End of changes. 2 change blocks. 
0 lines changed or deleted 20 lines changed or added


 mongofiles_options.h   mongofiles_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 mongoimport_options.h   mongoimport_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 mongooplog_options.h   mongooplog_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 mongorestore_options.h   mongorestore_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
namespace mongo { namespace mongo {
struct MongoRestoreGlobalParams { struct MongoRestoreGlobalParams {
bool drop; bool drop;
bool oplogReplay; bool oplogReplay;
std::string oplogLimit; std::string oplogLimit;
bool keepIndexVersion; bool keepIndexVersion;
bool restoreOptions; bool restoreOptions;
bool restoreIndexes; bool restoreIndexes;
bool restoreUsersAndRoles;
int w; int w;
std::string restoreDirectory; std::string restoreDirectory;
}; };
extern MongoRestoreGlobalParams mongoRestoreGlobalParams; extern MongoRestoreGlobalParams mongoRestoreGlobalParams;
Status addMongoRestoreOptions(moe::OptionSection* options); Status addMongoRestoreOptions(moe::OptionSection* options);
void printMongoRestoreHelp(std::ostream* out); void printMongoRestoreHelp(std::ostream* out);
 End of changes. 2 change blocks. 
0 lines changed or deleted 20 lines changed or added


 mongos_options.h   mongos_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/server_options.h" #include "mongo/db/server_options.h"
#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/environment.h"
#include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/option_section.h"
namespace mongo { namespace mongo {
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 mongostat_options.h   mongostat_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 mongotop_options.h   mongotop_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/tools/tool_options.h" #include "mongo/tools/tool_options.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 multi_plan_runner.h   multi_plan_runner.h 
skipping to change at line 50 skipping to change at line 50
namespace mongo { namespace mongo {
class BSONObj; class BSONObj;
class CanonicalQuery; class CanonicalQuery;
class DiskLoc; class DiskLoc;
class PlanExecutor; class PlanExecutor;
class PlanStage; class PlanStage;
struct QuerySolution; struct QuerySolution;
class TypeExplain; class TypeExplain;
struct PlanInfo;
class WorkingSet; class WorkingSet;
/** /**
* Runs several plans in parallel and picks the best one. Caches the s election for future use. * Runs several plans in parallel and picks the best one. Caches the s election for future use.
*/ */
class MultiPlanRunner : public Runner { class MultiPlanRunner : public Runner {
public: public:
/** /**
* Takes ownership of query. * Takes ownership of query.
*/ */
MultiPlanRunner(CanonicalQuery* query); MultiPlanRunner(const Collection* collection, CanonicalQuery* query );
virtual ~MultiPlanRunner(); virtual ~MultiPlanRunner();
/** /**
* Takes ownership of all arguments * Takes ownership of all arguments
*/ */
void addPlan(QuerySolution* solution, PlanStage* root, WorkingSet* ws); void addPlan(QuerySolution* solution, PlanStage* root, WorkingSet* ws);
/** /**
* Get the next result. Yielding is handled internally. If a best plan is not picked when * Get the next result. Yielding is handled internally. If a best plan is not picked when
* this is called, we call pickBestPlan() internally. * this is called, we call pickBestPlan() internally.
skipping to change at line 88 skipping to change at line 89
* All further calls to getNext(...) will return results from the b est plan. * All further calls to getNext(...) will return results from the b est plan.
* *
* Returns true if a best plan was picked, false if there was an er ror. * Returns true if a best plan was picked, false if there was an er ror.
* *
* If out is not-NULL, set *out to the index of the picked plan. * If out is not-NULL, set *out to the index of the picked plan.
*/ */
bool pickBestPlan(size_t* out); bool pickBestPlan(size_t* out);
virtual void saveState(); virtual void saveState();
virtual bool restoreState(); virtual bool restoreState();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual void setYieldPolicy(Runner::YieldPolicy policy); virtual void setYieldPolicy(Runner::YieldPolicy policy);
virtual const std::string& ns(); virtual const std::string& ns();
virtual void kill(); virtual void kill();
virtual const Collection* collection() { return _collection; }
/** /**
* Returns OK, allocating and filling in '*explain' with details of * Returns OK, allocating and filling in '*explain' and '*planInfo'
the "winner" with details of
* plan. Caller takes ownership of '*explain'. Otherwise, return a * the "winner" plan. Caller takes ownership of '*explain' and '*pl
status describing anInfo'. Otherwise,
* the error. * return a status describing the error.
* *
* TOOD: fill in the explain of all candidate plans * TOOD: fill in the explain of all candidate plans
*/ */
virtual Status getExplainPlan(TypeExplain** explain) const; virtual Status getInfo(TypeExplain** explain,
PlanInfo** planInfo) const;
private: private:
/** /**
* Have all our candidate plans do something. * Have all our candidate plans do something.
*/ */
bool workAllPlans(); bool workAllPlans();
void allPlansSaveState(); void allPlansSaveState();
void allPlansRestoreState(); void allPlansRestoreState();
const Collection* _collection;
// Were we killed by an invalidate? // Were we killed by an invalidate?
bool _killed; bool _killed;
// Did all plans fail while we were running them? Note that one pl an can fail // Did all plans fail while we were running them? Note that one pl an can fail
// during normal execution of the plan competition. Here is an exa mple: // during normal execution of the plan competition. Here is an exa mple:
// //
// Plan 1: collection scan with sort. Sort runs out of memory. // Plan 1: collection scan with sort. Sort runs out of memory.
// Plan 2: ixscan that provides sort. Won't run out of memory. // Plan 2: ixscan that provides sort. Won't run out of memory.
// //
// We want to choose plan 2 even if plan 1 fails. // We want to choose plan 2 even if plan 1 fails.
 End of changes. 7 change blocks. 
8 lines changed or deleted 14 lines changed or added


 mutex.h   mutex.h 
skipping to change at line 142 skipping to change at line 142
typedef mongo::mutex::scoped_lock scoped_lock; typedef mongo::mutex::scoped_lock scoped_lock;
/** The concept with SimpleMutex is that it is a basic lock/unlock with no /** The concept with SimpleMutex is that it is a basic lock/unlock with no
special functionality (such as try and try timeout). Thus it can be special functionality (such as try and try timeout). Thus it can be
implemented using OS-specific facilities in all environments (if desired). implemented using OS-specific facilities in all environments (if desired).
On Windows, the implementation below is faster than boost mutex. On Windows, the implementation below is faster than boost mutex.
*/ */
#if defined(_WIN32) #if defined(_WIN32)
class SimpleMutex : boost::noncopyable { class SimpleMutex : boost::noncopyable {
public: public:
SimpleMutex( const char * ) { InitializeCriticalSection( &_cs ); } SimpleMutex( const StringData& ) { InitializeCriticalSection( &_cs ); }
void dassertLocked() const { } void dassertLocked() const { }
void lock() { EnterCriticalSection( &_cs ); } void lock() { EnterCriticalSection( &_cs ); }
void unlock() { LeaveCriticalSection( &_cs ); } void unlock() { LeaveCriticalSection( &_cs ); }
class scoped_lock { class scoped_lock {
SimpleMutex& _m; SimpleMutex& _m;
public: public:
scoped_lock( SimpleMutex &m ) : _m(m) { _m.lock(); } scoped_lock( SimpleMutex &m ) : _m(m) { _m.lock(); }
~scoped_lock() { _m.unlock(); } ~scoped_lock() { _m.unlock(); }
const SimpleMutex& m() const { return _m; } const SimpleMutex& m() const { return _m; }
}; };
private: private:
CRITICAL_SECTION _cs; CRITICAL_SECTION _cs;
}; };
#else #else
class SimpleMutex : boost::noncopyable { class SimpleMutex : boost::noncopyable {
public: public:
void dassertLocked() const { } void dassertLocked() const { }
SimpleMutex(const char* name) { verify( pthread_mutex_init(&_lock,0 ) == 0 ); } SimpleMutex(const StringData& name) { verify( pthread_mutex_init(&_ lock,0) == 0 ); }
~SimpleMutex(){ ~SimpleMutex(){
if ( ! StaticObserver::_destroyingStatics ) { if ( ! StaticObserver::_destroyingStatics ) {
verify( pthread_mutex_destroy(&_lock) == 0 ); verify( pthread_mutex_destroy(&_lock) == 0 );
} }
} }
void lock() { verify( pthread_mutex_lock(&_lock) == 0 ); } void lock() { verify( pthread_mutex_lock(&_lock) == 0 ); }
void unlock() { verify( pthread_mutex_unlock(&_lock) == 0 ); } void unlock() { verify( pthread_mutex_unlock(&_lock) == 0 ); }
public: public:
class scoped_lock : boost::noncopyable { class scoped_lock : boost::noncopyable {
skipping to change at line 189 skipping to change at line 189
private: private:
pthread_mutex_t _lock; pthread_mutex_t _lock;
}; };
#endif #endif
/** This can be used instead of boost recursive mutex. The advantage is the _DEBUG checks /** This can be used instead of boost recursive mutex. The advantage is the _DEBUG checks
* and ability to assertLocked(). This has not yet been tested for spe ed vs. the boost one. * and ability to assertLocked(). This has not yet been tested for spe ed vs. the boost one.
*/ */
class RecursiveMutex : boost::noncopyable { class RecursiveMutex : boost::noncopyable {
public: public:
RecursiveMutex(const char* name) : m(name) { } RecursiveMutex(const StringData& name) : m(name) { }
bool isLocked() const { return n.get() > 0; } bool isLocked() const { return n.get() > 0; }
class scoped_lock : boost::noncopyable { class scoped_lock : boost::noncopyable {
RecursiveMutex& rm; RecursiveMutex& rm;
int& nLocksByMe; int& nLocksByMe;
public: public:
scoped_lock( RecursiveMutex &m ) : rm(m), nLocksByMe(rm.n.getRe f()) { scoped_lock( RecursiveMutex &m ) : rm(m), nLocksByMe(rm.n.getRe f()) {
if( nLocksByMe++ == 0 ) if( nLocksByMe++ == 0 )
rm.m.lock(); rm.m.lock();
} }
~scoped_lock() { ~scoped_lock() {
 End of changes. 3 change blocks. 
3 lines changed or deleted 3 lines changed or added


 namespace-inl.h   namespace-inl.h 
skipping to change at line 44 skipping to change at line 44
inline Namespace& Namespace::operator=(const StringData& ns) { inline Namespace& Namespace::operator=(const StringData& ns) {
// we fill the remaining space with all zeroes here. as the full N amespace struct is in // we fill the remaining space with all zeroes here. as the full N amespace struct is in
// the datafiles (the .ns files specifically), that is helpful as t hen they are deterministic // the datafiles (the .ns files specifically), that is helpful as t hen they are deterministic
// in the bytes they have for a given sequence of operations. that makes testing and debugging // in the bytes they have for a given sequence of operations. that makes testing and debugging
// the data files easier. // the data files easier.
// //
// if profiling indicates this method is a significant bottleneck, we could have a version we // if profiling indicates this method is a significant bottleneck, we could have a version we
// use for reads which does not fill with zeroes, and keep the zero ing behavior on writes. // use for reads which does not fill with zeroes, and keep the zero ing behavior on writes.
// //
memset( buf, 0, MaxNsLen ); memset( buf, 0, sizeof(buf) );
uassert( 10080 , "ns name too long, max size is 128", ns.size() < M uassert( 10080 , "ns name too long, max size is 127 bytes", ns.size
axNsLen - 1); () <= MaxNsLen);
uassert( 17380 , "ns name can't contain embedded '\0' byte", ns.fin
d('\0') == string::npos);
ns.copyTo( buf, true ); ns.copyTo( buf, true );
return *this; return *this;
} }
inline string Namespace::extraName(int i) const { inline string Namespace::extraName(int i) const {
char ex[] = "$extra"; char ex[] = "$extra";
ex[5] += i; ex[5] += i;
string s = string(buf) + ex; string s = string(buf) + ex;
massert( 10348 , "$extra: ns name too long", s.size() < MaxNsLen); massert( 10348 , "$extra: ns name too long", s.size() <= MaxNsLen);
return s; return s;
} }
inline bool Namespace::isExtra() const { inline bool Namespace::isExtra() const {
const char *p = strstr(buf, "$extr"); const char *p = strstr(buf, "$extr");
return p && p[5] && p[6] == 0; //==0 important in case an index use s name "$extra_1" for example return p && p[5] && p[6] == 0; //==0 important in case an index use s name "$extra_1" for example
} }
inline int Namespace::hash() const { inline int Namespace::hash() const {
unsigned x = 0; unsigned x = 0;
 End of changes. 2 change blocks. 
4 lines changed or deleted 6 lines changed or added


 namespace.h   namespace.h 
skipping to change at line 73 skipping to change at line 73
std::string toString() const { return buf; } std::string toString() const { return buf; }
operator std::string() const { return buf; } operator std::string() const { return buf; }
/* NamespaceDetails::Extra was added after fact to allow chaining o f data blocks to support more than 10 indexes /* NamespaceDetails::Extra was added after fact to allow chaining o f data blocks to support more than 10 indexes
(more than 10 IndexDetails). It's a bit hacky because of this l ate addition with backward (more than 10 IndexDetails). It's a bit hacky because of this l ate addition with backward
file support. */ file support. */
std::string extraName(int i) const; std::string extraName(int i) const;
bool isExtra() const; /* ends with $extr... -- when true an extra b lock not a normal NamespaceDetails block */ bool isExtra() const; /* ends with $extr... -- when true an extra b lock not a normal NamespaceDetails block */
enum MaxNsLenValue { MaxNsLen = 128 }; enum MaxNsLenValue {
// Maximum possible length of name any namespace, including spe
cial ones like $extra.
// This includes rum for the NUL byte so it can be used when si
zing buffers.
MaxNsLenWithNUL = 128,
// MaxNsLenWithNUL excluding the NUL byte. Use this when compar
ing string lengths.
MaxNsLen = MaxNsLenWithNUL - 1,
// Maximum allowed length of fully qualified namespace name of
any real collection.
// Does not include NUL so it can be directly compared to strin
g lengths.
MaxNsColletionLen = MaxNsLen - 7/*strlen(".$extra")*/,
};
private: private:
char buf[MaxNsLen]; char buf[MaxNsLenWithNUL];
}; };
#pragma pack() #pragma pack()
} // namespace mongo } // namespace mongo
#include "mongo/db/catalog/ondisk/namespace-inl.h" #include "mongo/db/structure/catalog/namespace-inl.h"
 End of changes. 3 change blocks. 
2 lines changed or deleted 18 lines changed or added


 namespace_details-inl.h   namespace_details-inl.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/namespace_details.h" #include "mongo/db/structure/catalog/namespace_details.h"
namespace mongo { namespace mongo {
inline IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpec
ted ) {
if( idxNo < NIndexesBase ) {
IndexDetails& id = _indexes[idxNo];
return id;
}
Extra *e = extra();
if ( ! e ) {
if ( missingExpected )
throw MsgAssertionException( 13283 , "Missing Extra" );
massert(14045, "missing Extra", e);
}
int i = idxNo - NIndexesBase;
if( i >= NIndexesExtra ) {
e = e->next(this);
if ( ! e ) {
if ( missingExpected )
throw MsgAssertionException( 14823 , "missing extra" );
massert(14824, "missing Extra", e);
}
i -= NIndexesExtra;
}
return e->details[i];
}
inline int NamespaceDetails::idxNo(const IndexDetails& idx) {
IndexIterator i = ii();
while( i.more() ) {
if( &i.next() == &idx )
return i.pos()-1;
}
massert( 10349 , "E12000 idxNo fails", false);
return -1;
}
inline int NamespaceDetails::findIndexByKeyPattern(const BSONObj& keyPa
ttern,
bool includeBackgrou
ndInProgress) {
IndexIterator i = ii(includeBackgroundInProgress);
while( i.more() ) {
if( i.next().keyPattern() == keyPattern )
return i.pos()-1;
}
return -1;
}
inline const IndexDetails* NamespaceDetails::findIndexByPrefix( const B
SONObj &keyPattern ,
bool re
quireSingleKey ) {
const IndexDetails* bestMultiKeyIndex = NULL;
IndexIterator i = ii();
while( i.more() ) {
const IndexDetails& currentIndex = i.next();
if( keyPattern.isPrefixOf( currentIndex.keyPattern() ) ){
if( ! isMultikey( i.pos()-1 ) ){
return &currentIndex;
} else {
bestMultiKeyIndex = &currentIndex;
}
}
}
return requireSingleKey ? NULL : bestMultiKeyIndex;
}
// @return offset in indexes[]
inline int NamespaceDetails::findIndexByName(const StringData& name,
bool includeBackgroundInPr
ogress) {
IndexIterator i = ii(includeBackgroundInProgress);
while( i.more() ) {
if ( name == i.next().info.obj().getStringField("name") )
return i.pos()-1;
}
return -1;
}
inline NamespaceDetails::IndexIterator::IndexIterator(NamespaceDetails *_d, inline NamespaceDetails::IndexIterator::IndexIterator(NamespaceDetails *_d,
bool includeBackg roundInProgress) { bool includeBackg roundInProgress) {
d = _d; d = _d;
i = 0; i = 0;
n = includeBackgroundInProgress ? d->getTotalIndexCount() : d->_nIn dexes; n = includeBackgroundInProgress ? d->getTotalIndexCount() : d->_nIn dexes;
} }
} }
 End of changes. 2 change blocks. 
79 lines changed or deleted 1 lines changed or added


 namespace_details.h   namespace_details.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "mongo/db/d_concurrency.h" #include "mongo/db/d_concurrency.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/storage/index_details.h" #include "mongo/db/structure/catalog/index_details.h"
#include "mongo/db/index_names.h" #include "mongo/db/index_names.h"
#include "mongo/db/index_set.h" #include "mongo/db/index_set.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/storage/durable_mapped_file.h" #include "mongo/db/storage/durable_mapped_file.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/db/querypattern.h" #include "mongo/db/structure/catalog/namespace.h"
#include "mongo/db/catalog/ondisk/namespace.h" #include "mongo/db/structure/catalog/namespace_index.h"
#include "mongo/db/catalog/ondisk/namespace_index.h"
#include "mongo/platform/unordered_map.h" #include "mongo/platform/unordered_map.h"
namespace mongo { namespace mongo {
class Collection;
class IndexCatalogEntry;
class Database; class Database;
class IndexCatalog; class IndexCatalog;
/** @return true if a client can modify this namespace even though it i s under ".system." /** @return true if a client can modify this namespace even though it i s under ".system."
For example <dbname>.system.users is ok for regular clients to upda te. For example <dbname>.system.users is ok for regular clients to upda te.
@param write used when .system.js @param write used when .system.js
*/ */
bool legalClientSystemNS( const StringData& ns , bool write ); bool legalClientSystemNS( const StringData& ns , bool write );
/* deleted lists -- linked lists of deleted records -- are placed in 'b uckets' of various sizes /* deleted lists -- linked lists of deleted records -- are placed in 'b uckets' of various sizes
skipping to change at line 160 skipping to change at line 162
} }
}; };
Extra* extra() { Extra* extra() {
if( _extraOffset == 0 ) return 0; if( _extraOffset == 0 ) return 0;
return (Extra *) (((char *) this) + _extraOffset); return (Extra *) (((char *) this) + _extraOffset);
} }
/* add extra space for indexes when more than 10 */ /* add extra space for indexes when more than 10 */
Extra* allocExtra(const char *ns, int nindexessofar); Extra* allocExtra(const char *ns, int nindexessofar);
void copyingFrom(const char *thisns, NamespaceDetails *src); // mus t be called when renaming a NS to fix up extra void copyingFrom(const char *thisns, NamespaceDetails *src); // mus t be called when renaming a NS to fix up extra
/* dump info on this namespace. for debugging. */
void dump(const Namespace& k);
/* dump info on all extents for this namespace. for debugging. */
void dumpExtents();
public: public:
const DiskLoc& capExtent() const { return _capExtent; } const DiskLoc& capExtent() const { return _capExtent; }
const DiskLoc capFirstNewRecord() const { return _capFirstNewRecord ; } const DiskLoc capFirstNewRecord() const { return _capFirstNewRecord ; }
DiskLoc& capExtent() { return _capExtent; } DiskLoc& capExtent() { return _capExtent; }
DiskLoc& capFirstNewRecord() { return _capFirstNewRecord; } DiskLoc& capFirstNewRecord() { return _capFirstNewRecord; }
private: private:
Extent *theCapExtent() const { return _capExtent.ext(); } Extent *theCapExtent() const { return _capExtent.ext(); }
void advanceCapExtent( const StringData& ns ); void advanceCapExtent( const StringData& ns );
DiskLoc __capAlloc(int len); DiskLoc __capAlloc(int len);
DiskLoc cappedAlloc(const StringData& ns, int len); DiskLoc cappedAlloc(Collection* collection, const StringData& ns, i nt len);
DiskLoc &cappedFirstDeletedInCurExtent(); DiskLoc &cappedFirstDeletedInCurExtent();
bool nextIsInCapExtent( const DiskLoc &dl ) const; bool nextIsInCapExtent( const DiskLoc &dl ) const;
public: public:
const DiskLoc& firstExtent() const { return _firstExtent; } const DiskLoc& firstExtent() const { return _firstExtent; }
const DiskLoc& lastExtent() const { return _lastExtent; } const DiskLoc& lastExtent() const { return _lastExtent; }
DiskLoc& firstExtent() { return _firstExtent; } DiskLoc& firstExtent() { return _firstExtent; }
DiskLoc& lastExtent() { return _lastExtent; } DiskLoc& lastExtent() { return _lastExtent; }
skipping to change at line 224 skipping to change at line 220
void orphanDeletedList(); void orphanDeletedList();
/** /**
* @param max in and out, will be adjusted * @param max in and out, will be adjusted
* @return if the value is valid at all * @return if the value is valid at all
*/ */
static bool validMaxCappedDocs( long long* max ); static bool validMaxCappedDocs( long long* max );
DiskLoc& cappedListOfAllDeletedRecords() { return _deletedList[0]; } DiskLoc& cappedListOfAllDeletedRecords() { return _deletedList[0]; }
DiskLoc& cappedLastDelRecLastExtent() { return _deletedList[1]; } DiskLoc& cappedLastDelRecLastExtent() { return _deletedList[1]; }
void cappedDumpDelInfo();
bool capLooped() const { return _isCapped && _capFirstNewRecord.isV alid(); } bool capLooped() const { return _isCapped && _capFirstNewRecord.isV alid(); }
bool inCapExtent( const DiskLoc &dl ) const; bool inCapExtent( const DiskLoc &dl ) const;
void cappedCheckMigrate(); void cappedCheckMigrate();
/** /**
* Truncate documents newer than the document at 'end' from the cap ped * Truncate documents newer than the document at 'end' from the cap ped
* collection. The collection cannot be completely emptied using t his * collection. The collection cannot be completely emptied using t his
* function. An assertion will be thrown if that is attempted. * function. An assertion will be thrown if that is attempted.
* @param inclusive - Truncate 'end' as well iff true * @param inclusive - Truncate 'end' as well iff true
*/ */
void cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusiv e); void cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusiv e);
skipping to change at line 276 skipping to change at line 271
friend class NamespaceDetails; friend class NamespaceDetails;
int i, n; int i, n;
NamespaceDetails *d; NamespaceDetails *d;
IndexIterator(NamespaceDetails *_d, bool includeBackgroundInPro gress); IndexIterator(NamespaceDetails *_d, bool includeBackgroundInPro gress);
}; };
IndexIterator ii( bool includeBackgroundInProgress = false ) { IndexIterator ii( bool includeBackgroundInProgress = false ) {
return IndexIterator(this, includeBackgroundInProgress); return IndexIterator(this, includeBackgroundInProgress);
} }
/* hackish - find our index # in the indexes array */
int idxNo(const IndexDetails& idx);
/* multikey indexes are indexes where there are more than one key i n the index /* multikey indexes are indexes where there are more than one key i n the index
for a single document. see multikey in docs. for a single document. see multikey in docs.
for these, we have to do some dedup work on queries. for these, we have to do some dedup work on queries.
*/ */
bool isMultikey(int i) const { return (_multiKeyIndexBits & (((unsi gned long long) 1) << i)) != 0; } bool isMultikey(int i) const { return (_multiKeyIndexBits & (((unsi gned long long) 1) << i)) != 0; }
/** /**
* @return - if any state was changed * @return - if any state was changed
*/ */
bool setIndexIsMultikey(int i, bool multikey = true); bool setIndexIsMultikey(int i, bool multikey = true);
/** /**
* This fetches the IndexDetails for the next empty index slot. The caller must populate * This fetches the IndexDetails for the next empty index slot. The caller must populate
* returned object. This handles allocating extra index space, if necessary. * returned object. This handles allocating extra index space, if necessary.
*/ */
IndexDetails& getNextIndexDetails(const char* thisns); IndexDetails& getNextIndexDetails(const char* thisns);
/* returns index of the first index in which the field is present.
-1 if not present. */
int fieldIsIndexed(const char *fieldName);
/** /**
* @return the actual size to create * @return the actual size to create
* will be >= oldRecordSize * will be >= oldRecordSize
* based on padding and any other flags * based on padding and any other flags
*/ */
int getRecordAllocationSize( int minRecordSize ); int getRecordAllocationSize( int minRecordSize );
double paddingFactor() const { return _paddingFactor; } double paddingFactor() const { return _paddingFactor; }
void setPaddingFactor( double paddingFactor ) { void setPaddingFactor( double paddingFactor );
*getDur().writing(&_paddingFactor) = paddingFactor;
}
/* called to indicate that an update fit in place. /* called to indicate that an update fit in place.
fits also called on an insert -- idea there is that if you had s ome mix and then went to fits also called on an insert -- idea there is that if you had s ome mix and then went to
pure inserts it would adapt and PF would trend to 1.0. note upd ate calls insert on a move pure inserts it would adapt and PF would trend to 1.0. note upd ate calls insert on a move
so there is a double count there that must be adjusted for below . so there is a double count there that must be adjusted for below .
todo: greater sophistication could be helpful and added later. for example the absolute todo: greater sophistication could be helpful and added later. for example the absolute
size of documents might be considered -- in some cases sma ller ones are more likely size of documents might be considered -- in some cases sma ller ones are more likely
to grow than larger ones in the same collection? (not alwa ys) to grow than larger ones in the same collection? (not alwa ys)
*/ */
void paddingFits() { void paddingFits() {
MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis t o journal less MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis t o journal less
double x = _paddingFactor - 0.001; double x = max(1.0, _paddingFactor - 0.001 );
if ( x >= 1.0 ) { setPaddingFactor( x );
setPaddingFactor( x );
}
} }
} }
void paddingTooSmall() { void paddingTooSmall() {
MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis t o journal less MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis t o journal less
/* the more indexes we have, the higher the cost of a move. so we take that into /* the more indexes we have, the higher the cost of a move. so we take that into
account herein. note on a move that insert() calls padd ingFits(), thus account herein. note on a move that insert() calls padd ingFits(), thus
here for example with no inserts and nIndexes = 1 we hav e here for example with no inserts and nIndexes = 1 we hav e
.001*4-.001 or a 3:1 ratio to non moves -> 75% nonmoves. insert heavy .001*4-.001 or a 3:1 ratio to non moves -> 75% nonmoves. insert heavy
can pushes this down considerably. further tweaking will be a good idea but can pushes this down considerably. further tweaking will be a good idea but
this should be an adequate starting point. this should be an adequate starting point.
*/ */
double N = min(_nIndexes,7) + 3; double N = min(_nIndexes,7) + 3;
double x = _paddingFactor + (0.001 * N); double x = min(2.0,_paddingFactor + (0.001 * N));
if ( x <= 2.0 ) { setPaddingFactor( x );
setPaddingFactor( x );
}
}
}
// @return offset in indexes[]
int findIndexByName(const StringData& name, bool includeBackgroundI
nProgress = false);
// @return offset in indexes[]
int findIndexByKeyPattern(const BSONObj& keyPattern,
bool includeBackgroundInProgress = false)
;
void findIndexByType( const string& name , vector<int>& matches ) {
IndexIterator i = ii();
while ( i.more() ) {
if ( IndexNames::findPluginName(i.next().keyPattern()) == n
ame )
matches.push_back( i.pos() - 1 );
} }
} }
/* Returns the index entry for the first index whose prefix contain
s
* 'keyPattern'. If 'requireSingleKey' is true, skip indices that c
ontain
* array attributes. Otherwise, returns NULL.
*/
const IndexDetails* findIndexByPrefix( const BSONObj &keyPattern ,
bool requireSingleKey );
/* Updates the expireAfterSeconds field of the given index to the v
alue in newExpireSecs.
* The specified index must already contain an expireAfterSeconds f
ield, and the value in
* that field and newExpireSecs must both be numeric.
*/
void updateTTLIndex( int idxNo , const BSONElement& newExpireSecs )
;
const int systemFlags() const { return _systemFlags; } const int systemFlags() const { return _systemFlags; }
bool isSystemFlagSet( int flag ) const { return _systemFlags & flag ; } bool isSystemFlagSet( int flag ) const { return _systemFlags & flag ; }
void setSystemFlag( int flag ); void setSystemFlag( int flag );
void clearSystemFlag( int flag ); void clearSystemFlag( int flag );
const int userFlags() const { return _userFlags; } const int userFlags() const { return _userFlags; }
bool isUserFlagSet( int flag ) const { return _userFlags & flag; } bool isUserFlagSet( int flag ) const { return _userFlags & flag; }
/** /**
* these methods only modify NamespaceDetails and do not * these methods only modify NamespaceDetails and do not
skipping to change at line 398 skipping to change at line 353
} }
* these methods all return true iff only something was modified * these methods all return true iff only something was modified
*/ */
bool setUserFlag( int flag ); bool setUserFlag( int flag );
bool clearUserFlag( int flag ); bool clearUserFlag( int flag );
bool replaceUserFlags( int flags ); bool replaceUserFlags( int flags );
void syncUserFlags( const string& ns ); void syncUserFlags( const string& ns );
/* @return -1 = not found
generally id is first index, so not that expensive an operation
(assuming present).
*/
int findIdIndex() {
IndexIterator i = ii();
while( i.more() ) {
if( i.next().isIdIndex() )
return i.pos()-1;
}
return -1;
}
bool haveIdIndex() {
return isSystemFlagSet( NamespaceDetails::Flag_HaveIdIndex ) ||
findIdIndex() >= 0;
}
/* return which "deleted bucket" for this size object */ /* return which "deleted bucket" for this size object */
static int bucket(int size) { static int bucket(int size) {
for ( int i = 0; i < Buckets; i++ ) { for ( int i = 0; i < Buckets; i++ ) {
if ( bucketSizes[i] > size ) { if ( bucketSizes[i] > size ) {
// Return the first bucket sized _larger_ than the requ ested size. // Return the first bucket sized _larger_ than the requ ested size.
return i; return i;
} }
} }
return MaxBucket; return MaxBucket;
} }
skipping to change at line 436 skipping to change at line 375
@param allocSize requested size to allocate @param allocSize requested size to allocate
The returned size will be greater than or equal to 'allocSize'. The returned size will be greater than or equal to 'allocSize'.
*/ */
static int quantizeAllocationSpace(int allocSize); static int quantizeAllocationSpace(int allocSize);
/** /**
* Quantize 'allocSize' to the nearest bucketSize (or nearest 1mb b oundary for large sizes). * Quantize 'allocSize' to the nearest bucketSize (or nearest 1mb b oundary for large sizes).
*/ */
static int quantizePowerOf2AllocationSpace(int allocSize); static int quantizePowerOf2AllocationSpace(int allocSize);
/* predetermine location of the next alloc without actually doing i
t.
if cannot predetermine returns null (so still call alloc() then)
*/
DiskLoc allocWillBeAt(const char *ns, int lenToAlloc);
/** allocate space for a new record from deleted lists. /** allocate space for a new record from deleted lists.
@param lenToAlloc is WITH header @param lenToAlloc is WITH header
@return null diskloc if no room - allocate a new extent then @return null diskloc if no room - allocate a new extent then
*/ */
DiskLoc alloc(const StringData& ns, int lenToAlloc); DiskLoc alloc(Collection* collection, const StringData& ns, int len ToAlloc);
/* add a given record to the deleted chains for this NS */ /* add a given record to the deleted chains for this NS */
void addDeletedRec(DeletedRecord *d, DiskLoc dloc); void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
void dumpDeleted(set<DiskLoc> *extents = 0);
// Start from firstExtent by default. // Start from firstExtent by default.
DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const ; DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const ;
// Start from lastExtent by default. // Start from lastExtent by default.
DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const; DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const;
NamespaceDetails *writingWithoutExtra() { NamespaceDetails *writingWithoutExtra() {
return ( NamespaceDetails* ) getDur().writingPtr( this, sizeof( NamespaceDetails ) ); return ( NamespaceDetails* ) getDur().writingPtr( this, sizeof( NamespaceDetails ) );
} }
/** Make all linked Extra objects writeable as well */ /** Make all linked Extra objects writeable as well */
NamespaceDetails *writingWithExtra(); NamespaceDetails *writingWithExtra();
private: private:
// @return offset in indexes[]
int _catalogFindIndexByName( const StringData& name,
bool includeBackgroundInProgress = fal
se);
void _removeIndexFromMe( int idx ); void _removeIndexFromMe( int idx );
/** /**
* swaps all meta data for 2 indexes * swaps all meta data for 2 indexes
* a and b are 2 index ids, whose contents will be swapped * a and b are 2 index ids, whose contents will be swapped
* must have a lock on the entire collection to do this * must have a lock on the entire collection to do this
*/ */
void swapIndex( int a, int b ); void swapIndex( int a, int b );
DiskLoc _alloc(const StringData& ns, int len); DiskLoc _alloc(Collection* collection, const StringData& ns, int le n);
void maybeComplain( const StringData& ns, int len ) const; void maybeComplain( const StringData& ns, int len ) const;
DiskLoc __stdAlloc(int len, bool willBeAt); DiskLoc __stdAlloc(int len, bool willBeAt);
void compact(); // combine adjacent deleted records void compact(); // combine adjacent deleted records
friend class Database;
friend class NamespaceIndex; friend class NamespaceIndex;
friend class IndexCatalog; friend class IndexCatalog;
friend class IndexCatalogEntry;
struct ExtraOld { struct ExtraOld {
// note we could use this field for more chaining later, so don 't waste it: // note we could use this field for more chaining later, so don 't waste it:
unsigned long long reserved1; unsigned long long reserved1;
IndexDetails details[NIndexesExtra]; IndexDetails details[NIndexesExtra];
unsigned reserved2; unsigned reserved2;
unsigned reserved3; unsigned reserved3;
}; };
/** Update cappedLastDelRecLastExtent() after capExtent changed in cappedTruncateAfter() */ /** Update cappedLastDelRecLastExtent() after capExtent changed in cappedTruncateAfter() */
void cappedTruncateLastDelUpdate(); void cappedTruncateLastDelUpdate();
 End of changes. 20 change blocks. 
93 lines changed or deleted 22 lines changed or added


 namespace_index.h   namespace_index.h 
skipping to change at line 37 skipping to change at line 37
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <list> #include <list>
#include <string> #include <string>
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/catalog/ondisk/namespace.h" #include "mongo/db/structure/catalog/hashtab.h"
#include "mongo/util/hashtab.h" #include "mongo/db/structure/catalog/namespace.h"
namespace mongo { namespace mongo {
class NamespaceDetails; class NamespaceDetails;
/* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog" /* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
if you will: at least the core parts. (Additional info in system.* collections.) if you will: at least the core parts. (Additional info in system.* collections.)
*/ */
class NamespaceIndex { class NamespaceIndex {
public: public:
 End of changes. 1 change blocks. 
2 lines changed or deleted 2 lines changed or added


 namespace_string-inl.h   namespace_string-inl.h 
skipping to change at line 101 skipping to change at line 101
if ( idx == std::string::npos ) if ( idx == std::string::npos )
return false; return false;
return validCollectionName(ns.substr(idx + 1)) || oplog(ns); return validCollectionName(ns.substr(idx + 1)) || oplog(ns);
} }
inline bool NamespaceString::validCollectionName(const StringData& coll ){ inline bool NamespaceString::validCollectionName(const StringData& coll ){
if (coll.empty()) if (coll.empty())
return false; return false;
return coll.find('$') == std::string::npos; for (StringData::const_iterator iter = coll.begin(), end = coll.end
();
iter != end; ++iter) {
switch (*iter) {
case '\0':
case '$':
return false;
default:
continue;
}
}
return true;
} }
inline NamespaceString::NamespaceString() : _ns(), _dotIndex(0) {} inline NamespaceString::NamespaceString() : _ns(), _dotIndex(0) {}
inline NamespaceString::NamespaceString( const StringData& nsIn ) { inline NamespaceString::NamespaceString( const StringData& nsIn ) {
_ns = nsIn.toString(); // copy to our buffer _ns = nsIn.toString(); // copy to our buffer
_dotIndex = _ns.find( '.' ); _dotIndex = _ns.find( '.' );
} }
inline NamespaceString::NamespaceString( const StringData& dbName, inline NamespaceString::NamespaceString( const StringData& dbName,
const StringData& collectionNa me ) const StringData& collectionNa me )
skipping to change at line 128 skipping to change at line 138
uassert(17246, uassert(17246,
"Collection names cannot start with '.'", "Collection names cannot start with '.'",
collectionName.empty() || collectionName[0] != '.'); collectionName.empty() || collectionName[0] != '.');
std::string::iterator it = std::copy(dbName.begin(), dbName.end(), _ns.begin()); std::string::iterator it = std::copy(dbName.begin(), dbName.end(), _ns.begin());
*it = '.'; *it = '.';
++it; ++it;
it = std::copy(collectionName.begin(), collectionName.end(), it); it = std::copy(collectionName.begin(), collectionName.end(), it);
_dotIndex = dbName.size(); _dotIndex = dbName.size();
dassert(it == _ns.end()); dassert(it == _ns.end());
dassert(_ns[_dotIndex] == '.'); dassert(_ns[_dotIndex] == '.');
dassert(_ns.find('\0') == std::string::npos); uassert(17295, "namespaces cannot have embedded null characters",
_ns.find('\0') == std::string::npos);
} }
inline int nsDBHash( const std::string& ns ) { inline int nsDBHash( const std::string& ns ) {
int hash = 7; int hash = 7;
for ( size_t i = 0; i < ns.size(); i++ ) { for ( size_t i = 0; i < ns.size(); i++ ) {
if ( ns[i] == '.' ) if ( ns[i] == '.' )
break; break;
hash += 11 * ( ns[i] ); hash += 11 * ( ns[i] );
hash *= 3; hash *= 3;
} }
 End of changes. 3 change blocks. 
2 lines changed or deleted 14 lines changed or added


 namespace_string.h   namespace_string.h 
skipping to change at line 81 skipping to change at line 81
operator std::string() const { return _ns; } operator std::string() const { return _ns; }
std::string toString() const { return _ns; } std::string toString() const { return _ns; }
size_t size() const { return _ns.size(); } size_t size() const { return _ns.size(); }
bool isSystem() const { return coll().startsWith( "system." ); } bool isSystem() const { return coll().startsWith( "system." ); }
bool isSystemDotIndexes() const { return coll() == "system.indexes" ; } bool isSystemDotIndexes() const { return coll() == "system.indexes" ; }
bool isConfigDB() const { return db() == "config"; } bool isConfigDB() const { return db() == "config"; }
bool isCommand() const { return coll() == "$cmd"; } bool isCommand() const { return coll() == "$cmd"; }
bool isOplog() const { return oplog( _ns ); }
bool isSpecialCommand() const { return coll().startsWith("$cmd.sys" ); } bool isSpecialCommand() const { return coll().startsWith("$cmd.sys" ); }
bool isSpecial() const { return special( _ns ); } bool isSpecial() const { return special( _ns ); }
bool isNormal() const { return normal( _ns ); }
/** /**
* @return true if the namespace is valid. Special namespaces for i nternal use are considered as valid. * @return true if the namespace is valid. Special namespaces for i nternal use are considered as valid.
*/ */
bool isValid() const { return validDBName( db() ) && !coll().empty( ); } bool isValid() const { return validDBName( db() ) && !coll().empty( ); }
bool operator==( const std::string& nsIn ) const { return nsIn == _ ns; } bool operator==( const std::string& nsIn ) const { return nsIn == _ ns; }
bool operator==( const NamespaceString& nsIn ) const { return nsIn. _ns == _ns; } bool operator==( const NamespaceString& nsIn ) const { return nsIn. _ns == _ns; }
bool operator!=( const std::string& nsIn ) const { return nsIn != _ ns; } bool operator!=( const std::string& nsIn ) const { return nsIn != _ ns; }
bool operator!=( const NamespaceString& nsIn ) const { return nsIn. _ns != _ns; } bool operator!=( const NamespaceString& nsIn ) const { return nsIn. _ns != _ns; }
skipping to change at line 172 skipping to change at line 175
private: private:
std::string _ns; std::string _ns;
size_t _dotIndex; size_t _dotIndex;
}; };
// "database.a.b.c" -> "database" // "database.a.b.c" -> "database"
inline StringData nsToDatabaseSubstring( const StringData& ns ) { inline StringData nsToDatabaseSubstring( const StringData& ns ) {
size_t i = ns.find( '.' ); size_t i = ns.find( '.' );
if ( i == std::string::npos ) { if ( i == std::string::npos ) {
massert(10078, "nsToDatabase: ns too long", ns.size() < MaxData baseNameLen ); massert(10078, "nsToDatabase: db too long", ns.size() < MaxData baseNameLen );
return ns; return ns;
} }
massert(10088, "nsToDatabase: ns too long", i < static_cast<size_t> (MaxDatabaseNameLen)); massert(10088, "nsToDatabase: db too long", i < static_cast<size_t> (MaxDatabaseNameLen));
return ns.substr( 0, i ); return ns.substr( 0, i );
} }
// "database.a.b.c" -> "database" // "database.a.b.c" -> "database"
inline void nsToDatabase(const StringData& ns, char *database) { inline void nsToDatabase(const StringData& ns, char *database) {
StringData db = nsToDatabaseSubstring( ns ); StringData db = nsToDatabaseSubstring( ns );
db.copyTo( database, true ); db.copyTo( database, true );
} }
// TODO: make this return a StringData // TODO: make this return a StringData
 End of changes. 4 change blocks. 
2 lines changed or deleted 5 lines changed or added


 new_find.h   new_find.h 
skipping to change at line 43 skipping to change at line 43
#include "mongo/db/clientcursor.h" #include "mongo/db/clientcursor.h"
#include "mongo/db/curop.h" #include "mongo/db/curop.h"
#include "mongo/db/dbmessage.h" #include "mongo/db/dbmessage.h"
#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/runner.h" #include "mongo/db/query/runner.h"
#include "mongo/util/net/message.h" #include "mongo/util/net/message.h"
namespace mongo { namespace mongo {
/** /**
* Get a runner for a query. Takes ownership of rawCanonicalQuery.
*
* If the query is valid and a runner could be created, returns Status:
:OK()
* and populates *out with the Runner.
*
* If the query cannot be executed, returns a Status indicating why. D
eletes
* rawCanonicalQuery.
*/
Status getRunner(CanonicalQuery* rawCanonicalQuery, Runner** out, size_
t plannerOptions = 0);
/**
* A switch to choose between old Cursor-based code and new Runner-base
d code.
*/
bool isNewQueryFrameworkEnabled();
/**
* Use the new query framework. Called from the dbtest initialization.
*/
void enableNewQueryFramework();
/**
* Called from the getMore entry point in ops/query.cpp. * Called from the getMore entry point in ops/query.cpp.
*/ */
QueryResult* newGetMore(const char* ns, int ntoreturn, long long cursor id, CurOp& curop, QueryResult* newGetMore(const char* ns, int ntoreturn, long long cursor id, CurOp& curop,
int pass, bool& exhaust, bool* isCursorAuthoriz ed); int pass, bool& exhaust, bool* isCursorAuthoriz ed);
/** /**
* Called from the runQuery entry point in ops/query.cpp. * Run the query 'q' and place the result in 'result'.
*
* Takes ownership of cq.
*/
std::string newRunQuery(CanonicalQuery* cq, CurOp& curop, Message &resu
lt);
/**
* Can the new system handle the provided query?
*
* Returns false if not. cqOut is not modified.
* Returns true if so. Caller owns *cqOut.
*/
bool canUseNewSystem(const QueryMessage& qm, CanonicalQuery** cqOut);
/**
* RAII approach to ensuring that runners are deregistered in newRunQue
ry.
*
* While retrieving the first bach of results, newRunQuery manually reg
isters the runner with
* ClientCursor. Certain query execution paths, namely $where, can thr
ow an exception. If we
* fail to deregister the runner, we will call invalidate/kill on the
* still-registered-yet-deleted runner.
*
* For any subsequent calls to getMore, the runner is already registere
d with ClientCursor
* by virtue of being cached, so this exception-proofing is not require
d.
*/ */
struct DeregisterEvenIfUnderlyingCodeThrows { std::string newRunQuery(Message& m, QueryMessage& q, CurOp& curop, Mess
DeregisterEvenIfUnderlyingCodeThrows(Runner* runner) : _runner(runn age &result);
er) { }
~DeregisterEvenIfUnderlyingCodeThrows() {
ClientCursor::deregisterRunner(_runner);
}
Runner* _runner;
};
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
63 lines changed or deleted 3 lines changed or added


 ns_targeter.h   ns_targeter.h 
skipping to change at line 60 skipping to change at line 60
* *
* 0. targetDoc/targetQuery as many times as is required * 0. targetDoc/targetQuery as many times as is required
* *
* 1a. On targeting failure we may need to refresh, note that it happ ened. * 1a. On targeting failure we may need to refresh, note that it happ ened.
* 1b. On stale config from a child write operation we may need to re fresh, note the error. * 1b. On stale config from a child write operation we may need to re fresh, note the error.
* *
* 2. RefreshIfNeeded() to get newer targeting information * 2. RefreshIfNeeded() to get newer targeting information
* *
* 3. Goto 0. * 3. Goto 0.
* *
* The refreshIfNeeded() operation must make progress against noted tar * The refreshIfNeeded() operation must try to make progress against no
geting or stale config ted targeting or stale
* failures, see comments below. No functions may block for shared res * config failures, see comments below. No functions may block for sha
ources or network calls red resources or network
* except refreshIfNeeded(). * calls except refreshIfNeeded().
* *
* Implementers are free to define more specific targeting error codes to allow more complex * Implementers are free to define more specific targeting error codes to allow more complex
* error handling. * error handling.
* *
* Interface must be externally synchronized if used in multiple thread s, for now. * Interface must be externally synchronized if used in multiple thread s, for now.
* TODO: Determine if we should internally synchronize. * TODO: Determine if we should internally synchronize.
*/ */
class NSTargeter { class NSTargeter {
public: public:
skipping to change at line 134 skipping to change at line 134
*/ */
virtual void noteStaleResponse( const ShardEndpoint& endpoint, virtual void noteStaleResponse( const ShardEndpoint& endpoint,
const BSONObj& staleInfo ) = 0; const BSONObj& staleInfo ) = 0;
/** /**
* Refreshes the targeting metadata for the namespace if needed, ba sed on previously-noted * Refreshes the targeting metadata for the namespace if needed, ba sed on previously-noted
* stale responses and targeting failures. * stale responses and targeting failures.
* *
* After this function is called, the targeter should be in a state such that the noted * After this function is called, the targeter should be in a state such that the noted
* stale responses are not seen again and if a targeting failure oc curred it reloaded - * stale responses are not seen again and if a targeting failure oc curred it reloaded -
* it should make progress. * it should try to make progress. If provided, wasChanged is set
to true if the targeting
* information used here was changed.
* *
* NOTE: This function may block for shared resources or network ca lls. * NOTE: This function may block for shared resources or network ca lls.
* Returns !OK with message if could not refresh * Returns !OK with message if could not refresh
*/ */
virtual Status refreshIfNeeded() = 0; virtual Status refreshIfNeeded( bool* wasChanged ) = 0;
}; };
/** /**
* A ShardEndpoint represents a destination for a targeted query or doc ument. It contains both * A ShardEndpoint represents a destination for a targeted query or doc ument. It contains both
* the logical target (shard name/version/broadcast) and the physical t arget (host name). * the logical target (shard name/version/broadcast) and the physical t arget (host name).
*/ */
struct ShardEndpoint { struct ShardEndpoint {
ShardEndpoint() { ShardEndpoint() {
 End of changes. 3 change blocks. 
7 lines changed or deleted 9 lines changed or added


 oplogreader.h   oplogreader.h 
skipping to change at line 35 skipping to change at line 35
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/client/constants.h" #include "mongo/client/constants.h"
#include "mongo/client/dbclientcursor.h" #include "mongo/client/dbclientcursor.h"
#include "mongo/db/dbhelpers.h"
namespace mongo { namespace mongo {
extern const BSONObj reverseNaturalObj; // { $natural : -1 }
/** /**
* Authenticates conn using the server's cluster-membership credentials . * Authenticates conn using the server's cluster-membership credentials .
* *
* Returns true on successful authentication. * Returns true on successful authentication.
*/ */
bool replAuthenticate(DBClientBase* conn); bool replAuthenticate(DBClientBase* conn);
/* started abstracting out the querying of the primary/master's oplog /* started abstracting out the querying of the primary/master's oplog
still fairly awkward but a start. still fairly awkward but a start.
*/ */
 End of changes. 2 change blocks. 
1 lines changed or deleted 1 lines changed or added


 oplogstart.h   oplogstart.h 
skipping to change at line 32 skipping to change at line 32
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/namespace_details.h"
#include "mongo/db/exec/collection_scan.h" #include "mongo/db/exec/collection_scan.h"
#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stage.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#include "mongo/util/timer.h"
namespace mongo { namespace mongo {
class NamespaceDetails;
/** /**
* OplogStart walks a collection backwards to find the first object in the collection that * OplogStart walks a collection backwards to find the first object in the collection that
* matches the query. It's used by replication to efficiently find whe re the oplog should be * matches the query. It's used by replication to efficiently find whe re the oplog should be
* replayed from. * replayed from.
* *
* The oplog is always a capped collection. In capped collections, doc uments are oriented on * The oplog is always a capped collection. In capped collections, doc uments are oriented on
* disk according to insertion order. The oplog inserts documents with increasing timestamps. * disk according to insertion order. The oplog inserts documents with increasing timestamps.
* Queries on the oplog look for entries that are after a certain time. Therefore if we * Queries on the oplog look for entries that are after a certain time. Therefore if we
* navigate backwards, the last document we encounter that satisfies ou r query (over the * navigate backwards, the last document we encounter that satisfies ou r query (over the
* timestamp) is the first document we must scan from to answer the que ry. * timestamp) is the first document we must scan from to answer the que ry.
skipping to change at line 68 skipping to change at line 70
*/ */
class OplogStart : public PlanStage { class OplogStart : public PlanStage {
public: public:
// Does not take ownership. // Does not take ownership.
OplogStart(const string& ns, MatchExpression* filter, WorkingSet* w s); OplogStart(const string& ns, MatchExpression* filter, WorkingSet* w s);
virtual ~OplogStart(); virtual ~OplogStart();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual bool isEOF(); virtual bool isEOF();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
// PS. don't call this. // PS. don't call this.
virtual PlanStageStats* getStats() { return NULL; } virtual PlanStageStats* getStats() { return NULL; }
// For testing only.
void setBackwardsScanTime(int newTime) { _backwardsScanTime = newTi
me; }
bool isExtentHopping() { return _extentHopping; }
bool isBackwardsScanning() { return _backwardsScanning; }
private: private:
// Copied verbatim. // Copied verbatim.
static DiskLoc prevExtentFirstLoc(NamespaceDetails* nsd, const Disk Loc& rec); static DiskLoc prevExtentFirstLoc(NamespaceDetails* nsd, const Disk Loc& rec);
StageState workBackwardsScan(WorkingSetID* out); StageState workBackwardsScan(WorkingSetID* out);
void switchToExtentHopping(); void switchToExtentHopping();
StageState workExtentHopping(WorkingSetID* out); StageState workExtentHopping(WorkingSetID* out);
skipping to change at line 114 skipping to change at line 121
// We only go backwards via a collscan for a few seconds. // We only go backwards via a collscan for a few seconds.
Timer _timer; Timer _timer;
// WorkingSet is not owned by us. // WorkingSet is not owned by us.
WorkingSet* _workingSet; WorkingSet* _workingSet;
string _ns; string _ns;
MatchExpression* _filter; MatchExpression* _filter;
static int _backwardsScanTime;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
2 lines changed or deleted 12 lines changed or added


 optime.h   optime.h 
skipping to change at line 95 skipping to change at line 95
} }
// it isn't generally safe to not be locked for this. so use now(). some tests use this. // it isn't generally safe to not be locked for this. so use now(). some tests use this.
static OpTime _now(); static OpTime _now();
static mongo::mutex m; static mongo::mutex m;
static OpTime now(const mongo::mutex::scoped_lock&); static OpTime now(const mongo::mutex::scoped_lock&);
static OpTime getLast(const mongo::mutex::scoped_lock&); static OpTime getLast(const mongo::mutex::scoped_lock&);
// Maximum OpTime value.
static OpTime max();
// Waits for global OpTime to be different from *this // Waits for global OpTime to be different from *this
void waitForDifferent(unsigned millis); void waitForDifferent(unsigned millis);
/* We store OpTime's in the database as BSON Date datatype -- we ne eded some sort of /* We store OpTime's in the database as BSON Date datatype -- we ne eded some sort of
64 bit "container" for these values. While these are not really " Dates", that seems a 64 bit "container" for these values. While these are not really " Dates", that seems a
better choice for now than say, Number, which is floating point. Note the BinData type better choice for now than say, Number, which is floating point. Note the BinData type
is perhaps the cleanest choice, lacking a true unsigned64 datatype , but BinData has 5 is perhaps the cleanest choice, lacking a true unsigned64 datatype , but BinData has 5
bytes of overhead. bytes of overhead.
*/ */
unsigned long long asDate() const { unsigned long long asDate() const {
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 option_description.h   option_description.h 
skipping to change at line 33 skipping to change at line 33
#include "mongo/util/options_parser/value.h" #include "mongo/util/options_parser/value.h"
namespace mongo { namespace mongo {
namespace optionenvironment { namespace optionenvironment {
/** /**
* An OptionType is an enum of all the types we support in the OptionsP arser * An OptionType is an enum of all the types we support in the OptionsP arser
*/ */
enum OptionType { enum OptionType {
StringVector, // po::value< std::vector<std::string> > StringVector, // po::value< std::vector<std::string> >
StringMap, // po::value< std::vector<std::string> > (but in "key=value" format)
Bool, // po::value<bool> Bool, // po::value<bool>
Double, // po::value<double> Double, // po::value<double>
Int, // po::value<int> Int, // po::value<int>
Long, // po::value<long> Long, // po::value<long>
String, // po::value<std::string> String, // po::value<std::string>
UnsignedLongLong, // po::value<unsigned long long> UnsignedLongLong, // po::value<unsigned long long>
Unsigned, // po::value<unsigned> Unsigned, // po::value<unsigned>
Switch // po::bool_switch Switch // po::bool_switch
}; };
skipping to change at line 166 skipping to change at line 167
* valid. These do not get checked during parsing, but will be add ed to the result * valid. These do not get checked during parsing, but will be add ed to the result
* Environment so that they will get checked when the Environment i s validated. * Environment so that they will get checked when the Environment i s validated.
*/ */
/** /**
* Specifies the range allowed for this option. Only allowed for o ptions with numeric type. * Specifies the range allowed for this option. Only allowed for o ptions with numeric type.
*/ */
OptionDescription& validRange(long min, long max); OptionDescription& validRange(long min, long max);
/** /**
* Specifies that this option is incompatible with another option.
The string provided must
* be the dottedName, which is the name used to access the option i
n the result Environment.
*
* TODO: Find a way to check that that option actually exists in ou
r section somewhere.
*/
OptionDescription& incompatibleWith(const std::string& otherDottedN
ame);
/**
* Specifies that this option is requires another option to be spec
ified. The string
* provided must be the dottedName, which is the name used to acces
s the option in the
* result Environment.
*/
OptionDescription& requires(const std::string& otherDottedName);
/**
* Specifies that this option is required to match the given format
, specified as a regular
* expression. The displayFormat argument is what gets printed to
the user in the case
* where this constraint is not satisfied. This is only allowed on
String options.
*/
OptionDescription& format(const std::string& regexFormat, const std
::string& displayFormat);
/**
* Adds a constraint for this option. During parsing, this Constra int will be added to the * Adds a constraint for this option. During parsing, this Constra int will be added to the
* result Environment, ensuring that it will get checked when the e nvironment is validated. * result Environment, ensuring that it will get checked when the e nvironment is validated.
* See the documentation on the Constraint and Environment classes for more details. * See the documentation on the Constraint and Environment classes for more details.
* *
* WARNING: This function takes ownership of the Constraint pointer that is passed in. * WARNING: This function takes ownership of the Constraint pointer that is passed in.
*/ */
OptionDescription& addConstraint(Constraint* c); OptionDescription& addConstraint(Constraint* c);
std::string _dottedName; // Used for JSON config and in Environment std::string _dottedName; // Used for JSON config and in Environment
std::string _singleName; // Used for boost command line and INI std::string _singleName; // Used for boost command line and INI
 End of changes. 2 change blocks. 
0 lines changed or deleted 33 lines changed or added


 or.h   or.h 
skipping to change at line 59 skipping to change at line 59
virtual ~OrStage(); virtual ~OrStage();
void addChild(PlanStage* child); void addChild(PlanStage* child);
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
// Not owned by us. // Not owned by us.
WorkingSet* _ws; WorkingSet* _ws;
// The filter is not owned by us. // The filter is not owned by us.
const MatchExpression* _filter; const MatchExpression* _filter;
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 ordering.h   ordering.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjiterator.h"
namespace mongo { namespace mongo {
// todo: ideally move to db/ instead of bson/, but elim any dependencie s first // todo: ideally move to db/ instead of bson/, but elim any dependencie s first
/** A precomputation of a BSON index or sort key pattern. That is some thing like: /** A precomputation of a BSON index or sort key pattern. That is some thing like:
{ a : 1, b : -1 } { a : 1, b : -1 }
The constructor is private to make conversion more explicit so we n otice where we call make(). The constructor is private to make conversion more explicit so we n otice where we call make().
Over time we should push this up higher and higher. Over time we should push this up higher and higher.
*/ */
class Ordering { class Ordering {
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 parallel.h   parallel.h 
skipping to change at line 24 skipping to change at line 24
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
/** /**
tools for working in parallel/sharded/clustered environment tools for working in parallel/sharded/clustered environment
*/ */
#pragma once #pragma once
#include "mongo/client/export_macros.h"
#include "mongo/db/dbmessage.h" #include "mongo/db/dbmessage.h"
#include "mongo/db/matcher.h" #include "mongo/db/matcher.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/s/shard.h" #include "mongo/s/shard.h"
#include "mongo/s/stale_exception.h" // for StaleConfigException #include "mongo/s/stale_exception.h" // for StaleConfigException
#include "mongo/util/concurrency/mvar.h" #include "mongo/util/concurrency/mvar.h"
namespace mongo { namespace mongo {
/** /**
* holder for a server address and a query to run * holder for a server address and a query to run
*/ */
class ServerAndQuery { class MONGO_CLIENT_API ServerAndQuery {
public: public:
ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) : ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) :
_server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ) { _server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ) {
} }
bool operator<( const ServerAndQuery& other ) const { bool operator<( const ServerAndQuery& other ) const {
if ( ! _orderObject.isEmpty() ) if ( ! _orderObject.isEmpty() )
return _orderObject.woCompare( other._orderObject ) < 0; return _orderObject.woCompare( other._orderObject ) < 0;
if ( _server < other._server ) if ( _server < other._server )
skipping to change at line 68 skipping to change at line 69
operator string() const { operator string() const {
return toString(); return toString();
} }
string _server; string _server;
BSONObj _extra; BSONObj _extra;
BSONObj _orderObject; BSONObj _orderObject;
}; };
/**
* this is a cursor that works over a set of servers
* can be used in serial/parallel as controlled by sub classes
*/
class ClusteredCursor {
public:
ClusteredCursor( const QuerySpec& q );
ClusteredCursor( QueryMessage& q );
ClusteredCursor( const string& ns , const BSONObj& q , int options=
0 , const BSONObj& fields=BSONObj() );
virtual ~ClusteredCursor();
/** call before using */
void init();
virtual std::string getNS() { return _ns; }
virtual bool more() = 0;
virtual BSONObj next() = 0;
static BSONObj concatQuery( const BSONObj& query , const BSONObj& e
xtraFilter );
virtual string type() const = 0;
virtual void explain(BSONObjBuilder& b) = 0;
protected:
virtual void _init() = 0;
auto_ptr<DBClientCursor> query( const string& server , int num = 0
, BSONObj extraFilter = BSONObj() , int skipLeft = 0 , bool lazy=false );
BSONObj explain( const string& server , BSONObj extraFilter = BSONO
bj() );
/**
* checks the cursor for any errors
* will throw an exceptionif an error is encountered
*/
void _checkCursor( DBClientCursor * cursor );
static BSONObj _concatFilter( const BSONObj& filter , const BSONObj
& extraFilter );
virtual void _explain( map< string,list<BSONObj> >& out ) = 0;
string _ns;
BSONObj _query;
BSONObj _hint;
BSONObj _sort;
int _options;
BSONObj _fields;
int _batchSize;
bool _didInit;
bool _done;
};
class ParallelConnectionMetadata; class ParallelConnectionMetadata;
class FilteringClientCursor;
// TODO: We probably don't really need this as a separate class. class MONGO_CLIENT_API CommandInfo {
class FilteringClientCursor {
public:
FilteringClientCursor( const BSONObj filter = BSONObj() );
FilteringClientCursor( DBClientCursor* cursor , const BSONObj filte
r = BSONObj() );
FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSON
Obj filter = BSONObj() );
~FilteringClientCursor();
void reset( auto_ptr<DBClientCursor> cursor );
void reset( DBClientCursor* cursor, ParallelConnectionMetadata* _pc
mData = NULL );
bool more();
BSONObj next();
BSONObj peek();
DBClientCursor* raw() { return _cursor.get(); }
ParallelConnectionMetadata* rawMData(){ return _pcmData; }
// Required for new PCursor
void release(){
_cursor.release();
_pcmData = NULL;
}
private:
void _advance();
Matcher _matcher;
auto_ptr<DBClientCursor> _cursor;
ParallelConnectionMetadata* _pcmData;
BSONObj _next;
bool _done;
};
class Servers {
public:
Servers() {
}
void add( const ServerAndQuery& s ) {
add( s._server , s._extra );
}
void add( const string& server , const BSONObj& filter ) {
vector<BSONObj>& mine = _filters[server];
mine.push_back( filter.getOwned() );
}
// TOOO: pick a less horrible name
class View {
View( const Servers* s ) {
for ( map<string, vector<BSONObj> >::const_iterator i=s->_f
ilters.begin(); i!=s->_filters.end(); ++i ) {
_servers.push_back( i->first );
_filters.push_back( i->second );
}
}
public:
int size() const {
return _servers.size();
}
string getServer( int n ) const {
return _servers[n];
}
vector<BSONObj> getFilter( int n ) const {
return _filters[ n ];
}
private:
vector<string> _servers;
vector< vector<BSONObj> > _filters;
friend class Servers;
};
View view() const {
return View( this );
}
private:
map<string, vector<BSONObj> > _filters;
friend class View;
};
/**
* runs a query in serial across any number of servers
* returns all results from 1 server, then the next, etc...
*/
class SerialServerClusteredCursor : public ClusteredCursor {
public:
SerialServerClusteredCursor( const set<ServerAndQuery>& servers , Q
ueryMessage& q , int sortOrder=0);
virtual bool more();
virtual BSONObj next();
virtual string type() const { return "SerialServer"; }
protected:
virtual void _explain( map< string,list<BSONObj> >& out );
void _init() {}
vector<ServerAndQuery> _servers;
unsigned _serverIndex;
FilteringClientCursor _current;
int _needToSkip;
};
class CommandInfo {
public: public:
string versionedNS; string versionedNS;
BSONObj cmdFilter; BSONObj cmdFilter;
CommandInfo() {} CommandInfo() {}
CommandInfo( const string& vns, const BSONObj& filter ) : versioned NS( vns ), cmdFilter( filter ) {} CommandInfo( const string& vns, const BSONObj& filter ) : versioned NS( vns ), cmdFilter( filter ) {}
bool isEmpty(){ bool isEmpty(){
return versionedNS.size() == 0; return versionedNS.size() == 0;
} }
skipping to change at line 260 skipping to change at line 94
string toString() const { string toString() const {
return str::stream() << "CInfo " << BSON( "v_ns" << versionedNS << "filter" << cmdFilter ); return str::stream() << "CInfo " << BSON( "v_ns" << versionedNS << "filter" << cmdFilter );
} }
}; };
typedef shared_ptr<ShardConnection> ShardConnectionPtr; typedef shared_ptr<ShardConnection> ShardConnectionPtr;
class DBClientCursor; class DBClientCursor;
typedef shared_ptr<DBClientCursor> DBClientCursorPtr; typedef shared_ptr<DBClientCursor> DBClientCursorPtr;
class ParallelConnectionState { class MONGO_CLIENT_API ParallelConnectionState {
public: public:
ParallelConnectionState() : ParallelConnectionState() :
count( 0 ), done( false ) { } count( 0 ), done( false ) { }
ShardConnectionPtr conn; ShardConnectionPtr conn;
DBClientCursorPtr cursor; DBClientCursorPtr cursor;
// Version information // Version information
ChunkManagerPtr manager; ChunkManagerPtr manager;
skipping to change at line 287 skipping to change at line 121
BSONObj toBSON() const; BSONObj toBSON() const;
string toString() const { string toString() const {
return str::stream() << "PCState : " << toBSON(); return str::stream() << "PCState : " << toBSON();
} }
}; };
typedef ParallelConnectionState PCState; typedef ParallelConnectionState PCState;
typedef shared_ptr<PCState> PCStatePtr; typedef shared_ptr<PCState> PCStatePtr;
class ParallelConnectionMetadata { class MONGO_CLIENT_API ParallelConnectionMetadata {
public: public:
ParallelConnectionMetadata() : ParallelConnectionMetadata() :
retryNext( false ), initialized( false ), finished( false ), co mpleted( false ), errored( false ) { } retryNext( false ), initialized( false ), finished( false ), co mpleted( false ), errored( false ) { }
~ParallelConnectionMetadata(){ ~ParallelConnectionMetadata(){
cleanup( true ); cleanup( true );
} }
void cleanup( bool full = true ); void cleanup( bool full = true );
skipping to change at line 320 skipping to change at line 154
string toString() const { string toString() const {
return str::stream() << "PCMData : " << toBSON(); return str::stream() << "PCMData : " << toBSON();
} }
}; };
typedef ParallelConnectionMetadata PCMData; typedef ParallelConnectionMetadata PCMData;
typedef shared_ptr<PCMData> PCMDataPtr; typedef shared_ptr<PCMData> PCMDataPtr;
/** /**
* Runs a query in parallel across N servers. New logic has several mo * Runs a query in parallel across N servers, enforcing compatible chun
des - k versions for queries
* 1) Standard query, enforces compatible chunk versions for queries ac * across all shards.
ross all results *
* 2) Standard query, sent to particular servers with no compatible chu * If CommandInfo is provided, the ParallelCursor does not use the dire
nk version enforced, but handling ct .$cmd namespace in the
* stale configuration exceptions * query spec, but instead enforces versions across another namespace s
* 3) Command query, either enforcing compatible chunk versions or sent pecified by CommandInfo.
to particular shards. * This is to support commands like:
* db.runCommand({ fileMD5 : "<coll name>" })
*
* There is a deprecated legacy mode as well which effectively does a m
erge-sort across a number
* of servers, but does not correctly enforce versioning (used only in
mapreduce).
*/ */
class ParallelSortClusteredCursor : public ClusteredCursor { class MONGO_CLIENT_API ParallelSortClusteredCursor {
public: public:
ParallelSortClusteredCursor( const QuerySpec& qSpec, const CommandI nfo& cInfo = CommandInfo() ); ParallelSortClusteredCursor( const QuerySpec& qSpec, const CommandI nfo& cInfo = CommandInfo() );
ParallelSortClusteredCursor( const set<Shard>& servers, const Query Spec& qSpec );
// LEGACY Constructors // DEPRECATED legacy constructor for pure mergesort functionality -
ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , Q do not use
ueryMessage& q , const BSONObj& sortKey );
ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , c onst string& ns , ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , c onst string& ns ,
const Query& q , int options=0, const BSONObj& fields=BSONObj() ); const Query& q , int options=0, const BSONObj& fields=BSONObj() );
virtual ~ParallelSortClusteredCursor(); ~ParallelSortClusteredCursor();
virtual bool more();
virtual BSONObj next(); std::string getNS();
virtual string type() const { return "ParallelSort"; }
/** call before using */
void init();
bool more();
BSONObj next();
string type() const { return "ParallelSort"; }
void fullInit(); void fullInit();
void startInit(); void startInit();
void finishInit(); void finishInit();
bool isCommand(){ return NamespaceString( _qSpec.ns() ).isCommand() ; } bool isCommand(){ return NamespaceString( _qSpec.ns() ).isCommand() ; }
bool isExplain(){ return _qSpec.isExplain(); } bool isExplain(){ return _qSpec.isExplain(); }
bool isVersioned(){ return _qShards.size() == 0; } bool isVersioned(){ return _qShards.size() == 0; }
bool isSharded(); bool isSharded();
ShardPtr getPrimary(); ShardPtr getPrimary();
void getQueryShards( set<Shard>& shards ); void getQueryShards( set<Shard>& shards );
ChunkManagerPtr getChunkManager( const Shard& shard ); ChunkManagerPtr getChunkManager( const Shard& shard );
DBClientCursorPtr getShardCursor( const Shard& shard ); DBClientCursorPtr getShardCursor( const Shard& shard );
BSONObj toBSON() const; BSONObj toBSON() const;
string toString() const; string toString() const;
virtual void explain(BSONObjBuilder& b); void explain(BSONObjBuilder& b);
protected: private:
void _finishCons(); void _finishCons();
void _init();
void _oldInit();
virtual void _explain( map< string,list<BSONObj> >& out ); void _explain( map< string,list<BSONObj> >& out );
void _markStaleNS( const NamespaceString& staleNS, const StaleConfi gException& e, bool& forceReload, bool& fullReload ); void _markStaleNS( const NamespaceString& staleNS, const StaleConfi gException& e, bool& forceReload, bool& fullReload );
void _handleStaleNS( const NamespaceString& staleNS, bool forceRelo ad, bool fullReload ); void _handleStaleNS( const NamespaceString& staleNS, bool forceRelo ad, bool fullReload );
bool _didInit;
bool _done;
set<Shard> _qShards; set<Shard> _qShards;
QuerySpec _qSpec; QuerySpec _qSpec;
CommandInfo _cInfo; CommandInfo _cInfo;
// Count round-trips req'd for namespaces and total // Count round-trips req'd for namespaces and total
map<string,int> _staleNSMap; map<string,int> _staleNSMap;
int _totalTries; int _totalTries;
map<Shard,PCMData> _cursorMap; map<Shard,PCMData> _cursorMap;
// LEGACY BELOW // LEGACY BELOW
int _numServers; int _numServers;
int _lastFrom; int _lastFrom;
set<ServerAndQuery> _servers; set<ServerAndQuery> _servers;
BSONObj _sortKey; BSONObj _sortKey;
FilteringClientCursor * _cursors; FilteringClientCursor * _cursors;
int _needToSkip; int _needToSkip;
private:
/** /**
* Setups the shard version of the connection. When using a replica * Setups the shard version of the connection. When using a replica
* set connection and the primary cannot be reached, the version * set connection and the primary cannot be reached, the version
* will not be set if the slaveOk flag is set. * will not be set if the slaveOk flag is set.
*/ */
void setupVersionAndHandleSlaveOk( PCStatePtr state /* in & out */, void setupVersionAndHandleSlaveOk( PCStatePtr state /* in & out */,
const Shard& shard, const Shard& shard,
ShardPtr primary /* in */, ShardPtr primary /* in */,
const NamespaceString& ns, const NamespaceString& ns,
const std::string& vinfo, const std::string& vinfo,
ChunkManagerPtr manager /* in */ ); ChunkManagerPtr manager /* in */ );
// LEGACY init - Needed for map reduce
void _oldInit();
// LEGACY - Needed ONLY for _oldInit
string _ns;
BSONObj _query;
int _options;
BSONObj _fields;
int _batchSize;
};
// TODO: We probably don't really need this as a separate class.
class MONGO_CLIENT_API FilteringClientCursor {
public:
FilteringClientCursor( const BSONObj filter = BSONObj() );
FilteringClientCursor( DBClientCursor* cursor , const BSONObj filte
r = BSONObj() );
FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSON
Obj filter = BSONObj() );
~FilteringClientCursor();
void reset( auto_ptr<DBClientCursor> cursor );
void reset( DBClientCursor* cursor, ParallelConnectionMetadata* _pc
mData = NULL );
bool more();
BSONObj next();
BSONObj peek();
DBClientCursor* raw() { return _cursor.get(); }
ParallelConnectionMetadata* rawMData(){ return _pcmData; }
// Required for new PCursor
void release(){
_cursor.release();
_pcmData = NULL;
}
private:
void _advance();
Matcher _matcher;
auto_ptr<DBClientCursor> _cursor;
ParallelConnectionMetadata* _pcmData;
BSONObj _next;
bool _done;
}; };
/** /**
* Generally clients should be using Strategy::commandOp() wherever pos
sible - the Future API
* does not handle versioning.
*
* tools for doing asynchronous operations * tools for doing asynchronous operations
* right now uses underlying sync network ops and uses another thread * right now uses underlying sync network ops and uses another thread
* should be changed to use non-blocking io * should be changed to use non-blocking io
*/ */
class Future { class MONGO_CLIENT_API Future {
public: public:
class CommandResult { class CommandResult {
public: public:
string getServer() const { return _server; } string getServer() const { return _server; }
bool isDone() const { return _done; } bool isDone() const { return _done; }
bool ok() const { bool ok() const {
verify( _done ); verify( _done );
 End of changes. 21 change blocks. 
207 lines changed or deleted 94 lines changed or added


 parse_number.h   parse_number.h 
skipping to change at line 24 skipping to change at line 24
*/ */
/** /**
* Utility functions for parsing numbers from strings. * Utility functions for parsing numbers from strings.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* Parses a number out of a StringData. * Parses a number out of a StringData.
* *
* Parses "stringValue", interpreting it as a number of the given "base ". On success, stores * Parses "stringValue", interpreting it as a number of the given "base ". On success, stores
* the parsed value into "*result" and returns Status::OK(). * the parsed value into "*result" and returns Status::OK().
* *
* Valid values for "base" are 2-36, with 0 meaning "choose the base by inspecting the prefix * Valid values for "base" are 2-36, with 0 meaning "choose the base by inspecting the prefix
skipping to change at line 45 skipping to change at line 46
* "base". * "base".
* *
* The entirety of the string must consist of digits in the given base, except optionally the * The entirety of the string must consist of digits in the given base, except optionally the
* first character may be "+" or "-", and hexadecimal numbers may begin "0x". Same as strtol, * first character may be "+" or "-", and hexadecimal numbers may begin "0x". Same as strtol,
* without the property of stripping whitespace at the beginning, and f ails to parse if there * without the property of stripping whitespace at the beginning, and f ails to parse if there
* are non-digit characters at the end of the string. * are non-digit characters at the end of the string.
* *
* See parse_number.cpp for the available instantiations, and add any n ew instantiations there. * See parse_number.cpp for the available instantiations, and add any n ew instantiations there.
*/ */
template <typename NumberType> template <typename NumberType>
Status parseNumberFromStringWithBase(const StringData& stringValue, int base, NumberType* result); MONGO_CLIENT_API Status parseNumberFromStringWithBase(const StringData& stringValue, int base, NumberType* result);
template <typename NumberType> template <typename NumberType>
static Status parseNumberFromString(const StringData& stringValue, Numb erType* result) { static Status parseNumberFromString(const StringData& stringValue, Numb erType* result) {
return parseNumberFromStringWithBase(stringValue, 0, result); return parseNumberFromStringWithBase(stringValue, 0, result);
} }
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 pch.h   pch.h 
skipping to change at line 55 skipping to change at line 55
#include <limits.h> #include <limits.h>
#define BOOST_FILESYSTEM_VERSION 3 #define BOOST_FILESYSTEM_VERSION 3
#include <boost/shared_ptr.hpp> #include <boost/shared_ptr.hpp>
#include <boost/smart_ptr.hpp> #include <boost/smart_ptr.hpp>
#include <boost/bind.hpp> #include <boost/bind.hpp>
#include <boost/version.hpp> #include <boost/version.hpp>
#include "mongo/client/redef_macros.h" #include "mongo/client/redef_macros.h"
#include "mongo/client/export_macros.h"
#include "mongo/util/exit_code.h" #include "mongo/util/exit_code.h"
namespace mongo { namespace mongo {
using namespace std; using namespace std;
using boost::shared_ptr; using boost::shared_ptr;
void dbexit( ExitCode returnCode, const char *whyMsg = "" ); void dbexit( ExitCode returnCode, const char *whyMsg = "" );
/** /**
this is here so you can't just type exit() to quit the program this is here so you can't just type exit() to quit the program
you should either use dbexit to shutdown cleanly, or ::exit to tell the system to quit you should either use dbexit to shutdown cleanly, or ::exit to tell the system to quit
if you use this, you'll get a link error since mongo::exit isn't def ined if you use this, you'll get a link error since mongo::exit isn't def ined
*/ */
void exit( ExitCode returnCode ); MONGO_CLIENT_API void exit( ExitCode returnCode );
bool inShutdown(); MONGO_CLIENT_API bool inShutdown();
} }
#include "mongo/util/assert_util.h" #include "mongo/util/assert_util.h"
#include "mongo/util/debug_util.h" #include "mongo/util/debug_util.h"
#include "mongo/util/goodies.h" #include "mongo/util/goodies.h"
#include "mongo/util/allocator.h" #include "mongo/util/allocator.h"
#include "mongo/util/log.h" #include "mongo/util/log.h"
#endif // MONGO_PCH_H #endif // MONGO_PCH_H
 End of changes. 2 change blocks. 
2 lines changed or deleted 3 lines changed or added


 pdfile.h   pdfile.h 
skipping to change at line 41 skipping to change at line 41
Files: Files:
database.ns - namespace index database.ns - namespace index
database.1 - data files database.1 - data files
database.2 database.2
... ...
*/ */
#pragma once #pragma once
#include "mongo/db/client.h" #include "mongo/db/client.h"
#include "mongo/db/cursor.h" #include "mongo/db/catalog/database.h"
#include "mongo/db/database.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobjmanipulator.h" #include "mongo/db/jsobjmanipulator.h"
#include "mongo/db/memconcept.h"
#include "mongo/db/storage/data_file.h" #include "mongo/db/storage/data_file.h"
#include "mongo/db/storage/durable_mapped_file.h" #include "mongo/db/storage/durable_mapped_file.h"
#include "mongo/db/storage/extent.h" #include "mongo/db/storage/extent.h"
#include "mongo/db/namespace_details-inl.h" #include "mongo/db/structure/catalog/namespace_details-inl.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/db/pdfile_version.h" #include "mongo/db/pdfile_version.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
#include "mongo/util/log.h" #include "mongo/util/log.h"
#include "mongo/util/mmap.h" #include "mongo/util/mmap.h"
namespace mongo { namespace mongo {
class Cursor;
class DataFileHeader; class DataFileHeader;
class Extent; class Extent;
class OpDebug; class OpDebug;
class Record; class Record;
struct SortPhaseOne;
void dropDatabase(const std::string& db); void dropDatabase(const std::string& db);
bool repairDatabase(string db, string &errmsg, bool preserveClonedFiles OnFailure = false, bool backupOriginalFiles = false); bool repairDatabase(string db, string &errmsg, bool preserveClonedFiles OnFailure = false, bool backupOriginalFiles = false);
bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForRe plication, bool *deferIdIndex = 0); bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForRe plication, bool *deferIdIndex = 0);
shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order,
const DiskLoc &startLoc=DiskLoc());
bool isValidNS( const StringData& ns );
/*--------------------------------------------------------------------- */ /*--------------------------------------------------------------------- */
class DataFileMgr {
public:
DataFileMgr();
// The object o may be updated if modified on insert.
void insertAndLog( const char *ns, const BSONObj &o, bool god = fal
se, bool fromMigrate = false );
/**
* insert() will add an _id to the object if not present. If you w
ould like to see the
* final object after such an addition, use this method.
* note: does NOT put on oplog
* @param o both and in and out param
* @param mayInterrupt When true, killop may interrupt the function
call.
*/
DiskLoc insertWithObjMod(const char* ns,
BSONObj& /*out*/o,
bool mayInterrupt = false,
bool god = false);
/**
* Insert the contents of @param buf with length @param len into na
mespace @param ns.
* note: does NOT put on oplog
* @param mayInterrupt When true, killop may interrupt the function
call.
* @param god if true, you may pass in obuf of NULL and then popula
te the returned DiskLoc
* after the call -- that will prevent a double buffer copy in
some cases (btree.cpp).
* @param mayAddIndex almost always true, except for invocation fro
m rename namespace
* command.
* @param addedID if not null, set to true if adding _id element.
You must assure false
* before calling if using.
*/
DiskLoc insert(const char* ns,
const void* buf,
int32_t len,
bool mayInterrupt = false,
bool god = false,
bool mayAddIndex = true,
bool* addedID = 0);
static shared_ptr<Cursor> findAll(const StringData& ns, const DiskL
oc &startLoc = DiskLoc());
/* special version of insert for transaction logging -- streamlined
a bit.
assumes ns is capped and no indexes
no _id field check
*/
Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int
len);
static Extent* getExtent(const DiskLoc& dl);
static Record* getRecord(const DiskLoc& dl);
static DeletedRecord* getDeletedRecord(const DiskLoc& dl);
void deleteRecord(NamespaceDetails* d, const StringData& ns, Record
*todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false,
bool logOp=false);
/* does not clean up indexes, etc. : just deletes the record in the
pdfile. use deleteRecord() to unindex */
void _deleteRecord(NamespaceDetails *d, const StringData& ns, Recor
d *todelete, const DiskLoc& dl);
};
extern DataFileMgr theDataFileMgr;
#pragma pack(1)
class DeletedRecord {
public:
int lengthWithHeaders() const { _accessing(); return _lengthWithHea
ders; }
int& lengthWithHeaders() { _accessing(); return _lengthWithHeaders;
}
int extentOfs() const { _accessing(); return _extentOfs; }
int& extentOfs() { _accessing(); return _extentOfs; }
// TODO: we need to not const_cast here but problem is DiskLoc::wri
ting
DiskLoc& nextDeleted() const { _accessing(); return const_cast<Disk
Loc&>(_nextDeleted); }
DiskLoc myExtentLoc(const DiskLoc& myLoc) const {
_accessing();
return DiskLoc(myLoc.a(), _extentOfs);
}
Extent* myExtent(const DiskLoc& myLoc) {
_accessing();
return DataFileMgr::getExtent(DiskLoc(myLoc.a(), _extentOfs));
}
private:
void _accessing() const;
int _lengthWithHeaders;
int _extentOfs;
DiskLoc _nextDeleted;
};
/* Record is a record in a datafile. DeletedRecord is similar but for
deleted space.
*11:03:20 AM) dm10gen: regarding extentOfs...
(11:03:42 AM) dm10gen: an extent is a continugous disk area, which cont
ains many Records and DeleteRecords
(11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (
64 bit total)
(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a
64 bit ptr to the full extent address, we keep just the offset
(11:04:29 AM) dm10gen: we can do this as we know the record's address,
and it has the same fileNo
(11:04:33 AM) dm10gen: see class DiskLoc for more info
(11:04:43 AM) dm10gen: so that is how Record::myExtent() works
(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we mu
st populate its extentOfs then
*/
class Record {
public:
enum HeaderSizeValue { HeaderSize = 16 };
int lengthWithHeaders() const { _accessing(); return _lengthWithHe
aders; }
int& lengthWithHeaders() { _accessing(); return _lengthWithHeaders
; }
int extentOfs() const { _accessing(); return _extentOfs; }
int& extentOfs() { _accessing(); return _extentOfs; }
int nextOfs() const { _accessing(); return _nextOfs; }
int& nextOfs() { _accessing(); return _nextOfs; }
int prevOfs() const { _accessing(); return _prevOfs; }
int& prevOfs() { _accessing(); return _prevOfs; }
const char * data() const { _accessing(); return _data; }
char * data() { _accessing(); return _data; }
const char * dataNoThrowing() const { return _data; }
char * dataNoThrowing() { return _data; }
int netLength() const { _accessing(); return _netLength(); }
/* use this when a record is deleted. basically a union with next/p
rev fields */
DeletedRecord& asDeleted() { return *((DeletedRecord*) this); }
Extent* myExtent(const DiskLoc& myLoc) { return DataFileMgr::getExt
ent(DiskLoc(myLoc.a(), extentOfs() ) ); }
/* get the next record in the namespace, traversing extents as nece
ssary */
DiskLoc getNext(const DiskLoc& myLoc);
DiskLoc getPrev(const DiskLoc& myLoc);
struct NP {
int nextOfs;
int prevOfs;
};
NP* np() { return (NP*) &_nextOfs; }
// ---------------------
// memory cache
// ---------------------
/**
* touches the data so that is in physical memory
* @param entireRecrd if false, only the header and first byte is t
ouched
* if true, the entire record is touched
* */
void touch( bool entireRecrd = false ) const;
/**
* @return if this record is likely in physical memory
* its not guaranteed because its possible it gets swapped
out in a very unlucky windows
*/
bool likelyInPhysicalMemory() const ;
/**
* tell the cache this Record was accessed
* @return this, for simple chaining
*/
Record* accessed();
static bool likelyInPhysicalMemory( const char* data );
/**
* this adds stats about page fault exceptions currently
* specically how many times we call _accessing where the record is
not in memory
* and how many times we throw a PageFaultException
*/
static void appendStats( BSONObjBuilder& b );
static void appendWorkingSetInfo( BSONObjBuilder& b );
private:
int _netLength() const { return _lengthWithHeaders - HeaderSize; }
/**
* call this when accessing a field which could hit disk
*/
void _accessing() const;
int _lengthWithHeaders;
int _extentOfs;
int _nextOfs;
int _prevOfs;
/** be careful when referencing this that your write intent was cor
rect */
char _data[4];
public:
static bool MemoryTrackingEnabled;
};
#pragma pack()
// XXX-ERH
inline DiskLoc Record::getNext(const DiskLoc& myLoc) {
_accessing();
if ( _nextOfs != DiskLoc::NullOfs ) {
/* defensive */
if ( _nextOfs >= 0 && _nextOfs < 10 ) {
logContext("Assertion failure - Record::getNext() referenci
ng a deleted record?");
return DiskLoc();
}
return DiskLoc(myLoc.a(), _nextOfs);
}
Extent *e = myExtent(myLoc);
while ( 1 ) {
if ( e->xnext.isNull() )
return DiskLoc(); // end of table.
e = e->xnext.ext();
if ( !e->firstRecord.isNull() )
break;
// entire extent could be empty, keep looking
}
return e->firstRecord;
}
inline DiskLoc Record::getPrev(const DiskLoc& myLoc) {
_accessing();
// Check if we still have records on our current extent
if ( _prevOfs != DiskLoc::NullOfs ) {
return DiskLoc(myLoc.a(), _prevOfs);
}
// Get the current extent
Extent *e = myExtent(myLoc);
while ( 1 ) {
if ( e->xprev.isNull() ) {
// There are no more extents before this one
return DiskLoc();
}
// Move to the extent before this one
e = e->xprev.ext();
if ( !e->lastRecord.isNull() ) {
// We have found a non empty extent
break;
}
}
// Return the last record in our new extent
return e->lastRecord;
}
inline BSONObj DiskLoc::obj() const {
return BSONObj::make(rec()->accessed());
}
inline DeletedRecord* DiskLoc::drec() const {
verify( _a != -1 );
DeletedRecord* dr = (DeletedRecord*) rec();
memconcept::is(dr, memconcept::concept::deletedrecord);
return dr;
}
inline Extent* DiskLoc::ext() const {
return DataFileMgr::getExtent(*this);
}
template< class V >
inline
const BtreeBucket<V> * DiskLoc::btree() const {
verify( _a != -1 );
Record *r = rec();
memconcept::is(r, memconcept::concept::btreebucket, "", 8192);
return (const BtreeBucket<V> *) r->data();
}
boost::intmax_t dbSize( const char *database ); boost::intmax_t dbSize( const char *database );
inline NamespaceIndex* nsindex(const StringData& ns) { inline NamespaceIndex* nsindex(const StringData& ns) {
Database *database = cc().database(); Database *database = cc().database();
verify( database ); verify( database );
memconcept::is(database, memconcept::concept::database, ns, sizeof( Database));
DEV { DEV {
StringData dbname = nsToDatabaseSubstring( ns ); StringData dbname = nsToDatabaseSubstring( ns );
if ( database->name() != dbname ) { if ( database->name() != dbname ) {
out() << "ERROR: attempt to write to wrong database\n"; out() << "ERROR: attempt to write to wrong database\n";
out() << " ns:" << ns << '\n'; out() << " ns:" << ns << '\n';
out() << " database->name:" << database->name() << endl; out() << " database->name:" << database->name() << endl;
verify( database->name() == dbname ); verify( database->name() == dbname );
} }
} }
return &database->namespaceIndex(); return &database->namespaceIndex();
} }
inline NamespaceDetails* nsdetails(const StringData& ns) { inline NamespaceDetails* nsdetails(const StringData& ns) {
// if this faults, did you set the current db first? (Client::Cont ext + dblock) // if this faults, did you set the current db first? (Client::Cont ext + dblock)
NamespaceDetails *d = nsindex(ns)->details(ns); return nsindex(ns)->details(ns);
if( d ) {
memconcept::is(d, memconcept::concept::nsdetails, ns, sizeof(Na
mespaceDetails));
}
return d;
}
inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
verify( dl.a() != -1 );
return cc().database()->getExtentManager().getExtent(dl);
}
inline Record* DataFileMgr::getRecord(const DiskLoc& dl) {
verify(dl.a() != -1);
return cc().database()->getExtentManager().recordFor( dl );
} }
BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) ); BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
inline DeletedRecord* DataFileMgr::getDeletedRecord(const DiskLoc& dl)
{
return reinterpret_cast<DeletedRecord*>(getRecord(dl));
}
inline BSONObj BSONObj::make(const Record* r ) { inline BSONObj BSONObj::make(const Record* r ) {
return BSONObj( r->data() ); return BSONObj( r->data() );
} }
DiskLoc allocateSpaceForANewRecord(const char* ns,
NamespaceDetails* d,
int32_t lenWHdr,
bool god);
void addRecordToRecListInExtent(Record* r, DiskLoc loc);
} // namespace mongo } // namespace mongo
 End of changes. 11 change blocks. 
345 lines changed or deleted 3 lines changed or added


 pipeline.h   pipeline.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so , * file(s), but you are not obligated to do so. If you do not wish to do so ,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also dele te * exception statement from all source files in the program, then also dele te
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <deque> #include <deque>
#include "mongo/pch.h"
#include "mongo/db/pipeline/value.h" #include "mongo/db/pipeline/value.h"
#include "mongo/util/intrusive_counter.h" #include "mongo/util/intrusive_counter.h"
#include "mongo/util/timer.h" #include "mongo/util/timer.h"
namespace mongo { namespace mongo {
class BSONObj; class BSONObj;
class BSONObjBuilder; class BSONObjBuilder;
class BSONArrayBuilder;
class Command; class Command;
struct DepsTracker;
class DocumentSource; class DocumentSource;
class DocumentSourceProject;
class Expression;
struct ExpressionContext; struct ExpressionContext;
class ExpressionNary;
struct OpDesc; // local private struct
class Privilege; class Privilege;
/** mongodb "commands" (sent via db.$cmd.findOne(...)) /** mongodb "commands" (sent via db.$cmd.findOne(...))
subclass to make a command. define a singleton object for it. subclass to make a command. define a singleton object for it.
*/ */
class Pipeline : class Pipeline :
public IntrusiveCounterUnsigned { public IntrusiveCounterUnsigned {
public: public:
/** /**
* Create a pipeline from the command. * Create a pipeline from the command.
skipping to change at line 125 skipping to change at line 119
@param result builder to write the result to @param result builder to write the result to
*/ */
void run(BSONObjBuilder& result); void run(BSONObjBuilder& result);
bool isExplain() const { return explain; } bool isExplain() const { return explain; }
/// The initial source is special since it varies between mongos an d mongod. /// The initial source is special since it varies between mongos an d mongod.
void addInitialSource(intrusive_ptr<DocumentSource> source); void addInitialSource(intrusive_ptr<DocumentSource> source);
/// The source that represents the output. Returns a non-owning poi nter. /// The source that represents the output. Returns a non-owning poi nter.
DocumentSource* output() { return sources.back().get(); } DocumentSource* output() { invariant( !sources.empty() ); return so urces.back().get(); }
/// Returns true if this pipeline only uses features that work in m ongos. /// Returns true if this pipeline only uses features that work in m ongos.
bool canRunInMongos() const; bool canRunInMongos() const;
/** /**
* Write the pipeline's operators to a vector<Value>, with the * Write the pipeline's operators to a vector<Value>, with the
* explain flag true (for DocumentSource::serializeToArray()). * explain flag true (for DocumentSource::serializeToArray()).
*/ */
vector<Value> writeExplainOps() const; vector<Value> writeExplainOps() const;
/** /**
* Returns the dependencies needed by this pipeline.
*
* initialQuery is used as a fallback for metadata dependency detec
tion. The assumption is
* that any metadata produced by the query is needed unless we can
prove it isn't.
*/
DepsTracker getDependencies(const BSONObj& initialQuery) const;
/**
The aggregation command name. The aggregation command name.
*/ */
static const char commandName[]; static const char commandName[];
/* /*
PipelineD is a "sister" class that has additional functionality PipelineD is a "sister" class that has additional functionality
for the Pipeline. It exists because of linkage requirements. for the Pipeline. It exists because of linkage requirements.
Pipeline needs to function in mongod and mongos. PipelineD Pipeline needs to function in mongod and mongos. PipelineD
contains extra functionality required in mongod, and which can't contains extra functionality required in mongod, and which can't
appear in mongos because the required symbols are unavailable appear in mongos because the required symbols are unavailable
 End of changes. 7 change blocks. 
8 lines changed or deleted 12 lines changed or added


 pipeline_optimizations.h   pipeline_optimizations.h 
skipping to change at line 49 skipping to change at line 49
/** /**
* This class holds optimizations applied to a single Pipeline. * This class holds optimizations applied to a single Pipeline.
* *
* Each function has the same signature and takes a Pipeline as an in/o ut parameter. * Each function has the same signature and takes a Pipeline as an in/o ut parameter.
*/ */
class Pipeline::Optimizations::Local { class Pipeline::Optimizations::Local {
public: public:
/** /**
* Moves matches before any adjacent sort phases. * Moves matches before any adjacent sort phases.
* *
* This means we sort fewer items. Neither changes the documents i * This means we sort fewer items. Neither sorts, nor matches (exc
n luding $text)
* the stream, so this transformation shouldn't affect the result. * change the documents in the stream, so this transformation shoul
dn't affect
* the result.
*/ */
static void moveMatchBeforeSort(Pipeline* pipeline); static void moveMatchBeforeSort(Pipeline* pipeline);
/** /**
* Moves limits before any adjacent skip phases. * Moves limits before any adjacent skip phases.
* *
* This is more optimal for sharding since currently, we can only s plit * This is more optimal for sharding since currently, we can only s plit
* the pipeline at a single source and it is better to limit the re sults * the pipeline at a single source and it is better to limit the re sults
* coming from each shard. This also enables other optimizations li ke * coming from each shard. This also enables other optimizations li ke
* coalescing the limit into a sort. * coalescing the limit into a sort.
skipping to change at line 107 skipping to change at line 108
public: public:
/** /**
* Moves everything before a splittable stage to the shards. If the re * Moves everything before a splittable stage to the shards. If the re
* are no splittable stages, moves everything to the shards. * are no splittable stages, moves everything to the shards.
* *
* It is not safe to call this optimization multiple times. * It is not safe to call this optimization multiple times.
* *
* NOTE: looks for SplittableDocumentSources and uses that API * NOTE: looks for SplittableDocumentSources and uses that API
*/ */
static void findSplitPoint(Pipeline* shardPipe, Pipeline* mergePipe ); static void findSplitPoint(Pipeline* shardPipe, Pipeline* mergePipe );
/**
* If the final stage on shards is to unwind an array, move that st
age to the merger. This
* cuts down on network traffic and allows us to take advantage of
reduced copying in
* unwind.
*/
static void moveFinalUnwindFromShardsToMerger(Pipeline* shardPipe,
Pipeline* mergePipe);
/**
* Adds a stage to the end of shardPipe explicitly requesting all f
ields that mergePipe
* needs. This is only done if it heuristically determines that it
is needed. This
* optimization can reduce the amount of network traffic and can al
so enable the shards to
* convert less source BSON into Documents.
*/
static void limitFieldsSentFromShardsToMerger(Pipeline* shardPipe,
Pipeline* mergePipe);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
3 lines changed or deleted 27 lines changed or added


 plan_cache.h   plan_cache.h 
/** /**
* Copyright (C) 2013 10gen Inc. * Copyright (C) 2014 MongoDB Inc.
* *
* This program is free software: you can redistribute it and/or modify * This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <set>
#include <boost/optional/optional.hpp>
#include <boost/thread/mutex.hpp>
#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/query_solution.h" #include "mongo/db/query/index_tag.h"
#include "mongo/db/query/lru_key_value.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/platform/atomic_word.h"
namespace mongo { namespace mongo {
struct PlanRankingDecision;
struct QuerySolution;
struct QuerySolutionNode;
/** /**
* TODO: Debug commands: * TODO HK notes
* 1. show canonical form of query
* 2. show plans generated for query without (and with) cache * cache should be LRU with some cap on size
* 3. print out cache.
* 4. clear all elements from cache / otherwise manipulate cache. * {x:1} and {x:{$gt:7}} not same shape for now -- operator matters
*/ */
/** /**
* When the CachedPlanRunner runs a cached query, it can provide feedba ck to the cache. This * When the CachedPlanRunner runs a cached query, it can provide feedba ck to the cache. This
* feedback is available to anyone who retrieves that query in the futu re. * feedback is available to anyone who retrieves that query in the futu re.
*/ */
struct CachedSolutionFeedback { struct PlanCacheEntryFeedback {
PlanStageStats* stats; // How well did the cached plan perform?
boost::scoped_ptr<PlanStageStats> stats;
// The "goodness" score produced by the plan ranker
// corresponding to 'stats'.
double score;
}; };
// TODO: Replace with opaque type.
typedef std::string PlanID;
/** /**
* A cached solution to a query. * A PlanCacheIndexTree is the meaty component of the data
* stored in SolutionCacheData. It is a tree structure with
* index tags that indicates to the access planner which indices
* it should try to use.
*
* How a PlanCacheIndexTree is created:
* The query planner tags a match expression with indices. It
* then uses the tagged tree to create a PlanCacheIndexTree,
* using QueryPlanner::cacheDataFromTaggedTree. The PlanCacheIndexTre
e
* is isomorphic to the tagged match expression, and has matching
* index tags.
*
* How a PlanCacheIndexTree is used:
* When the query planner is planning from the cache, it uses
* the PlanCacheIndexTree retrieved from the cache in order to
* recreate index assignments. Specifically, a raw MatchExpression
* is tagged according to the index tags in the PlanCacheIndexTree.
* This is done by QueryPlanner::tagAccordingToCache.
*/ */
struct CachedSolution { struct PlanCacheIndexTree {
~CachedSolution() { PlanCacheIndexTree() : entry(NULL), index_pos(0) { }
for (size_t i = 0; i < feedback.size(); ++i) {
delete feedback[i]; ~PlanCacheIndexTree() {
for (vector<PlanCacheIndexTree*>::const_iterator it = children.
begin();
it != children.end(); ++it) {
delete *it;
} }
} }
// The best solution for the CanonicalQuery. /**
scoped_ptr<QuerySolution> solution; * Clone 'ie' and set 'this->entry' to be the clone.
*/
void setIndexEntry(const IndexEntry& ie);
/**
* Make a deep copy.
*/
PlanCacheIndexTree* clone() const;
// Why the best solution was picked. /**
scoped_ptr<PlanRankingDecision> decision; * For debugging.
*/
std::string toString(int indents = 0) const;
// Annotations from cached runs. // Children owned here.
// TODO: How many of these do we really want to keep? std::vector<PlanCacheIndexTree*> children;
vector<CachedSolutionFeedback*> feedback;
// Owned here.
boost::scoped_ptr<IndexEntry> entry;
size_t index_pos;
};
/**
* Data stored inside a QuerySolution which can subsequently be
* used to create a cache entry. When this data is retrieved
* from the cache, it is sufficient to reconstruct the original
* QuerySolution.
*/
struct SolutionCacheData {
SolutionCacheData() :
tree(NULL),
solnType(USE_INDEX_TAGS_SOLN),
wholeIXSolnDir(1),
indexFilterApplied(false) {
}
// Make a deep copy.
SolutionCacheData* clone() const;
// For debugging.
std::string toString() const;
// Owned here. If 'wholeIXSoln' is false, then 'tree'
// can be used to tag an isomorphic match expression. If 'wholeIXSo
ln'
// is true, then 'tree' is used to store the relevant IndexEntry.
// If 'collscanSoln' is true, then 'tree' should be NULL.
scoped_ptr<PlanCacheIndexTree> tree;
enum SolutionType {
// Indicates that the plan should use
// the index as a proxy for a collection
// scan (e.g. using index to provide sort).
WHOLE_IXSCAN_SOLN,
// The cached plan is a collection scan.
COLLSCAN_SOLN,
// Build the solution by using 'tree'
// to tag the match expression.
USE_INDEX_TAGS_SOLN
} solnType;
// The direction of the index scan used as
// a proxy for a collection scan. Used only
// for WHOLE_IXSCAN_SOLN.
int wholeIXSolnDir;
// True if index filter was applied.
bool indexFilterApplied;
};
class PlanCacheEntry;
/**
* Information returned from a get(...) query.
*/
class CachedSolution {
private: private:
MONGO_DISALLOW_COPYING(CachedSolution); MONGO_DISALLOW_COPYING(CachedSolution);
public:
CachedSolution(const PlanCacheKey& key, const PlanCacheEntry& entry
);
~CachedSolution();
// Owned here.
std::vector<SolutionCacheData*> plannerData;
// An index into plannerData indicating the SolutionCacheData which
should be
// used to produce a backup solution in the case of a blocking sort
.
boost::optional<size_t> backupSoln;
// Key used to provide feedback on the entry.
PlanCacheKey key;
// For debugging.
std::string toString() const;
// We are extracting just enough information from the canonical
// query. We could clone the canonical query but the following
// items are all that is displayed to the user.
BSONObj query;
BSONObj sort;
BSONObj projection;
};
/**
* Used by the cache to track entries and their performance over time.
* Also used by the plan cache commands to display plan cache state.
*/
class PlanCacheEntry {
private:
MONGO_DISALLOW_COPYING(PlanCacheEntry);
public:
/**
* Create a new PlanCacheEntry.
* Grabs any planner-specific data required from the solutions.
* Takes ownership of the PlanRankingDecision that placed the plan
in the cache.
*/
PlanCacheEntry(const std::vector<QuerySolution*>& solutions,
PlanRankingDecision* why);
~PlanCacheEntry();
/**
* Make a deep copy.
*/
PlanCacheEntry* clone() const;
// For debugging.
std::string toString() const;
//
// Planner data
//
// Data provided to the planner to allow it to recreate the solutio
ns this entry
// represents. Each SolutionCacheData is fully owned here, so in or
der to return
// it from the cache a deep copy is made and returned inside Cached
Solution.
std::vector<SolutionCacheData*> plannerData;
// An index into plannerData indicating the SolutionCacheData which
should be
// used to produce a backup solution in the case of a blocking sort
.
boost::optional<size_t> backupSoln;
// XXX: Replace with copy of canonical query?
// Used by the plan cache commands to display an example query
// of the appropriate shape.
BSONObj query;
BSONObj sort;
BSONObj projection;
//
// Performance stats
//
// Information that went into picking the winning plan and also why
// the other plans lost.
boost::scoped_ptr<PlanRankingDecision> decision;
// Annotations from cached runs. The CachedSolutionRunner provides
these stats about its
// runs when they complete.
std::vector<PlanCacheEntryFeedback*> feedback;
// The average score of all stored feedback.
boost::optional<double> averageScore;
// The standard deviation of the scores from stored as feedback.
boost::optional<double> stddevScore;
// Determines the amount of feedback that we are willing to store.
Must be >= 1.
// TODO: how do we tune this?
static const size_t kMaxFeedback;
// The number of standard deviations which must be exceeded
// in order to determine that the cache entry should be removed.
// Must be positive. TODO how do we tune this?
static const double kStdDevThreshold;
}; };
/** /**
* Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution) * Caches the best solution to a query. Aside from the (CanonicalQuery -> QuerySolution)
* mapping, the cache contains information on why that mapping was made , and statistics on the * mapping, the cache contains information on why that mapping was made and statistics on the
* cache entry's actual performance on subsequent runs. * cache entry's actual performance on subsequent runs.
*
*/ */
class PlanCache { class PlanCache {
private:
MONGO_DISALLOW_COPYING(PlanCache);
public: public:
/** /**
* Get the (global) cache for the provided namespace. Must not be * Flush cache when the number of write operations since last
held across yields. * clear() reaches this limit.
* As such, there is no locking required. */
static const int kPlanCacheMaxWriteOperations;
/**
* The maximum number of plan cache entries allowed.
*/ */
static PlanCache* get(const string& ns) { return NULL; } static const int kMaxCacheSize;
/** /**
* Record 'solution' as the best plan for 'query' which was picked * We don't want to cache every possible query. This function
for reasons detailed in * encapsulates the criteria for what makes a canonical query
* 'why'. * suitable for lookup/inclusion in the cache.
*/
static bool shouldCacheQuery(const CanonicalQuery& query);
/**
* If omitted, namespace set to empty string.
*/
PlanCache();
PlanCache(const std::string& ns);
~PlanCache();
/**
* Record solutions for query. Best plan is first element in list.
* Each query in the cache will have more than 1 plan because we on
ly
* add queries which are considered by the multi plan runner (which
happens
* only when the query planner generates multiple candidate plans).
* *
* Takes ownership of all arguments. * Takes ownership of 'why'.
* *
* If the mapping was added successfully, returns true. * If the mapping was added successfully, returns Status::OK().
* If the mapping already existed or some other error occurred, ret * If the mapping already existed or some other error occurred, ret
urns false; urns another Status.
*/ */
bool add(CanonicalQuery* query, QuerySolution* solution, PlanRankin Status add(const CanonicalQuery& query,
gDecision* why) { const std::vector<QuerySolution*>& solns,
return false; PlanRankingDecision* why);
}
/** /**
* Look up the cached solution for the provided query. If a cached * Look up the cached data access for the provided 'query'. Used b
solution exists, return y the query planner
* a copy of it which the caller then owns. If no cached solution * to shortcut planning.
exists, returns NULL. *
* If there is no entry in the cache for the 'query', returns an er
ror Status.
* *
* TODO: Allow querying for exact query and querying for the shape * If there is an entry in the cache, populates 'crOut' and returns
of the query. Status::OK(). Caller
* owns '*crOut'.
*/ */
CachedSolution* get(const CanonicalQuery& query) { Status get(const CanonicalQuery& query, CachedSolution** crOut) con
return NULL; st;
}
/** /**
* When the CachedPlanRunner runs a plan out of the cache, we want to record data about the * When the CachedPlanRunner runs a plan out of the cache, we want to record data about the
* plan's performance. Cache takes ownership of 'feedback'. * plan's performance. The CachedPlanRunner calls feedback(...) at
the end of query
* execution in order to do this.
* *
* If the (query, solution) pair isn't in the cache, the cache dele * Cache takes ownership of 'feedback'.
tes feedback and returns *
* false. Otherwise, returns true. * If the entry corresponding to 'cq' isn't in the cache anymore, t
he feedback is ignored
* and an error Status is returned.
*
* If the entry corresponding to 'cq' still exists, 'feedback' is a
dded to the run
* statistics about the plan. Status::OK() is returned.
*
* May cause the cache entry to be removed if it is determined that
the cached plan
* is badly performing.
*/ */
bool feedback(const CanonicalQuery& query, const QuerySolution& sol Status feedback(const CanonicalQuery& cq, PlanCacheEntryFeedback* f
ution, eedback);
const CachedSolutionFeedback* feedback) {
return false;
}
/** /**
* Remove the (query, solution) pair from our cache. Returns true * Remove the entry corresponding to 'ck' from the cache. Returns
if the plan was removed, Status::OK() if the plan
* false if it wasn't found. * was present and removed and an error status otherwise.
*/ */
bool remove(const CanonicalQuery& query, const QuerySolution& solut Status remove(const CanonicalQuery& canonicalQuery);
ion) {
return false; /**
} * Remove *all* entries.
*/
void clear();
/**
* Returns a copy of a cache entry.
* Used by planCacheListPlans to display plan details.
*
* If there is no entry in the cache for the 'query', returns an er
ror Status.
*
* If there is an entry in the cache, populates 'entryOut' and retu
rns Status::OK(). Caller
* owns '*entryOut'.
*/
Status getEntry(const CanonicalQuery& cq, PlanCacheEntry** entryOut
) const;
/**
* Returns a vector of all cache entries.
* Caller owns the result vector and is responsible for cleaning up
* the cache entry copies.
* Used by planCacheListQueryShapes and index_filter_commands_test.
cpp.
*/
std::vector<PlanCacheEntry*> getAllEntries() const;
/**
* Returns number of entries in cache.
* Used for testing.
*/
size_t size() const;
/**
* You must notify the cache if you are doing writes, as query pla
n utility will change.
* Cache is flushed after every 1000 notifications.
*/
void notifyOfWriteOp();
private:
/**
* Releases resources associated with each cache entry
* and clears map.
* Invoked by clear() and during destruction.
*/
void _clear();
LRUKeyValue<PlanCacheKey, PlanCacheEntry> _cache;
/**
* Protects _cache.
*/
mutable boost::mutex _cacheMutex;
/**
* Counter for write notifications since initialization or last cle
ar() invocation.
* Starts at 0.
*/
AtomicInt32 _writeOperations;
/**
* Full namespace of collection.
*/
std::string _ns;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 30 change blocks. 
63 lines changed or deleted 386 lines changed or added


 plan_enumerator.h   plan_enumerator.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h" #include "mongo/db/query/index_entry.h"
#include "mongo/db/query/index_tag.h" #include "mongo/db/query/index_tag.h"
namespace mongo { namespace mongo {
struct PlanEnumeratorParams {
PlanEnumeratorParams() : intersect(false) { }
bool intersect;
// Not owned here.
MatchExpression* root;
// Not owned here.
const vector<IndexEntry>* indices;
};
/** /**
* Provides elements from the power set of possible indices to use. Us es the available * Provides elements from the power set of possible indices to use. Us es the available
* predicate information to make better decisions about what indices ar e best. * predicate information to make better decisions about what indices ar e best.
*/ */
class PlanEnumerator { class PlanEnumerator {
MONGO_DISALLOW_COPYING(PlanEnumerator); MONGO_DISALLOW_COPYING(PlanEnumerator);
public: public:
/** /**
* Constructs an enumerator for the query specified in 'root' which is tagged with * Constructs an enumerator for the query specified in 'root' which is tagged with
* RelevantTag(s). The index patterns mentioned in the tags are de scribed by 'indices'. * RelevantTag(s). The index patterns mentioned in the tags are de scribed by 'indices'.
* *
* Does not take ownership of any arguments. They must outlive any calls to getNext(...). * Does not take ownership of any arguments. They must outlive any calls to getNext(...).
*/ */
PlanEnumerator(MatchExpression* root, const vector<IndexEntry>* ind PlanEnumerator(const PlanEnumeratorParams& params);
ices);
~PlanEnumerator(); ~PlanEnumerator();
/** /**
* Returns OK and performs a sanity check on the input parameters a nd prepares the * Returns OK and performs a sanity check on the input parameters a nd prepares the
* internal state so that getNext() can be called. Returns an error status with a * internal state so that getNext() can be called. Returns an error status with a
* description if the sanity check failed. * description if the sanity check failed.
*/ */
Status init(); Status init();
/** /**
skipping to change at line 89 skipping to change at line 114
// The position of a field in a possibly compound index. // The position of a field in a possibly compound index.
typedef size_t IndexPosition; typedef size_t IndexPosition;
/** /**
* Traverses the match expression and generates the memo structure from it. * Traverses the match expression and generates the memo structure from it.
* Returns true if the provided node uses an index, false otherwise . * Returns true if the provided node uses an index, false otherwise .
*/ */
bool prepMemo(MatchExpression* node); bool prepMemo(MatchExpression* node);
/** /**
* Returns true if index #idx is compound, false otherwise.
*/
bool isCompound(IndexID idx);
/**
* When we assign indices to nodes, we only assign indices to predi
cates that are 'first'
* indices (the predicate is over the first field in the index).
*
* If an assigned index is compound, checkCompound looks for predic
ates that are over fields
* in the compound index.
*/
void checkCompound(string prefix, MatchExpression* node);
/**
* Traverses the memo structure and annotates the tree with IndexTa gs for the chosen * Traverses the memo structure and annotates the tree with IndexTa gs for the chosen
* indices. * indices.
*/ */
void tagMemo(MemoID id); void tagMemo(MemoID id);
/** /**
* Move to the next enumeration state. Each assignment stores its own enumeration state. * Move to the next enumeration state. Each assignment stores its own enumeration state.
* See the various ____Assignment classes below for details on enum eration state. * See the various ____Assignment classes below for details on enum eration state.
* *
* Returns true if the memo subtree with root 'node' has no further enumeration states. In * Returns true if the memo subtree with root 'node' has no further enumeration states. In
skipping to change at line 173 skipping to change at line 184
}; };
// This is used by AndAssignment and is not an actual assignment. // This is used by AndAssignment and is not an actual assignment.
struct OneIndexAssignment { struct OneIndexAssignment {
// 'preds[i]' is uses index 'index' at position 'positions[i]' // 'preds[i]' is uses index 'index' at position 'positions[i]'
vector<MatchExpression*> preds; vector<MatchExpression*> preds;
vector<IndexPosition> positions; vector<IndexPosition> positions;
IndexID index; IndexID index;
}; };
struct AndEnumerableState {
vector<OneIndexAssignment> assignments;
vector<MemoID> subnodesToIndex;
};
struct AndAssignment { struct AndAssignment {
// Enumeration state AndAssignment() : counter(0) { }
enum EnumerationState {
// First this
MANDATORY,
// Then this
PRED_CHOICES,
// Then this
SUBNODES,
// Then we have a carry and back to MANDATORY.
};
AndAssignment() : state(MANDATORY), counter(0) { }
// These index assignments must exist in every choice we make (
GEO_NEAR and TEXT).
vector<OneIndexAssignment> mandatory;
// TODO: We really want to consider the power set of the union
of predChoices, subnodes.
vector<OneIndexAssignment> predChoices;
vector<MemoID> subnodes;
// In the simplest case, an AndAssignment picks indices like a vector<AndEnumerableState> choices;
PredicateAssignment. To
// be indexed we must only pick one index, which is currently w
hat is done.
//
// Complications:
//
// Some of our child predicates cannot be answered without an i
ndex. As such, the
// indices that those predicates require must always be outputt
ed. We store these
// mandatory index assignments in 'mandatory'.
//
// Some of our children may not be predicates. We may have ORs
(or array operators) as
// children. If one of these subtrees provides an index, the A
ND is indexed. We store
// these subtree choices in 'subnodes'.
//
// With the above two cases out of the way, we can focus on the
remaining case: what to
// do with our children that are leaf predicates.
//
// Guiding principles for index assignment to leaf predicates:
//
// 1. If we assign an index to {x:{$gt: 5}} we should assign th
e same index to
// {x:{$lt: 50}}. That is, an index assignment should inclu
de all predicates
// over its leading field.
//
// 2. If we have the index {a:1, b:1} and we assign it to {a: 5
} we should assign it
// to {b:7}, since with a predicate over the first field of
the compound index,
// the second field can be bounded as well. We may only ass
ign indices to predicates
// if all fields to the left of the index field are constrai
ned.
// Enumeration of an AND:
//
// If there are any mandatory indices, we assign them one at a
time. After we have
// assigned all of them, we stop assigning indices.
//
// Otherwise: We assign each index in predChoice. When those a
re exhausted, we have
// each subtree enumerate its choices one at a time. When the
last subtree has
// enumerated its last choices, we are done.
//
void resetEnumeration() {
if (mandatory.size() > 0) {
state = AndAssignment::MANDATORY;
}
else if (predChoices.size() > 0) {
state = AndAssignment::PRED_CHOICES;
}
else {
verify(subnodes.size() > 0);
state = AndAssignment::SUBNODES;
}
counter = 0;
}
EnumerationState state;
// We're on the counter-th member of state. // We're on the counter-th member of state.
size_t counter; size_t counter;
}; };
struct ArrayAssignment {
ArrayAssignment() : counter(0) { }
vector<MemoID> subnodes;
size_t counter;
};
/** /**
* Associates indices with predicates. * Associates indices with predicates.
*/ */
struct NodeAssignment { struct NodeAssignment {
scoped_ptr<PredicateAssignment> pred; scoped_ptr<PredicateAssignment> pred;
scoped_ptr<OrAssignment> orAssignment; scoped_ptr<OrAssignment> orAssignment;
scoped_ptr<AndAssignment> newAnd; scoped_ptr<AndAssignment> andAssignment;
scoped_ptr<ArrayAssignment> arrayAssignment;
string toString() const; string toString() const;
}; };
/** /**
* Allocates a NodeAssignment and associates it with the provided ' expr'. * Allocates a NodeAssignment and associates it with the provided ' expr'.
* *
* The unique MemoID of the new assignment is outputted in '*id'. * The unique MemoID of the new assignment is outputted in '*id'.
* The out parameter '*slot' points to the newly allocated NodeAssi gnment. * The out parameter '*slot' points to the newly allocated NodeAssi gnment.
*/ */
void allocateAssignment(MatchExpression* expr, NodeAssignment** slo t, MemoID* id); void allocateAssignment(MatchExpression* expr, NodeAssignment** slo t, MemoID* id);
void dumpMemo(); /**
* Try to assign predicates in 'tryCompound' to 'thisIndex' as comp
ound assignments.
* Output the assignments in 'assign'.
*/
void compound(const vector<MatchExpression*>& tryCompound,
const IndexEntry& thisIndex,
OneIndexAssignment* assign);
// Used to label nodes in the order in which we visit in a post-ord void dumpMemo();
er traversal.
size_t _inOrderCount;
// Map from expression to its MemoID. // Map from expression to its MemoID.
unordered_map<MatchExpression*, MemoID> _nodeToId; unordered_map<MatchExpression*, MemoID> _nodeToId;
// Map from MemoID to its precomputed solution info. // Map from MemoID to its precomputed solution info.
unordered_map<MemoID, NodeAssignment*> _memo; unordered_map<MemoID, NodeAssignment*> _memo;
// If true, there are no further enumeration states, and getNext sh ould return false. // If true, there are no further enumeration states, and getNext sh ould return false.
// We could be _done immediately after init if we're unable to outp ut an indexed plan. // We could be _done immediately after init if we're unable to outp ut an indexed plan.
bool _done; bool _done;
// //
// Data used by all enumeration strategies // Data used by all enumeration strategies
// //
// Match expression we're planning for. Not owned by us. // Match expression we're planning for. Not owned by us.
MatchExpression* _root; MatchExpression* _root;
// Indices we're allowed to enumerate with. // Indices we're allowed to enumerate with. Not owned here.
const vector<IndexEntry>* _indices; const vector<IndexEntry>* _indices;
// Do we output >1 index per AND (index intersection)?
bool _ixisect;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 15 change blocks. 
111 lines changed or deleted 62 lines changed or added


 plan_executor.h   plan_executor.h 
skipping to change at line 79 skipping to change at line 79
// Methods that just pass down to the PlanStage tree. // Methods that just pass down to the PlanStage tree.
// //
/** TODO document me */ /** TODO document me */
void saveState(); void saveState();
/** TODO document me */ /** TODO document me */
bool restoreState(); bool restoreState();
/** TODO document me */ /** TODO document me */
void invalidate(const DiskLoc& dl); void invalidate(const DiskLoc& dl, InvalidationType type);
// //
// Running Support // Running Support
// //
/** TODO document me */ /** TODO document me */
void setYieldPolicy(Runner::YieldPolicy policy); void setYieldPolicy(Runner::YieldPolicy policy);
/** TODO document me */ /** TODO document me */
Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 plan_ranker.h   plan_ranker.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <list> #include <list>
#include <vector> #include <vector>
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/working_set.h"
#include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_solution.h"
namespace mongo { namespace mongo {
struct CandidatePlan; struct CandidatePlan;
struct PlanRankingDecision; struct PlanRankingDecision;
/** /**
* Ranks 2 or more plans. * Ranks 2 or more plans.
*/ */
class PlanRanker { class PlanRanker {
public: public:
/** /**
* Returns index in 'candidates' of which plan is best. * Returns index in 'candidates' of which plan is best.
* If 'why' is not NULL, populates it with information relevant to * Populates 'why' with information relevant to how each plan fared
why that plan was picked. in the ranking process.
* Caller owns pointers in 'why'.
* 'candidateOrder' holds indices into candidates ordered by score
(winner in first element).
*/ */
static size_t pickBestPlan(const vector<CandidatePlan>& candidates, static size_t pickBestPlan(const vector<CandidatePlan>& candidates,
PlanRankingDecision* why); PlanRankingDecision* why);
private:
/** /**
* Assign the stats tree a 'goodness' score. Used internally. * Assign the stats tree a 'goodness' score. The higher the score,
the better
* the plan. The exact value isn't meaningful except for imposing a
ranking.
*
* XXX: consider moving out of PlanRanker so that the plan
* cache can use directly.
*/ */
static double scoreTree(const PlanStageStats* stats); static double scoreTree(const PlanStageStats* stats);
}; };
/** /**
* A container holding one to-be-ranked plan and its associated/relevan t data. * A container holding one to-be-ranked plan and its associated/relevan t data.
* Does not own any of its pointers. * Does not own any of its pointers.
*/ */
struct CandidatePlan { struct CandidatePlan {
CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w) CandidatePlan(QuerySolution* s, PlanStage* r, WorkingSet* w)
skipping to change at line 85 skipping to change at line 92
std::list<WorkingSetID> results; std::list<WorkingSetID> results;
bool failed; bool failed;
}; };
/** /**
* Information about why a plan was picked to be the best. Data here i s placed into the cache * Information about why a plan was picked to be the best. Data here i s placed into the cache
* and used by the CachedPlanRunner to compare expected performance wit h actual. * and used by the CachedPlanRunner to compare expected performance wit h actual.
*/ */
struct PlanRankingDecision { struct PlanRankingDecision {
PlanRankingDecision() : statsOfWinner(NULL), onlyOneSolution(false) /**
{ } * Make a deep copy.
*/
PlanRankingDecision* clone() const {
PlanRankingDecision* decision = new PlanRankingDecision();
for (size_t i = 0; i < stats.size(); ++i) {
PlanStageStats* s = stats.vector()[i];
invariant(s);
decision->stats.mutableVector().push_back(s->clone());
}
decision->scores = scores;
decision->candidateOrder = candidateOrder;
return decision;
}
// Stats of all plans sorted in descending order by score.
// Owned by us. // Owned by us.
PlanStageStats* statsOfWinner; OwnedPointerVector<PlanStageStats> stats;
bool onlyOneSolution;
// TODO: We can place anything we want here. What's useful to the // The "goodness" score corresponding to 'stats'.
cache? What's useful to // Sorted in descending order.
// planning and optimization? std::vector<double> scores;
// Ordering of original plans in descending order of score.
// Filled in by PlanRanker::pickBestPlan(candidates, ...)
// so that candidates[candidateOrder[0]] refers to the best plan
// with corresponding cores[0] and stats[0]. Runner-up would be
// candidates[candidateOrder[1]] followed by
// candidates[candidateOrder[2]], ...
std::vector<size_t> candidateOrder;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 8 change blocks. 
12 lines changed or deleted 41 lines changed or added


 plan_stage.h   plan_stage.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/plan_stats.h"
#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/working_set.h"
#include "mongo/db/invalidation_type.h"
namespace mongo { namespace mongo {
class DiskLoc; class DiskLoc;
/** /**
* A PlanStage ("stage") is the basic building block of a "Query Execut ion Plan." A stage is * A PlanStage ("stage") is the basic building block of a "Query Execut ion Plan." A stage is
* the smallest piece of machinery used in executing a compiled query. Stages either access * the smallest piece of machinery used in executing a compiled query. Stages either access
* data (from a collection or an index) to create a stream of results, or transform a stream of * data (from a collection or an index) to create a stream of results, or transform a stream of
* results (e.g. AND, OR, SORT) to create a stream of results. * results (e.g. AND, OR, SORT) to create a stream of results.
skipping to change at line 209 skipping to change at line 210
*/ */
virtual void recoverFromYield() = 0; virtual void recoverFromYield() = 0;
/** /**
* Notifies a stage that a DiskLoc is going to be deleted (or in-pl ace updated) so that the * Notifies a stage that a DiskLoc is going to be deleted (or in-pl ace updated) so that the
* stage can invalidate or modify any state required to continue pr ocessing without this * stage can invalidate or modify any state required to continue pr ocessing without this
* DiskLoc. * DiskLoc.
* *
* Can only be called after a prepareToYield but before a recoverFr omYield. * Can only be called after a prepareToYield but before a recoverFr omYield.
*/ */
virtual void invalidate(const DiskLoc& dl) = 0; virtual void invalidate(const DiskLoc& dl, InvalidationType type) = 0;
/** /**
* Returns a tree of stats. See plan_stats.h for the details of th is structure. If the * Returns a tree of stats. See plan_stats.h for the details of th is structure. If the
* stage has any children it must propagate the request for stats t o them. * stage has any children it must propagate the request for stats t o them.
* *
* Caller owns returned pointer. * Caller owns returned pointer.
*/ */
virtual PlanStageStats* getStats() = 0; virtual PlanStageStats* getStats() = 0;
}; };
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 plan_stats.h   plan_stats.h 
skipping to change at line 43 skipping to change at line 43
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/query/stage_types.h" #include "mongo/db/query/stage_types.h"
#include "mongo/platform/cstdint.h" #include "mongo/platform/cstdint.h"
namespace mongo { namespace mongo {
struct SpecificStats; /**
* The interface all specific-to-stage stats provide.
*/
struct SpecificStats {
virtual ~SpecificStats() { }
/**
* Make a deep copy.
*/
virtual SpecificStats* clone() const = 0;
};
// Every stage has CommonStats. // Every stage has CommonStats.
struct CommonStats { struct CommonStats {
CommonStats() : works(0), CommonStats() : works(0),
yields(0), yields(0),
unyields(0), unyields(0),
invalidates(0), invalidates(0),
advanced(0), advanced(0),
needTime(0), needTime(0),
needFetch(0), needFetch(0),
isEOF(false) { } isEOF(false) { }
// Count calls into the stage. // Count calls into the stage.
uint64_t works; size_t works;
uint64_t yields; size_t yields;
uint64_t unyields; size_t unyields;
uint64_t invalidates; size_t invalidates;
// How many times was this state the return value of work(...)? // How many times was this state the return value of work(...)?
uint64_t advanced; size_t advanced;
uint64_t needTime; size_t needTime;
uint64_t needFetch; size_t needFetch;
// TODO: have some way of tracking WSM sizes (or really any series of #s). We can measure // TODO: have some way of tracking WSM sizes (or really any series of #s). We can measure
// the size of our inputs and the size of our outputs. We can do a lot with the WS here. // the size of our inputs and the size of our outputs. We can do a lot with the WS here.
// TODO: once we've picked a plan, collect different (or additional ) stats for display to // TODO: once we've picked a plan, collect different (or additional ) stats for display to
// the user, eg. time_t totalTimeSpent; // the user, eg. time_t totalTimeSpent;
// TODO: keep track of total yield time / fetch time for a plan (do ne by runner) // TODO: keep track of total yield time / fetch time for a plan (do ne by runner)
bool isEOF; bool isEOF;
skipping to change at line 88 skipping to change at line 98
// The universal container for a stage's stats. // The universal container for a stage's stats.
struct PlanStageStats { struct PlanStageStats {
PlanStageStats(const CommonStats& c, StageType t) : stageType(t), c ommon(c) { } PlanStageStats(const CommonStats& c, StageType t) : stageType(t), c ommon(c) { }
~PlanStageStats() { ~PlanStageStats() {
for (size_t i = 0; i < children.size(); ++i) { for (size_t i = 0; i < children.size(); ++i) {
delete children[i]; delete children[i];
} }
} }
/**
* Make a deep copy.
*/
PlanStageStats* clone() const {
PlanStageStats* stats = new PlanStageStats(common, stageType);
if (specific.get()) {
stats->specific.reset(specific->clone());
}
for (size_t i = 0; i < children.size(); ++i) {
invariant(children[i]);
stats->children.push_back(children[i]->clone());
}
return stats;
}
// See query/stage_type.h // See query/stage_type.h
StageType stageType; StageType stageType;
// Stats exported by implementing the PlanStage interface. // Stats exported by implementing the PlanStage interface.
CommonStats common; CommonStats common;
// Per-stage place to stash additional information // Per-stage place to stash additional information
boost::scoped_ptr<SpecificStats> specific; boost::scoped_ptr<SpecificStats> specific;
// The stats of the node's children. // The stats of the node's children.
std::vector<PlanStageStats*> children; std::vector<PlanStageStats*> children;
private: private:
MONGO_DISALLOW_COPYING(PlanStageStats); MONGO_DISALLOW_COPYING(PlanStageStats);
}; };
/**
* The interface all specific-to-stage stats provide.
*/
struct SpecificStats {
virtual ~SpecificStats() { }
};
struct CollectionScanStats : public SpecificStats {
CollectionScanStats() : docsTested(0) { }
// How many documents did we check against our filter?
uint64_t docsTested;
};
struct AndHashStats : public SpecificStats { struct AndHashStats : public SpecificStats {
AndHashStats() : flaggedButPassed(0), AndHashStats() : flaggedButPassed(0),
flaggedInProgress(0) { } flaggedInProgress(0) { }
virtual ~AndHashStats() { } virtual ~AndHashStats() { }
virtual SpecificStats* clone() const {
AndHashStats* specific = new AndHashStats(*this);
return specific;
}
// Invalidation counters. // Invalidation counters.
// How many results had the AND fully evaluated but were invalidate d? // How many results had the AND fully evaluated but were invalidate d?
uint64_t flaggedButPassed; size_t flaggedButPassed;
// How many results were mid-AND but got flagged? // How many results were mid-AND but got flagged?
uint64_t flaggedInProgress; size_t flaggedInProgress;
// How many entries are in the map after each child? // How many entries are in the map after each child?
// child 'i' produced children[i].common.advanced DiskLocs, of whic h mapAfterChild[i] were // child 'i' produced children[i].common.advanced DiskLocs, of whic h mapAfterChild[i] were
// intersections. // intersections.
std::vector<uint64_t> mapAfterChild; std::vector<size_t> mapAfterChild;
// mapAfterChild[mapAfterChild.size() - 1] WSMs were match tested. // mapAfterChild[mapAfterChild.size() - 1] WSMs were match tested.
// commonstats.advanced is how many passed. // commonstats.advanced is how many passed.
}; };
struct AndSortedStats : public SpecificStats { struct AndSortedStats : public SpecificStats {
AndSortedStats() : flagged(0), AndSortedStats() : flagged(0),
matchTested(0) { } matchTested(0) { }
virtual ~AndSortedStats() { } virtual ~AndSortedStats() { }
virtual SpecificStats* clone() const {
AndSortedStats* specific = new AndSortedStats(*this);
return specific;
}
// How many results from each child did not pass the AND? // How many results from each child did not pass the AND?
std::vector<uint64_t> failedAnd; std::vector<size_t> failedAnd;
// How many results were flagged via invalidation? // How many results were flagged via invalidation?
uint64_t flagged; size_t flagged;
// Fails == common.advanced - matchTested // Fails == common.advanced - matchTested
uint64_t matchTested; size_t matchTested;
};
struct CollectionScanStats : public SpecificStats {
CollectionScanStats() : docsTested(0) { }
virtual SpecificStats* clone() const {
CollectionScanStats* specific = new CollectionScanStats(*this);
return specific;
}
// How many documents did we check against our filter?
size_t docsTested;
};
struct DistinctScanStats : public SpecificStats {
DistinctScanStats() : keysExamined(0) { }
virtual SpecificStats* clone() const {
return new DistinctScanStats(*this);
}
// How many keys did we look at while distinct-ing?
size_t keysExamined;
}; };
struct FetchStats : public SpecificStats { struct FetchStats : public SpecificStats {
FetchStats() : alreadyHasObj(0), FetchStats() : alreadyHasObj(0),
forcedFetches(0), forcedFetches(0),
matchTested(0) { } matchTested(0) { }
virtual ~FetchStats() { } virtual ~FetchStats() { }
virtual SpecificStats* clone() const {
FetchStats* specific = new FetchStats(*this);
return specific;
}
// Have we seen anything that already had an object? // Have we seen anything that already had an object?
uint64_t alreadyHasObj; size_t alreadyHasObj;
// How many fetches weren't in memory? it's common.needFetch. // How many fetches weren't in memory? it's common.needFetch.
// How many total fetches did we do? it's common.advanced. // How many total fetches did we do? it's common.advanced.
// So the number of fetches that were in memory are common.advanced - common.needFetch. // So the number of fetches that were in memory are common.advanced - common.needFetch.
// How many records were we forced to fetch as the result of an inv alidation? // How many records were we forced to fetch as the result of an inv alidation?
uint64_t forcedFetches; size_t forcedFetches;
// We know how many passed (it's the # of advanced) and therefore h ow many failed. // We know how many passed (it's the # of advanced) and therefore h ow many failed.
uint64_t matchTested; size_t matchTested;
}; };
struct IndexScanStats : public SpecificStats { struct IndexScanStats : public SpecificStats {
IndexScanStats() : isMultiKey(false), IndexScanStats() : isMultiKey(false),
yieldMovedCursor(0), yieldMovedCursor(0),
dupsTested(0), dupsTested(0),
dupsDropped(0), dupsDropped(0),
seenInvalidated(0), seenInvalidated(0),
matchTested(0), matchTested(0),
keysExamined(0) { } keysExamined(0) { }
virtual ~IndexScanStats() { } virtual ~IndexScanStats() { }
virtual SpecificStats* clone() const {
IndexScanStats* specific = new IndexScanStats(*this);
// BSON objects have to be explicitly copied.
specific->keyPattern = keyPattern.getOwned();
specific->indexBounds = indexBounds.getOwned();
return specific;
}
// Index type being used. // Index type being used.
std::string indexType; std::string indexType;
// name of the index being used // name of the index being used
std::string indexName; std::string indexName;
BSONObj keyPattern; BSONObj keyPattern;
// A BSON (opaque, ie. hands off other than toString() it) represen tation of the bounds // A BSON (opaque, ie. hands off other than toString() it) represen tation of the bounds
// used. // used.
BSONObj indexBounds; BSONObj indexBounds;
// >1 if we're traversing the index along with its order. <1 if we' re traversing it // >1 if we're traversing the index along with its order. <1 if we' re traversing it
// against the order. // against the order.
int direction; int direction;
// Whether this index is over a field that contain array values. // Whether this index is over a field that contain array values.
bool isMultiKey; bool isMultiKey;
uint64_t yieldMovedCursor; size_t yieldMovedCursor;
uint64_t dupsTested; size_t dupsTested;
uint64_t dupsDropped; size_t dupsDropped;
uint64_t seenInvalidated; size_t seenInvalidated;
// TODO: we could track key sizes here. // TODO: we could track key sizes here.
// We know how many passed (it's the # of advanced) and therefore h ow many failed. // We know how many passed (it's the # of advanced) and therefore h ow many failed.
uint64_t matchTested; size_t matchTested;
// Number of entries retrieved from the index during the scan. // Number of entries retrieved from the index during the scan.
uint64_t keysExamined; size_t keysExamined;
}; };
struct OrStats : public SpecificStats { struct OrStats : public SpecificStats {
OrStats() : dupsTested(0), OrStats() : dupsTested(0),
dupsDropped(0), dupsDropped(0),
locsForgotten(0) { } locsForgotten(0) { }
virtual ~OrStats() { } virtual ~OrStats() { }
uint64_t dupsTested; virtual SpecificStats* clone() const {
uint64_t dupsDropped; OrStats* specific = new OrStats(*this);
return specific;
}
size_t dupsTested;
size_t dupsDropped;
// How many calls to invalidate(...) actually removed a DiskLoc fro m our deduping map? // How many calls to invalidate(...) actually removed a DiskLoc fro m our deduping map?
uint64_t locsForgotten; size_t locsForgotten;
// We know how many passed (it's the # of advanced) and therefore h ow many failed. // We know how many passed (it's the # of advanced) and therefore h ow many failed.
std::vector<uint64_t> matchTested; std::vector<size_t> matchTested;
}; };
struct SortStats : public SpecificStats { struct SortStats : public SpecificStats {
SortStats() : forcedFetches(0) { } SortStats() : forcedFetches(0) { }
virtual ~SortStats() { } virtual ~SortStats() { }
virtual SpecificStats* clone() const {
SortStats* specific = new SortStats(*this);
return specific;
}
// How many records were we forced to fetch as the result of an inv alidation? // How many records were we forced to fetch as the result of an inv alidation?
uint64_t forcedFetches; size_t forcedFetches;
}; };
struct MergeSortStats : public SpecificStats { struct MergeSortStats : public SpecificStats {
MergeSortStats() : dupsTested(0), MergeSortStats() : dupsTested(0),
dupsDropped(0), dupsDropped(0),
forcedFetches(0) { } forcedFetches(0) { }
virtual ~MergeSortStats() { } virtual ~MergeSortStats() { }
uint64_t dupsTested; virtual SpecificStats* clone() const {
uint64_t dupsDropped; MergeSortStats* specific = new MergeSortStats(*this);
return specific;
}
size_t dupsTested;
size_t dupsDropped;
// How many records were we forced to fetch as the result of an inv alidation? // How many records were we forced to fetch as the result of an inv alidation?
uint64_t forcedFetches; size_t forcedFetches;
}; };
struct ShardingFilterStats : public SpecificStats { struct ShardingFilterStats : public SpecificStats {
ShardingFilterStats() : chunkSkips(0) { } ShardingFilterStats() : chunkSkips(0) { }
uint64_t chunkSkips; virtual SpecificStats* clone() const {
ShardingFilterStats* specific = new ShardingFilterStats(*this);
return specific;
}
size_t chunkSkips;
};
struct TwoDNearStats : public SpecificStats {
TwoDNearStats() : objectsLoaded(0), nscanned(0) { }
virtual SpecificStats* clone() const {
TwoDNearStats* specific = new TwoDNearStats(*this);
return specific;
}
size_t objectsLoaded;
// Since 2d's near does all its work in one go we can't divine the
real nscanned from
// anything else.
size_t nscanned;
};
struct TextStats : public SpecificStats {
TextStats() : keysExamined(0), fetches(0) { }
virtual SpecificStats* clone() const {
TextStats* specific = new TextStats(*this);
return specific;
}
size_t keysExamined;
size_t fetches;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 30 change blocks. 
46 lines changed or deleted 152 lines changed or added


 privilege_parser.h   privilege_parser.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 processinfo.h   processinfo.h 
skipping to change at line 89 skipping to change at line 89
* Get the CPU architecture (e.g. x86, x86_64) * Get the CPU architecture (e.g. x86, x86_64)
*/ */
const string& getArch() const { return sysInfo().cpuArch; } const string& getArch() const { return sysInfo().cpuArch; }
/** /**
* Determine if NUMA is enabled (interleaved) for this process * Determine if NUMA is enabled (interleaved) for this process
*/ */
bool hasNumaEnabled() const { return sysInfo().hasNuma; } bool hasNumaEnabled() const { return sysInfo().hasNuma; }
/** /**
* Determine if file zeroing is necessary for newly allocated data
files.
*/
static bool isDataFileZeroingNeeded() { return systemInfo->fileZero
Needed; }
/**
* Get extra system stats * Get extra system stats
*/ */
void appendSystemDetails( BSONObjBuilder& details ) const { void appendSystemDetails( BSONObjBuilder& details ) const {
details.append( StringData("extra"), sysInfo()._extraStats.copy () ); details.append( StringData("extra"), sysInfo()._extraStats.copy () );
} }
/** /**
* Append platform-specific data to obj * Append platform-specific data to obj
*/ */
void getExtraInfo( BSONObjBuilder& info ); void getExtraInfo( BSONObjBuilder& info );
skipping to change at line 142 skipping to change at line 147
string osType; string osType;
string osName; string osName;
string osVersion; string osVersion;
unsigned addrSize; unsigned addrSize;
unsigned long long memSize; unsigned long long memSize;
unsigned numCores; unsigned numCores;
unsigned long long pageSize; unsigned long long pageSize;
string cpuArch; string cpuArch;
bool hasNuma; bool hasNuma;
BSONObj _extraStats; BSONObj _extraStats;
// This is an OS specific value, which determines whether files
should be zero-filled
// at allocation time in order to avoid Microsoft KB 2731284.
//
bool fileZeroNeeded;
SystemInfo() : SystemInfo() :
addrSize( 0 ), addrSize( 0 ),
memSize( 0 ), memSize( 0 ),
numCores( 0 ), numCores( 0 ),
pageSize( 0 ), pageSize( 0 ),
hasNuma( false ) { hasNuma( false ),
fileZeroNeeded (false) {
// populate SystemInfo during construction // populate SystemInfo during construction
collectSystemInfo(); collectSystemInfo();
} }
private: private:
/** Collect host system info */ /** Collect host system info */
void collectSystemInfo(); void collectSystemInfo();
}; };
ProcessId _pid; ProcessId _pid;
static mongo::mutex _sysInfoLock; static mongo::mutex _sysInfoLock;
 End of changes. 3 change blocks. 
1 lines changed or deleted 16 lines changed or added


 progress_meter.h   progress_meter.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/util/goodies.h"
#include <boost/noncopyable.hpp> #include <boost/noncopyable.hpp>
#include <string> #include <string>
namespace mongo { namespace mongo {
class ProgressMeter : boost::noncopyable { class ProgressMeter : boost::noncopyable {
public: public:
ProgressMeter(unsigned long long total, ProgressMeter(unsigned long long total,
int secondsBetween = 3, int secondsBetween = 3,
int checkInterval = 100, int checkInterval = 100,
std::string units = "", std::string units = "",
std::string name = "Progress") std::string name = "Progress")
: _showTotal(true), : _showTotal(true),
_units(units), _units(units) {
_name(name) { _name = name.c_str();
reset( total , secondsBetween , checkInterval ); reset( total , secondsBetween , checkInterval );
} }
ProgressMeter() : _active(0), _showTotal(true), _units(""), _name(" ProgressMeter() : _active(0), _showTotal(true), _units("") {
Progress") {} _name = "Progress";
}
// typically you do ProgressMeterHolder // typically you do ProgressMeterHolder
void reset( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 ); void reset( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 );
void finished() { _active = 0; } void finished() { _active = 0; }
bool isActive() const { return _active; } bool isActive() const { return _active; }
/** /**
* @param n how far along we are relative to the total # we set in CurOp::setMessage * @param n how far along we are relative to the total # we set in CurOp::setMessage
* @return if row was printed * @return if row was printed
*/ */
bool hit( int n = 1 ); bool hit( int n = 1 );
void setUnits( const std::string& units ) { _units = units; } void setUnits( const std::string& units ) { _units = units; }
std::string getUnit() const { return _units; } std::string getUnit() const { return _units; }
void setName(std::string name) { _name = name; } void setName(std::string name) { _name = name.c_str(); }
std::string getName() const { return _name; } std::string getName() const { return _name.toString(); }
void setTotalWhileRunning( unsigned long long total ) { void setTotalWhileRunning( unsigned long long total ) {
_total = total; _total = total;
} }
unsigned long long done() const { return _done; } unsigned long long done() const { return _done; }
unsigned long long hits() const { return _hits; } unsigned long long hits() const { return _hits; }
unsigned long long total() const { return _total; } unsigned long long total() const { return _total; }
skipping to change at line 91 skipping to change at line 94
unsigned long long _total; unsigned long long _total;
bool _showTotal; bool _showTotal;
int _secondsBetween; int _secondsBetween;
int _checkInterval; int _checkInterval;
unsigned long long _done; unsigned long long _done;
unsigned long long _hits; unsigned long long _hits;
int _lastTime; int _lastTime;
std::string _units; std::string _units;
std::string _name; ThreadSafeString _name;
}; };
// e.g.: // e.g.:
// CurOp * op = cc().curop(); // CurOp * op = cc().curop();
// ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort", "Index: External Sort Progress", d->stats.nrecords, 10)); // ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort", "Index: External Sort Progress", d->stats.nrecords, 10));
// loop { pm.hit(); } // loop { pm.hit(); }
class ProgressMeterHolder : boost::noncopyable { class ProgressMeterHolder : boost::noncopyable {
public: public:
ProgressMeterHolder( ProgressMeter& pm ) ProgressMeterHolder( ProgressMeter& pm )
: _pm( pm ) { : _pm( pm ) {
 End of changes. 5 change blocks. 
7 lines changed or deleted 9 lines changed or added


 qlock.h   qlock.h 
skipping to change at line 36 skipping to change at line 36
* wish to do so, delete this exception statement from your version. If y ou * wish to do so, delete this exception statement from your version. If y ou
* delete this exception statement from all source files in the program, * delete this exception statement from all source files in the program,
* then also delete it in the license file. * then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <boost/noncopyable.hpp> #include <boost/noncopyable.hpp>
#include <boost/thread/mutex.hpp> #include <boost/thread/mutex.hpp>
#include <boost/thread/condition.hpp> #include <boost/thread/condition.hpp>
#include "../assert_util.h" #include "mongo/util/assert_util.h"
#include "../time_support.h" #include "mongo/util/time_support.h"
namespace mongo { namespace mongo {
/** "Quad Lock" /** "Quad Lock"
we want to be able to do semi-granular locking now, and read/write style locking for that. we want to be able to do semi-granular locking now, and read/write style locking for that.
if that is all we want we could just have a rwlock per lockable ent ity, and we are done. if that is all we want we could just have a rwlock per lockable ent ity, and we are done.
however at times we want to stop-the-world. in addition, sometimes we want to stop the however at times we want to stop-the-world. in addition, sometimes we want to stop the
world *for writing only*. world *for writing only*.
A hierarchy of locks could achieve this; instead here we've modeled it in one synchronization A hierarchy of locks could achieve this; instead here we've modeled it in one synchronization
 End of changes. 1 change blocks. 
2 lines changed or deleted 2 lines changed or added


 qlog.h   qlog.h 
skipping to change at line 35 skipping to change at line 35
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <ostream> #include <ostream>
namespace mongo { namespace mongo {
std::ostream& QLOG(); extern bool verboseQueryLogging;
// With a #define like this, we don't evaluate the costly toString()s that
are QLOG'd
#define QLOG() if (verboseQueryLogging) log()
bool qlogOff(); bool qlogOff();
bool qlogOn(); bool qlogOn();
} // namespace mongo } // namespace mongo
 End of changes. 1 change blocks. 
1 lines changed or deleted 5 lines changed or added


 query_planner.h   query_planner.h 
skipping to change at line 32 skipping to change at line 32
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/index_entry.h" #include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_solution.h"
namespace mongo { namespace mongo {
struct QueryPlannerParams { class CachedSolution;
enum Options {
// You probably want to set this.
DEFAULT = 0,
// Set this if you don't want a table scan.
// See http://docs.mongodb.org/manual/reference/parameters/
NO_TABLE_SCAN = 1,
// Set this if you want a collscan outputted even if there's an
ixscan.
INCLUDE_COLLSCAN = 2,
// Set this if you're running on a sharded cluster. We'll add
a "drop all docs that
// shouldn't be on this shard" stage before projection.
//
// In order to set this, you must check
// shardingState.needCollectionMetadata(current_namespace) in t
he same lock that you use
// to build the query runner.
INCLUDE_SHARD_FILTER = 4,
};
// See Options enum above.
size_t options;
// What indices are available for planning?
vector<IndexEntry> indices;
// What's our shard key? If INCLUDE_SHARD_FILTER is set we will cr
eate a shard filtering
// stage. If we know the shard key, we can perform covering analys
is instead of always
// forcing a fetch.
BSONObj shardKey;
};
/** /**
* QueryPlanner's job is to provide an entry point to the query plannin g and optimization * QueryPlanner's job is to provide an entry point to the query plannin g and optimization
* process. * process.
*/ */
class QueryPlanner { class QueryPlanner {
public: public:
/** /**
* Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the * Outputs a series of possible solutions for the provided 'query' into 'out'. Uses the
* indices and other data in 'params' to plan with. * indices and other data in 'params' to plan with.
* *
* Caller owns pointers in *out. * Caller owns pointers in *out.
*/ */
static void plan(const CanonicalQuery& query, static Status plan(const CanonicalQuery& query,
const QueryPlannerParams& params, const QueryPlannerParams& params,
vector<QuerySolution*>* out); std::vector<QuerySolution*>* out);
private:
//
// Index Selection methods.
//
/** /**
* Return all the fields in the tree rooted at 'node' that we can u * Helper that does most of the heavy lifting for the planFromCache
se an index on * method which this overloads. Whereas the overloaded version plan
* in order to answer the query. s
* from cache twice (once for the winning solution and once from th
e
* backup solution), this version plans from cache once.
* *
* The 'prefix' argument is a path prefix to be prepended to any fi * It requires a single SolutionCacheData, rather than a CachedSolu
elds mentioned in tion, which
* predicates encountered. Some array operators specify a path pre * owns a vector of SolutionCacheData instances.
fix.
*/ */
static void getFields(MatchExpression* node, string prefix, unorder static Status planFromCache(const CanonicalQuery& query,
ed_set<string>* out); const QueryPlannerParams& params,
const SolutionCacheData& cacheData,
QuerySolution** out);
/** /**
* Find all indices prefixed by fields we have predicates over. On * Attempt to generate a query solution, given data retrieved
ly these indices are * from the plan cache.
* useful in answering the query.
*/
static void findRelevantIndices(const unordered_set<string>& fields
,
const vector<IndexEntry>& indices,
vector<IndexEntry>* out);
/**
* Return true if the index key pattern field 'elt' (which belongs
to 'index') can be used
* to answer the predicate 'node'.
* *
* For example, {field: "hashed"} can only be used with sets of equ * @param query -- query for which we are generating a plan
alities. * @param params -- planning parameters
* {field: "2d"} can only be used with some geo predic * @param cachedSoln -- the CachedSolution retrieved from the plan
ates. cache.
* {field: "2dsphere"} can only be used with some othe * @param out -- an out-parameter which will be filled in with the
r geo predicates. solution
* generated from the cache data
* @param backupOut -- if 'out' contains a blocking sort, then back
outOut may
* contain an alternative solution with no blocking sort; otherwis
e it will
* contain NULL on return.
*/ */
static bool compatible(const BSONElement& elt, const IndexEntry& in static Status planFromCache(const CanonicalQuery& query,
dex, MatchExpression* node); const QueryPlannerParams& params,
const CachedSolution& cachedSoln,
QuerySolution** out,
QuerySolution** backupOut);
/** /**
* Determine how useful all of our relevant 'indices' are to all pr * Used to generated the index tag tree that will be inserted
edicates in the subtree * into the plan cache. This data gets stashed inside a QuerySoluti
* rooted at 'node'. Affixes a RelevantTag to all predicate nodes on
which can use an index. * until it can be inserted into the cache proper.
*
* 'prefix' is a path prefix that should be prepended to any path (
certain array operators
* imply a path prefix).
* *
* For an index to be useful to a predicate, the index must be comp * @param taggedTree -- a MatchExpression with index tags that has
atible (see above). been
* produced by the enumerator.
* @param relevantIndices -- a list of the index entries used to ta
g
* the tree (i.e. index numbers in the tags refer to entries in t
his vector)
* *
* If an index is prefixed by the predicate's path, it's always use * On success, a new tagged tree is returned through the out-parame
ful. ter 'out'.
* The caller has ownership of both taggedTree and *out.
* *
* If an index is compound but not prefixed by a predicate's path, * On failure, 'out' is set to NULL.
it's only useful if
* there exists another predicate that 1. will use that index and 2
. is related to the
* original predicate by having an AND as a parent.
*/
static void rateIndices(MatchExpression* node, string prefix,
const vector<IndexEntry>& indices);
//
// Collection Scan Data Access method.
//
/**
* Return a CollectionScanNode that scans as requested in 'query'.
*/ */
static QuerySolution* makeCollectionScan(const CanonicalQuery& quer static Status cacheDataFromTaggedTree(const MatchExpression* const
y, taggedTree,
bool tailable, const vector<IndexEntry>& rel
const QueryPlannerParams& evantIndices,
params); PlanCacheIndexTree** out);
//
// Indexed Data Access methods.
//
// The inArrayOperator flag deserves some attention. It is set whe
n we're processing a child of
// a MatchExpression::ALL or MatchExpression::ELEM_MATCH_OBJECT.
//
// When true, the following behavior changes for all methods below
that take it as an argument:
// 0. No deletion of MatchExpression(s). In fact,
// 1. No mutation of the MatchExpression at all. We need the tree
as-is in order to perform
// a filter on the entire tree.
// 2. No fetches performed. There will be a final fetch by the cal
ler of buildIndexedDataAccess
// who set the value of inArrayOperator to true.
// 3. No compound indices are used and no bounds are combined. The
se are incorrect in the context
// of these operators.
//
/**
* If 'inArrayOperator' is false, takes ownership of 'root'.
*/
static QuerySolutionNode* buildIndexedDataAccess(const CanonicalQue
ry& query,
MatchExpression* r
oot,
bool inArrayOperat
or,
const vector<Index
Entry>& indices);
/**
* Takes ownership of 'root'.
*/
static QuerySolutionNode* buildIndexedAnd(const CanonicalQuery& que
ry,
MatchExpression* root,
bool inArrayOperator,
const vector<IndexEntry>&
indices);
/**
* Takes ownership of 'root'.
*/
static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& quer
y,
MatchExpression* root,
bool inArrayOperator,
const vector<IndexEntry>&
indices);
/** /**
* Helper used by buildIndexedAnd and buildIndexedOr. * @param filter -- an untagged MatchExpression
* * @param indexTree -- a tree structure retrieved from the
* The children of AND and OR nodes are sorted by the index that th * cache with index tags that indicates how 'filter' should
e subtree rooted at * be tagged.
* that node uses. Child nodes that use the same index are adjacen * @param indexMap -- needed in order to put the proper index
t to one another to * numbers inside the index tags
* facilitate grouping of index scans. As such, the processing for
AND and OR is
* almost identical.
*
* See tagForSort and sortUsingTags in index_tag.h for details on o
rdering the children
* of OR and AND.
* *
* Does not take ownership of 'root' but may remove children from i * On success, 'filter' is mutated so that it has all the
t. * index tags needed in order for the access planner to recreate
*/ * the cached plan.
static bool processIndexScans(const CanonicalQuery& query,
MatchExpression* root,
bool inArrayOperator,
const vector<IndexEntry>& indices,
vector<QuerySolutionNode*>* out);
//
// Helpers for creating an index scan.
//
/**
* Create a new data access node.
* *
* If the node is an index scan, the bounds for 'expr' are computed * On failure, the tag state attached to the nodes of 'filter'
and placed into the * is invalid. Planning from the cache should be aborted.
* first field's OIL position. The rest of the OILs are allocated
but uninitialized.
* *
* If the node is a geo node, grab the geo data from 'expr' and stu * Does not take ownership of either filter or indexTree.
ff it into the
* geo solution node of the appropriate type.
*/
static QuerySolutionNode* makeLeafNode(const IndexEntry& index,
MatchExpression* expr,
bool* exact);
/**
* Merge the predicate 'expr' with the leaf node 'node'.
*/
static void mergeWithLeafNode(MatchExpression* expr, const IndexEnt
ry& index,
size_t pos, bool* exactOut, QuerySolu
tionNode* node,
MatchExpression::MatchType mergeType)
;
/**
* If index scan (regular or expression index), fill in any bounds
that are missing in
* 'node' with the "all values for this field" interval.
*
* If geo, do nothing.
*/
static void finishLeafNode(QuerySolutionNode* node, const IndexEntr
y& index);
//
// Analysis of Data Access
//
/**
* In brief: performs sort and covering analysis.
*
* The solution rooted at 'solnRoot' provides data for the query, w
hether through some
* configuration of indices or through a collection scan. Addition
al stages may be required
* to perform sorting, projection, or other operations that are ind
ependent of the source
* of the data. These stages are added atop 'solnRoot'.
*
* 'taggedRoot' is a copy of the parse tree. Nodes in 'solnRoot' m
ay point into it.
*
* Takes ownership of 'solnRoot' and 'taggedRoot'.
*
* Caller owns the returned QuerySolution.
*/
static QuerySolution* analyzeDataAccess(const CanonicalQuery& query
,
const QueryPlannerParams& p
arams,
QuerySolutionNode* solnRoot
);
/**
* Return a plan that uses the provided index as a proxy for a coll
ection scan.
*/
static QuerySolution* scanWholeIndex(const IndexEntry& index,
const CanonicalQuery& query,
const QueryPlannerParams& para
ms,
int direction = 1);
/**
* Traverse the tree rooted at 'root' reversing ixscans and other s
orts.
*/
static void reverseScans(QuerySolutionNode* root);
/**
* Assumes each OIL in bounds is increasing.
*
* Aligns OILs (and bounds) according to the kp direction * the sca
nDir.
*/
static void alignBounds(IndexBounds* bounds, const BSONObj& kp, int
scanDir = 1);
/**
* Does the index with key pattern 'kp' provide the sort that 'quer
y' wants?
*/
static bool providesSort(const CanonicalQuery& query, const BSONObj
& kp);
/**
* Get the bounds for the sort in 'query' used by the sort stage.
Output the bounds
* in 'node'.
*/ */
static void getBoundsForSort(const CanonicalQuery& query, SortNode* static Status tagAccordingToCache(MatchExpression* filter,
node); const PlanCacheIndexTree* const i
ndexTree,
const map<BSONObj, size_t>& index
Map);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 19 change blocks. 
288 lines changed or deleted 74 lines changed or added


 query_planner_common.h   query_planner_common.h 
skipping to change at line 29 skipping to change at line 29
* linked combinations including the program with the OpenSSL library. Y ou * linked combinations including the program with the OpenSSL library. Y ou
* must comply with the GNU Affero General Public License in all respect s for * must comply with the GNU Affero General Public License in all respect s for
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/qlog.h"
namespace mongo { namespace mongo {
/**
* Methods used by several parts of the planning process.
*/
class QueryPlannerCommon { class QueryPlannerCommon {
public: public:
/** /**
* Does the tree rooted at 'root' have a node with matchType 'type' ? * Does the tree rooted at 'root' have a node with matchType 'type' ?
* *
* If 'out' is not NULL, sets 'out' to the first node of type 'type ' encountered. * If 'out' is not NULL, sets 'out' to the first node of type 'type ' encountered.
*/ */
static bool hasNode(MatchExpression* root, MatchExpression::MatchTy pe type, static bool hasNode(MatchExpression* root, MatchExpression::MatchTy pe type,
MatchExpression** out = NULL) { MatchExpression** out = NULL) {
if (type == root->matchType()) { if (type == root->matchType()) {
skipping to change at line 56 skipping to change at line 62
return true; return true;
} }
for (size_t i = 0; i < root->numChildren(); ++i) { for (size_t i = 0; i < root->numChildren(); ++i) {
if (hasNode(root->getChild(i), type, out)) { if (hasNode(root->getChild(i), type, out)) {
return true; return true;
} }
} }
return false; return false;
} }
/**
* Assumes the provided BSONObj is of the form {field1: -+1, ..., f
ield2: -+1}
* Returns a BSONObj with the values negated.
*/
static BSONObj reverseSortObj(const BSONObj& sortObj) {
BSONObjBuilder reverseBob;
BSONObjIterator it(sortObj);
while (it.more()) {
BSONElement elt = it.next();
reverseBob.append(elt.fieldName(), elt.numberInt() * -1);
}
return reverseBob.obj();
}
/**
* Traverses the tree rooted at 'node'. For every STAGE_IXSCAN enc
ountered, reverse
* the scan direction and index bounds.
*/
static void reverseScans(QuerySolutionNode* node) {
StageType type = node->getType();
if (STAGE_IXSCAN == type) {
IndexScanNode* isn = static_cast<IndexScanNode*>(node);
isn->direction *= -1;
if (isn->bounds.isSimpleRange) {
std::swap(isn->bounds.startKey, isn->bounds.endKey);
// XXX: Not having a startKeyInclusive means that if we
reverse a max/min query
// we have different results with and without the rever
se...
isn->bounds.endKeyInclusive = true;
}
else {
for (size_t i = 0; i < isn->bounds.fields.size(); ++i)
{
vector<Interval>& iv = isn->bounds.fields[i].interv
als;
// Step 1: reverse the list.
std::reverse(iv.begin(), iv.end());
// Step 2: reverse each interval.
for (size_t j = 0; j < iv.size(); ++j) {
iv[j].reverse();
}
}
}
if (!isn->bounds.isValidFor(isn->indexKeyPattern, isn->dire
ction)) {
QLOG() << "invalid bounds: " << isn->bounds.toString()
<< endl;
verify(0);
}
// TODO: we can just negate every value in the already comp
uted properties.
isn->computeProperties();
}
else if (STAGE_SORT_MERGE == type) {
// reverse direction of comparison for merge
MergeSortNode* msn = static_cast<MergeSortNode*>(node);
msn->sort = reverseSortObj(msn->sort);
}
else {
verify(STAGE_SORT != type);
// This shouldn't be here...
}
for (size_t i = 0; i < node->children.size(); ++i) {
reverseScans(node->children[i]);
}
}
}; };
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
0 lines changed or deleted 81 lines changed or added


 query_solution.h   query_solution.h 
skipping to change at line 36 skipping to change at line 36
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#include "mongo/db/geo/geoquery.h" #include "mongo/db/geo/geoquery.h"
#include "mongo/db/fts/fts_query.h" #include "mongo/db/fts/fts_query.h"
#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/lite_projection.h" #include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/stage_types.h" #include "mongo/db/query/stage_types.h"
namespace mongo { namespace mongo {
using mongo::fts::FTSQuery; using mongo::fts::FTSQuery;
/** /**
* This is an abstract representation of a query plan. It can be trans cribed into a tree of * This is an abstract representation of a query plan. It can be trans cribed into a tree of
* PlanStages, which can then be handed to a PlanRunner for execution. * PlanStages, which can then be handed to a PlanRunner for execution.
*/ */
skipping to change at line 70 skipping to change at line 70
/** /**
* What stage should this be transcribed to? See stage_types.h. * What stage should this be transcribed to? See stage_types.h.
*/ */
virtual StageType getType() const = 0; virtual StageType getType() const = 0;
/** /**
* Internal function called by toString() * Internal function called by toString()
* *
* TODO: Consider outputting into a BSONObj or builder thereof. * TODO: Consider outputting into a BSONObj or builder thereof.
*/ */
virtual void appendToString(stringstream* ss, int indent) const = 0 ; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const = 0;
// //
// Computed properties // Computed properties
// //
/** /**
* Must be called before any properties are examined. * Must be called before any properties are examined.
*/ */
virtual void computeProperties() { virtual void computeProperties() {
for (size_t i = 0; i < children.size(); ++i) { for (size_t i = 0; i < children.size(); ++i) {
skipping to change at line 131 skipping to change at line 131
* Usage: * Usage:
* 1. If our plan gives us a sort order, we don't have to add a sor t stage. * 1. If our plan gives us a sort order, we don't have to add a sor t stage.
* 2. If all the children of an OR have the same sort order, we can maintain that * 2. If all the children of an OR have the same sort order, we can maintain that
* sort order with a STAGE_SORT_MERGE instead of STAGE_OR. * sort order with a STAGE_SORT_MERGE instead of STAGE_OR.
*/ */
virtual const BSONObjSet& getSort() const = 0; virtual const BSONObjSet& getSort() const = 0;
// These are owned here. // These are owned here.
vector<QuerySolutionNode*> children; vector<QuerySolutionNode*> children;
// If a stage has a non-NULL filter all values outputted from that
stage must pass that
// filter.
scoped_ptr<MatchExpression> filter; scoped_ptr<MatchExpression> filter;
protected: protected:
/** /**
* Formatting helper used by toString(). * Formatting helper used by toString().
*/ */
static void addIndent(stringstream* ss, int level); static void addIndent(mongoutils::str::stream* ss, int level);
/** /**
* Every solution node has properties and this adds the debug info for the * Every solution node has properties and this adds the debug info for the
* properties. * properties.
*/ */
void addCommon(stringstream* ss, int indent) const; void addCommon(mongoutils::str::stream* ss, int indent) const;
private: private:
MONGO_DISALLOW_COPYING(QuerySolutionNode); MONGO_DISALLOW_COPYING(QuerySolutionNode);
}; };
/** /**
* A QuerySolution must be entirely self-contained and own everything i nside of it. * A QuerySolution must be entirely self-contained and own everything i nside of it.
* *
* A tree of stages may be built from a QuerySolution. The QuerySoluti on must outlive the tree * A tree of stages may be built from a QuerySolution. The QuerySoluti on must outlive the tree
* of stages. * of stages.
skipping to change at line 170 skipping to change at line 172
// Any filters in root or below point into this object. Must be ow ned. // Any filters in root or below point into this object. Must be ow ned.
BSONObj filterData; BSONObj filterData;
string ns; string ns;
// XXX temporary: if it has a sort stage the sort wasn't provided b y an index, // XXX temporary: if it has a sort stage the sort wasn't provided b y an index,
// so we use that index (if it exists) to provide a sort. // so we use that index (if it exists) to provide a sort.
bool hasSortStage; bool hasSortStage;
// Owned here. Used by the plan cache.
boost::scoped_ptr<SolutionCacheData> cacheData;
/** /**
* Output a human-readable string representing the plan. * Output a human-readable string representing the plan.
*/ */
string toString() { string toString() {
if (NULL == root) { if (NULL == root) {
return "empty query solution"; return "empty query solution";
} }
stringstream ss; mongoutils::str::stream ss;
root->appendToString(&ss, 0); root->appendToString(&ss, 0);
return ss.str(); return ss;
} }
private: private:
MONGO_DISALLOW_COPYING(QuerySolution); MONGO_DISALLOW_COPYING(QuerySolution);
}; };
struct TextNode : public QuerySolutionNode { struct TextNode : public QuerySolutionNode {
TextNode() : _numWanted(100) { } TextNode() { }
virtual ~TextNode() { } virtual ~TextNode() { }
virtual StageType getType() const { return STAGE_TEXT; } virtual StageType getType() const { return STAGE_TEXT; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return false; } // text's return is LOC_AND_UNOWNED_OBJ so it's fetched and has all
bool hasField(const string& field) const { return false; } fields.
bool fetched() const { return true; }
bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
BSONObjSet _sort; BSONObjSet _sort;
uint32_t _numWanted;
BSONObj _indexKeyPattern; BSONObj _indexKeyPattern;
std::string _query; std::string _query;
std::string _language; std::string _language;
}; };
struct CollectionScanNode : public QuerySolutionNode { struct CollectionScanNode : public QuerySolutionNode {
CollectionScanNode(); CollectionScanNode();
virtual ~CollectionScanNode() { } virtual ~CollectionScanNode() { }
virtual StageType getType() const { return STAGE_COLLSCAN; } virtual StageType getType() const { return STAGE_COLLSCAN; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
BSONObjSet _sort; BSONObjSet _sort;
// Name of the namespace. // Name of the namespace.
string name; string name;
// Should we make a tailable cursor? // Should we make a tailable cursor?
bool tailable; bool tailable;
int direction; int direction;
// maxScan option to .find() limits how many docs we look at.
int maxScan;
}; };
struct AndHashNode : public QuerySolutionNode { struct AndHashNode : public QuerySolutionNode {
AndHashNode(); AndHashNode();
virtual ~AndHashNode(); virtual ~AndHashNode();
virtual StageType getType() const { return STAGE_AND_HASH; } virtual StageType getType() const { return STAGE_AND_HASH; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return children.back()->getSort (); }
BSONObjSet _sort; BSONObjSet _sort;
}; };
struct AndSortedNode : public QuerySolutionNode { struct AndSortedNode : public QuerySolutionNode {
AndSortedNode(); AndSortedNode();
virtual ~AndSortedNode(); virtual ~AndSortedNode();
virtual StageType getType() const { return STAGE_AND_SORTED; } virtual StageType getType() const { return STAGE_AND_SORTED; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return true; } bool sortedByDiskLoc() const { return true; }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
BSONObjSet _sort; BSONObjSet _sort;
}; };
struct OrNode : public QuerySolutionNode { struct OrNode : public QuerySolutionNode {
OrNode(); OrNode();
virtual ~OrNode(); virtual ~OrNode();
virtual StageType getType() const { return STAGE_OR; } virtual StageType getType() const { return STAGE_OR; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { bool sortedByDiskLoc() const {
// Even if our children are sorted by their diskloc or other fi elds, we don't maintain // Even if our children are sorted by their diskloc or other fi elds, we don't maintain
// any order on the output. // any order on the output.
return false; return false;
} }
const BSONObjSet& getSort() const { return _sort; } const BSONObjSet& getSort() const { return _sort; }
skipping to change at line 291 skipping to change at line 299
bool dedup; bool dedup;
}; };
struct MergeSortNode : public QuerySolutionNode { struct MergeSortNode : public QuerySolutionNode {
MergeSortNode(); MergeSortNode();
virtual ~MergeSortNode(); virtual ~MergeSortNode();
virtual StageType getType() const { return STAGE_SORT_MERGE; } virtual StageType getType() const { return STAGE_SORT_MERGE; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const; bool fetched() const;
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
virtual void computeProperties() { virtual void computeProperties() {
for (size_t i = 0; i < children.size(); ++i) { for (size_t i = 0; i < children.size(); ++i) {
children[i]->computeProperties(); children[i]->computeProperties();
skipping to change at line 319 skipping to change at line 327
BSONObj sort; BSONObj sort;
bool dedup; bool dedup;
}; };
struct FetchNode : public QuerySolutionNode { struct FetchNode : public QuerySolutionNode {
FetchNode(); FetchNode();
virtual ~FetchNode() { } virtual ~FetchNode() { }
virtual StageType getType() const { return STAGE_FETCH; } virtual StageType getType() const { return STAGE_FETCH; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
BSONObjSet _sorts; BSONObjSet _sorts;
}; };
struct IndexScanNode : public QuerySolutionNode { struct IndexScanNode : public QuerySolutionNode {
IndexScanNode(); IndexScanNode();
virtual ~IndexScanNode() { } virtual ~IndexScanNode() { }
virtual void computeProperties(); virtual void computeProperties();
virtual StageType getType() const { return STAGE_IXSCAN; } virtual StageType getType() const { return STAGE_IXSCAN; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return false; } bool fetched() const { return false; }
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const; bool sortedByDiskLoc() const;
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
BSONObjSet _sorts; BSONObjSet _sorts;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
bool indexIsMultiKey; bool indexIsMultiKey;
// Only set for 2d.
int limit;
int direction; int direction;
// maxScan option to .find() limits how many docs we look at.
int maxScan;
// If there's a 'returnKey' projection we add key metadata.
bool addKeyMetadata;
// BIG NOTE: // BIG NOTE:
// If you use simple bounds, we'll use whatever index access method the keypattern implies. // If you use simple bounds, we'll use whatever index access method the keypattern implies.
// If you use the complex bounds, we force Btree access. // If you use the complex bounds, we force Btree access.
// The complex bounds require Btree access. // The complex bounds require Btree access.
IndexBounds bounds; IndexBounds bounds;
}; };
struct ProjectionNode : public QuerySolutionNode { struct ProjectionNode : public QuerySolutionNode {
ProjectionNode() : liteProjection(NULL) { } ProjectionNode() { }
virtual ~ProjectionNode() { } virtual ~ProjectionNode() { }
virtual StageType getType() const { return STAGE_PROJECTION; } virtual StageType getType() const { return STAGE_PROJECTION; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
/** /**
* This node changes the type to OWNED_OBJ. There's no fetching po ssible after this. * This node changes the type to OWNED_OBJ. There's no fetching po ssible after this.
*/ */
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { bool hasField(const string& field) const {
// XXX XXX: perhaps have the QueryProjection pre-allocated and defer to it? we don't // XXX XXX: perhaps have the QueryProjection pre-allocated and defer to it? we don't
// know what we're dropping. Until we push projection down thi s doesn't matter. // know what we're dropping. Until we push projection down thi s doesn't matter.
return false; return false;
skipping to change at line 397 skipping to change at line 408
} }
const BSONObjSet& getSort() const { const BSONObjSet& getSort() const {
// TODO: If we're applying a projection that maintains sort ord er, the prefix of the // TODO: If we're applying a projection that maintains sort ord er, the prefix of the
// sort order we project is the sort order. // sort order we project is the sort order.
return _sorts; return _sorts;
} }
BSONObjSet _sorts; BSONObjSet _sorts;
// Points into the CanonicalQuery, not owned here.
LiteProjection* liteProjection;
// The full query tree. Needed when we have positional operators. // The full query tree. Needed when we have positional operators.
// Owned in the CanonicalQuery, not here. // Owned in the CanonicalQuery, not here.
MatchExpression* fullExpression; MatchExpression* fullExpression;
// Given that we don't yet have a MatchExpression analogue for the
expression language, we
// use a BSONObj.
BSONObj projection;
}; };
struct SortNode : public QuerySolutionNode { struct SortNode : public QuerySolutionNode {
SortNode() : hasBounds(false) { } SortNode() : limit(0) { }
virtual ~SortNode() { } virtual ~SortNode() { }
virtual StageType getType() const { return STAGE_SORT; } virtual StageType getType() const { return STAGE_SORT; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
virtual void computeProperties() { virtual void computeProperties() {
for (size_t i = 0; i < children.size(); ++i) { for (size_t i = 0; i < children.size(); ++i) {
children[i]->computeProperties(); children[i]->computeProperties();
} }
_sorts.clear(); _sorts.clear();
_sorts.insert(pattern); _sorts.insert(pattern);
} }
BSONObjSet _sorts; BSONObjSet _sorts;
BSONObj pattern; BSONObj pattern;
bool hasBounds; BSONObj query;
// XXX // Sum of both limit and skip count in the parsed query.
IndexBounds bounds; int limit;
}; };
struct LimitNode : public QuerySolutionNode { struct LimitNode : public QuerySolutionNode {
LimitNode() { } LimitNode() { }
virtual ~LimitNode() { } virtual ~LimitNode() { }
virtual StageType getType() const { return STAGE_LIMIT; } virtual StageType getType() const { return STAGE_LIMIT; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
int limit; int limit;
}; };
struct SkipNode : public QuerySolutionNode { struct SkipNode : public QuerySolutionNode {
SkipNode() { } SkipNode() { }
virtual ~SkipNode() { } virtual ~SkipNode() { }
virtual StageType getType() const { return STAGE_SKIP; } virtual StageType getType() const { return STAGE_SKIP; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
int skip; int skip;
}; };
// //
// Geo nodes. A thin wrapper above an IXSCAN until we can yank functio nality out of // Geo nodes. A thin wrapper above an IXSCAN until we can yank functio nality out of
// the IXSCAN layer into the stage layer. // the IXSCAN layer into the stage layer.
// //
// TODO: This is probably an expression index. // TODO: This is probably an expression index.
struct Geo2DNode : public QuerySolutionNode { struct Geo2DNode : public QuerySolutionNode {
Geo2DNode() { } Geo2DNode() { }
virtual ~Geo2DNode() { } virtual ~Geo2DNode() { }
virtual StageType getType() const { return STAGE_GEO_2D; } virtual StageType getType() const { return STAGE_GEO_2D; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return false; } bool fetched() const { return false; }
bool hasField(const string& field) const; bool hasField(const string& field) const;
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
BSONObjSet _sorts; BSONObjSet _sorts;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
GeoQuery gq; GeoQuery gq;
}; };
// This is a standalone stage. // This is a standalone stage.
struct GeoNear2DNode : public QuerySolutionNode { struct GeoNear2DNode : public QuerySolutionNode {
GeoNear2DNode() : numWanted(100) { } GeoNear2DNode() : numWanted(100), addPointMeta(false), addDistMeta( false) { }
virtual ~GeoNear2DNode() { } virtual ~GeoNear2DNode() { }
virtual StageType getType() const { return STAGE_GEO_NEAR_2D; } virtual StageType getType() const { return STAGE_GEO_NEAR_2D; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
BSONObjSet _sorts; BSONObjSet _sorts;
NearQuery nq; NearQuery nq;
int numWanted; int numWanted;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
bool addPointMeta;
bool addDistMeta;
}; };
// This is actually its own standalone stage. // This is actually its own standalone stage.
struct GeoNear2DSphereNode : public QuerySolutionNode { struct GeoNear2DSphereNode : public QuerySolutionNode {
GeoNear2DSphereNode() { } GeoNear2DSphereNode() : addPointMeta(false), addDistMeta(false) { }
virtual ~GeoNear2DSphereNode() { } virtual ~GeoNear2DSphereNode() { }
virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; } virtual StageType getType() const { return STAGE_GEO_NEAR_2DSPHERE; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return true; } bool fetched() const { return true; }
bool hasField(const string& field) const { return true; } bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; } bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return _sorts; } const BSONObjSet& getSort() const { return _sorts; }
BSONObjSet _sorts; BSONObjSet _sorts;
NearQuery nq; NearQuery nq;
IndexBounds baseBounds; IndexBounds baseBounds;
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
bool addPointMeta;
bool addDistMeta;
}; };
// //
// Internal nodes used to provide functionality // Internal nodes used to provide functionality
// //
/** /**
* If we're answering a query on a sharded cluster, docs must be checke d against the shard key * If we're answering a query on a sharded cluster, docs must be checke d against the shard key
* to ensure that we don't return data that shouldn't be there. This m ust be done prior to * to ensure that we don't return data that shouldn't be there. This m ust be done prior to
* projection, and in fact should be done as early as possible to avoid propagating stale data * projection, and in fact should be done as early as possible to avoid propagating stale data
* through the pipeline. * through the pipeline.
*/ */
struct ShardingFilterNode : public QuerySolutionNode { struct ShardingFilterNode : public QuerySolutionNode {
ShardingFilterNode() { } ShardingFilterNode() { }
virtual ~ShardingFilterNode() { } virtual ~ShardingFilterNode() { }
virtual StageType getType() const { return STAGE_SHARDING_FILTER; } virtual StageType getType() const { return STAGE_SHARDING_FILTER; }
virtual void appendToString(stringstream* ss, int indent) const; virtual void appendToString(mongoutils::str::stream* ss, int indent ) const;
bool fetched() const { return children[0]->fetched(); } bool fetched() const { return children[0]->fetched(); }
bool hasField(const string& field) const { return children[0]->hasF ield(field); } bool hasField(const string& field) const { return children[0]->hasF ield(field); }
bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); } bool sortedByDiskLoc() const { return children[0]->sortedByDiskLoc( ); }
const BSONObjSet& getSort() const { return children[0]->getSort(); } const BSONObjSet& getSort() const { return children[0]->getSort(); }
}; };
/**
* If documents mutate or are deleted during a query, we can (in some c
ases) fetch them
* and still return them. This stage merges documents that have been m
utated or deleted
* into the query result stream.
*/
struct KeepMutationsNode : public QuerySolutionNode {
KeepMutationsNode() { }
virtual ~KeepMutationsNode() { }
virtual StageType getType() const { return STAGE_KEEP_MUTATIONS; }
virtual void appendToString(mongoutils::str::stream* ss, int indent
) const;
// Any flagged results are OWNED_OBJ and therefore we're covered if
our child is.
bool fetched() const { return children[0]->fetched(); }
// Any flagged results are OWNED_OBJ and as such they'll have any f
ield we need.
bool hasField(const string& field) const { return children[0]->hasF
ield(field); }
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; }
// Since we merge in flagged results we have no sort order.
BSONObjSet sorts;
};
/**
* Distinct queries only want one value for a given field. We run an i
ndex scan but
* *always* skip over the current key to the next key.
*/
struct DistinctNode : public QuerySolutionNode {
DistinctNode() { }
virtual ~DistinctNode() { }
virtual StageType getType() const { return STAGE_DISTINCT; }
virtual void appendToString(mongoutils::str::stream* ss, int indent
) const;
// This stage is created "on top" of normal planning and as such th
e properties
// below don't really matter.
bool fetched() const { return true; }
bool hasField(const string& field) const { return !indexKeyPattern[
field].eoo(); }
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; }
BSONObjSet sorts;
BSONObj indexKeyPattern;
int direction;
IndexBounds bounds;
// We are distinct-ing over the 'fieldNo'-th field of 'indexKeyPatt
ern'.
int fieldNo;
};
/**
* Some count queries reduce to counting how many keys are between two
entries in a
* Btree.
*/
struct CountNode : public QuerySolutionNode {
CountNode() { }
virtual ~CountNode() { }
virtual StageType getType() const { return STAGE_COUNT; }
virtual void appendToString(mongoutils::str::stream* ss, int indent
) const;
bool fetched() const { return true; }
bool hasField(const string& field) const { return true; }
bool sortedByDiskLoc() const { return false; }
const BSONObjSet& getSort() const { return sorts; }
BSONObjSet sorts;
BSONObj indexKeyPattern;
BSONObj startKey;
bool startKeyInclusive;
BSONObj endKey;
bool endKeyInclusive;
};
} // namespace mongo } // namespace mongo
 End of changes. 42 change blocks. 
40 lines changed or deleted 149 lines changed or added


 queryutil.h   queryutil.h 
skipping to change at line 214 skipping to change at line 214
SpecialIndices _special; // Index type name of a non standard (eg ' 2d') index required by a SpecialIndices _special; // Index type name of a non standard (eg ' 2d') index required by a
// parsed query operator (eg '$near'). Could be >1. // parsed query operator (eg '$near'). Could be >1.
bool _exactMatchRepresentation; bool _exactMatchRepresentation;
BSONElement _elemMatchContext; // Parent $elemMatch object of the f ield constraint that BSONElement _elemMatchContext; // Parent $elemMatch object of the f ield constraint that
// generated this FieldRange. For e xample if the query is // generated this FieldRange. For e xample if the query is
// { a:{ $elemMatch:{ b:1, c:1 } } } , then the // { a:{ $elemMatch:{ b:1, c:1 } } } , then the
// _elemMatchContext for the FieldRa nge on 'a.b' is the query // _elemMatchContext for the FieldRa nge on 'a.b' is the query
// element having field name '$elemM atch'. // element having field name '$elemM atch'.
}; };
class QueryPattern;
/** /**
* A set of FieldRanges determined from constraints on the fields of a query, * A set of FieldRanges determined from constraints on the fields of a query,
* that may be used to determine index bounds. * that may be used to determine index bounds.
*/ */
class FieldRangeSet { class FieldRangeSet {
public: public:
friend class OrRangeGenerator; friend class OrRangeGenerator;
friend class FieldRangeVector; friend class FieldRangeVector;
/** /**
* Creates a FieldRangeSet representing a superset of the documents matching a query. * Creates a FieldRangeSet representing a superset of the documents matching a query.
skipping to change at line 281 skipping to change at line 279
* *
* Used in determining "suitability" for hashedindexes, and also in * Used in determining "suitability" for hashedindexes, and also in
* sharding for determining the relevant shards for a query. * sharding for determining the relevant shards for a query.
* *
* TODO: move this into FieldRange instead of FieldRangeSet * TODO: move this into FieldRange instead of FieldRangeSet
*/ */
bool isPointIntervalSet( const string& fieldname ) const; bool isPointIntervalSet( const string& fieldname ) const;
const char *ns() const { return _ns.c_str(); } const char *ns() const { return _ns.c_str(); }
QueryPattern pattern( const BSONObj &sort = BSONObj() ) const; // QueryPattern pattern( const BSONObj &sort = BSONObj() ) const;
SpecialIndices getSpecial() const; SpecialIndices getSpecial() const;
/** /**
* @return a FieldRangeSet approximation of the documents in 'this' but * @return a FieldRangeSet approximation of the documents in 'this' but
* not in 'other'. The approximation will be a superset of the doc uments * not in 'other'. The approximation will be a superset of the doc uments
* in 'this' but not 'other'. * in 'this' but not 'other'.
*/ */
const FieldRangeSet &operator-=( const FieldRangeSet &other ); const FieldRangeSet &operator-=( const FieldRangeSet &other );
/** @return intersection of 'this' with 'other'. */ /** @return intersection of 'this' with 'other'. */
const FieldRangeSet &operator&=( const FieldRangeSet &other ); const FieldRangeSet &operator&=( const FieldRangeSet &other );
 End of changes. 2 change blocks. 
3 lines changed or deleted 1 lines changed or added


 random.h   random.h 
skipping to change at line 44 skipping to change at line 44
int32_t nextInt32(); int32_t nextInt32();
int64_t nextInt64(); int64_t nextInt64();
/** /**
* @return a number between 0 and max * @return a number between 0 and max
*/ */
int32_t nextInt32( int32_t max ) { return nextInt32() % max; } int32_t nextInt32( int32_t max ) { return nextInt32() % max; }
/**
* @return a number between 0 and max
*/
int64_t nextInt64( int64_t max ) { return nextInt64() % max; }
/**
* @return a number between 0 and max
*
* This makes PsuedoRandom instances passable as the third argument
to std::random_shuffle
*/
intptr_t operator()(intptr_t max) {
if (sizeof(intptr_t) == 4)
return static_cast<intptr_t>(nextInt32(static_cast<int32_t>
(max)));
return static_cast<intptr_t>(nextInt64(static_cast<int64_t>(max
)));
}
private: private:
int32_t _x; int32_t _x;
int32_t _y; int32_t _y;
int32_t _z; int32_t _z;
int32_t _w; int32_t _w;
}; };
/** /**
* More secure random numbers * More secure random numbers
* Suitable for nonce/crypto * Suitable for nonce/crypto
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 range_deleter.h   range_deleter.h 
skipping to change at line 38 skipping to change at line 38
#pragma once #pragma once
#include <boost/thread/thread.hpp> #include <boost/thread/thread.hpp>
#include <deque> #include <deque>
#include <set> #include <set>
#include <string> #include <string>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/cc_by_loc.h" // for typedef CursorId #include "mongo/db/clientcursor.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/util/concurrency/mutex.h" #include "mongo/util/concurrency/mutex.h"
#include "mongo/util/concurrency/synchronization.h" #include "mongo/util/concurrency/synchronization.h"
namespace mongo { namespace mongo {
struct RangeDeleterEnv; struct RangeDeleterEnv;
class RangeDeleterStats; class RangeDeleterStats;
/** /**
skipping to change at line 277 skipping to change at line 277
// Keeps track of counters regarding each of the queues. // Keeps track of counters regarding each of the queues.
scoped_ptr<RangeDeleterStats> _stats; scoped_ptr<RangeDeleterStats> _stats;
}; };
/** /**
* Class for encapsulating logic used by the RangeDeleter class to perf orm its tasks. * Class for encapsulating logic used by the RangeDeleter class to perf orm its tasks.
*/ */
struct RangeDeleterEnv { struct RangeDeleterEnv {
virtual ~RangeDeleterEnv() {} virtual ~RangeDeleterEnv() {}
virtual void initThread() = 0;
/** /**
* Deletes the documents from the given range. This method should b e * Deletes the documents from the given range. This method should b e
* responsible for making sure that the proper contexts are setup * responsible for making sure that the proper contexts are setup
* to be able to perform deletions. * to be able to perform deletions.
* *
* Must be a synchronous call. Docs should be deleted after call en ds. * Must be a synchronous call. Docs should be deleted after call en ds.
* Must not throw Exceptions. * Must not throw Exceptions.
*/ */
virtual bool deleteRange(const StringData& ns, virtual bool deleteRange(const StringData& ns,
const BSONObj& inclusiveLower, const BSONObj& inclusiveLower,
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 range_deleter_db_env.h   range_deleter_db_env.h 
skipping to change at line 39 skipping to change at line 39
#pragma once #pragma once
#include "mongo/db/range_deleter.h" #include "mongo/db/range_deleter.h"
namespace mongo { namespace mongo {
/** /**
* This class implements the deleter methods to be used for a shard. * This class implements the deleter methods to be used for a shard.
*/ */
struct RangeDeleterDBEnv : public RangeDeleterEnv { struct RangeDeleterDBEnv : public RangeDeleterEnv {
virtual void initThread();
/** /**
* Deletes the documents from the given range synchronously. * Deletes the documents from the given range synchronously.
* *
* The keyPattern will be used to determine the right index to use to perform * The keyPattern will be used to determine the right index to use to perform
* the deletion and it can be a prefix of an existing index. Caller is responsible * the deletion and it can be a prefix of an existing index. Caller is responsible
* of making sure that both inclusiveLower and exclusiveUpper is a prefix of keyPattern. * of making sure that both inclusiveLower and exclusiveUpper is a prefix of keyPattern.
* *
* Note that secondaryThrottle will be ignored if current process i s not part * Note that secondaryThrottle will be ignored if current process i s not part
* of a replica set. * of a replica set.
* *
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 range_deleter_mock_env.h   range_deleter_mock_env.h 
skipping to change at line 63 skipping to change at line 63
/** /**
* Mock environment for RangeDeleter with knobs for pausing/resuming * Mock environment for RangeDeleter with knobs for pausing/resuming
* deletes, setting open cursors IDs per namespace and the ability to * deletes, setting open cursors IDs per namespace and the ability to
* record the history of deletes performed through this environment. * record the history of deletes performed through this environment.
*/ */
class RangeDeleterMockEnv: public mongo::RangeDeleterEnv { class RangeDeleterMockEnv: public mongo::RangeDeleterEnv {
public: public:
RangeDeleterMockEnv(); RangeDeleterMockEnv();
void initThread() {}
// //
// Environment modification methods. // Environment modification methods.
// //
/** /**
* Adds an id to the current set of cursors in the given namespace. * Adds an id to the current set of cursors in the given namespace.
*/ */
void addCursorId(const StringData& ns, CursorId id); void addCursorId(const StringData& ns, CursorId id);
/** /**
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 range_preserver.h   range_preserver.h 
skipping to change at line 51 skipping to change at line 51
* change. * change.
*/ */
class RangePreserver { class RangePreserver {
public: public:
/** /**
* Sharding uses the set of active cursor IDs as the current state. We add a dummy * Sharding uses the set of active cursor IDs as the current state. We add a dummy
* ClientCursor, which creates an additional cursor ID. The cursor ID lasts as long as this * ClientCursor, which creates an additional cursor ID. The cursor ID lasts as long as this
* object does. The ClientCursorPin guarantees that the underlying ClientCursor is not * object does. The ClientCursorPin guarantees that the underlying ClientCursor is not
* deleted until this object goes out of scope. * deleted until this object goes out of scope.
*/ */
RangePreserver(const string& ns) { RangePreserver(const Collection* collection) {
invariant( collection );
// Not a memory leak. Cached in a static structure by CC's cto r. // Not a memory leak. Cached in a static structure by CC's cto r.
ClientCursor* cc = new ClientCursor(ns); ClientCursor* cc = new ClientCursor(collection);
// Pin keeps the CC from being deleted while it's in scope. We delete it ourselves. // Pin keeps the CC from being deleted while it's in scope. We delete it ourselves.
_pin.reset(new ClientCursorPin(cc->cursorid())); _pin.reset(new ClientCursorPin(collection, cc->cursorid()));
} }
~RangePreserver() { ~RangePreserver() {
_pin->deleteUnderlying(); _pin->deleteUnderlying();
} }
private: private:
boost::scoped_ptr<ClientCursorPin> _pin; boost::scoped_ptr<ClientCursorPin> _pin;
}; };
 End of changes. 3 change blocks. 
3 lines changed or deleted 4 lines changed or added


 record.h   record.h 
skipping to change at line 35 skipping to change at line 35
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsonobjbuilder.h"
#include "mongo/platform/atomic_word.h" #include "mongo/platform/atomic_word.h"
#include "mongo/db/storage/extent.h"
namespace mongo { namespace mongo {
/* Record is a record in a datafile. DeletedRecord is similar but for
deleted space.
*11:03:20 AM) dm10gen: regarding extentOfs...
(11:03:42 AM) dm10gen: an extent is a continugous disk area, which cont
ains many Records and DeleteRecords
(11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (
64 bit total)
(11:04:16 AM) dm10gen: to keep the headesr small, instead of storing a
64 bit ptr to the full extent address, we keep just the offset
(11:04:29 AM) dm10gen: we can do this as we know the record's address,
and it has the same fileNo
(11:04:33 AM) dm10gen: see class DiskLoc for more info
(11:04:43 AM) dm10gen: so that is how Record::myExtent() works
(11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we mu
st populate its extentOfs then
*/
#pragma pack(1)
class Record {
public:
enum HeaderSizeValue { HeaderSize = 16 };
int lengthWithHeaders() const { _accessing(); return _lengthWithHe
aders; }
int& lengthWithHeaders() { _accessing(); return _lengthWithHeaders
; }
int extentOfs() const { _accessing(); return _extentOfs; }
int& extentOfs() { _accessing(); return _extentOfs; }
int nextOfs() const { _accessing(); return _nextOfs; }
int& nextOfs() { _accessing(); return _nextOfs; }
int prevOfs() const { _accessing(); return _prevOfs; }
int& prevOfs() { _accessing(); return _prevOfs; }
const char * data() const { _accessing(); return _data; }
char * data() { _accessing(); return _data; }
const char * dataNoThrowing() const { return _data; }
char * dataNoThrowing() { return _data; }
int netLength() const { _accessing(); return _netLength(); }
/* use this when a record is deleted. basically a union with next/p
rev fields */
DeletedRecord& asDeleted() { return *((DeletedRecord*) this); }
// TODO(ERH): remove
Extent* myExtent(const DiskLoc& myLoc) { return DiskLoc(myLoc.a(),
extentOfs() ).ext(); }
/* get the next record in the namespace, traversing extents as nece
ssary */
DiskLoc getNext(const DiskLoc& myLoc); // TODO(ERH): remove
DiskLoc getPrev(const DiskLoc& myLoc); // TODO(ERH): remove
struct NP {
int nextOfs;
int prevOfs;
};
NP* np() { return (NP*) &_nextOfs; }
// ---------------------
// memory cache
// ---------------------
/**
* touches the data so that is in physical memory
* @param entireRecrd if false, only the header and first byte is t
ouched
* if true, the entire record is touched
* */
void touch( bool entireRecrd = false ) const;
/**
* @return if this record is likely in physical memory
* its not guaranteed because its possible it gets swapped
out in a very unlucky windows
*/
bool likelyInPhysicalMemory() const ;
/**
* tell the cache this Record was accessed
* @return this, for simple chaining
*/
Record* accessed();
static bool likelyInPhysicalMemory( const char* data );
/**
* this adds stats about page fault exceptions currently
* specically how many times we call _accessing where the record is
not in memory
* and how many times we throw a PageFaultException
*/
static void appendStats( BSONObjBuilder& b );
static void appendWorkingSetInfo( BSONObjBuilder& b );
private:
int _netLength() const { return _lengthWithHeaders - HeaderSize; }
/**
* call this when accessing a field which could hit disk
*/
void _accessing() const;
int _lengthWithHeaders;
int _extentOfs;
int _nextOfs;
int _prevOfs;
/** be careful when referencing this that your write intent was cor
rect */
char _data[4];
public:
static bool MemoryTrackingEnabled;
};
#pragma pack()
// TODO: this probably moves to record_store.h
class DeletedRecord {
public:
int lengthWithHeaders() const { _accessing(); return _lengthWithHea
ders; }
int& lengthWithHeaders() { _accessing(); return _lengthWithHeaders;
}
int extentOfs() const { _accessing(); return _extentOfs; }
int& extentOfs() { _accessing(); return _extentOfs; }
// TODO: we need to not const_cast here but problem is DiskLoc::wri
ting
DiskLoc& nextDeleted() const { _accessing(); return const_cast<Disk
Loc&>(_nextDeleted); }
DiskLoc myExtentLoc(const DiskLoc& myLoc) const {
_accessing();
return DiskLoc(myLoc.a(), _extentOfs);
}
Extent* myExtent(const DiskLoc& myLoc) {
_accessing();
return DiskLoc(myLoc.a(), _extentOfs).ext();
}
private:
void _accessing() const;
int _lengthWithHeaders;
int _extentOfs;
DiskLoc _nextDeleted;
};
BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
struct RecordStats { struct RecordStats {
void record( BSONObjBuilder& b ); void record( BSONObjBuilder& b );
AtomicInt64 accessesNotInMemory; AtomicInt64 accessesNotInMemory;
AtomicInt64 pageFaultExceptionsThrown; AtomicInt64 pageFaultExceptionsThrown;
}; };
// ------------------
inline DiskLoc Record::getNext(const DiskLoc& myLoc) {
_accessing();
if ( _nextOfs != DiskLoc::NullOfs ) {
/* defensive */
if ( _nextOfs >= 0 && _nextOfs < 10 ) {
logContext("Assertion failure - Record::getNext() referenci
ng a deleted record?");
return DiskLoc();
}
return DiskLoc(myLoc.a(), _nextOfs);
}
Extent *e = myExtent(myLoc);
while ( 1 ) {
if ( e->xnext.isNull() )
return DiskLoc(); // end of table.
e = e->xnext.ext();
if ( !e->firstRecord.isNull() )
break;
// entire extent could be empty, keep looking
}
return e->firstRecord;
}
inline DiskLoc Record::getPrev(const DiskLoc& myLoc) {
_accessing();
// Check if we still have records on our current extent
if ( _prevOfs != DiskLoc::NullOfs ) {
return DiskLoc(myLoc.a(), _prevOfs);
}
// Get the current extent
Extent *e = myExtent(myLoc);
while ( 1 ) {
if ( e->xprev.isNull() ) {
// There are no more extents before this one
return DiskLoc();
}
// Move to the extent before this one
e = e->xprev.ext();
if ( !e->lastRecord.isNull() ) {
// We have found a non empty extent
break;
}
}
// Return the last record in our new extent
return e->lastRecord;
}
} }
 End of changes. 3 change blocks. 
0 lines changed or deleted 215 lines changed or added


 record_store.h   record_store.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
namespace mongo { namespace mongo {
class Collection;
class DocWriter;
class ExtentManager; class ExtentManager;
class MAdvise;
class NamespaceDetails; class NamespaceDetails;
class Record; class Record;
class RecordStore { class RecordStore {
MONGO_DISALLOW_COPYING(RecordStore);
public: public:
RecordStore( const StringData& ns ); RecordStore( const StringData& ns );
virtual ~RecordStore();
void init( NamespaceDetails* details, virtual Record* recordFor( const DiskLoc& loc ) const = 0;
ExtentManager* em,
bool isSystemIndexes );
void deallocRecord( const DiskLoc& dl, Record* todelete ); virtual void deleteRecord( const DiskLoc& dl ) = 0;
StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int quotaMa x ); virtual StatusWith<DiskLoc> insertRecord( const char* data, int len , int quotaMax ) = 0;
private: virtual StatusWith<DiskLoc> insertRecord( const DocWriter* doc, int
quotaMax ) = 0;
protected:
std::string _ns; std::string _ns;
};
class RecordStoreV1Base : public RecordStore {
public:
RecordStoreV1Base( const StringData& ns,
NamespaceDetails* details,
ExtentManager* em,
bool isSystemIndexes );
virtual ~RecordStoreV1Base();
Record* recordFor( const DiskLoc& loc ) const;
void deleteRecord( const DiskLoc& dl );
StatusWith<DiskLoc> insertRecord( const char* data, int len, int qu
otaMax );
StatusWith<DiskLoc> insertRecord( const DocWriter* doc, int quotaMa
x );
protected:
virtual StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int
quotaMax ) = 0;
/** add a record to the end of the linked list chain within this ex
tent.
require: you must have already declared write intent for the re
cord header.
*/
void _addRecordToRecListInExtent(Record* r, DiskLoc loc);
NamespaceDetails* _details; NamespaceDetails* _details;
ExtentManager* _extentManager; ExtentManager* _extentManager;
bool _isSystemIndexes; bool _isSystemIndexes;
}; };
// used by index and original collections
class SimpleRecordStoreV1 : public RecordStoreV1Base {
public:
SimpleRecordStoreV1( const StringData& ns,
NamespaceDetails* details,
ExtentManager* em,
bool isSystemIndexes );
virtual ~SimpleRecordStoreV1();
protected:
virtual StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int
quotaMax );
};
class CappedRecordStoreV1 : public RecordStoreV1Base {
public:
CappedRecordStoreV1( Collection* collection,
const StringData& ns,
NamespaceDetails* details,
ExtentManager* em,
bool isSystemIndexes );
virtual ~CappedRecordStoreV1();
protected:
virtual StatusWith<DiskLoc> allocRecord( int lengthWithHeaders, int
quotaMax );
Collection* _collection;
OwnedPointerVector<MAdvise> _extentAdvice;
};
} }
 End of changes. 11 change blocks. 
6 lines changed or deleted 81 lines changed or added


 redef_macros.h   redef_macros.h 
skipping to change at line 39 skipping to change at line 39
#define malloc MONGO_malloc #define malloc MONGO_malloc
#pragma push_macro("realloc") #pragma push_macro("realloc")
#undef realloc #undef realloc
#define realloc MONGO_realloc #define realloc MONGO_realloc
#endif #endif
// util/assert_util.h // util/assert_util.h
#pragma push_macro("verify") #pragma push_macro("verify")
#undef verify #undef verify
#define verify MONGO_verify #define verify MONGO_verify
#pragma push_macro("invariant")
#undef invariant
#define invariant MONGO_invariant
#pragma push_macro("dassert") #pragma push_macro("dassert")
#undef dassert #undef dassert
#define dassert MONGO_dassert #define dassert MONGO_dassert
#pragma push_macro("wassert") #pragma push_macro("wassert")
#undef wassert #undef wassert
#define wassert MONGO_wassert #define wassert MONGO_wassert
#pragma push_macro("massert") #pragma push_macro("massert")
#undef massert #undef massert
#define massert MONGO_massert #define massert MONGO_massert
#pragma push_macro("uassert") #pragma push_macro("uassert")
 End of changes. 1 change blocks. 
0 lines changed or deleted 3 lines changed or added


 repl_reads_ok.h   repl_reads_ok.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
namespace mongo { namespace mongo {
class ParsedQuery;
class LiteParsedQuery; class LiteParsedQuery;
// Check to see if slaveOk reads are allowed, // Check to see if slaveOk reads are allowed,
// based on read preference and query options // based on read preference and query options
void replVerifyReadsOk(const LiteParsedQuery* pq = 0);
// DEPRECATED impl
void replVerifyReadsOk(const ParsedQuery* pq = 0);
void replVerifyReadsOk(const LiteParsedQuery* pq);
} }
 End of changes. 2 change blocks. 
6 lines changed or deleted 1 lines changed or added


 resource_pattern.h   resource_pattern.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/platform/hash_namespace.h" #include "mongo/platform/hash_namespace.h"
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 rs.h   rs.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/bson/oid.h"
#include "mongo/bson/optime.h" #include "mongo/bson/optime.h"
#include "mongo/db/commands.h" #include "mongo/db/commands.h"
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
#include "mongo/db/storage/index_details.h" #include "mongo/db/structure/catalog/index_details.h"
#include "mongo/db/repl/oplogreader.h" #include "mongo/db/repl/oplogreader.h"
#include "mongo/db/repl/rs_config.h" #include "mongo/db/repl/rs_config.h"
#include "mongo/db/repl/rs_exception.h" #include "mongo/db/repl/rs_exception.h"
#include "mongo/db/repl/rs_member.h" #include "mongo/db/repl/rs_member.h"
#include "mongo/db/repl/rs_sync.h" #include "mongo/db/repl/rs_sync.h"
#include "mongo/db/repl/sync_source_feedback.h" #include "mongo/db/repl/sync_source_feedback.h"
#include "mongo/util/concurrency/list.h" #include "mongo/util/concurrency/list.h"
#include "mongo/util/concurrency/msg.h" #include "mongo/util/concurrency/msg.h"
#include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/concurrency/thread_pool.h"
#include "mongo/util/concurrency/value.h" #include "mongo/util/concurrency/value.h"
skipping to change at line 180 skipping to change at line 181
* it to P (_currentSyncTarget). Then it would use this connection to * it to P (_currentSyncTarget). Then it would use this connection to
* pretend to be S1, replicating off of P. * pretend to be S1, replicating off of P.
*/ */
void percolate(const mongo::OID& rid, const OpTime& last); void percolate(const mongo::OID& rid, const OpTime& last);
void associateSlave(const BSONObj& rid, const int memberId); void associateSlave(const BSONObj& rid, const int memberId);
bool updateSlave(const mongo::OID& id, const OpTime& last); bool updateSlave(const mongo::OID& id, const OpTime& last);
void clearCache(); void clearCache();
}; };
class Consensus { class Consensus {
private:
ReplSetImpl &rs; ReplSetImpl &rs;
struct LastYea { struct LastYea {
LastYea() : when(0), who(0xffffffff) { } LastYea() : when(0), who(0xffffffff) { }
time_t when; time_t when;
unsigned who; unsigned who;
}; };
static SimpleMutex lyMutex; static SimpleMutex lyMutex;
Guarded<LastYea,lyMutex> ly; Guarded<LastYea,lyMutex> ly;
unsigned yea(unsigned memberId); // throws VoteException unsigned yea(unsigned memberId); // throws VoteException
void electionFailed(unsigned meid); void electionFailed(unsigned meid);
void _electSelf(); void _electSelf();
bool weAreFreshest(bool& allUp, int& nTies); bool weAreFreshest(bool& allUp, int& nTies);
bool sleptLast; // slept last elect() pass bool sleptLast; // slept last elect() pass
// This is a unique id that is changed each time we transition to P
RIMARY, as the
// result of an election.
OID _electionId;
public: public:
Consensus(ReplSetImpl *t) : rs(*t) { Consensus(ReplSetImpl *t) : rs(*t) {
sleptLast = false; sleptLast = false;
steppedDown = 0; steppedDown = 0;
} }
/* if we've stepped down, this is when we are allowed to try to ele ct ourself again. /* if we've stepped down, this is when we are allowed to try to ele ct ourself again.
todo: handle possible weirdnesses at clock skews etc. todo: handle possible weirdnesses at clock skews etc.
*/ */
time_t steppedDown; time_t steppedDown;
int totalVotes() const; int totalVotes() const;
bool aMajoritySeemsToBeUp() const; bool aMajoritySeemsToBeUp() const;
bool shouldRelinquish() const; bool shouldRelinquish() const;
void electSelf(); void electSelf();
void electCmdReceived(BSONObj, BSONObjBuilder*); void electCmdReceived(BSONObj, BSONObjBuilder*);
void multiCommand(BSONObj cmd, list<Target>& L); void multiCommand(BSONObj cmd, list<Target>& L);
OID getElectionId() const { return _electionId; }
void setElectionId(OID oid) { _electionId = oid; }
}; };
/** /**
* most operations on a ReplSet object should be done while locked. tha t * most operations on a ReplSet object should be done while locked. tha t
* logic implemented here. * logic implemented here.
* *
* Order of locking: lock the replica set, then take a rwlock. * Order of locking: lock the replica set, then take a rwlock.
*/ */
class RSBase : boost::noncopyable { class RSBase : boost::noncopyable {
public: public:
skipping to change at line 370 skipping to change at line 379
/** /**
* Updates the lastHeartbeatRecv of Member with the given id. * Updates the lastHeartbeatRecv of Member with the given id.
*/ */
void msgUpdateHBRecv(unsigned id, time_t newTime); void msgUpdateHBRecv(unsigned id, time_t newTime);
StateBox box; StateBox box;
SyncSourceFeedback syncSourceFeedback; SyncSourceFeedback syncSourceFeedback;
OpTime lastOpTimeWritten; OpTime lastOpTimeWritten;
OpTime getEarliestOpTimeWritten() const;
long long lastH; // hash we use to make sure we are reading the rig ht flow of ops and aren't on an out-of-date "fork" long long lastH; // hash we use to make sure we are reading the rig ht flow of ops and aren't on an out-of-date "fork"
bool forceSyncFrom(const string& host, string& errmsg, BSONObjBuild er& result); bool forceSyncFrom(const string& host, string& errmsg, BSONObjBuild er& result);
// Check if the current sync target is suboptimal. This must be cal led while holding a mutex // Check if the current sync target is suboptimal. This must be cal led while holding a mutex
// that prevents the sync source from changing. // that prevents the sync source from changing.
bool shouldChangeSyncTarget(const OpTime& target) const; bool shouldChangeSyncTarget(const OpTime& target) const;
/** /**
* Find the closest member (using ping time) with a higher latest o ptime. * Find the closest member (using ping time) with a higher latest o ptime.
*/ */
const Member* getMemberToSyncTo(); const Member* getMemberToSyncTo();
void veto(const string& host, unsigned secs=10); void veto(const string& host, unsigned secs=10);
bool gotForceSync(); bool gotForceSync();
void goStale(const Member* m, const BSONObj& o); void goStale(const Member* m, const BSONObj& o);
OID getElectionId() const { return elect.getElectionId(); }
private: private:
set<ReplSetHealthPollTask*> healthTasks; set<ReplSetHealthPollTask*> healthTasks;
void endOldHealthTasks(); void endOldHealthTasks();
void startHealthTaskFor(Member *m); void startHealthTaskFor(Member *m);
Consensus elect; Consensus elect;
void relinquish(); void relinquish();
void forgetPrimary(); void forgetPrimary();
protected: protected:
bool _stepDown(int secs); bool _stepDown(int secs);
skipping to change at line 741 skipping to change at line 754
/** inlines ----------------- */ /** inlines ----------------- */
inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig: :MemberCfg *c, bool self) : inline Member::Member(HostAndPort h, unsigned ord, const ReplSetConfig: :MemberCfg *c, bool self) :
_config(*c), _h(h), _hbinfo(ord) { _config(*c), _h(h), _hbinfo(ord) {
verify(c); verify(c);
if( self ) if( self )
_hbinfo.health = 1.0; _hbinfo.health = 1.0;
} }
inline bool ignoreUniqueIndex(IndexDescriptor* idx) { inline bool ignoreUniqueIndex(const IndexDescriptor* idx) {
if (!idx->unique()) { if (!idx->unique()) {
return false; return false;
} }
if (!theReplSet) { if (!theReplSet) {
return false; return false;
} }
// see SERVER-6671 // see SERVER-6671
MemberState ms = theReplSet->state(); MemberState ms = theReplSet->state();
if (! ((ms == MemberState::RS_STARTUP2) || if (! ((ms == MemberState::RS_STARTUP2) ||
(ms == MemberState::RS_RECOVERING) || (ms == MemberState::RS_RECOVERING) ||
skipping to change at line 768 skipping to change at line 781
return false; return false;
} }
// Never ignore _id index // Never ignore _id index
if (idx->isIdIndex()) { if (idx->isIdIndex()) {
return false; return false;
} }
return true; return true;
} }
inline bool ignoreUniqueIndex(IndexDetails& idx) {
if (!idx.unique()) {
return false;
}
if (!theReplSet) {
return false;
}
// see SERVER-6671
MemberState ms = theReplSet->state();
if (! ((ms == MemberState::RS_STARTUP2) ||
(ms == MemberState::RS_RECOVERING) ||
(ms == MemberState::RS_ROLLBACK))) {
return false;
}
// 2 is the oldest oplog version where operations
// are fully idempotent.
if (theReplSet->oplogVersion < 2) {
return false;
}
// Never ignore _id index
if (idx.isIdIndex()) {
return false;
}
return true;
}
} }
 End of changes. 9 change blocks. 
29 lines changed or deleted 16 lines changed or added


 runner.h   runner.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query.h"
#include "mongo/db/invalidation_type.h"
namespace mongo { namespace mongo {
class Collection;
class DiskLoc; class DiskLoc;
class TypeExplain; class TypeExplain;
struct PlanInfo;
/** /**
* A runner runs a query. * A runner runs a query.
*/ */
class Runner { class Runner {
public: public:
virtual ~Runner() { } virtual ~Runner() { }
enum RunnerState { enum RunnerState {
// We successfully populated the out parameter. // We successfully populated the out parameter.
skipping to change at line 127 skipping to change at line 130
}; };
/** /**
* Set the yielding policy of the underlying runner. See the Runne rYieldPolicy enum above. * Set the yielding policy of the underlying runner. See the Runne rYieldPolicy enum above.
*/ */
virtual void setYieldPolicy(YieldPolicy policy) = 0; virtual void setYieldPolicy(YieldPolicy policy) = 0;
/** /**
* Get the next result from the query. * Get the next result from the query.
* *
* If objOut is not-NULL, it is filled with the next result, if the * If objOut is not NULL, only results that have a BSONObj are retu
re is one. If there is rned. The BSONObj may
* not, getNext returns RUNNER_ERROR. * point to on-disk data (isOwned will be false) and must be copied
by the caller before
* yielding.
* *
* If dlOut is not-NULL: * If dlOut is not NULL, only results that have a valid DiskLoc are
* If objOut is unowned, dlOut is set to its associated DiskLoc. returned.
* If objOut is owned, getNext returns RUNNER_ERROR.
* *
* If the caller is running a query, they only care about the objec * If both objOut and dlOut are not NULL, only results with both a
t. valid BSONObj and DiskLoc
* will be returned. The BSONObj is the object located at the Disk
Loc provided.
*
* If the underlying query machinery produces a result that does no
t have the data requested
* by the user, it will be silently dropped.
*
* If the caller is running a query, they probably only care about
the object.
* If the caller is an internal client, they may only care about Di skLocs (index scan), or * If the caller is an internal client, they may only care about Di skLocs (index scan), or
* about object + DiskLocs (collection scan). * about object + DiskLocs (collection scan).
* *
* Some notes on objOut and ownership: * Some notes on objOut and ownership:
* *
* objOut may be an owned object in certain cases: invalidation of the underlying DiskLoc, * objOut may be an owned object in certain cases: invalidation of the underlying DiskLoc,
* object is created from covered index key data, object is project * the object is created from covered index key data, the object is
ed or otherwise the projected or otherwise
* result of a computation. * the result of a computation.
* *
* objOut will be unowned if it's the result of a fetch or a collec tion scan. * objOut will be unowned if it's the result of a fetch or a collec tion scan.
*/ */
virtual RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut) = 0; virtual RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut) = 0;
/** /**
* Will the next call to getNext() return EOF? It's useful to know if the runner is done * Will the next call to getNext() return EOF? It's useful to know if the runner is done
* without having to take responsibility for a result. * without having to take responsibility for a result.
*/ */
virtual bool isEOF() = 0; virtual bool isEOF() = 0;
/** /**
* Inform the runner that the provided DiskLoc is about to disappea * Inform the runner about changes to DiskLoc(s) that occur while t
r (or change entirely). he runner is yielded.
* The runner then takes any actions required to continue operating * The runner must take any actions required to continue operating
correctly, including correctly, including
* broadcasting the invalidation request to the PlanStage tree bein g run. * broadcasting the invalidation request to the PlanStage tree bein g run.
* *
* Called from ClientCursor::aboutToDelete. * Called from CollectionCursorCache::invalidateDocument.
*
* See db/invalidation_type.h for InvalidationType.
*/ */
virtual void invalidate(const DiskLoc& dl) = 0; virtual void invalidate(const DiskLoc& dl, InvalidationType type) = 0;
/** /**
* Mark the Runner as no longer valid. Can happen when a runner yi elds and the underlying * Mark the Runner as no longer valid. Can happen when a runner yi elds and the underlying
* database is dropped/indexes removed/etc. All future to calls to getNext return * database is dropped/indexes removed/etc. All future to calls to getNext return
* RUNNER_DEAD. Every other call is a NOOP. * RUNNER_DEAD. Every other call is a NOOP.
*/ */
virtual void kill() = 0; virtual void kill() = 0;
/** /**
* Save any state required to yield. * Save any state required to yield.
skipping to change at line 187 skipping to change at line 197
* it was killed. * it was killed.
*/ */
virtual bool restoreState() = 0; virtual bool restoreState() = 0;
/** /**
* Return the NS that the query is running over. * Return the NS that the query is running over.
*/ */
virtual const string& ns() = 0; virtual const string& ns() = 0;
/** /**
* Returns OK, allocating and filling '*explain' with a description * Return the Collection that the query is running over.
of the chosen plan. */
* Caller takes onwership of '*explain'. Otherwise, returns false w virtual const Collection* collection() = 0;
ith a detailed error
* status. /**
* Returns OK, allocating and filling '*explain' or '*planInfo' wit
h a description of the
* chosen plan, depending on which is non-NULL (one of the two shou
ld be NULL). Caller
* takes onwership of either '*explain' and '*planInfo'. Otherwise,
returns false
* a detailed error status.
*
* If 'explain' is NULL, then this out-parameter is ignored. Simila
rly, if 'staticInfo'
* is NULL, then no static debug information is produced.
*/ */
virtual Status getExplainPlan(TypeExplain** explain) const = 0; virtual Status getInfo(TypeExplain** explain, PlanInfo** planInfo) const = 0;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 12 change blocks. 
23 lines changed or deleted 49 lines changed or added


 runner_yield_policy.h   runner_yield_policy.h 
skipping to change at line 32 skipping to change at line 32
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/clientcursor.h" #include "mongo/db/clientcursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/util/elapsed_tracker.h" #include "mongo/util/elapsed_tracker.h"
namespace mongo { namespace mongo {
class RunnerYieldPolicy { class RunnerYieldPolicy {
public: public:
RunnerYieldPolicy() : _elapsedTracker(128, 10), _runnerYielding(NUL L) { } RunnerYieldPolicy() : _elapsedTracker(128, 10), _runnerYielding(NUL L) { }
~RunnerYieldPolicy() { ~RunnerYieldPolicy() {
if (NULL != _runnerYielding) { if (NULL != _runnerYielding) {
// We were destructed mid-yield. Since we're being used to yield a runner, we have // We were destructed mid-yield. Since we're being used to yield a runner, we have
// to deregister the runner. // to deregister the runner.
ClientCursor::deregisterRunner(_runnerYielding); if ( _runnerYielding->collection() ) {
_runnerYielding->collection()->cursorCache()->deregiste
rRunner(_runnerYielding);
}
} }
} }
bool shouldYield() { bool shouldYield() {
return _elapsedTracker.intervalHasElapsed(); return _elapsedTracker.intervalHasElapsed();
} }
/** /**
* Yield the provided runner, registering and deregistering it appr opriately. Deal with * Yield the provided runner, registering and deregistering it appr opriately. Deal with
* deletion during a yield by setting _runnerYielding to ensure der egistration. * deletion during a yield by setting _runnerYielding to ensure der egistration.
* *
* Provided runner MUST be YIELD_MANUAL. * Provided runner MUST be YIELD_MANUAL.
*/ */
bool yieldAndCheckIfOK(Runner* runner) { bool yieldAndCheckIfOK(Runner* runner, Record* record = NULL) {
verify(runner); invariant(runner);
invariant(runner->collection()); // XXX: should this just retur
n true?
int micros = ClientCursor::suggestYieldMicros(); int micros = ClientCursor::suggestYieldMicros();
// No point in yielding.
// If micros is not positive, no point in yielding, nobody wait
ing.
// XXX: Do we want to yield anyway if record is not NULL?
if (micros <= 0) { return true; } if (micros <= 0) { return true; }
// If micros > 0, we should yield. // If micros > 0, we should yield.
runner->saveState(); runner->saveState();
_runnerYielding = runner; _runnerYielding = runner;
ClientCursor::registerRunner(_runnerYielding);
staticYield(micros, NULL); runner->collection()->cursorCache()->registerRunner( _runnerYie
ClientCursor::deregisterRunner(_runnerYielding); lding );
staticYield(micros, record);
if ( runner->collection() ) {
// if the runner was killed, runner->collection() will retu
rn NULL
// so we don't deregister as it was done when killed
runner->collection()->cursorCache()->deregisterRunner( _run
nerYielding );
}
_runnerYielding = NULL; _runnerYielding = NULL;
_elapsedTracker.resetLastTime(); _elapsedTracker.resetLastTime();
return runner->restoreState(); return runner->restoreState();
} }
/** /**
* Yield, possibly fetching the provided record. Caller is in char ge of all runner * Yield, possibly fetching the provided record. Caller is in char ge of all runner
* registration. * registration.
* *
* Used for YIELD_AUTO runners. * Used for YIELD_AUTO runners.
*/ */
void yield(Record* rec = NULL) { void yield(Record* rec = NULL) {
int micros = ClientCursor::suggestYieldMicros(); int micros = ClientCursor::suggestYieldMicros();
if (micros > 0) {
// If there is anyone waiting on us or if there's a record to p
age-in, yield. TODO: Do
// we want to page in the record in the lock even if nobody is
waiting for the lock?
if (micros > 0 || (NULL != rec)) {
staticYield(micros, rec); staticYield(micros, rec);
// XXX: when do we really want to reset this?
//
// Currently we reset it when we actually yield. As such w
e'll keep on trying
// to yield once the tracker has elapsed.
//
// If we reset it even if we don't yield, we'll wait until
the time interval
// elapses again to try yielding.
_elapsedTracker.resetLastTime(); _elapsedTracker.resetLastTime();
} }
} }
static void staticYield(int micros, Record* rec = NULL) { static void staticYield(int micros, const Record* rec = NULL) {
ClientCursor::staticYield(micros, "", rec); ClientCursor::staticYield(micros, "", rec);
} }
private: private:
ElapsedTracker _elapsedTracker; ElapsedTracker _elapsedTracker;
Runner* _runnerYielding; Runner* _runnerYielding;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 8 change blocks. 
9 lines changed or deleted 43 lines changed or added


 s2.h   s2.h 
// Copyright 2005 Google Inc. All Rights Reserved. /**
* Copyright (C) 2008-2012 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3
,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public Licen
se
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certai
n
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Yo
u
* must comply with the GNU Affero General Public License in all respects
for
* all of the code used other than as permitted herein. If you modify fil
e(s)
* with this exception, you may extend this exception to your version of
the
* file(s), but you are not obligated to do so. If you do not wish to do
so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de
lete
* it in the license file.
*/
#pragma once
/*
* This file's purpose is to confine the suppression of the Clang warning f
or mismatched-tags (struct vs class)
* in only the s2.h file
*/
#ifndef UTIL_GEOMETRY_S2_H_ #ifdef __clang__
#define UTIL_GEOMETRY_S2_H_ #pragma GCC diagnostic ignored "-Wmismatched-tags"
#include <algorithm>
using std::min;
using std::max;
using std::swap;
using std::reverse;
#include "base/definer.h"
#ifdef OS_WINDOWS
#define _USE_MATH_DEFINES
#include <cmath>
#endif #endif
#include "hash.h" #include "third_party/s2/s2.h"
// To have template struct hash<T> defined
#include "third_party/s2/base/basictypes.h"
#include "third_party/s2/base/logging.h"
#include "third_party/s2/base/macros.h"
#include "third_party/s2/base/port.h" // for HASH_NAMESPACE_DECLARATION_ST
ART
#include "third_party/s2/util/math/vector3-inl.h"
#include "third_party/s2/util/math/matrix3x3.h"
// An S2Point represents a point on the unit sphere as a 3D vector. Usuall
y
// points are normalized to be unit length, but some methods do not require
// this. See util/math/vector3-inl.h for the methods available. Among oth
er
// things, there are overloaded operators that make it convenient to write
// arithmetic expressions (e.g. (1-x)*p1 + x*p2).
typedef Vector3_d S2Point;
HASH_NAMESPACE_START
template<> class hash<S2Point> {
public:
size_t operator()(S2Point const& p) const;
};
HASH_NAMESPACE_END
// The S2 class is simply a namespace for constants and static utility
// functions related to spherical geometry, such as area calculations and e
dge
// intersection tests. The name "S2" is derived from the mathematical symb
ol
// for the two-dimensional unit sphere (note that the "2" refers to the
// dimension of the surface, not the space it is embedded in).
//
// This class also defines a framework for decomposing the unit sphere into
a
// hierarchy of "cells". Each cell is a quadrilateral bounded by four
// geodesics. The top level of the hierarchy is obtained by projecting the
// six faces of a cube onto the unit sphere, and lower levels are obtained
by
// subdividing each cell into four children recursively.
//
// This class specifies the details of how the cube faces are projected ont
o
// the unit sphere. This includes getting the face ordering and orientatio
n
// correct so that sequentially increasing cell ids follow a continuous
// space-filling curve over the entire sphere, and defining the
// transformation from cell-space to cube-space in order to make the cells
// more uniform in size.
//
// This file also contains documentation of the various coordinate systems
// and conventions used.
//
// This class is not thread-safe for loops and objects that use loops.
//
class S2 {
public:
static const bool debug;
// Return a unique "origin" on the sphere for operations that need a fixe
d
// reference point. In particular, this is the "point at infinity" used
for
// point-in-polygon testing (by counting the number of edge crossings).
//
// It should *not* be a point that is commonly used in edge tests in orde
r
// to avoid triggering code to handle degenerate cases. (This rules out
the
// north and south poles.) It should also not be on the boundary of any
// low-level S2Cell for the same reason.
inline static S2Point Origin();
// Return true if the given point is approximately unit length
// (this is mainly useful for assertions).
static bool IsUnitLength(S2Point const& p);
// Return a unit-length vector that is orthogonal to "a". Satisfies
// Ortho(-a) = -Ortho(a) for all a.
static S2Point Ortho(S2Point const& a);
// Given a point "z" on the unit sphere, extend this into a right-handed
// coordinate frame of unit-length column vectors m = (x,y,z). Note that
// the vectors (x,y) are an orthonormal frame for the tangent space at "z
",
// while "z" itself is an orthonormal frame for the normal space at "z".
static void GetFrame(S2Point const& z, Matrix3x3_d* m);
// Given an orthonormal basis "m" of column vectors and a point "p", retu
rn
// the coordinates of "p" with respect to the basis "m". The resulting
// point "q" satisfies the identity (m * q == p).
static S2Point ToFrame(Matrix3x3_d const& m, S2Point const& p);
// Given an orthonormal basis "m" of column vectors and a point "q" with
// respect to that basis, return the equivalent point "p" with respect to
// the standard axis-aligned basis. The result satisfies (p == m * q).
static S2Point FromFrame(Matrix3x3_d const& m, S2Point const& q);
// the coordinates of "p" with respect to the basis "m". The resulting
// point "r" satisfies the identity (m * r == p).
// Return true if two points are within the given distance of each other
// (this is mainly useful for testing).
static bool ApproxEquals(S2Point const& a, S2Point const& b,
double max_error = 1e-15);
// Return a vector "c" that is orthogonal to the given unit-length vector
s
// "a" and "b". This function is similar to a.CrossProd(b) except that i
t
// does a better job of ensuring orthogonality when "a" is nearly paralle
l
// to "b", and it returns a non-zero result even when a == b or a == -b.
//
// It satisfies the following properties (RCP == RobustCrossProd):
//
// (1) RCP(a,b) != 0 for all a, b
// (2) RCP(b,a) == -RCP(a,b) unless a == b or a == -b
// (3) RCP(-a,b) == -RCP(a,b) unless a == b or a == -b
// (4) RCP(a,-b) == -RCP(a,b) unless a == b or a == -b
static S2Point RobustCrossProd(S2Point const& a, S2Point const& b);
// Return true if the points A, B, C are strictly counterclockwise. Retu
rn
// false if the points are clockwise or collinear (i.e. if they are all
// contained on some great circle).
//
// Due to numerical errors, situations may arise that are mathematically
// impossible, e.g. ABC may be considered strictly CCW while BCA is not.
// However, the implementation guarantees the following:
//
// If SimpleCCW(a,b,c), then !SimpleCCW(c,b,a) for all a,b,c.
static bool SimpleCCW(S2Point const& a, S2Point const& b, S2Point const&
c);
// Returns +1 if the points A, B, C are counterclockwise, -1 if the point
s
// are clockwise, and 0 if any two points are the same. This function is
// essentially like taking the sign of the determinant of ABC, except tha
t
// it has additional logic to make sure that the above properties hold ev
en
// when the three points are coplanar, and to deal with the limitations o
f
// floating-point arithmetic.
//
// RobustCCW satisfies the following conditions:
//
// (1) RobustCCW(a,b,c) == 0 if and only if a == b, b == c, or c == a
// (2) RobustCCW(b,c,a) == RobustCCW(a,b,c) for all a,b,c
// (3) RobustCCW(c,b,a) == -RobustCCW(a,b,c) for all a,b,c
//
// In other words:
//
// (1) The result is zero if and only if two points are the same.
// (2) Rotating the order of the arguments does not affect the result.
// (3) Exchanging any two arguments inverts the result.
//
// On the other hand, note that it is not true in general that
// RobustCCW(-a,b,c) == -RobustCCW(a,b,c), or any similar identities
// involving antipodal points.
static int RobustCCW(S2Point const& a, S2Point const& b, S2Point const& c
);
// A more efficient version of RobustCCW that allows the precomputed
// cross-product of A and B to be specified. (Unlike the 3 argument
// version this method is also inlined.)
inline static int RobustCCW(S2Point const& a, S2Point const& b,
S2Point const& c, S2Point const& a_cross_b);
// This version of RobustCCW returns +1 if the points are definitely CCW,
// -1 if they are definitely CW, and 0 if two points are identical or the
// result is uncertain. Uncertain certain cases can be resolved, if
// desired, by calling ExpensiveCCW.
//
// The purpose of this method is to allow additional cheap tests to be do
ne,
// where possible, in order to avoid calling ExpensiveCCW unnecessarily.
inline static int TriageCCW(S2Point const& a, S2Point const& b,
S2Point const& c, S2Point const& a_cross_b);
// This function is invoked by RobustCCW() if the sign of the determinant
is
// uncertain. It always returns a non-zero result unless two of the inpu
t
// points are the same. It uses a combination of multiple-precision
// arithmetic and symbolic perturbations to ensure that its results are
// always self-consistent (cf. Simulation of Simplicity, Edelsbrunner and
// Muecke). The basic idea is to assign an infinitesmal symbolic
// perturbation to every possible S2Point such that no three S2Points are
// collinear and no four S2Points are coplanar. These perturbations are
so
// small that they do not affect the sign of any determinant that was
// non-zero before the perturbations.
//
// Unlike RobustCCW(), this method does not require the input points to b
e
// normalized.
static int ExpensiveCCW(S2Point const& a, S2Point const& b,
S2Point const& c);
// Given 4 points on the unit sphere, return true if the edges OA, OB, an
d
// OC are encountered in that order while sweeping CCW around the point O
.
// You can think of this as testing whether A <= B <= C with respect to t
he
// CCW ordering around O that starts at A, or equivalently, whether B is
// contained in the range of angles (inclusive) that starts at A and exte
nds
// CCW to C. Properties:
//
// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
static bool OrderedCCW(S2Point const& a, S2Point const& b, S2Point const&
c,
S2Point const& o);
// Return the interior angle at the vertex B in the triangle ABC. The
// return value is always in the range [0, Pi]. The points do not need t
o
// be normalized. Ensures that Angle(a,b,c) == Angle(c,b,a) for all a,b,
c.
//
// The angle is undefined if A or C is diametrically opposite from B, and
// becomes numerically unstable as the length of edge AB or BC approaches
// 180 degrees.
static double Angle(S2Point const& a, S2Point const& b, S2Point const& c)
;
// Return the exterior angle at the vertex B in the triangle ABC. The
// return value is positive if ABC is counterclockwise and negative
// otherwise. If you imagine an ant walking from A to B to C, this is th
e
// angle that the ant turns at vertex B (positive = left, negative = righ
t).
// Ensures that TurnAngle(a,b,c) == -TurnAngle(c,b,a) for all a,b,c.
static double TurnAngle(S2Point const& a, S2Point const& b, S2Point const
& c);
// Return the area of triangle ABC. The method used is about twice as
// expensive as Girard's formula, but it is numerically stable for both
// large and very small triangles. All points should be unit length.
// The area is always positive.
//
// The triangle area is undefined if it contains two antipodal points, an
d
// becomes numerically unstable as the length of any edge approaches 180
// degrees.
static double Area(S2Point const& a, S2Point const& b, S2Point const& c);
// Return the area of the triangle computed using Girard's formula. All
// points should be unit length. This is slightly faster than the Area()
// method above but is not accurate for very small triangles.
static double GirardArea(S2Point const& a, S2Point const& b,
S2Point const& c);
// Like Area(), but returns a positive value for counterclockwise triangl
es
// and a negative value otherwise.
static double SignedArea(S2Point const& a, S2Point const& b,
S2Point const& c);
// About centroids:
// ----------------
//
// There are several notions of the "centroid" of a triangle. First, the
re
// // is the planar centroid, which is simply the centroid of the ordina
ry
// (non-spherical) triangle defined by the three vertices. Second, there
is
// the surface centroid, which is defined as the intersection of the thre
e
// medians of the spherical triangle. It is possible to show that this
// point is simply the planar centroid projected to the surface of the
// sphere. Finally, there is the true centroid (mass centroid), which is
// defined as the area integral over the spherical triangle of (x,y,z)
// divided by the triangle area. This is the point that the triangle wou
ld
// rotate around if it was spinning in empty space.
//
// The best centroid for most purposes is the true centroid. Unlike the
// planar and surface centroids, the true centroid behaves linearly as
// regions are added or subtracted. That is, if you split a triangle int
o
// pieces and compute the average of their centroids (weighted by triangl
e
// area), the result equals the centroid of the original triangle. This
is
// not true of the other centroids.
//
// Also note that the surface centroid may be nowhere near the intuitive
// "center" of a spherical triangle. For example, consider the triangle
// with vertices A=(1,eps,0), B=(0,0,1), C=(-1,eps,0) (a quarter-sphere).
// The surface centroid of this triangle is at S=(0, 2*eps, 1), which is
// within a distance of 2*eps of the vertex B. Note that the median from
A
// (the segment connecting A to the midpoint of BC) passes through S, sin
ce
// this is the shortest path connecting the two endpoints. On the other
// hand, the true centroid is at M=(0, 0.5, 0.5), which when projected on
to
// the surface is a much more reasonable interpretation of the "center" o
f
// this triangle.
// Return the centroid of the planar triangle ABC. This can be normalize
d
// to unit length to obtain the "surface centroid" of the corresponding
// spherical triangle, i.e. the intersection of the three medians. Howev
er,
// note that for large spherical triangles the surface centroid may be
// nowhere near the intuitive "center" (see example above).
static S2Point PlanarCentroid(S2Point const& a, S2Point const& b,
S2Point const& c);
// Returns the true centroid of the spherical triangle ABC multiplied by
the
// signed area of spherical triangle ABC. The reasons for multiplying by
// the signed area are (1) this is the quantity that needs to be summed t
o
// compute the centroid of a union or difference of triangles, and (2) it
's
// actually easier to calculate this way.
static S2Point TrueCentroid(S2Point const& a, S2Point const& b,
S2Point const& c);
////////////////////////// S2Cell Decomposition /////////////////////////
//
// The following methods define the cube-to-sphere projection used by
// the S2Cell decomposition.
//
// In the process of converting a latitude-longitude pair to a 64-bit cel
l
// id, the following coordinate systems are used:
//
// (id)
// An S2CellId is a 64-bit encoding of a face and a Hilbert curve posi
tion
// on that face. The Hilbert curve position implicitly encodes both t
he
// position of a cell and its subdivision level (see s2cellid.h).
//
// (face, i, j)
// Leaf-cell coordinates. "i" and "j" are integers in the range
// [0,(2**30)-1] that identify a particular leaf cell on the given fac
e.
// The (i, j) coordinate system is right-handed on each face, and the
// faces are oriented such that Hilbert curves connect continuously fr
om
// one face to the next.
//
// (face, s, t)
// Cell-space coordinates. "s" and "t" are real numbers in the range
// [0,1] that identify a point on the given face. For example, the po
int
// (s, t) = (0.5, 0.5) corresponds to the center of the top-level face
// cell. This point is also a vertex of exactly four cells at each
// subdivision level greater than zero.
//
// (face, si, ti)
// Discrete cell-space coordinates. These are obtained by multiplying
// "s" and "t" by 2**31 and rounding to the nearest unsigned integer.
// Discrete coordinates lie in the range [0,2**31]. This coordinate
// system can represent the edge and center positions of all cells wit
h
// no loss of precision (including non-leaf cells).
//
// (face, u, v)
// Cube-space coordinates. To make the cells at each level more unifo
rm
// in size after they are projected onto the sphere, we apply apply a
// nonlinear transformation of the form u=f(s), v=f(t). The (u, v)
// coordinates after this transformation give the actual coordinates o
n
// the cube face (modulo some 90 degree rotations) before it is projec
ted
// onto the unit sphere.
//
// (x, y, z)
// Direction vector (S2Point). Direction vectors are not necessarily
unit
// length, and are often chosen to be points on the biunit cube
// [-1,+1]x[-1,+1]x[-1,+1]. They can be be normalized to obtain the
// corresponding point on the unit sphere.
//
// (lat, lng)
// Latitude and longitude (S2LatLng). Latitudes must be between -90 a
nd
// 90 degrees inclusive, and longitudes must be between -180 and 180
// degrees inclusive.
//
// Note that the (i, j), (s, t), (si, ti), and (u, v) coordinate systems
are
// right-handed on all six faces.
// Convert an s or t value to the corresponding u or v value. This is
// a non-linear transformation from [-1,1] to [-1,1] that attempts to
// make the cell sizes more uniform.
inline static double STtoUV(double s);
// The inverse of the STtoUV transformation. Note that it is not always
// true that UVtoST(STtoUV(x)) == x due to numerical errors.
inline static double UVtoST(double u);
// Convert (face, u, v) coordinates to a direction vector (not
// necessarily unit length).
inline static S2Point FaceUVtoXYZ(int face, double u, double v);
// If the dot product of p with the given face normal is positive,
// set the corresponding u and v values (which may lie outside the range
// [-1,1]) and return true. Otherwise return false.
inline static bool FaceXYZtoUV(int face, S2Point const& p,
double* pu, double* pv);
// Convert a direction vector (not necessarily unit length) to
// (face, u, v) coordinates.
inline static int XYZtoFaceUV(S2Point const& p, double* pu, double* pv);
// Return the right-handed normal (not necessarily unit length) for an
// edge in the direction of the positive v-axis at the given u-value on
// the given face. (This vector is perpendicular to the plane through
// the sphere origin that contains the given edge.)
inline static S2Point GetUNorm(int face, double u);
// Return the right-handed normal (not necessarily unit length) for an
// edge in the direction of the positive u-axis at the given v-value on
// the given face.
inline static S2Point GetVNorm(int face, double v);
// Return the unit-length normal, u-axis, or v-axis for the given face.
inline static S2Point GetNorm(int face);
inline static S2Point GetUAxis(int face);
inline static S2Point GetVAxis(int face);
////////////////////////////////////////////////////////////////////////
// The canonical Hilbert traversal order looks like an inverted 'U':
// the subcells are visited in the order (0,0), (0,1), (1,1), (1,0).
// The following tables encode the traversal order for various
// orientations of the Hilbert curve (axes swapped and/or directions
// of the axes reversed).
// Together these flags define a cell orientation. If 'kSwapMask'
// is true, then canonical traversal order is flipped around the
// diagonal (i.e. i and j are swapped with each other). If
// 'kInvertMask' is true, then the traversal order is rotated by 180
// degrees (i.e. the bits of i and j are inverted, or equivalently,
// the axis directions are reversed).
static int const kSwapMask;
static int const kInvertMask;
// This is the number of levels needed to specify a leaf cell. This
// constant is defined here so that the S2::Metric class can be
// implemented without including s2cellid.h.
static int const kMaxCellLevel;
// kIJtoPos[orientation][ij] -> pos
//
// Given a cell orientation and the (i,j)-index of a subcell (0=(0,0),
// 1=(0,1), 2=(1,0), 3=(1,1)), return the order in which this subcell is
// visited by the Hilbert curve (a position in the range [0..3]).
static int const kIJtoPos[4][4];
// kPosToIJ[orientation][pos] -> ij
//
// Return the (i,j) index of the subcell at the given position 'pos' in t
he
// Hilbert curve traversal order with the given orientation. This is the
// inverse of the previous table:
//
// kPosToIJ[r][kIJtoPos[r][ij]] == ij
static int const kPosToIJ[4][4];
// kPosToOrientation[pos] -> orientation_modifier
//
// Return a modifier indicating how the orientation of the child subcell
// with the given traversal position [0..3] is related to the orientation
// of the parent cell. The modifier should be XOR-ed with the parent
// orientation to obtain the curve orientation in the child.
static int const kPosToOrientation[4];
////////////////////////// S2Cell Metrics //////////////////////////////
//
// The following are various constants that describe the shapes and sizes
of
// cells. They are useful for deciding which cell level to use in order
to
// satisfy a given condition (e.g. that cell vertices must be no further
// than "x" apart). All of the raw constants are differential quantities
;
// you can use the GetValue(level) method to compute the corresponding le
ngth
// or area on the unit sphere for cells at a given level. The minimum an
d
// maximum bounds are valid for cells at all levels, but they may be
// somewhat conservative for very large cells (e.g. face cells).
// Defines a cell metric of the given dimension (1 == length, 2 == area).
template <int dim> class Metric {
public:
explicit Metric(double deriv) : deriv_(deriv) {}
// The "deriv" value of a metric is a derivative, and must be multiplie
d by
// a length or area in (s,t)-space to get a useful value.
double deriv() const { return deriv_; }
// Return the value of a metric for cells at the given level. The value
is
// either a length or an area on the unit sphere, depending on the
// particular metric.
double GetValue(int level) const { return ldexp(deriv_, - dim * level);
}
// Return the level at which the metric has approximately the given
// value. For example, S2::kAvgEdge.GetClosestLevel(0.1) returns the
// level at which the average cell edge length is approximately 0.1.
// The return value is always a valid level.
int GetClosestLevel(double value) const;
// Return the minimum level such that the metric is at most the given
// value, or S2CellId::kMaxLevel if there is no such level. For exampl
e,
// S2::kMaxDiag.GetMinLevel(0.1) returns the minimum level such that al
l
// cell diagonal lengths are 0.1 or smaller. The return value is alway
s a
// valid level.
int GetMinLevel(double value) const;
// Return the maximum level such that the metric is at least the given
// value, or zero if there is no such level. For example,
// S2::kMinWidth.GetMaxLevel(0.1) returns the maximum level such that a
ll
// cells have a minimum width of 0.1 or larger. The return value is
// always a valid level.
int GetMaxLevel(double value) const;
private:
double const deriv_;
DISALLOW_EVIL_CONSTRUCTORS(Metric);
};
typedef Metric<1> LengthMetric;
typedef Metric<2> AreaMetric;
// Each cell is bounded by four planes passing through its four edges and
// the center of the sphere. These metrics relate to the angle between e
ach
// pair of opposite bounding planes, or equivalently, between the planes
// corresponding to two different s-values or two different t-values. Fo
r
// example, the maximum angle between opposite bounding planes for a cell
at
// level k is kMaxAngleSpan.GetValue(k), and the average angle span for a
ll
// cells at level k is approximately kAvgAngleSpan.GetValue(k).
static LengthMetric const kMinAngleSpan;
static LengthMetric const kMaxAngleSpan;
static LengthMetric const kAvgAngleSpan;
// The width of geometric figure is defined as the distance between two
// parallel bounding lines in a given direction. For cells, the minimum
// width is always attained between two opposite edges, and the maximum
// width is attained between two opposite vertices. However, for our
// purposes we redefine the width of a cell as the perpendicular distance
// between a pair of opposite edges. A cell therefore has two widths, on
e
// in each direction. The minimum width according to this definition agr
ees
// with the classic geometric one, but the maximum width is different. (
The
// maximum geometric width corresponds to kMaxDiag defined below.)
//
// For a cell at level k, the distance between opposite edges is at least
// kMinWidth.GetValue(k) and at most kMaxWidth.GetValue(k). The average
// width in both directions for all cells at level k is approximately
// kAvgWidth.GetValue(k).
//
// The width is useful for bounding the minimum or maximum distance from
a
// point on one edge of a cell to the closest point on the opposite edge.
// For example, this is useful when "growing" regions by a fixed distance
.
static LengthMetric const kMinWidth;
static LengthMetric const kMaxWidth;
static LengthMetric const kAvgWidth;
// The minimum edge length of any cell at level k is at least
// kMinEdge.GetValue(k), and the maximum is at most kMaxEdge.GetValue(k).
// The average edge length is approximately kAvgEdge.GetValue(k).
//
// The edge length metrics can also be used to bound the minimum, maximum
,
// or average distance from the center of one cell to the center of one o
f
// its edge neighbors. In particular, it can be used to bound the distan
ce
// between adjacent cell centers along the space-filling Hilbert curve fo
r
// cells at any given level.
static LengthMetric const kMinEdge;
static LengthMetric const kMaxEdge;
static LengthMetric const kAvgEdge;
// The minimum diagonal length of any cell at level k is at least
// kMinDiag.GetValue(k), and the maximum is at most kMaxDiag.GetValue(k).
// The average diagonal length is approximately kAvgDiag.GetValue(k).
//
// The maximum diagonal also happens to be the maximum diameter of any ce
ll,
// and also the maximum geometric width (see the discussion above). So f
or
// example, the distance from an arbitrary point to the closest cell cent
er
// at a given level is at most half the maximum diagonal length.
static LengthMetric const kMinDiag;
static LengthMetric const kMaxDiag;
static LengthMetric const kAvgDiag;
// The minimum area of any cell at level k is at least kMinArea.GetValue(
k),
// and the maximum is at most kMaxArea.GetValue(k). The average area of
all
// cells at level k is exactly kAvgArea.GetValue(k).
static AreaMetric const kMinArea;
static AreaMetric const kMaxArea;
static AreaMetric const kAvgArea;
// This is the maximum edge aspect ratio over all cells at any level, whe
re
// the edge aspect ratio of a cell is defined as the ratio of its longest
// edge length to its shortest edge length.
static double const kMaxEdgeAspect;
// This is the maximum diagonal aspect ratio over all cells at any level,
// where the diagonal aspect ratio of a cell is defined as the ratio of i
ts
// longest diagonal length to its shortest diagonal length.
static double const kMaxDiagAspect;
private:
// Given a *valid* face for the given point p (meaning that dot product
// of p with the face normal is positive), return the corresponding
// u and v values (which may lie outside the range [-1,1]).
inline static void ValidFaceXYZtoUV(int face, S2Point const& p,
double* pu, double* pv);
// The value below is the maximum error in computing the determinant
// a.CrossProd(b).DotProd(c). To derive this, observe that computing the
// determinant in this way requires 14 multiplications and additions. Si
nce
// all three points are normalized, none of the intermediate results in t
his
// calculation exceed 1.0 in magnitude. The maximum rounding error for a
n
// operation whose result magnitude does not exceed 1.0 (before rounding)
is
// 2**-54 (i.e., half of the difference between 1.0 and the next
// representable value below 1.0). Therefore, the total error in computi
ng
// the determinant does not exceed 14 * (2**-54).
//
// The C++ standard requires to initialize kMaxDetError outside of
// the class definition, even though GCC doesn't enforce it.
static double const kMaxDetError;
DISALLOW_IMPLICIT_CONSTRUCTORS(S2); // Contains only static methods.
};
// Uncomment the following line for testing purposes only. It greatly
// increases the number of degenerate cases that need to be handled using
// ExpensiveCCW().
// #define S2_TEST_DEGENERACIES
inline S2Point S2::Origin() {
#ifdef S2_TEST_DEGENERACIES
return S2Point(0, 0, 1); // This makes polygon operations much slower.
#else
return S2Point(0.00457, 1, 0.0321).Normalize();
#endif
}
inline int S2::TriageCCW(S2Point const& a, S2Point const& b,
S2Point const& c, S2Point const& a_cross_b) {
DCHECK(IsUnitLength(a));
DCHECK(IsUnitLength(b));
DCHECK(IsUnitLength(c));
double det = a_cross_b.DotProd(c);
// Double-check borderline cases in debug mode.
DCHECK(fabs(det) < kMaxDetError ||
fabs(det) > 100 * kMaxDetError ||
det * ExpensiveCCW(a, b, c) > 0);
if (det > kMaxDetError) return 1;
if (det < -kMaxDetError) return -1;
return 0;
}
inline int S2::RobustCCW(S2Point const& a, S2Point const& b,
S2Point const& c, S2Point const& a_cross_b) {
int ccw = TriageCCW(a, b, c, a_cross_b);
if (ccw == 0) ccw = ExpensiveCCW(a, b, c);
return ccw;
}
// We have implemented three different projections from cell-space (s,t) to
// cube-space (u,v): linear, quadratic, and tangent. They have the followi
ng
// tradeoffs:
//
// Linear - This is the fastest transformation, but also produces the lea
st
// uniform cell sizes. Cell areas vary by a factor of about 5.2, with th
e
// largest cells at the center of each face and the smallest cells in
// the corners.
//
// Tangent - Transforming the coordinates via atan() makes the cell sizes
// more uniform. The areas vary by a maximum ratio of 1.4 as opposed to
a
// maximum ratio of 5.2. However, each call to atan() is about as expens
ive
// as all of the other calculations combined when converting from points
to
// cell ids, i.e. it reduces performance by a factor of 3.
//
// Quadratic - This is an approximation of the tangent projection that
// is much faster and produces cells that are almost as uniform in size.
// It is about 3 times faster than the tangent projection for converting
// cell ids to points or vice versa. Cell areas vary by a maximum ratio
of
// about 2.1.
//
// Here is a table comparing the cell uniformity using each projection. "A
rea
// ratio" is the maximum ratio over all subdivision levels of the largest c
ell
// area to the smallest cell area at that level, "edge ratio" is the maximu
m
// ratio of the longest edge of any cell to the shortest edge of any cell a
t
// the same level, and "diag ratio" is the ratio of the longest diagonal of
// any cell to the shortest diagonal of any cell at the same level. "ToPoi
nt"
// and "FromPoint" are the times in microseconds required to convert cell i
ds
// to and from points (unit vectors) respectively. "ToPointRaw" is the tim
e
// to convert to a non-unit-length vector, which is all that is needed for
// some purposes.
//
// Area Edge Diag ToPointRaw ToPoint FromPoint
// Ratio Ratio Ratio (microseconds)
// -------------------------------------------------------------------
// Linear: 5.200 2.117 2.959 0.020 0.087 0.085
// Tangent: 1.414 1.414 1.704 0.237 0.299 0.258
// Quadratic: 2.082 1.802 1.932 0.033 0.096 0.108
//
// The worst-case cell aspect ratios are about the same with all three
// projections. The maximum ratio of the longest edge to the shortest edge
// within the same cell is about 1.4 and the maximum ratio of the diagonals
// within the same cell is about 1.7.
//
// This data was produced using s2cell_unittest and s2cellid_unittest.
#define S2_LINEAR_PROJECTION 0
#define S2_TAN_PROJECTION 1
#define S2_QUADRATIC_PROJECTION 2
#define S2_PROJECTION S2_QUADRATIC_PROJECTION
#if S2_PROJECTION == S2_LINEAR_PROJECTION
inline double S2::STtoUV(double s) {
return 2 * s - 1;
}
inline double S2::UVtoST(double u) {
return 0.5 * (u + 1);
}
#elif S2_PROJECTION == S2_TAN_PROJECTION
inline double S2::STtoUV(double s) {
  // Tangent projection: maps s in [0,1] to u in [-1,1] via
  // u = tan(pi/2 * s - pi/4), so s = 0 -> -1 and s = 1 -> +1.
  //
  // Unfortunately, tan(M_PI_4) is slightly less than 1.0.  This isn't due to
  // a flaw in the implementation of tan(), it's because the derivative of
  // tan(x) at x=pi/4 is 2, and it happens that the two adjacent floating
  // point numbers on either side of the infinite-precision value of pi/4 have
  // tangents that are slightly below and slightly above 1.0 when rounded to
  // the nearest double-precision result.
  s = tan(M_PI_2 * s - M_PI_4);
  // Nudge the result upward by one part in 2^53 (relative) so that the
  // endpoints map to exactly +/-1.0 despite the rounding described above.
  return s + (1.0 / (GG_LONGLONG(1) << 53)) * s;
}
inline double S2::UVtoST(double u) {
  // Inverse of the tangent STtoUV: maps u in [-1,1] back to s in [0,1].
  // 'volatile' forces the atan() result to be materialized as an actual
  // double — presumably to stop the compiler from carrying extra precision
  // into the expression below; TODO(review): confirm original motivation.
  volatile double a = atan(u);
  return (2 * M_1_PI) * (a + M_PI_4);
}
#elif S2_PROJECTION == S2_QUADRATIC_PROJECTION
inline double S2::STtoUV(double s) {
  // Quadratic projection (the default): maps s in [0,1] to u in [-1,1]
  // with a piecewise quadratic that is symmetric about s = 0.5.
  if (s >= 0.5) {
    return (1/3.) * (4*s*s - 1);
  }
  const double t = 1 - s;          // mirror into the upper half
  return (1/3.) * (1 - 4*t*t);
}
inline double S2::UVtoST(double u) {
  // Inverse of the quadratic STtoUV: maps u in [-1,1] back to s in [0,1].
  if (u < 0) {
    return 1 - 0.5 * sqrt(1 - 3*u);
  }
  return 0.5 * sqrt(1 + 3*u);
}
#else
#error Unknown value for S2_PROJECTION
#endif

#ifdef __clang__
#pragma GCC diagnostic pop
#endif
inline S2Point S2::FaceUVtoXYZ(int face, double u, double v) {
  // Convert (face, u, v) coordinates to a direction vector (not
  // necessarily unit length).  Any face value outside 0..4 is treated
  // as face 5, matching the original switch default.
  if (face == 0) return S2Point( 1,  u,  v);
  if (face == 1) return S2Point(-u,  1,  v);
  if (face == 2) return S2Point(-u, -v,  1);
  if (face == 3) return S2Point(-1, -v, -u);
  if (face == 4) return S2Point( v, -1, -u);
  return S2Point( v,  u, -1);
}
inline void S2::ValidFaceXYZtoUV(int face, S2Point const& p,
                                 double* pu, double* pv) {
  // Project p onto the given face's (u, v) coordinate frame.  Requires
  // that p lies strictly on the positive side of the face (checked below).
  DCHECK_GT(p.DotProd(FaceUVtoXYZ(face, 0, 0)), 0);
  if (face == 0)      { *pu =  p[1] / p[0]; *pv =  p[2] / p[0]; }
  else if (face == 1) { *pu = -p[0] / p[1]; *pv =  p[2] / p[1]; }
  else if (face == 2) { *pu = -p[0] / p[2]; *pv = -p[1] / p[2]; }
  else if (face == 3) { *pu =  p[2] / p[0]; *pv =  p[1] / p[0]; }
  else if (face == 4) { *pu =  p[2] / p[1]; *pv = -p[0] / p[1]; }
  else                { *pu = -p[1] / p[2]; *pv = -p[0] / p[2]; }
}
inline int S2::XYZtoFaceUV(S2Point const& p, double* pu, double* pv) {
  // Pick the face whose axis has the largest absolute component of p;
  // faces 0..2 are the positive axes, 3..5 their negative counterparts.
  const int axis = p.LargestAbsComponent();
  const int face = (p[axis] < 0) ? axis + 3 : axis;
  ValidFaceXYZtoUV(face, p, pu, pv);
  return face;
}
inline bool S2::FaceXYZtoUV(int face, S2Point const& p,
                            double* pu, double* pv) {
  // (u, v) coordinates are only defined when p lies on the positive side
  // of the given face's plane; report failure otherwise.  'w' is p's
  // component along the face's outward axis.
  const double w = (face < 3) ? p[face] : -p[face - 3];
  if (w <= 0) return false;
  ValidFaceXYZtoUV(face, p, pu, pv);
  return true;
}
inline S2Point S2::GetUNorm(int face, double u) {
  // Vector normal to the plane of constant u on the given face.  Any
  // face value outside 0..4 behaves like face 5 (the original default).
  if (face == 0) return S2Point( u, -1,  0);
  if (face == 1) return S2Point( 1,  u,  0);
  if (face == 2) return S2Point( 1,  0,  u);
  if (face == 3) return S2Point(-u,  0,  1);
  if (face == 4) return S2Point( 0, -u,  1);
  return S2Point( 0, -1, -u);
}
inline S2Point S2::GetVNorm(int face, double v) {
  // Vector normal to the plane of constant v on the given face.  Any
  // face value outside 0..4 behaves like face 5 (the original default).
  if (face == 0) return S2Point(-v,  0,  1);
  if (face == 1) return S2Point( 0, -v,  1);
  if (face == 2) return S2Point( 0, -1, -v);
  if (face == 3) return S2Point( v, -1,  0);
  if (face == 4) return S2Point( 1,  v,  0);
  return S2Point( 1,  0,  v);
}
inline S2Point S2::GetNorm(int face) {
  // The face's outward normal is simply its direction vector at the
  // center, i.e. at (u, v) = (0, 0).
  return FaceUVtoXYZ(face, 0, 0);
}
inline S2Point S2::GetUAxis(int face) {
  // Axis associated with the u-coordinate of the given face.  Faces 1 and
  // 2 share an axis, as do faces 3 and 4; every other value (0, 5, and
  // out-of-range) maps to +y, matching the original switch default.
  if (face == 1 || face == 2) return S2Point(-1, 0,  0);
  if (face == 3 || face == 4) return S2Point( 0, 0, -1);
  return S2Point(0, 1, 0);
}
inline S2Point S2::GetVAxis(int face) {
  // Axis associated with the v-coordinate of the given face.  Faces 0 and
  // 1 share an axis, as do faces 2 and 3; every other value (4, 5, and
  // out-of-range) maps to +x, matching the original switch default.
  if (face == 0 || face == 1) return S2Point(0,  0, 1);
  if (face == 2 || face == 3) return S2Point(0, -1, 0);
  return S2Point(1, 0, 0);
}
template <int dim>
int S2::Metric<dim>::GetMinLevel(double value) const {
  // Returns the minimum level such that the metric is at most the given
  // value, or S2::kMaxCellLevel if there is no such level (see the two
  // DCHECKs below for the exact contract).
  if (value <= 0) return S2::kMaxCellLevel;
  // This code is equivalent to computing a floating-point "level"
  // value and rounding up.  frexp() returns a fraction in the
  // range [0.5,1) and the corresponding exponent; only the exponent is
  // used here (the fractional part is discarded).
  int level;
  frexp(value / deriv_, &level);
  // The shift by (dim - 1) halves the exponent when dim == 2 and is a
  // no-op when dim == 1; the negation converts "larger value" into
  // "smaller level", clamped to [0, kMaxCellLevel].
  level = max(0, min(S2::kMaxCellLevel, -((level - 1) >> (dim - 1))));
  DCHECK(level == S2::kMaxCellLevel || GetValue(level) <= value);
  DCHECK(level == 0 || GetValue(level - 1) > value);
  return level;
}
template <int dim>
int S2::Metric<dim>::GetMaxLevel(double value) const {
  // Returns the maximum level such that the metric is at least the given
  // value, or S2::kMaxCellLevel if there is no such level (see the two
  // DCHECKs below for the exact contract).
  if (value <= 0) return S2::kMaxCellLevel;
  // This code is equivalent to computing a floating-point "level"
  // value and rounding down.  Only frexp()'s exponent output is used.
  int level;
  frexp(deriv_ / value, &level);
  // The shift by (dim - 1) halves the exponent when dim == 2 and is a
  // no-op when dim == 1, clamped to [0, kMaxCellLevel].
  level = max(0, min(S2::kMaxCellLevel, (level - 1) >> (dim - 1)));
  DCHECK(level == 0 || GetValue(level) >= value);
  DCHECK(level == S2::kMaxCellLevel || GetValue(level + 1) < value);
  return level;
}
template <int dim>
int S2::Metric<dim>::GetClosestLevel(double value) const {
  // Scaling the value by sqrt(2) when dim == 1 (or by 2 when dim == 2)
  // turns GetMinLevel's round-up into a round-to-nearest on the level
  // scale.
  const double scale = (dim == 1) ? M_SQRT2 : 2;
  return GetMinLevel(scale * value);
}
#endif // UTIL_GEOMETRY_S2_H_
 End of changes. 5 change blocks. 
842 lines changed or deleted 50 lines changed or added


 s2_access_method.h   s2_access_method.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/db/geo/s2common.h" #include "mongo/db/geo/s2common.h"
#include "mongo/db/index/btree_access_method_internal.h" #include "mongo/db/index/btree_based_access_method.h"
#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
namespace mongo { namespace mongo {
class IndexCursor; class IndexCursor;
struct S2IndexingParams; struct S2IndexingParams;
class S2AccessMethod : public BtreeBasedAccessMethod { class S2AccessMethod : public BtreeBasedAccessMethod {
public: public:
using BtreeBasedAccessMethod::_descriptor; using BtreeBasedAccessMethod::_descriptor;
S2AccessMethod(IndexDescriptor* descriptor); S2AccessMethod(IndexCatalogEntry* btreeState);
virtual ~S2AccessMethod() { } virtual ~S2AccessMethod() { }
virtual Status newCursor(IndexCursor** out);
private: private:
friend class Geo2dFindNearCmd;
const S2IndexingParams& getParams() const { return _params; }
virtual void getKeys(const BSONObj& obj, BSONObjSet* keys); virtual void getKeys(const BSONObj& obj, BSONObjSet* keys);
// getKeys calls the helper methods below. // getKeys calls the helper methods below.
void getGeoKeys(const BSONObj& document, const BSONElementSet& elem ents, void getGeoKeys(const BSONObj& document, const BSONElementSet& elem ents,
BSONObjSet* out) const; BSONObjSet* out) const;
void getLiteralKeys(const BSONElementSet& elements, BSONObjSet* out ) const; void getLiteralKeys(const BSONElementSet& elements, BSONObjSet* out ) const;
void getLiteralKeysArray(const BSONObj& obj, BSONObjSet* out) const ; void getLiteralKeysArray(const BSONObj& obj, BSONObjSet* out) const ;
void getOneLiteralKey(const BSONElement& elt, BSONObjSet *out) cons t; void getOneLiteralKey(const BSONElement& elt, BSONObjSet *out) cons t;
S2IndexingParams _params; S2IndexingParams _params;
 End of changes. 4 change blocks. 
7 lines changed or deleted 2 lines changed or added


 s2common.h   s2common.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#include "mongo/db/geo/geoparser.h" #include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/geoconstants.h" #include "mongo/db/geo/geoconstants.h"
#include "third_party/s2/s2.h" #include "mongo/db/geo/s2.h"
#include "third_party/s2/s2regioncoverer.h" #include "third_party/s2/s2regioncoverer.h"
#include "third_party/s2/s2cell.h" #include "third_party/s2/s2cell.h"
#include "third_party/s2/s2polyline.h" #include "third_party/s2/s2polyline.h"
#include "third_party/s2/s2polygon.h" #include "third_party/s2/s2polygon.h"
#include "third_party/s2/s2regioncoverer.h" #include "third_party/s2/s2regioncoverer.h"
#pragma once #pragma once
namespace mongo { namespace mongo {
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 s2near.h   s2near.h 
skipping to change at line 36 skipping to change at line 36
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <queue> #include <queue>
#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stage.h"
#include "mongo/db/geo/geoquery.h" #include "mongo/db/geo/geoquery.h"
#include "mongo/db/geo/s2common.h" #include "mongo/db/geo/s2common.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression.h"
#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds.h"
#include "mongo/platform/unordered_set.h" #include "mongo/platform/unordered_set.h"
#include "third_party/s2/s2cap.h" #include "third_party/s2/s2cap.h"
#include "third_party/s2/s2regionintersection.h" #include "third_party/s2/s2regionintersection.h"
namespace mongo { namespace mongo {
struct S2NearParams {
string ns;
BSONObj indexKeyPattern;
NearQuery nearQuery;
IndexBounds baseBounds;
MatchExpression* filter;
bool addPointMeta;
bool addDistMeta;
};
/** /**
* Executes a geoNear search. Is a leaf node. Output type is LOC_AND_ UNOWNED_OBJ. * Executes a geoNear search. Is a leaf node. Output type is LOC_AND_ UNOWNED_OBJ.
*/ */
class S2NearStage : public PlanStage { class S2NearStage : public PlanStage {
public: public:
/** /**
* Takes: index to scan over, MatchExpression with near point, othe r MatchExpressions for * Takes: index to scan over, MatchExpression with near point, othe r MatchExpressions for
* covered data, * covered data,
*/ */
S2NearStage(const string& ns, const BSONObj& indexKeyPattern, S2NearStage(const S2NearParams& params, WorkingSet* ws);
const NearQuery& nearQuery, const IndexBounds& baseBoun
ds,
MatchExpression* filter, WorkingSet* ws);
virtual ~S2NearStage(); virtual ~S2NearStage();
StageState work(WorkingSetID* out); StageState work(WorkingSetID* out);
bool isEOF(); bool isEOF();
void prepareToYield(); void prepareToYield();
void recoverFromYield(); void recoverFromYield();
void invalidate(const DiskLoc& dl); void invalidate(const DiskLoc& dl, InvalidationType type);
PlanStageStats* getStats(); PlanStageStats* getStats();
private: private:
void init();
StageState addResultToQueue(WorkingSetID* out); StageState addResultToQueue(WorkingSetID* out);
void nextAnnulus(); void nextAnnulus();
bool _worked; bool _worked;
WorkingSet* _ws; S2NearParams _params;
string _ns;
BSONObj _indexKeyPattern; WorkingSet* _ws;
// This is the "array index" of the key field that is the near fiel d. We use this to do // This is the "array index" of the key field that is the near fiel d. We use this to do
// cheap is-this-doc-in-the-annulus testing. We also need to know where to stuff the index // cheap is-this-doc-in-the-annulus testing. We also need to know where to stuff the index
// bounds for the various annuluses/annuli. // bounds for the various annuluses/annuli.
int _nearFieldIndex; int _nearFieldIndex;
NearQuery _nearQuery;
IndexBounds _baseBounds;
scoped_ptr<PlanStage> _child; scoped_ptr<PlanStage> _child;
// We don't check this ourselves; we let the sub-fetch deal w/it.
MatchExpression* _filter;
// The S2 machinery that represents the search annulus. We keep th is around after bounds // The S2 machinery that represents the search annulus. We keep th is around after bounds
// generation to check for intersection. // generation to check for intersection.
S2Cap _innerCap; S2Cap _innerCap;
S2Cap _outerCap; S2Cap _outerCap;
S2RegionIntersection _annulus; S2RegionIntersection _annulus;
// We use this to hold on to the results in an annulus. Results ar e sorted to have // We use this to hold on to the results in an annulus. Results ar e sorted to have
// decreasing distance. // decreasing distance.
struct Result { struct Result {
Result(WorkingSetID wsid, double dist) : id(wsid), distance(dis t) { } Result(WorkingSetID wsid, double dist) : id(wsid), distance(dis t) { }
skipping to change at line 131 skipping to change at line 132
// Geo-related variables. // Geo-related variables.
// At what min distance (arc length) do we start looking for result s? // At what min distance (arc length) do we start looking for result s?
double _minDistance; double _minDistance;
// What's the max distance (arc length) we're willing to look for r esults? // What's the max distance (arc length) we're willing to look for r esults?
double _maxDistance; double _maxDistance;
// These radii define the annulus we're currently looking at. // These radii define the annulus we're currently looking at.
double _innerRadius; double _innerRadius;
double _outerRadius; double _outerRadius;
// True if we are looking at last annulus
bool _outerRadiusInclusive;
// When we search the next annulus, what to adjust our radius by? Grows when we search an // When we search the next annulus, what to adjust our radius by? Grows when we search an
// annulus and find no results. // annulus and find no results.
double _radiusIncrement; double _radiusIncrement;
// Did we encounter an unrecoverable error? // Did we encounter an unrecoverable error?
bool _failed; bool _failed;
// Have we init()'d yet?
bool _initted;
// What index are we searching over?
IndexDescriptor* _descriptor;
CommonStats _commonStats; CommonStats _commonStats;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 11 change blocks. 
16 lines changed or deleted 25 lines changed or added


 sasl_client_authenticate.h   sasl_client_authenticate.h 
skipping to change at line 21 skipping to change at line 21
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/bson/bsontypes.h" #include "mongo/bson/bsontypes.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
class BSONObj; class BSONObj;
/** /**
* Attempts to authenticate "client" using the SASL protocol. * Attempts to authenticate "client" using the SASL protocol.
* *
* Do not use directly in client code. Use the DBClientWithCommands::a uth(const BSONObj&) * Do not use directly in client code. Use the DBClientWithCommands::a uth(const BSONObj&)
* method, instead. * method, instead.
* *
skipping to change at line 57 skipping to change at line 58
* "pwd": The password. * "pwd": The password.
* "serviceName": The GSSAPI service name to use. Defaults to "mon godb". * "serviceName": The GSSAPI service name to use. Defaults to "mon godb".
* "serviceHostname": The GSSAPI hostname to use. Defaults to the name of the remote host. * "serviceHostname": The GSSAPI hostname to use. Defaults to the name of the remote host.
* *
* Other fields in saslParameters are silently ignored. * Other fields in saslParameters are silently ignored.
* *
* Returns an OK status on success, and ErrorCodes::AuthenticationFaile d if authentication is * Returns an OK status on success, and ErrorCodes::AuthenticationFaile d if authentication is
* rejected. Other failures, all of which are tantamount to authentica tion failure, may also be * rejected. Other failures, all of which are tantamount to authentica tion failure, may also be
* returned. * returned.
*/ */
extern Status (*saslClientAuthenticate)(DBClientWithCommands* client, extern MONGO_CLIENT_API Status (*saslClientAuthenticate)(DBClientWithCo mmands* client,
const BSONObj& saslParameters); const BSONObj& saslParameters);
/** /**
* Extracts the payload field from "cmdObj", and store it into "*payloa d". * Extracts the payload field from "cmdObj", and store it into "*payloa d".
* *
* Sets "*type" to the BSONType of the payload field in cmdObj. * Sets "*type" to the BSONType of the payload field in cmdObj.
* *
* If the type of the payload field is String, the contents base64 deco des and * If the type of the payload field is String, the contents base64 deco des and
* stores into "*payload". If the type is BinData, the contents are st ored directly * stores into "*payload". If the type is BinData, the contents are st ored directly
* into "*payload". In all other cases, returns * into "*payload". In all other cases, returns
*/ */
Status saslExtractPayload(const BSONObj& cmdObj, std::string* payload, BSONType* type); Status MONGO_CLIENT_API saslExtractPayload(const BSONObj& cmdObj, std:: string* payload, BSONType* type);
// Constants // Constants
/// String name of the saslStart command. /// String name of the saslStart command.
extern const char* const saslStartCommandName; extern MONGO_CLIENT_API const char* const saslStartCommandName;
/// String name of the saslContinue command. /// String name of the saslContinue command.
extern const char* const saslContinueCommandName; extern MONGO_CLIENT_API const char* const saslContinueCommandName;
/// Name of the saslStart parameter indicating that the server should a utomatically grant the /// Name of the saslStart parameter indicating that the server should a utomatically grant the
/// connection all privileges associated with the user after successful authentication. /// connection all privileges associated with the user after successful authentication.
extern const char* const saslCommandAutoAuthorizeFieldName; extern MONGO_CLIENT_API const char* const saslCommandAutoAuthorizeField Name;
/// Name of the field contain the status code in responses from the ser ver. /// Name of the field contain the status code in responses from the ser ver.
extern const char* const saslCommandCodeFieldName; extern MONGO_CLIENT_API const char* const saslCommandCodeFieldName;
/// Name of the field containing the conversation identifier in server respones and saslContinue /// Name of the field containing the conversation identifier in server respones and saslContinue
/// commands. /// commands.
extern const char* const saslCommandConversationIdFieldName; extern MONGO_CLIENT_API const char* const saslCommandConversationIdFiel dName;
/// Name of the field that indicates whether or not the server believes authentication has /// Name of the field that indicates whether or not the server believes authentication has
/// completed successfully. /// completed successfully.
extern const char* const saslCommandDoneFieldName; extern MONGO_CLIENT_API const char* const saslCommandDoneFieldName;
/// Field in which to store error messages associated with non-success return codes. /// Field in which to store error messages associated with non-success return codes.
extern const char* const saslCommandErrmsgFieldName; extern MONGO_CLIENT_API const char* const saslCommandErrmsgFieldName;
/// Name of parameter to saslStart command indiciating the client's des ired sasl mechanism. /// Name of parameter to saslStart command indiciating the client's des ired sasl mechanism.
extern const char* const saslCommandMechanismFieldName; extern MONGO_CLIENT_API const char* const saslCommandMechanismFieldName ;
/// In the event that saslStart supplies an unsupported mechanism, the server responds with a /// In the event that saslStart supplies an unsupported mechanism, the server responds with a
/// field by this name, with a list of supported mechanisms. /// field by this name, with a list of supported mechanisms.
extern const char* const saslCommandMechanismListFieldName; extern MONGO_CLIENT_API const char* const saslCommandMechanismListField Name;
/// Field containing password information for saslClientAuthenticate(). /// Field containing password information for saslClientAuthenticate().
extern const char* const saslCommandPasswordFieldName; extern MONGO_CLIENT_API const char* const saslCommandPasswordFieldName;
/// Field containing sasl payloads passed to and from the server. /// Field containing sasl payloads passed to and from the server.
extern const char* const saslCommandPayloadFieldName; extern MONGO_CLIENT_API const char* const saslCommandPayloadFieldName;
/// Field containing the string identifier of the user to authenticate in /// Field containing the string identifier of the user to authenticate in
/// saslClientAuthenticate(). /// saslClientAuthenticate().
extern const char* const saslCommandUserFieldName; extern MONGO_CLIENT_API const char* const saslCommandUserFieldName;
/// Field containing the string identifier of the database containing c redential information, /// Field containing the string identifier of the database containing c redential information,
/// or "$external" if the credential information is stored outside of t he mongo cluster. /// or "$external" if the credential information is stored outside of t he mongo cluster.
extern const char* const saslCommandUserDBFieldName; extern MONGO_CLIENT_API const char* const saslCommandUserDBFieldName;
/// Field overriding the FQDN of the hostname hosting the mongodb srevi ce in /// Field overriding the FQDN of the hostname hosting the mongodb srevi ce in
/// saslClientAuthenticate(). /// saslClientAuthenticate().
extern const char* const saslCommandServiceHostnameFieldName; extern MONGO_CLIENT_API const char* const saslCommandServiceHostnameFie ldName;
/// Field overriding the name of the mongodb service saslClientAuthenti cate(). /// Field overriding the name of the mongodb service saslClientAuthenti cate().
extern const char* const saslCommandServiceNameFieldName; extern MONGO_CLIENT_API const char* const saslCommandServiceNameFieldNa me;
/// Default database against which sasl authentication commands should run. /// Default database against which sasl authentication commands should run.
extern const char* const saslDefaultDBName; extern MONGO_CLIENT_API const char* const saslDefaultDBName;
/// Default sasl service name, "mongodb". /// Default sasl service name, "mongodb".
extern const char* const saslDefaultServiceName; extern MONGO_CLIENT_API const char* const saslDefaultServiceName;
// Field whose value should be set to true if the field in saslCommandP asswordFieldName needs to // Field whose value should be set to true if the field in saslCommandP asswordFieldName needs to
// be digested. // be digested.
extern const char* const saslCommandDigestPasswordFieldName; extern MONGO_CLIENT_API const char* const saslCommandDigestPasswordFiel dName;
} }
 End of changes. 21 change blocks. 
20 lines changed or deleted 21 lines changed or added


 sasl_client_session.h   sasl_client_session.h 
skipping to change at line 24 skipping to change at line 24
*/ */
#include <boost/scoped_array.hpp> #include <boost/scoped_array.hpp>
#include <sasl/sasl.h> #include <sasl/sasl.h>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* Implementation of the client side of a SASL authentication conversat ion. * Implementation of the client side of a SASL authentication conversat ion.
* *
* To use, create an instance, then use setParameter() to configure the authentication * To use, create an instance, then use setParameter() to configure the authentication
* parameters. Once all parameters are set, call initialize() to initi alize the client state * parameters. Once all parameters are set, call initialize() to initi alize the client state
* machine. Finally, use repeated calls to step() to generate messages to send to the server * machine. Finally, use repeated calls to step() to generate messages to send to the server
* and process server responses. * and process server responses.
* *
* The required parameters vary by mechanism, but all mechanisms requir e parameterServiceName, * The required parameters vary by mechanism, but all mechanisms requir e parameterServiceName,
* parameterServiceHostname, parameterMechanism and parameterUser. All of the required * parameterServiceHostname, parameterMechanism and parameterUser. All of the required
* parameters must be UTF-8 encoded strings with no embedded NUL charac ters. The * parameters must be UTF-8 encoded strings with no embedded NUL charac ters. The
* parameterPassword parameter is not constrained. * parameterPassword parameter is not constrained.
*/ */
class SaslClientSession { class MONGO_CLIENT_API SaslClientSession {
MONGO_DISALLOW_COPYING(SaslClientSession); MONGO_DISALLOW_COPYING(SaslClientSession);
public: public:
/** /**
* Identifiers of parameters used to configure a SaslClientSession. * Identifiers of parameters used to configure a SaslClientSession.
*/ */
enum Parameter { enum Parameter {
parameterServiceName = 0, parameterServiceName = 0,
parameterServiceHostname, parameterServiceHostname,
parameterMechanism, parameterMechanism,
parameterUser, parameterUser,
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 server.h   server.h 
skipping to change at line 17 skipping to change at line 17
* it under the terms of the GNU Affero General Public License, version 3 , * it under the terms of the GNU Affero General Public License, version 3 ,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Licen se * You should have received a copy of the GNU Affero General Public Licen se
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certai
n
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Yo
u
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If y
ou
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
/** /**
This file contains includes commonly needed in the server files (mongod , mongos, test). It is *NOT* included in the C++ client; i.e. This file contains includes commonly needed in the server files (mongod , mongos, test). It is *NOT* included in the C++ client; i.e.
this is a very good place for global-ish things that you don't need to be in the client lib. this is a very good place for global-ish things that you don't need to be in the client lib.
Over time we should move more here, and more out of pch.h. And get rid of pch.h at some point. Over time we should move more here, and more out of pch.h. And get rid of pch.h at some point.
*/ */
#pragma once #pragma once
 End of changes. 1 change blocks. 
0 lines changed or deleted 17 lines changed or added


 service_stats.h   service_stats.h 
skipping to change at line 36 skipping to change at line 36
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#ifndef DB_STATS_SERVICE_STATS_HEADER #ifndef DB_STATS_SERVICE_STATS_HEADER
#define DB_STATS_SERVICE_STATS_HEADER #define DB_STATS_SERVICE_STATS_HEADER
#include <string> #include <string>
#include "../../util/concurrency/spin_lock.h" #include "mongo/util/concurrency/spin_lock.h"
namespace mongo { namespace mongo {
using std::string; using std::string;
class Histogram; class Histogram;
/** /**
* ServiceStats keeps track of the time a request/response message * ServiceStats keeps track of the time a request/response message
* took inside a service as well as the size of the response * took inside a service as well as the size of the response
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 shapes.h   shapes.h 
skipping to change at line 36 skipping to change at line 36
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/owned_pointer_vector.h" #include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "third_party/s2/s2.h" #include "mongo/db/geo/s2.h"
#include "third_party/s2/s2cap.h" #include "third_party/s2/s2cap.h"
#include "third_party/s2/s2cell.h" #include "third_party/s2/s2cell.h"
#include "third_party/s2/s2latlng.h" #include "third_party/s2/s2latlng.h"
#include "third_party/s2/s2polygon.h" #include "third_party/s2/s2polygon.h"
#include "third_party/s2/s2polyline.h" #include "third_party/s2/s2polyline.h"
namespace mongo { namespace mongo {
struct Point; struct Point;
double distance(const Point& p1, const Point &p2); double distance(const Point& p1, const Point &p2);
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 shard_filter.h   shard_filter.h 
skipping to change at line 41 skipping to change at line 41
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stage.h"
#include "mongo/s/chunk_version.h" #include "mongo/s/chunk_version.h"
#include "mongo/s/d_logic.h" #include "mongo/s/d_logic.h"
#include "mongo/s/stale_exception.h" #include "mongo/s/stale_exception.h"
namespace mongo { namespace mongo {
/** /**
* This stage drops documents that don't belong to the shard we're exec * This stage drops documents that didn't belong to the shard we're exe
uting on. cuting on at the time of
* construction. This matches the contract for sharded cursorids which
guarantees that a
* StaleConfigException will be thrown early or the cursorid for its en
tire lifetime will return
* documents matching the shard version set on the connection at the ti
me of cursorid creation.
*
* A related system will ensure that the data migrated away from a shar
d will not be deleted as
* long as there are active queries from before the migration. Currentl
y, "active queries" is
* defined by cursorids so it is important that the metadata used in th
is stage uses the same
* version as the cursorid. Therefore, you must wrap any Runner using t
his Stage in a
* ClientCursor during the same lock grab as constructing the Runner.
*
* BEGIN NOTE FROM GREG
*
* There are three sharded query contracts:
*
* 0) Migration commit takes the db lock - i.e. is serialized with writ
es and reads.
* 1) No data should be returned from a query in ranges of migrations t
hat committed after the
* query started, or from ranges not owned when the query began.
* 2) No migrated data should be removed from a shard while there are q
ueries that were active
* before the migration.
*
* As implementation details, collection metadata is used to determine
the ranges of all data
* not actively migrated (or orphaned). CursorIds are currently used t
o establish "active"
* queries before migration commit.
*
* Combining all this: if a query is started in a db lock and acquires
in that (same) lock the
* collection metadata and a cursorId, the query will return results fo
r exactly the ranges in
* the metadata (though of arbitrary staleness). This is the sharded c
ollection query contract.
*
* END NOTE FROM GREG
* *
* Preconditions: Child must be fetched. TODO XXX: when covering analy sis is in just build doc * Preconditions: Child must be fetched. TODO XXX: when covering analy sis is in just build doc
* and check that against shard key. * and check that against shard key.
*/ */
class ShardFilterStage : public PlanStage { class ShardFilterStage : public PlanStage {
public: public:
ShardFilterStage(const string& ns, WorkingSet* ws, PlanStage* child ); ShardFilterStage(const CollectionMetadataPtr& metadata, WorkingSet* ws, PlanStage* child);
virtual ~ShardFilterStage(); virtual ~ShardFilterStage();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
WorkingSet* _ws; WorkingSet* _ws;
scoped_ptr<PlanStage> _child; scoped_ptr<PlanStage> _child;
string _ns;
// Stats // Stats
CommonStats _commonStats; CommonStats _commonStats;
ShardingFilterStats _specificStats; ShardingFilterStats _specificStats;
bool _initted; // Note: it is important that this is the metadata from the time th
CollectionMetadataPtr _metadata; is stage is constructed.
// See class comment for details.
const CollectionMetadataPtr _metadata;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
7 lines changed or deleted 52 lines changed or added


 shell_options.h   shell_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
namespace mongo { namespace mongo {
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 single_solution_runner.h   single_solution_runner.h 
skipping to change at line 55 skipping to change at line 55
class TypeExplain; class TypeExplain;
class WorkingSet; class WorkingSet;
/** /**
* SingleSolutionRunner runs a plan that was the only possible solution to a query. It exists * SingleSolutionRunner runs a plan that was the only possible solution to a query. It exists
* only to dump stats into the cache after running. * only to dump stats into the cache after running.
*/ */
class SingleSolutionRunner : public Runner { class SingleSolutionRunner : public Runner {
public: public:
/** Takes ownership of all the arguments. */ /** Takes ownership of all the arguments except collection */
SingleSolutionRunner(CanonicalQuery* canonicalQuery, QuerySolution* SingleSolutionRunner(const Collection* collection,
soln, CanonicalQuery* canonicalQuery, QuerySolution*
soln,
PlanStage* root, WorkingSet* ws); PlanStage* root, WorkingSet* ws);
virtual ~SingleSolutionRunner(); virtual ~SingleSolutionRunner();
Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut);
virtual bool isEOF(); virtual bool isEOF();
virtual void saveState(); virtual void saveState();
virtual bool restoreState(); virtual bool restoreState();
virtual void setYieldPolicy(Runner::YieldPolicy policy); virtual void setYieldPolicy(Runner::YieldPolicy policy);
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual const std::string& ns(); virtual const std::string& ns();
virtual void kill(); virtual void kill();
virtual const Collection* collection() { return _collection; }
/** /**
* Returns OK, allocating and filling in '*explain' with the detail * Returns OK, allocating and filling in '*explain' and '*staticInf
s of the plan used o' with the details
* by this runner. Caller takes ownership of '*explain'. Otherwise, * of the plan used by this runner. Caller takes ownership of '*exp
return a status lain' and
* describing the error. * '*staticInfo'. Otherwise, return a status describing the error.
*/ */
virtual Status getExplainPlan(TypeExplain** explain) const; virtual Status getInfo(TypeExplain** explain,
PlanInfo** planInfo) const;
private: private:
const Collection* _collection;
boost::scoped_ptr<CanonicalQuery> _canonicalQuery; boost::scoped_ptr<CanonicalQuery> _canonicalQuery;
boost::scoped_ptr<QuerySolution> _solution; boost::scoped_ptr<QuerySolution> _solution;
boost::scoped_ptr<PlanExecutor> _exec; boost::scoped_ptr<PlanExecutor> _exec;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
10 lines changed or deleted 15 lines changed or added


 skip.h   skip.h 
skipping to change at line 53 skipping to change at line 53
class SkipStage : public PlanStage { class SkipStage : public PlanStage {
public: public:
SkipStage(int toSkip, WorkingSet* ws, PlanStage* child); SkipStage(int toSkip, WorkingSet* ws, PlanStage* child);
virtual ~SkipStage(); virtual ~SkipStage();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
virtual PlanStageStats* getStats(); virtual PlanStageStats* getStats();
private: private:
WorkingSet* _ws; WorkingSet* _ws;
scoped_ptr<PlanStage> _child; scoped_ptr<PlanStage> _child;
// We drop the first _toSkip results that we would have returned. // We drop the first _toSkip results that we would have returned.
int _toSkip; int _toSkip;
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 snapshots.h   snapshots.h 
skipping to change at line 33 skipping to change at line 33
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "../jsobj.h" #include "mongo/db/jsobj.h"
#include "top.h" #include "top.h"
#include "../../util/background.h" #include "mongo/util/background.h"
/** /**
handles snapshotting performance metrics and other such things handles snapshotting performance metrics and other such things
*/ */
namespace mongo { namespace mongo {
class SnapshotThread; class SnapshotThread;
/** /**
* stores a point in time snapshot * stores a point in time snapshot
 End of changes. 2 change blocks. 
2 lines changed or deleted 2 lines changed or added


 sort.h   sort.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <vector> #include <vector>
#include <set>
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/matcher.h" #include "mongo/db/matcher.h"
#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/working_set.h"
#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds.h"
#include "mongo/platform/unordered_map.h" #include "mongo/platform/unordered_map.h"
namespace mongo { namespace mongo {
class BtreeKeyGenerator; class BtreeKeyGenerator;
// External params for the sort stage. Declared below. // Parameters that must be provided to a SortStage
class SortStageParams; class SortStageParams {
public:
SortStageParams() : limit(0) { }
// How we're sorting.
BSONObj pattern;
// The query. Used to create the IndexBounds for the sorting.
BSONObj query;
// Must be >= 0. Equal to 0 for no limit.
int limit;
};
/**
* Maps a WSM value to a BSONObj key that can then be sorted via BSONOb
jCmp.
*/
class SortStageKeyGenerator {
public:
/**
* 'sortSpec' is the BSONObj in the .sort(...) clause.
*
* 'queryObj' is the BSONObj in the .find(...) clause. For multike
y arrays we have to
* ensure that the value we select to sort by is within bounds gene
rated by
* executing 'queryObj' using the virtual index with key pattern 's
ortSpec'.
*/
SortStageKeyGenerator(const BSONObj& sortSpec, const BSONObj& query
Obj);
/**
* Returns the key used to sort 'member'.
*/
BSONObj getSortKey(const WorkingSetMember& member) const;
/**
* Passed to std::sort and used to sort the keys that are returned
from getSortKey.
*
* Returned reference lives as long as 'this'.
*/
const BSONObj& getSortComparator() const { return _comparatorObj; }
private:
BSONObj getBtreeKey(const BSONObj& memberObj) const;
/**
* In order to emulate the existing sort behavior we must make unin
dexed sort behavior as
* consistent as possible with indexed sort behavior. As such, we
must only consider index
* keys that we would encounter if we were answering the query usin
g the sort-providing
* index.
*
* Populates _hasBounds and _bounds.
*/
void getBoundsForSort(const BSONObj& queryObj, const BSONObj& sortO
bj);
// The object that we use to call woCompare on our resulting key.
Is equal to _rawSortSpec
// unless we have some $meta expressions. Each $meta expression ha
s a default sort order.
BSONObj _comparatorObj;
// The raw object in .sort()
BSONObj _rawSortSpec;
// The sort pattern with any non-Btree sort pulled out.
BSONObj _btreeObj;
// If we're not sorting with a $meta value we can short-cut some wo
rk.
bool _sortHasMeta;
// True if the bounds are valid.
bool _hasBounds;
// The bounds generated from the query we're sorting.
IndexBounds _bounds;
// Helper to extract sorting keys from documents.
boost::scoped_ptr<BtreeKeyGenerator> _keyGen;
// Helper to filter keys, ensuring keys generated with _keyGen are
within _bounds.
boost::scoped_ptr<IndexBoundsChecker> _boundsChecker;
};
/** /**
* Sorts the input received from the child according to the sort patter n provided. * Sorts the input received from the child according to the sort patter n provided.
* *
* Preconditions: For each field in 'pattern', all inputs in the child must handle a * Preconditions: For each field in 'pattern', all inputs in the child must handle a
* getFieldDotted for that field. * getFieldDotted for that field.
*/ */
class SortStage : public PlanStage { class SortStage : public PlanStage {
public: public:
SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child); SortStage(const SortStageParams& params, WorkingSet* ws, PlanStage* child);
virtual ~SortStage(); virtual ~SortStage();
virtual bool isEOF(); virtual bool isEOF();
virtual StageState work(WorkingSetID* out); virtual StageState work(WorkingSetID* out);
virtual void prepareToYield(); virtual void prepareToYield();
virtual void recoverFromYield(); virtual void recoverFromYield();
virtual void invalidate(const DiskLoc& dl); virtual void invalidate(const DiskLoc& dl, InvalidationType type);
PlanStageStats* getStats(); PlanStageStats* getStats();
private: private:
void getBoundsForSort(const BSONObj& queryObj, const BSONObj& sortO
bj);
//
// Query Stage
//
// Not owned by us. // Not owned by us.
WorkingSet* _ws; WorkingSet* _ws;
// Where we're reading data to sort from. // Where we're reading data to sort from.
scoped_ptr<PlanStage> _child; boost::scoped_ptr<PlanStage> _child;
// Our sort pattern. // The raw sort _pattern as expressed by the user
BSONObj _pattern; BSONObj _pattern;
// The raw query as expressed by the user
BSONObj _query;
// Must be >= 0. Equal to 0 for no limit.
int _limit;
//
// Sort key generation
//
boost::scoped_ptr<SortStageKeyGenerator> _sortKeyGen;
//
// Data storage
//
// Have we sorted our data? If so, we can access _resultIterator. I f not, // Have we sorted our data? If so, we can access _resultIterator. I f not,
// we're still populating _data. // we're still populating _data.
bool _sorted; bool _sorted;
// Collection of working set members to sort with their respective sort key. // Collection of working set members to sort with their respective sort key.
struct SortableDataItem { struct SortableDataItem {
WorkingSetID wsid; WorkingSetID wsid;
BSONObj sortKey; BSONObj sortKey;
// Since we must replicate the behavior of a covered sort as mu ch as possible we use the // Since we must replicate the behavior of a covered sort as mu ch as possible we use the
// DiskLoc to break sortKey ties. // DiskLoc to break sortKey ties.
// See sorta.js. // See sorta.js.
DiskLoc loc; DiskLoc loc;
}; };
// Comparison object for data buffers (vector and set).
// Items are compared on (sortKey, loc). This is also how the items
are
// ordered in the indices.
// Keys are compared using BSONObj::woCompare() with DiskLoc as a t
ie-breaker.
struct WorkingSetComparator {
explicit WorkingSetComparator(BSONObj p);
bool operator()(const SortableDataItem& lhs, const SortableData
Item& rhs) const;
BSONObj pattern;
};
/**
* Inserts one item into data buffer (vector or set).
* If limit is exceeded, remove item with lowest key.
*/
void addToBuffer(const SortableDataItem& item);
/**
* Sorts data buffer.
* Assumes no more items will be added to buffer.
* If data is stored in set, copy set
* contents to vector and clear set.
*/
void sortBuffer();
// Comparator for data buffer
// Initialization follows sort key generator
scoped_ptr<WorkingSetComparator> _sortKeyComparator;
// The data we buffer and sort.
// _data will contain sorted data when all data is gathered
// and sorted.
// When _limit is greater than 1 and not all data has been gathered
from child stage,
// _dataSet is used instead to maintain an ordered set of the incom
plete data set.
// When the data set is complete, we copy the items from _dataSet t
o _data which will
// be used to provide the results of this stage through _resultIter
ator.
vector<SortableDataItem> _data; vector<SortableDataItem> _data;
typedef std::set<SortableDataItem, WorkingSetComparator> SortableDa
taItemSet;
scoped_ptr<SortableDataItemSet> _dataSet;
// Iterates through _data post-sort returning it. // Iterates through _data post-sort returning it.
vector<SortableDataItem>::iterator _resultIterator; vector<SortableDataItem>::iterator _resultIterator;
// We buffer a lot of data and we want to look it up by DiskLoc qui ckly upon invalidation. // We buffer a lot of data and we want to look it up by DiskLoc qui ckly upon invalidation.
typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap; typedef unordered_map<DiskLoc, WorkingSetID, DiskLoc::Hasher> DataM ap;
DataMap _wsidByDiskLoc; DataMap _wsidByDiskLoc;
// //
// Sort Apparatus
//
// A comparator for SortableDataItems.
struct WorkingSetComparator;
boost::scoped_ptr<WorkingSetComparator> _cmp;
// Bounds we should consider before sorting.
IndexBounds _bounds;
bool _hasBounds;
// Helper to extract sorting keys from documents containing dotted
fields, arrays,
// or both.
boost::scoped_ptr<BtreeKeyGenerator> _keyGen;
// Helper to filter keys, thus enforcing _bounds over whatever keys
generated with
// _keyGen.
boost::scoped_ptr<IndexBoundsChecker> _boundsChecker;
//
// Stats // Stats
// //
CommonStats _commonStats; CommonStats _commonStats;
SortStats _specificStats; SortStats _specificStats;
// The usage in bytes of all bufered data that we're sorting. // The usage in bytes of all bufered data that we're sorting.
size_t _memUsage; size_t _memUsage;
}; };
// Parameters that must be provided to a SortStage
class SortStageParams {
public:
SortStageParams() : hasBounds(false) { }
// How we're sorting.
BSONObj pattern;
IndexBounds bounds;
bool hasBounds;
// TODO: Implement this.
// Must be >= 0. Equal to 0 for no limit.
// int limit;
};
} // namespace mongo } // namespace mongo
 End of changes. 11 change blocks. 
45 lines changed or deleted 167 lines changed or added


 spaces-inl.h   spaces-inl.h 
skipping to change at line 152 skipping to change at line 152
#endif #endif
// ------------------------------------------------------------------------ -- // ------------------------------------------------------------------------ --
// PagedSpace // PagedSpace
Page* Page::Initialize(Heap* heap, Page* Page::Initialize(Heap* heap,
MemoryChunk* chunk, MemoryChunk* chunk,
Executability executable, Executability executable,
PagedSpace* owner) { PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk); Page* page = reinterpret_cast<Page*>(chunk);
ASSERT(chunk->size() <= static_cast<size_t>(kPageSize)); ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
ASSERT(chunk->owner() == owner); ASSERT(chunk->owner() == owner);
owner->IncreaseCapacity(page->area_size()); owner->IncreaseCapacity(page->area_size());
owner->Free(page->area_start(), page->area_size()); owner->Free(page->area_start(), page->area_size());
heap->incremental_marking()->SetOldSpacePageFlags(chunk); heap->incremental_marking()->SetOldSpacePageFlags(chunk);
return page; return page;
} }
bool PagedSpace::Contains(Address addr) { bool PagedSpace::Contains(Address addr) {
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 ssl_options.h   ssl_options.h 
skipping to change at line 19 skipping to change at line 19
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/client/export_macros.h"
#include "mongo/util/net/ssl_manager.h" #include "mongo/util/net/ssl_manager.h"
namespace mongo { namespace mongo {
namespace optionenvironment { namespace optionenvironment {
class OptionSection; class OptionSection;
class Environment; class Environment;
} // namespace optionenvironment } // namespace optionenvironment
namespace moe = mongo::optionenvironment; namespace moe = mongo::optionenvironment;
struct SSLGlobalParams { struct MONGO_CLIENT_API SSLGlobalParams {
AtomicInt32 sslMode; // --sslMode - the SSL operation mode, see enum SSLModes AtomicInt32 sslMode; // --sslMode - the SSL operation mode, see enum SSLModes
bool sslOnNormalPorts; // --sslOnNormalPorts (deprecated) bool sslOnNormalPorts; // --sslOnNormalPorts (deprecated)
std::string sslPEMKeyFile; // --sslPEMKeyFile std::string sslPEMKeyFile; // --sslPEMKeyFile
std::string sslPEMKeyPassword; // --sslPEMKeyPassword std::string sslPEMKeyPassword; // --sslPEMKeyPassword
std::string sslClusterFile; // --sslInternalKeyFile std::string sslClusterFile; // --sslInternalKeyFile
std::string sslClusterPassword; // --sslInternalKeyPassword std::string sslClusterPassword; // --sslInternalKeyPassword
std::string sslCAFile; // --sslCAFile std::string sslCAFile; // --sslCAFile
std::string sslCRLFile; // --sslCRLFile std::string sslCRLFile; // --sslCRLFile
bool sslWeakCertificateValidation; // --sslWeakCertificateValidatio n bool sslWeakCertificateValidation; // --sslWeakCertificateValidatio n
bool sslFIPSMode; // --sslFIPSMode bool sslFIPSMode; // --sslFIPSMode
skipping to change at line 70 skipping to change at line 71
*/ */
SSLMode_preferSSL, SSLMode_preferSSL,
/** /**
* Make outgoing SSL-connections and only accept incoming SSL-co nnections * Make outgoing SSL-connections and only accept incoming SSL-co nnections
*/ */
SSLMode_requireSSL SSLMode_requireSSL
}; };
}; };
extern SSLGlobalParams sslGlobalParams; extern MONGO_CLIENT_API SSLGlobalParams sslGlobalParams;
Status addSSLServerOptions(moe::OptionSection* options); Status addSSLServerOptions(moe::OptionSection* options);
Status addSSLClientOptions(moe::OptionSection* options); Status addSSLClientOptions(moe::OptionSection* options);
Status storeSSLServerOptions(const moe::Environment& params); Status storeSSLServerOptions(const moe::Environment& params);
Status storeSSLClientOptions(const moe::Environment& params); Status storeSSLClientOptions(const moe::Environment& params);
} }
 End of changes. 3 change blocks. 
2 lines changed or deleted 3 lines changed or added


 stage_types.h   stage_types.h 
skipping to change at line 40 skipping to change at line 40
namespace mongo { namespace mongo {
/** /**
* These map to implementations of the PlanStage interface, all of whic h live in db/exec/ * These map to implementations of the PlanStage interface, all of whic h live in db/exec/
*/ */
enum StageType { enum StageType {
STAGE_AND_HASH, STAGE_AND_HASH,
STAGE_AND_SORTED, STAGE_AND_SORTED,
STAGE_COLLSCAN, STAGE_COLLSCAN,
// If we're running a .count(), the query is fully covered by one i
xscan, and the ixscan is
// from one key to another, we can just skip through the keys witho
ut bothering to examine
// them.
STAGE_COUNT,
// If we're running a distinct, we only care about one value for ea
ch key. The distinct
// stage is an ixscan with some key-skipping behvaior that only dis
tinct uses.
STAGE_DISTINCT,
// This is more of an "internal-only" stage where we try to keep do
cs that were mutated
// during query execution.
STAGE_KEEP_MUTATIONS,
STAGE_FETCH, STAGE_FETCH,
// TODO: This is probably an expression index, but would take even // TODO: This is secretly an expression index but we need geometry
more time than -> covering for our
// STAGE_2DSPHERE to straighten out. // geohash.
STAGE_GEO_2D, STAGE_GEO_2D,
// The two $geoNear impls imply a fetch+sort and as such are not IX
SCANs. // The two $geoNear impls imply a fetch+sort and must be stages.
STAGE_GEO_NEAR_2D, STAGE_GEO_NEAR_2D,
STAGE_GEO_NEAR_2DSPHERE, STAGE_GEO_NEAR_2DSPHERE,
STAGE_IXSCAN, STAGE_IXSCAN,
STAGE_LIMIT, STAGE_LIMIT,
STAGE_OR, STAGE_OR,
STAGE_PROJECTION, STAGE_PROJECTION,
STAGE_SHARDING_FILTER, STAGE_SHARDING_FILTER,
STAGE_SKIP, STAGE_SKIP,
STAGE_SORT, STAGE_SORT,
 End of changes. 3 change blocks. 
5 lines changed or deleted 24 lines changed or added


 stat_util.h   stat_util.h 
skipping to change at line 17 skipping to change at line 17
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "../db/jsobj.h" #include "../db/jsobj.h"
namespace mongo { namespace mongo {
struct NamespaceInfo { struct NamespaceInfo {
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 status.h   status.h 
skipping to change at line 22 skipping to change at line 22
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include "mongo/base/error_codes.h" #include "mongo/base/error_codes.h"
#include "mongo/client/export_macros.h"
#include "mongo/platform/atomic_word.h" #include "mongo/platform/atomic_word.h"
namespace mongo { namespace mongo {
/** /**
* Status represents an error state or the absence thereof. * Status represents an error state or the absence thereof.
* *
* A Status uses the standardized error codes -- from file 'error_codes .h' -- to * A Status uses the standardized error codes -- from file 'error_codes .h' -- to
* determine an error's cause. It further clarifies the error with a te xtual * determine an error's cause. It further clarifies the error with a te xtual
* description. Optionally, a Status may also have an error location nu mber, which * description. Optionally, a Status may also have an error location nu mber, which
skipping to change at line 49 skipping to change at line 50
* } * }
* *
* *c = a+b; * *c = a+b;
* return Status::OK(); * return Status::OK();
* } * }
* *
* TODO: expand base/error_codes.h to capture common errors in current code * TODO: expand base/error_codes.h to capture common errors in current code
* TODO: generate base/error_codes.h out of a description file * TODO: generate base/error_codes.h out of a description file
* TODO: check 'location' duplicates against assert numbers * TODO: check 'location' duplicates against assert numbers
*/ */
class Status { class MONGO_CLIENT_API Status {
public: public:
// Short-hand for returning an OK status. // Short-hand for returning an OK status.
static inline Status OK(); static inline Status OK();
/** /**
* Builds an error status given the error code, a textual descripti on of what * Builds an error status given the error code, a textual descripti on of what
* caused the error, and a unique position in the where the error o ccurred * caused the error, and a unique position in the where the error o ccurred
* (similar to an assert number) * (similar to an assert number)
*/ */
Status(ErrorCodes::Error code, const std::string& reason, int locat ion = 0); Status(ErrorCodes::Error code, const std::string& reason, int locat ion = 0);
skipping to change at line 131 skipping to change at line 132
/** /**
* Increment/Decrement the reference counter inside an ErrorInfo * Increment/Decrement the reference counter inside an ErrorInfo
* *
* @param error ErrorInfo to be incremented * @param error ErrorInfo to be incremented
*/ */
static inline void ref(ErrorInfo* error); static inline void ref(ErrorInfo* error);
static inline void unref(ErrorInfo* error); static inline void unref(ErrorInfo* error);
}; };
inline bool operator==(const ErrorCodes::Error lhs, const Status& rhs); MONGO_CLIENT_API inline bool operator==(const ErrorCodes::Error lhs, co nst Status& rhs);
inline bool operator!=(const ErrorCodes::Error lhs, const Status& rhs); MONGO_CLIENT_API inline bool operator!=(const ErrorCodes::Error lhs, co nst Status& rhs);
// //
// Convenience method for unittest code. Please use accessors otherwise . // Convenience method for unittest code. Please use accessors otherwise .
// //
std::ostream& operator<<(std::ostream& os, const Status& status); MONGO_CLIENT_API std::ostream& operator<<(std::ostream& os, const Statu
std::ostream& operator<<(std::ostream& os, ErrorCodes::Error); s& status);
MONGO_CLIENT_API std::ostream& operator<<(std::ostream& os, ErrorCodes:
:Error);
} // namespace mongo } // namespace mongo
#include "mongo/base/status-inl.h" #include "mongo/base/status-inl.h"
 End of changes. 5 change blocks. 
5 lines changed or deleted 8 lines changed or added


 stemmer.h   stemmer.h 
skipping to change at line 50 skipping to change at line 50
namespace fts { namespace fts {
/** /**
* maintains case * maintains case
* but works * but works
* running/Running -> run/Run * running/Running -> run/Run
*/ */
class Stemmer { class Stemmer {
public: public:
Stemmer( const FTSLanguage language ); Stemmer( const FTSLanguage& language );
~Stemmer(); ~Stemmer();
std::string stem( const StringData& word ) const; std::string stem( const StringData& word ) const;
private: private:
struct sb_stemmer* _stemmer; struct sb_stemmer* _stemmer;
}; };
} }
} }
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 stop_words.h   stop_words.h 
skipping to change at line 54 skipping to change at line 54
public: public:
StopWords(); StopWords();
StopWords( const std::set<std::string>& words ); StopWords( const std::set<std::string>& words );
bool isStopWord( const std::string& word ) const { bool isStopWord( const std::string& word ) const {
return _words.count( word ) > 0; return _words.count( word ) > 0;
} }
size_t numStopWords() const { return _words.size(); } size_t numStopWords() const { return _words.size(); }
static const StopWords* getStopWords( const FTSLanguage langaug e ); static const StopWords* getStopWords( const FTSLanguage& langau ge );
private: private:
~StopWords(){} ~StopWords(){}
unordered_set<std::string> _words; unordered_set<std::string> _words;
}; };
} }
} }
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 stop_words_list.h   stop_words_list.h 
#pragma once #pragma once
#include <map>
#include <set> #include <set>
#include <string> #include <string>
#include "mongo/util/string_map.h"
namespace mongo { namespace mongo {
namespace fts { namespace fts {
void loadStopWordMap( std::map< std::string, std::set< std::string > >* m ); void loadStopWordMap( StringMap< std::set< std::string > >* m );
} }
} }
 End of changes. 3 change blocks. 
2 lines changed or deleted 2 lines changed or added


 str.h   str.h 
skipping to change at line 34 skipping to change at line 34
(4) are clean and easy to use in any c++ project without pulling in lots of other stuff (4) are clean and easy to use in any c++ project without pulling in lots of other stuff
Note: within this module, we use int for all offsets -- there are no uns igned offsets Note: within this module, we use int for all offsets -- there are no uns igned offsets
and no size_t's. If you need 3 gigabyte long strings, don't use this mo dule. and no size_t's. If you need 3 gigabyte long strings, don't use this mo dule.
*/ */
#include <string> #include <string>
#include <sstream> #include <sstream>
// this violates the README rules for mongoutils: // this violates the README rules for mongoutils:
#include "../../bson/util/builder.h" #include "mongo/bson/util/builder.h"
namespace mongoutils { namespace mongoutils {
namespace str { namespace str {
/** the idea here is to make one liners easy. e.g.: /** the idea here is to make one liners easy. e.g.:
return str::stream() << 1 << ' ' << 2; return str::stream() << 1 << ' ' << 2;
since the following doesn't work: since the following doesn't work:
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 strategy.h   strategy.h 
skipping to change at line 38 skipping to change at line 38
*/ */
#pragma once #pragma once
#include "mongo/pch.h" #include "mongo/pch.h"
#include "chunk.h" #include "chunk.h"
#include "request.h" #include "request.h"
namespace mongo { namespace mongo {
/**
* Legacy interface for processing client read/write/cmd requests.
*/
class Strategy { class Strategy {
public: public:
Strategy() {} Strategy() {}
virtual ~Strategy() {}
virtual void queryOp( Request& r ) = 0;
virtual void getMore( Request& r ) = 0;
virtual void writeOp( int op , Request& r ) = 0;
void insert( const Shard& shard , const char * ns , const BSONObj& void queryOp( Request& r );
obj , int flags=0 , bool safe=false );
void getMore( Request& r );
void writeOp( int op , Request& r );
struct CommandResult { struct CommandResult {
Shard shardTarget; Shard shardTarget;
ConnectionString target; ConnectionString target;
BSONObj result; BSONObj result;
}; };
virtual void commandOp( const string& db, /**
const BSONObj& command, * Executes a command against a particular database, and targets th
int options, e command based on a
const string& versionedNS, * collection in that database.
const BSONObj& targetingQuery, *
vector<CommandResult>* results ) * This version should be used by internal commands when possible.
{ */
// Only call this from sharded, for now. void commandOp( const string& db,
// TODO: Refactor all this. const BSONObj& command,
verify( false ); int options,
} const string& versionedNS,
const BSONObj& targetingQuery,
// These interfaces will merge soon, so make it easy to share logic vector<CommandResult>* results );
friend class ShardStrategy;
friend class SingleStrategy; /**
* Executes a command represented in the Request on the sharded clu
static bool useClusterWriteCommands; ster.
*
* DEPRECATED: should not be used by new code.
*/
void clientCommandOp( Request& r );
protected: protected:
void doWrite( int op , Request& r , const Shard& shard , bool check Version = true );
void doIndexQuery( Request& r , const Shard& shard ); void doIndexQuery( Request& r , const Shard& shard );
void broadcastWrite(int op, Request& r); // Sends to all shards in cluster. DOESN'T CHECK VERSION
void insert( const Shard& shard , const char * ns , const vector<BS bool handleSpecialNamespaces( Request& r , QueryMessage& q );
ONObj>& v , int flags=0 , bool safe=false );
void update( const Shard& shard , const char * ns , const BSONObj&
query , const BSONObj& toupdate , int flags=0, bool safe=false );
}; };
extern Strategy * SINGLE; extern Strategy* STRATEGY;
extern Strategy * SHARDED;
} }
 End of changes. 9 change blocks. 
31 lines changed or deleted 33 lines changed or added


 string_map.h   string_map.h 
skipping to change at line 25 skipping to change at line 25
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/util/unordered_fast_key_table.h" #include "mongo/util/unordered_fast_key_table.h"
namespace mongo { namespace mongo {
struct StringMapDefaultHash { typedef StringData::Hasher StringMapDefaultHash;
size_t operator()( const StringData& k ) const;
};
struct StringMapDefaultEqual { struct StringMapDefaultEqual {
bool operator()( const StringData& a, const StringData& b ) const { bool operator()( const StringData& a, const StringData& b ) const {
return a == b; return a == b;
} }
}; };
struct StringMapDefaultConvertor { struct StringMapDefaultConvertor {
StringData operator()( const std::string& s ) const { StringData operator()( const std::string& s ) const {
return StringData( s ); return StringData( s );
skipping to change at line 57 skipping to change at line 55
template< typename V > template< typename V >
class StringMap : public UnorderedFastKeyTable< StringData, // K_L class StringMap : public UnorderedFastKeyTable< StringData, // K_L
std::string, // K_S std::string, // K_S
V, // V V, // V
StringMapDefaultHash, StringMapDefaultHash,
StringMapDefaultEqual, StringMapDefaultEqual,
StringMapDefaultConvert or, StringMapDefaultConvert or,
StringMapDefaultConvert orOther > { StringMapDefaultConvert orOther > {
}; };
} }
#include "mongo/util/string_map_internal.h"
 End of changes. 2 change blocks. 
3 lines changed or deleted 1 lines changed or added


 strtoll.h   strtoll.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <cstdlib> #include <cstdlib>
#ifdef _WIN32 #ifdef _WIN32
static inline long long strtoll(const char* nptr, char** endptr, int base) { static inline long long strtoll(const char* nptr, char** endptr, int base) {
return _strtoi64(nptr, endptr, base); return _strtoi64(nptr, endptr, base);
} }
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 syncclusterconnection.h   syncclusterconnection.h 
skipping to change at line 24 skipping to change at line 24
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclientinterface.h" #include "mongo/client/dbclientinterface.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* This is a connection to a cluster of servers that operate as one * This is a connection to a cluster of servers that operate as one
* for super high durability. * for super high durability.
* *
* Write operations are two-phase. First, all nodes are asked to fsync . If successful * Write operations are two-phase. First, all nodes are asked to fsync . If successful
* everywhere, the write is sent everywhere and then followed by an fsy nc. There is no * everywhere, the write is sent everywhere and then followed by an fsy nc. There is no
* rollback if a problem occurs during the second phase. Naturally, wi th all these fsyncs, * rollback if a problem occurs during the second phase. Naturally, wi th all these fsyncs,
* these operations will be quite slow -- use sparingly. * these operations will be quite slow -- use sparingly.
* *
* Read operations are sent to a single random node. * Read operations are sent to a single random node.
* *
* The class checks if a command is read or write style, and sends to a single * The class checks if a command is read or write style, and sends to a single
* node if a read lock command and to all in two phases with a write st yle command. * node if a read lock command and to all in two phases with a write st yle command.
*/ */
class SyncClusterConnection : public DBClientBase { class MONGO_CLIENT_API SyncClusterConnection : public DBClientBase {
public: public:
using DBClientBase::query; using DBClientBase::query;
using DBClientBase::update; using DBClientBase::update;
using DBClientBase::remove; using DBClientBase::remove;
/** /**
* @param commaSeparated should be 3 hosts comma separated * @param commaSeparated should be 3 hosts comma separated
*/ */
SyncClusterConnection( const list<HostAndPort> &, double socketTime out = 0); SyncClusterConnection( const list<HostAndPort> &, double socketTime out = 0);
skipping to change at line 95 skipping to change at line 96
virtual bool call( Message &toSend, Message &response, bool assertO k , string * actualServer ); virtual bool call( Message &toSend, Message &response, bool assertO k , string * actualServer );
virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 ); virtual void say( Message &toSend, bool isRetry = false , string * actualServer = 0 );
virtual void sayPiggyBack( Message &toSend ); virtual void sayPiggyBack( Message &toSend );
virtual void killCursor( long long cursorID ); virtual void killCursor( long long cursorID );
virtual string getServerAddress() const { return _address; } virtual string getServerAddress() const { return _address; }
virtual bool isFailed() const { return false; } virtual bool isFailed() const { return false; }
virtual bool isStillConnected(); virtual bool isStillConnected();
virtual string toString() { return _toString(); } virtual string toString() const { return _toString(); }
virtual BSONObj getLastErrorDetailed(const std::string& db, virtual BSONObj getLastErrorDetailed(const std::string& db,
bool fsync=false, bool fsync=false,
bool j=false, bool j=false,
int w=0, int w=0,
int wtimeout=0); int wtimeout=0);
virtual BSONObj getLastErrorDetailed(bool fsync=false, bool j=false , int w=0, int wtimeout=0); virtual BSONObj getLastErrorDetailed(bool fsync=false, bool j=false , int w=0, int wtimeout=0);
virtual bool callRead( Message& toSend , Message& response ); virtual bool callRead( Message& toSend , Message& response );
virtual ConnectionString::ConnectionType type() const { return Conn ectionString::SYNC; } virtual ConnectionString::ConnectionType type() const { return Conn ectionString::SYNC; }
void setAllSoTimeouts( double socketTimeout ); void setAllSoTimeouts( double socketTimeout );
double getSoTimeout() const { return _socketTimeout; } double getSoTimeout() const { return _socketTimeout; }
virtual bool lazySupported() const { return false; } virtual bool lazySupported() const { return false; }
virtual void setRunCommandHook(DBClientWithCommands::RunCommandHook
Func func);
virtual void setPostRunCommandHook(DBClientWithCommands::PostRunCom
mandHookFunc func);
protected: protected:
virtual void _auth(const BSONObj& params); virtual void _auth(const BSONObj& params);
private: private:
SyncClusterConnection( SyncClusterConnection& prev, double socketTi meout = 0 ); SyncClusterConnection( SyncClusterConnection& prev, double socketTi meout = 0 );
string _toString() const; string _toString() const;
bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSO NObj &info, int options=0); bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSO NObj &info, int options=0);
auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query que ry, int nToReturn, int nToSkip, auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query que ry, int nToReturn, int nToSkip,
const BSONObj *fieldsToRetu rn, int queryOptions, int batchSize ); const BSONObj *fieldsToRetu rn, int queryOptions, int batchSize );
int _lockType( const string& name ); int _lockType( const string& name );
skipping to change at line 137 skipping to change at line 141
vector<string> _connAddresses; vector<string> _connAddresses;
vector<DBClientConnection*> _conns; vector<DBClientConnection*> _conns;
map<string,int> _lockTypes; map<string,int> _lockTypes;
mongo::mutex _mutex; mongo::mutex _mutex;
vector<BSONObj> _lastErrors; vector<BSONObj> _lastErrors;
double _socketTimeout; double _socketTimeout;
}; };
class UpdateNotTheSame : public UserException { class MONGO_CLIENT_API UpdateNotTheSame : public UserException {
public: public:
UpdateNotTheSame( int code , const string& msg , const vector<strin g>& addrs , const vector<BSONObj>& lastErrors ) UpdateNotTheSame( int code , const string& msg , const vector<strin g>& addrs , const vector<BSONObj>& lastErrors )
: UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) { : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) {
verify( _addrs.size() == _lastErrors.size() ); verify( _addrs.size() == _lastErrors.size() );
} }
virtual ~UpdateNotTheSame() throw() { virtual ~UpdateNotTheSame() throw() {
} }
unsigned size() const { unsigned size() const {
 End of changes. 5 change blocks. 
3 lines changed or deleted 9 lines changed or added


 temp_dir.h   temp_dir.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#include <string> #include <string>
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
namespace mongo { namespace mongo {
namespace unittest { namespace unittest {
/** /**
* An RAII temporary directory that deletes itself and all contents fil es on scope exit. * An RAII temporary directory that deletes itself and all contents fil es on scope exit.
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 thread_name.h   thread_name.h 
skipping to change at line 21 skipping to change at line 21
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* Sets the name of the current thread to "name". * Sets the name of the current thread to "name".
*/ */
void setThreadName(StringData name); void setThreadName(StringData name);
/** /**
* Retrieves the name of the current thread, as previously set, or "" i f no name was previously * Retrieves the name of the current thread, as previously set, or "" i f no name was previously
* set. * set.
*/ */
const std::string& getThreadName(); MONGO_CLIENT_API const std::string& getThreadName();
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 2 lines changed or added


 time_support.h   time_support.h 
skipping to change at line 25 skipping to change at line 25
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <ctime> #include <ctime>
#include <string> #include <string>
#include <boost/thread/xtime.hpp> #include <boost/thread/xtime.hpp>
#include <boost/version.hpp> #include <boost/version.hpp>
#include "mongo/base/status_with.h"
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
void time_t_to_Struct(time_t t, struct tm * buf , bool local = false ); void time_t_to_Struct(time_t t, struct tm * buf , bool local = false );
std::string time_t_to_String(time_t t); std::string time_t_to_String(time_t t);
std::string time_t_to_String_short(time_t t); std::string time_t_to_String_short(time_t t);
struct Date_t { struct Date_t {
// TODO: make signed (and look for related TODO's) // TODO: make signed (and look for related TODO's)
unsigned long long millis; unsigned long long millis;
Date_t(): millis(0) {} Date_t(): millis(0) {}
skipping to change at line 81 skipping to change at line 84
*/ */
std::string dateToISOStringLocal(Date_t date); std::string dateToISOStringLocal(Date_t date);
/** /**
* Formats "date" in fixed width in the local time zone. * Formats "date" in fixed width in the local time zone.
* *
* Sample format: "Wed Oct 31 13:34:47.996" * Sample format: "Wed Oct 31 13:34:47.996"
*/ */
std::string dateToCtimeString(Date_t date); std::string dateToCtimeString(Date_t date);
/**
* Parses a Date_t from an ISO 8601 string representation.
*
* Sample formats: "2013-07-23T18:42:14.072-05:00"
* "2013-07-23T18:42:14.072Z"
*
* Local times are currently not supported.
*/
StatusWith<Date_t> dateFromISOString(const StringData& dateString);
boost::gregorian::date currentDate(); boost::gregorian::date currentDate();
// parses time of day in "hh:mm" format assuming 'hh' is 00-23 // parses time of day in "hh:mm" format assuming 'hh' is 00-23
bool toPointInTime( const std::string& str , boost::posix_time::ptime* timeOfDay ); bool toPointInTime( const std::string& str , boost::posix_time::ptime* timeOfDay );
void sleepsecs(int s); MONGO_CLIENT_API void sleepsecs(int s);
void sleepmillis(long long ms); MONGO_CLIENT_API void sleepmillis(long long ms);
void sleepmicros(long long micros); MONGO_CLIENT_API void sleepmicros(long long micros);
class Backoff { class Backoff {
public: public:
Backoff( int maxSleepMillis, int resetAfter ) : Backoff( int maxSleepMillis, int resetAfter ) :
_maxSleepMillis( maxSleepMillis ), _maxSleepMillis( maxSleepMillis ),
_resetAfterMillis( maxSleepMillis + resetAfter ), // Don't rese t < the max sleep _resetAfterMillis( maxSleepMillis + resetAfter ), // Don't rese t < the max sleep
_lastSleepMillis( 0 ), _lastSleepMillis( 0 ),
_lastErrorTimeMillis( 0 ) _lastErrorTimeMillis( 0 )
{} {}
 End of changes. 3 change blocks. 
3 lines changed or deleted 16 lines changed or added


 timer.h   timer.h 
skipping to change at line 20 skipping to change at line 20
* *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include "mongo/client/export_macros.h"
namespace mongo { namespace mongo {
/** /**
* Time tracking object. * Time tracking object.
* *
* Should be of reasonably high performance, though the implementations are platform-specific. * Should be of reasonably high performance, though the implementations are platform-specific.
* Each platform provides a distinct implementation of the now() method , and sets the * Each platform provides a distinct implementation of the now() method , and sets the
* _countsPerSecond static field to the constant number of ticks per se cond that now() counts * _countsPerSecond static field to the constant number of ticks per se cond that now() counts
* in. The maximum span measurable by the counter and convertible to m icroseconds is about 10 * in. The maximum span measurable by the counter and convertible to m icroseconds is about 10
* trillion ticks. As long as there are fewer than 100 ticks per nanos econd, timer durations of * trillion ticks. As long as there are fewer than 100 ticks per nanos econd, timer durations of
* 2.5 years will be supported. Since a typical tick duration will be under 10 per nanosecond, * 2.5 years will be supported. Since a typical tick duration will be under 10 per nanosecond,
* if not below 1 per nanosecond, this should not be an issue. * if not below 1 per nanosecond, this should not be an issue.
*/ */
class Timer /*copyable*/ { class MONGO_CLIENT_API Timer /*copyable*/ {
public: public:
static const unsigned long long millisPerSecond = 1000; static const unsigned long long millisPerSecond = 1000;
static const unsigned long long microsPerSecond = 1000 * millisPerS econd; static const unsigned long long microsPerSecond = 1000 * millisPerS econd;
static const unsigned long long nanosPerSecond = 1000 * microsPerSe cond; static const unsigned long long nanosPerSecond = 1000 * microsPerSe cond;
Timer() { reset(); } Timer() { reset(); }
int seconds() const { return (int)(micros() / 1000000); } int seconds() const { return (int)(micros() / 1000000); }
int millis() const { return (int)(micros() / 1000); } int millis() const { return (int)(micros() / 1000); }
int minutes() const { return seconds() / 60; } int minutes() const { return seconds() / 60; }
 End of changes. 2 change blocks. 
1 lines changed or deleted 3 lines changed or added


 tokenizer.h   tokenizer.h 
skipping to change at line 63 skipping to change at line 63
Type type; Type type;
StringData data; StringData data;
unsigned offset; unsigned offset;
bool previousWhiteSpace; bool previousWhiteSpace;
}; };
class Tokenizer { class Tokenizer {
public: public:
Tokenizer( const FTSLanguage language, const StringData& str ); Tokenizer( const FTSLanguage& language, const StringData& str ) ;
bool more() const; bool more() const;
Token next(); Token next();
private: private:
Token::Type _type( char c ) const; Token::Type _type( char c ) const;
bool _skipWhitespace(); bool _skipWhitespace();
unsigned _pos; unsigned _pos;
bool _previousWhiteSpace; bool _previousWhiteSpace;
 End of changes. 1 change blocks. 
1 lines changed or deleted 1 lines changed or added


 tool.h   tool.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
// Tool.h // Tool.h
#pragma once #pragma once
#include <string> #include <string>
#if defined(_WIN32) #if defined(_WIN32)
#include <io.h> #include <io.h>
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 tool_options.h   tool_options.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
skipping to change at line 56 skipping to change at line 68
bool quiet; bool quiet;
bool canUseStdout; bool canUseStdout;
bool noconnection; bool noconnection;
std::vector<std::string> fields; std::vector<std::string> fields;
bool fieldsSpecified; bool fieldsSpecified;
std::string host; // --host std::string host; // --host
bool hostSet; bool hostSet;
std::string port; // --port int port; // --port
bool portSet; bool portSet;
std::string connectionString; // --host and --port after processing std::string connectionString; // --host and --port after processing
std::string dbpath; std::string dbpath;
bool useDirectClient; bool useDirectClient;
}; };
extern ToolGlobalParams toolGlobalParams; extern ToolGlobalParams toolGlobalParams;
struct BSONToolGlobalParams { struct BSONToolGlobalParams {
bool objcheck; bool objcheck;
 End of changes. 2 change blocks. 
1 lines changed or deleted 20 lines changed or added


 type_explain.h   type_explain.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/s/bson_serializable.h" #include "mongo/s/bson_serializable.h"
namespace mongo { namespace mongo {
/** /**
* Contains query debug information that describes the
* query plan. Generally this information depends only on
* the planning process that happens without running the
* query. The exception is the multi plan runner, in which
* case plan selection depends on actually running the query.
*
* Currently, just a summary string describing the plan
* used to run the query.
*/
struct PlanInfo {
PlanInfo() : planSummary("") { }
std::string planSummary;
};
/**
* This class represents the layout and content of a TypeExplain runCom mand, * This class represents the layout and content of a TypeExplain runCom mand,
* the response side. * the response side.
*/ */
class TypeExplain : public BSONSerializable { class TypeExplain : public BSONSerializable {
MONGO_DISALLOW_COPYING(TypeExplain); MONGO_DISALLOW_COPYING(TypeExplain);
public: public:
// //
// schema declarations // schema declarations
// //
skipping to change at line 133 skipping to change at line 160
void setScanAndOrder(bool scanAndOrder); void setScanAndOrder(bool scanAndOrder);
void unsetScanAndOrder(); void unsetScanAndOrder();
bool isScanAndOrderSet() const; bool isScanAndOrderSet() const;
bool getScanAndOrder() const; bool getScanAndOrder() const;
void setIndexOnly(bool indexOnly); void setIndexOnly(bool indexOnly);
void unsetIndexOnly(); void unsetIndexOnly();
bool isIndexOnlySet() const; bool isIndexOnlySet() const;
bool getIndexOnly() const; bool getIndexOnly() const;
void setIDHack(bool idhack);
void unsetIDHack();
bool isIDHackSet() const;
bool getIDHack() const;
void setNYields(long long nYields); void setNYields(long long nYields);
void unsetNYields(); void unsetNYields();
bool isNYieldsSet() const; bool isNYieldsSet() const;
long long getNYields() const; long long getNYields() const;
void setNChunkSkips(long long nChunkSkips); void setNChunkSkips(long long nChunkSkips);
void unsetNChunkSkips(); void unsetNChunkSkips();
bool isNChunkSkipsSet() const; bool isNChunkSkipsSet() const;
long long getNChunkSkips() const; long long getNChunkSkips() const;
skipping to change at line 171 skipping to change at line 203
void setOldPlan(TypeExplain* oldPlan); void setOldPlan(TypeExplain* oldPlan);
void unsetOldPlan(); void unsetOldPlan();
bool isOldPlanSet() const; bool isOldPlanSet() const;
const TypeExplain* getOldPlan() const; const TypeExplain* getOldPlan() const;
void setServer(const StringData& server); void setServer(const StringData& server);
void unsetServer(); void unsetServer();
bool isServerSet() const; bool isServerSet() const;
const std::string& getServer() const; const std::string& getServer() const;
// Opaque stats object
BSONObj stats;
private: private:
// Convention: (M)andatory, (O)ptional // Convention: (M)andatory, (O)ptional
// (O) explain for branches on a $or query // (O) explain for branches on a $or query
boost::scoped_ptr<std::vector<TypeExplain*> >_clauses; boost::scoped_ptr<std::vector<TypeExplain*> >_clauses;
// (O) type and name of the cursor used on the leaf stage // (O) type and name of the cursor used on the leaf stage
std::string _cursor; std::string _cursor;
bool _isCursorSet; bool _isCursorSet;
skipping to change at line 213 skipping to change at line 248
bool _isNScannedAllPlansSet; bool _isNScannedAllPlansSet;
// (O) whether this plan involved sorting // (O) whether this plan involved sorting
bool _scanAndOrder; bool _scanAndOrder;
bool _isScanAndOrderSet; bool _isScanAndOrderSet;
// (O) number of entries retrieved either from an index or collect ion across all plans // (O) number of entries retrieved either from an index or collect ion across all plans
bool _indexOnly; bool _indexOnly;
bool _isIndexOnlySet; bool _isIndexOnlySet;
// (O) whether the idhack was used to answer this query
bool _idHack;
bool _isIDHackSet;
// (O) number times this plan released and reacquired its lock // (O) number times this plan released and reacquired its lock
long long _nYields; long long _nYields;
bool _isNYieldsSet; bool _isNYieldsSet;
// (O) number times this plan skipped over migrated data // (O) number times this plan skipped over migrated data
long long _nChunkSkips; long long _nChunkSkips;
bool _isNChunkSkipsSet; bool _isNChunkSkipsSet;
// (O) elapsed time this plan took running, in milliseconds // (O) elapsed time this plan took running, in milliseconds
long long _millis; long long _millis;
 End of changes. 5 change blocks. 
0 lines changed or deleted 46 lines changed or added


 undef_macros.h   undef_macros.h 
skipping to change at line 43 skipping to change at line 43
#undef dassert #undef dassert
#pragma pop_macro("dassert") #pragma pop_macro("dassert")
#undef wassert #undef wassert
#pragma pop_macro("wassert") #pragma pop_macro("wassert")
#undef massert #undef massert
#pragma pop_macro("massert") #pragma pop_macro("massert")
#undef uassert #undef uassert
#pragma pop_macro("uassert") #pragma pop_macro("uassert")
#undef verify #undef verify
#pragma pop_macro("verify") #pragma pop_macro("verify")
#undef invariant
#pragma pop_macro("invariant")
#undef DESTRUCTOR_GUARD #undef DESTRUCTOR_GUARD
#pragma pop_macro("DESTRUCTOR_GUARD") #pragma pop_macro("DESTRUCTOR_GUARD")
// util/goodies.h // util/goodies.h
#undef PRINT #undef PRINT
#pragma pop_macro("PRINT") #pragma pop_macro("PRINT")
#undef PRINTFL #undef PRINTFL
#pragma pop_macro("PRINTFL") #pragma pop_macro("PRINTFL")
// util/debug_util.h // util/debug_util.h
 End of changes. 1 change blocks. 
0 lines changed or deleted 2 lines changed or added


 unittest-inl.h   unittest-inl.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3, * it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Lice nse * You should have received a copy of the GNU Affero General Public Lice nse
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certa
in
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Y
ou
* must comply with the GNU Affero General Public License in all respect
s
* for all of the code used other than as permitted herein. If you modif
y
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If
you
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
#pragma once #pragma once
namespace mongo { namespace mongo {
namespace unittest { namespace unittest {
template <typename T> template <typename T>
Test::RegistrationAgent<T>::RegistrationAgent(const std::string& su iteName, Test::RegistrationAgent<T>::RegistrationAgent(const std::string& su iteName,
const std::string& te stName) { const std::string& te stName) {
 End of changes. 1 change blocks. 
0 lines changed or deleted 19 lines changed or added


 unittest.h   unittest.h 
skipping to change at line 15 skipping to change at line 15
* it under the terms of the GNU Affero General Public License, version 3 , * it under the terms of the GNU Affero General Public License, version 3 ,
* as published by the Free Software Foundation. * as published by the Free Software Foundation.
* *
* This program is distributed in the hope that it will be useful, * This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details. * GNU Affero General Public License for more details.
* *
* You should have received a copy of the GNU Affero General Public Licen se * You should have received a copy of the GNU Affero General Public Licen se
* along with this program. If not, see <http://www.gnu.org/licenses/>. * along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link
the
* code of portions of this program with the OpenSSL library under certai
n
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. Yo
u
* must comply with the GNU Affero General Public License in all respects
* for all of the code used other than as permitted herein. If you modify
* file(s) with this exception, you may extend this exception to your
* version of the file(s), but you are not obligated to do so. If you do
not
* wish to do so, delete this exception statement from your version. If y
ou
* delete this exception statement from all source files in the program,
* then also delete it in the license file.
*/ */
/* /*
* A C++ unit testing framework. * A C++ unit testing framework.
* *
* For examples of basic usage, see mongo/unittest/unittest_test.cpp. * For examples of basic usage, see mongo/unittest/unittest_test.cpp.
*/ */
#pragma once #pragma once
 End of changes. 1 change blocks. 
0 lines changed or deleted 17 lines changed or added


 unordered_fast_key_table.h   unordered_fast_key_table.h 
skipping to change at line 122 skipping to change at line 122
V& operator[]( const K_L& key ) { return get( key ); } V& operator[]( const K_L& key ) { return get( key ); }
V& get( const K_L& key ); V& get( const K_L& key );
/** /**
* @return number of elements removed * @return number of elements removed
*/ */
size_t erase( const K_L& key ); size_t erase( const K_L& key );
class const_iterator { class const_iterator {
friend class UnorderedFastKeyTable;
public: public:
const_iterator() { _position = -1; } const_iterator() { _position = -1; }
const_iterator( const Area* area ) { const_iterator( const Area* area ) {
_area = area; _area = area;
_position = 0; _position = 0;
_max = _area->_capacity - 1; _max = _area->_capacity - 1;
_skip(); _skip();
} }
const_iterator( const Area* area, int pos ) { const_iterator( const Area* area, int pos ) {
_area = area; _area = area;
skipping to change at line 175 skipping to change at line 177
} }
++_position; ++_position;
} }
} }
const Area* _area; const Area* _area;
int _position; int _position;
int _max; // inclusive int _max; // inclusive
}; };
void erase( const_iterator it );
/** /**
* @return either a one-shot iterator with the key, or end() * @return either a one-shot iterator with the key, or end()
*/ */
const_iterator find( const K_L& key ) const; const_iterator find( const K_L& key ) const;
const_iterator begin() const; const_iterator begin() const;
const_iterator end() const; const_iterator end() const;
private: private:
 End of changes. 2 change blocks. 
0 lines changed or deleted 4 lines changed or added


 unordered_fast_key_table_internal.h   unordered_fast_key_table_internal.h 
skipping to change at line 173 skipping to change at line 173
if ( pos < 0 ) if ( pos < 0 )
return 0; return 0;
_area._entries[pos].used = false; _area._entries[pos].used = false;
_area._entries[pos].data.second = V(); _area._entries[pos].data.second = V();
return 1; return 1;
} }
template< typename K_L, typename K_S, typename V, typename H, typename E, typename C, typename C_LS > template< typename K_L, typename K_S, typename V, typename H, typename E, typename C, typename C_LS >
void UnorderedFastKeyTable<K_L, K_S, V, H, E, C, C_LS>::erase( const_it
erator it ) {
dassert(it._position >= 0);
dassert(it._area == &_area);
_area._entries[it._position].used = false;
_area._entries[it._position].data.second = V();
}
template< typename K_L, typename K_S, typename V, typename H, typename
E, typename C, typename C_LS >
inline void UnorderedFastKeyTable<K_L, K_S, V, H, E, C, C_LS>::_grow() { inline void UnorderedFastKeyTable<K_L, K_S, V, H, E, C, C_LS>::_grow() {
unsigned capacity = _area._capacity; unsigned capacity = _area._capacity;
for ( int numGrowTries = 0; numGrowTries < 5; numGrowTries++ ) { for ( int numGrowTries = 0; numGrowTries < 5; numGrowTries++ ) {
capacity *= 2; capacity *= 2;
Area newArea( capacity, _maxProbeRatio ); Area newArea( capacity, _maxProbeRatio );
bool success = _area.transfer( &newArea, *this ); bool success = _area.transfer( &newArea, *this );
if ( !success ) { if ( !success ) {
continue; continue;
} }
_area.swap( &newArea ); _area.swap( &newArea );
 End of changes. 1 change blocks. 
0 lines changed or deleted 11 lines changed or added


 update.h   update.h 
skipping to change at line 37 skipping to change at line 37
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/curop.h" #include "mongo/db/curop.h"
#include "mongo/db/ops/update_request.h" #include "mongo/db/ops/update_request.h"
#include "mongo/db/ops/update_result.h" #include "mongo/db/ops/update_result.h"
#include "mongo/db/query_plan_selection_policy.h"
namespace mongo { namespace mongo {
class CanonicalQuery;
class UpdateDriver; class UpdateDriver;
UpdateResult update(const UpdateRequest& request, OpDebug* opDebug); UpdateResult update(const UpdateRequest& request, OpDebug* opDebug);
UpdateResult update(const UpdateRequest& request, OpDebug* opDebug, Upd UpdateResult update(const UpdateRequest& request,
ateDriver* driver); OpDebug* opDebug,
UpdateDriver* driver,
CanonicalQuery* cq);
/** /**
* takes the from document and returns a new document * takes the from document and returns a new document
* after apply all the operators * after apply all the operators
* e.g. * e.g.
* applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) ); * applyUpdateOperators( BSON( "x" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
* returns: { x : 2 } * returns: { x : 2 }
*/ */
BSONObj applyUpdateOperators( const BSONObj& from, const BSONObj& opera tors ); BSONObj applyUpdateOperators( const BSONObj& from, const BSONObj& opera tors );
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
3 lines changed or deleted 5 lines changed or added


 update_driver.h   update_driver.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h"
#include "mongo/base/owned_pointer_vector.h" #include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h"
#include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/document.h"
#include "mongo/db/field_ref_set.h" #include "mongo/db/field_ref_set.h"
#include "mongo/db/index_set.h" #include "mongo/db/index_set.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/ops/modifier_interface.h" #include "mongo/db/ops/modifier_interface.h"
#include "mongo/db/ops/modifier_table.h" #include "mongo/db/ops/modifier_table.h"
#include "mongo/db/query/canonical_query.h"
namespace mongo { namespace mongo {
class UpdateDriver { class UpdateDriver {
public: public:
struct Options; struct Options;
UpdateDriver(const Options& opts); UpdateDriver(const Options& opts);
~UpdateDriver(); ~UpdateDriver();
skipping to change at line 70 skipping to change at line 71
* Fills in document with any fields in the query which are valid. * Fills in document with any fields in the query which are valid.
* *
* Valid fields include equality matches like "a":1, or "a.b":false * Valid fields include equality matches like "a":1, or "a.b":false
* *
* Each valid field will be expanded (from dot notation) and confli cts will be * Each valid field will be expanded (from dot notation) and confli cts will be
* checked for all fields added to the underlying document. * checked for all fields added to the underlying document.
* *
* Returns Status::OK() if the document can be used. If there are a ny error or * Returns Status::OK() if the document can be used. If there are a ny error or
* conflicts along the way then those errors will be returned. * conflicts along the way then those errors will be returned.
*/ */
Status populateDocumentWithQueryFields(const BSONObj& query, mutabl Status populateDocumentWithQueryFields(const BSONObj& query,
ebson::Document& doc) const; mutablebson::Document& doc)
const;
Status populateDocumentWithQueryFields(const CanonicalQuery* query,
mutablebson::Document& doc)
const;
/** /**
* return a BSONObj with the _id field of the doc passed in, or the doc itself. * return a BSONObj with the _id field of the doc passed in, or the doc itself.
* If no _id and multi, error. * If no _id and multi, error.
*/ */
BSONObj makeOplogEntryQuery(const BSONObj& doc, bool multi) const; BSONObj makeOplogEntryQuery(const BSONObj& doc, bool multi) const;
/** /**
* Returns OK and executes '_mods' over 'doc', generating 'newObj'. If any mod is * Returns OK and executes '_mods' over 'doc', generating 'newObj'. If any mod is
* positional, use 'matchedField' (index of the array item matched) . If doc allows * positional, use 'matchedField' (index of the array item matched) . If doc allows
skipping to change at line 105 skipping to change at line 110
// //
// Accessors // Accessors
// //
size_t numMods() const; size_t numMods() const;
bool isDocReplacement() const; bool isDocReplacement() const;
bool modsAffectIndices() const; bool modsAffectIndices() const;
void refreshIndexKeys(const IndexPathSet& indexedFields); void refreshIndexKeys(const IndexPathSet* indexedFields);
bool multi() const; bool multi() const;
void setMulti(bool multi); void setMulti(bool multi);
bool upsert() const; bool upsert() const;
void setUpsert(bool upsert); void setUpsert(bool upsert);
bool logOp() const; bool logOp() const;
void setLogOp(bool logOp); void setLogOp(bool logOp);
ModifierInterface::Options modOptions() const; ModifierInterface::Options modOptions() const;
void setModOptions(ModifierInterface::Options modOpts); void setModOptions(ModifierInterface::Options modOpts);
ModifierInterface::ExecInfo::UpdateContext context() const; ModifierInterface::ExecInfo::UpdateContext context() const;
void setContext(ModifierInterface::ExecInfo::UpdateContext context) ; void setContext(ModifierInterface::ExecInfo::UpdateContext context) ;
mutablebson::Document& getDocument() {
return _objDoc;
}
const mutablebson::Document& getDocument() const {
return _objDoc;
}
bool needMatchDetails() const {
return _positional;
}
private: private:
/** Resets the state of the class associated with mods (not the err or state) */ /** Resets the state of the class associated with mods (not the err or state) */
void clear(); void clear();
/** Create the modifier and add it to the back of the modifiers vec tor */ /** Create the modifier and add it to the back of the modifiers vec tor */
inline Status addAndParse(const modifiertable::ModifierType type, inline Status addAndParse(const modifiertable::ModifierType type,
const BSONElement& elem); const BSONElement& elem);
// //
skipping to change at line 144 skipping to change at line 161
// Is there a list of $mod's on '_mods' or is it just full object r eplacement? // Is there a list of $mod's on '_mods' or is it just full object r eplacement?
bool _replacementMode; bool _replacementMode;
// Collection of update mod instances. Owned here. // Collection of update mod instances. Owned here.
vector<ModifierInterface*> _mods; vector<ModifierInterface*> _mods;
// What are the list of fields in the collection over which the upd ate is going to be // What are the list of fields in the collection over which the upd ate is going to be
// applied that participate in indices? // applied that participate in indices?
// //
// TODO: Do we actually need to keep a copy of this? // NOTE: Owned by the collection's info cache!.
IndexPathSet _indexedFields; const IndexPathSet* _indexedFields;
// //
// mutable properties after parsing // mutable properties after parsing
// //
// May this driver apply updates to several documents? // May this driver apply updates to several documents?
bool _multi; bool _multi;
// May this driver construct a new object if an update for a non-ex isting one is sent? // May this driver construct a new object if an update for a non-ex isting one is sent?
bool _upsert; bool _upsert;
skipping to change at line 167 skipping to change at line 184
// Should this driver generate an oplog record when it applies the update? // Should this driver generate an oplog record when it applies the update?
bool _logOp; bool _logOp;
// The options to initiate the mods with // The options to initiate the mods with
ModifierInterface::Options _modOptions; ModifierInterface::Options _modOptions;
// Are any of the fields mentioned in the mods participating in any index? Is set anew // Are any of the fields mentioned in the mods participating in any index? Is set anew
// at each call to update. // at each call to update.
bool _affectIndices; bool _affectIndices;
// Do any of the mods require positional match details when calling
'prepare'?
bool _positional;
// Is this update going to be an upsert? // Is this update going to be an upsert?
ModifierInterface::ExecInfo::UpdateContext _context; ModifierInterface::ExecInfo::UpdateContext _context;
// The document used to represent or store the object being updated
.
mutablebson::Document _objDoc;
// The document used to build the oplog entry for the update.
mutablebson::Document _logDoc; mutablebson::Document _logDoc;
}; };
struct UpdateDriver::Options { struct UpdateDriver::Options {
bool multi; bool multi;
bool upsert; bool upsert;
bool logOp; bool logOp;
ModifierInterface::Options modOptions; ModifierInterface::Options modOptions;
Options() : multi(false), upsert(false), logOp(false), modOptions() {} Options() : multi(false), upsert(false), logOp(false), modOptions() {}
 End of changes. 9 change blocks. 
6 lines changed or deleted 33 lines changed or added


 update_lifecycle.h   update_lifecycle.h 
skipping to change at line 32 skipping to change at line 32
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/field_ref.h" #include "mongo/db/field_ref.h"
#include "mongo/db/structure/collection.h" #include "mongo/db/catalog/collection.h"
#include "mongo/s/chunk_version.h" #include "mongo/s/chunk_version.h"
namespace mongo { namespace mongo {
class UpdateLifecycle { class UpdateLifecycle {
public: public:
virtual ~UpdateLifecycle() {} virtual ~UpdateLifecycle() {}
/** /**
* Update the cached collection pointer that this lifecycle object
uses.
*/
virtual void setCollection(Collection* collection) = 0;
/**
* Can the update continue? * Can the update continue?
* *
* The (only) implementation will check the following: * The (only) implementation will check the following:
* 1.) Collection still exists * 1.) Collection still exists
* 2.) Shard version has not changed (indicating that the query/up date is not valid * 2.) Shard version has not changed (indicating that the query/up date is not valid
*/ */
virtual const bool canContinue() const = 0; virtual bool canContinue() const = 0;
/** /**
* Set the out parameter if there is a collection and it has indexe s * Return a pointer to any indexes if there is a collection.
*/ */
virtual const void getIndexKeys(IndexPathSet* returnedIndexPathSet) const = 0; virtual const IndexPathSet* getIndexKeys() const = 0;
/** /**
* Returns the shard keys as immutable fields * Returns the shard keys as immutable fields
* Immutable fields in this case mean that they are required to exi st, cannot change values * Immutable fields in this case mean that they are required to exi st, cannot change values
* and must not be multi-valued (in an array, or an array) * and must not be multi-valued (in an array, or an array)
*/ */
virtual const std::vector<FieldRef*>* getImmutableFields() const = 0; virtual const std::vector<FieldRef*>* getImmutableFields() const = 0;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
4 lines changed or deleted 10 lines changed or added


 update_lifecycle_impl.h   update_lifecycle_impl.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/db/ops/update_lifecycle.h" #include "mongo/db/ops/update_lifecycle.h"
#include "mongo/db/structure/collection.h" #include "mongo/db/catalog/collection.h"
namespace mongo { namespace mongo {
class UpdateLifecycleImpl : public UpdateLifecycle { class UpdateLifecycleImpl : public UpdateLifecycle {
MONGO_DISALLOW_COPYING(UpdateLifecycleImpl); MONGO_DISALLOW_COPYING(UpdateLifecycleImpl);
public: public:
/** /**
* ignoreVersion is for shard version checking and * ignoreVersion is for shard version checking and
* means that version checks will not be done * means that version checks will not be done
* *
* nsString represents the namespace for the * nsString represents the namespace for the
*/ */
UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsSt ring); UpdateLifecycleImpl(bool ignoreVersion, const NamespaceString& nsSt ring);
virtual const bool canContinue() const; virtual void setCollection(Collection* collection);
virtual const void getIndexKeys(IndexPathSet* returnedIndexPathSet)
const; virtual bool canContinue() const;
virtual const IndexPathSet* getIndexKeys() const;
virtual const std::vector<FieldRef*>* getImmutableFields() const; virtual const std::vector<FieldRef*>* getImmutableFields() const;
private: private:
Collection* _collection;
const NamespaceString& _nsString; const NamespaceString& _nsString;
ChunkVersion _shardVersion; ChunkVersion _shardVersion;
}; };
} /* namespace mongo */ } /* namespace mongo */
 End of changes. 3 change blocks. 
4 lines changed or deleted 8 lines changed or added


 update_request.h   update_request.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/curop.h" #include "mongo/db/curop.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/db/query_plan_selection_policy.h"
#include "mongo/util/mongoutils/str.h" #include "mongo/util/mongoutils/str.h"
namespace mongo { namespace mongo {
namespace str = mongoutils::str; namespace str = mongoutils::str;
class FieldRef; class FieldRef;
class UpdateLifecycle; class UpdateLifecycle;
class UpdateRequest { class UpdateRequest {
public: public:
inline UpdateRequest( inline UpdateRequest(const NamespaceString& nsString)
const NamespaceString& nsString,
const QueryPlanSelectionPolicy& policy = QueryPlanSelectionPoli
cy::any() )
: _nsString(nsString) : _nsString(nsString)
, _queryPlanPolicy(policy)
, _god(false) , _god(false)
, _upsert(false) , _upsert(false)
, _multi(false) , _multi(false)
, _callLogOp(false) , _callLogOp(false)
, _fromMigration(false) , _fromMigration(false)
, _fromReplication(false) , _fromReplication(false)
, _lifecycle(NULL) {} , _lifecycle(NULL) {}
const NamespaceString& getNamespaceString() const { const NamespaceString& getNamespaceString() const {
return _nsString; return _nsString;
} }
const QueryPlanSelectionPolicy& getQueryPlanSelectionPolicy() const
{
return _queryPlanPolicy;
}
inline void setQuery(const BSONObj& query) { inline void setQuery(const BSONObj& query) {
_query = query; _query = query;
} }
inline const BSONObj& getQuery() const { inline const BSONObj& getQuery() const {
return _query; return _query;
} }
inline void setUpdates(const BSONObj& updates) { inline void setUpdates(const BSONObj& updates) {
_updates = updates; _updates = updates;
skipping to change at line 134 skipping to change at line 126
} }
inline void setFromReplication(bool value = true) { inline void setFromReplication(bool value = true) {
_fromReplication = value; _fromReplication = value;
} }
bool isFromReplication() const { bool isFromReplication() const {
return _fromReplication; return _fromReplication;
} }
inline void setLifecycle(const UpdateLifecycle* value) { inline void setLifecycle(UpdateLifecycle* value) {
_lifecycle = value; _lifecycle = value;
} }
inline const UpdateLifecycle* getLifecycle() const { inline UpdateLifecycle* getLifecycle() const {
return _lifecycle; return _lifecycle;
} }
const std::string toString() const { const std::string toString() const {
return str::stream() return str::stream()
<< " query: " << _query << " query: " << _query
<< " updated: " << _updates << " updated: " << _updates
<< " god: " << _god << " god: " << _god
<< " upsert: " << _upsert << " upsert: " << _upsert
<< " multi: " << _multi << " multi: " << _multi
<< " callLogOp: " << _callLogOp << " callLogOp: " << _callLogOp
<< " fromMigration: " << _fromMigration << " fromMigration: " << _fromMigration
<< " fromReplications: " << _fromReplication; << " fromReplications: " << _fromReplication;
} }
private: private:
const NamespaceString& _nsString; const NamespaceString& _nsString;
const QueryPlanSelectionPolicy& _queryPlanPolicy;
// Contains the query that selects documents to update. // Contains the query that selects documents to update.
BSONObj _query; BSONObj _query;
// Contains the modifiers to apply to matched objects, or a replace ment document. // Contains the modifiers to apply to matched objects, or a replace ment document.
BSONObj _updates; BSONObj _updates;
// Flags controlling the update. // Flags controlling the update.
// God bypasses _id checking and index generation. It is only used on behalf of system // God bypasses _id checking and index generation. It is only used on behalf of system
skipping to change at line 186 skipping to change at line 177
// True if the effects of the update should be written to the oplog . // True if the effects of the update should be written to the oplog .
bool _callLogOp; bool _callLogOp;
// True if this update is on behalf of a chunk migration. // True if this update is on behalf of a chunk migration.
bool _fromMigration; bool _fromMigration;
// True if this update is being applied during the application for the oplog. // True if this update is being applied during the application for the oplog.
bool _fromReplication; bool _fromReplication;
// The lifecycle data, and events used during the update request. // The lifecycle data, and events used during the update request.
const UpdateLifecycle* _lifecycle; UpdateLifecycle* _lifecycle;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 8 change blocks. 
15 lines changed or deleted 4 lines changed or added


 update_result.h   update_result.h 
skipping to change at line 34 skipping to change at line 34
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/db/curop.h" #include "mongo/db/curop.h"
#include "mongo/db/namespace_string.h" #include "mongo/db/namespace_string.h"
#include "mongo/db/query_plan_selection_policy.h"
#include "mongo/util/mongoutils/str.h" #include "mongo/util/mongoutils/str.h"
namespace mongo { namespace mongo {
namespace str = mongoutils::str; namespace str = mongoutils::str;
struct UpdateResult { struct UpdateResult {
UpdateResult( bool existing_, UpdateResult( bool existing_,
bool modifiers_, bool modifiers_,
unsigned long long numDocsModified_,
unsigned long long numMatched_, unsigned long long numMatched_,
const BSONObj& upsertedObject_ ) const BSONObj& upsertedObject_ )
: existing(existing_) : existing(existing_)
, modifiers(modifiers_) , modifiers(modifiers_)
, numDocsModified(numDocsModified_)
, numMatched(numMatched_) { , numMatched(numMatched_) {
BSONElement id = upsertedObject_["_id"]; BSONElement id = upsertedObject_["_id"];
if ( ! existing && numMatched == 1 && !id.eoo() ) { if ( ! existing && numMatched == 1 && !id.eoo() ) {
upserted = id.wrap(kUpsertedFieldName); upserted = id.wrap(kUpsertedFieldName);
} }
LOG(4) << "UpdateResult -- " << toString();
} }
// if existing objects were modified // if existing objects were modified
const bool existing; const bool existing;
// was this a $ mod // was this a $ mod
const bool modifiers; const bool modifiers;
// how many objects touched // how many docs updated
const long long numDocsModified;
// how many docs seen by update
const long long numMatched; const long long numMatched;
// if something was upserted, the new _id of the object // if something was upserted, the new _id of the object
BSONObj upserted; BSONObj upserted;
const std::string toString() const { const std::string toString() const {
return str::stream() return str::stream()
<< " upserted: " << upserted << " upserted: " << upserted
<< " modifiers: " << modifiers << " modifiers: " << modifiers
<< " existing: " << existing << " existing: " << existing
<< " numDocsModified: " << numDocsModified
<< " numMatched: " << numMatched; << " numMatched: " << numMatched;
} }
}; };
} // namespace mongo } // namespace mongo
 End of changes. 6 change blocks. 
2 lines changed or deleted 9 lines changed or added


 user_management_commands_parser.h   user_management_commands_parser.h 
skipping to change at line 29 skipping to change at line 29
* linked combinations including the program with the OpenSSL library. Yo u * linked combinations including the program with the OpenSSL library. Yo u
* must comply with the GNU Affero General Public License in all respects for * must comply with the GNU Affero General Public License in all respects for
* all of the code used other than as permitted herein. If you modify fil e(s) * all of the code used other than as permitted herein. If you modify fil e(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also de lete * exception statement from all source files in the program, then also de lete
* it in the license file. * it in the license file.
*/ */
#pragma once
#include <string> #include <string>
#include <vector> #include <vector>
#include "mongo/base/status.h" #include "mongo/base/status.h"
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/base/disallow_copying.h" #include "mongo/base/disallow_copying.h"
#include "mongo/db/auth/privilege.h" #include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/role_name.h"
#include "mongo/db/auth/user.h" #include "mongo/db/auth/user.h"
#include "mongo/db/auth/user_name.h" #include "mongo/db/auth/user_name.h"
skipping to change at line 183 skipping to change at line 185
/** /**
* Takes a command object describing an invocation of the "dropAllRoles FromDatabase" command and * Takes a command object describing an invocation of the "dropAllRoles FromDatabase" command and
* parses out the write concern. * parses out the write concern.
*/ */
Status parseDropAllRolesFromDatabaseCommand(const BSONObj& cmdObj, Status parseDropAllRolesFromDatabaseCommand(const BSONObj& cmdObj,
const std::string& dbname, const std::string& dbname,
BSONObj* parsedWriteConcern ); BSONObj* parsedWriteConcern );
/** /**
* Takes a command object describing an invocation of the "authSchemaUp * Takes a command object describing an invocation of the "authSchemaUp
gradeStep" command and grade" command and
* parses out the write concern. * parses out the write concern, maximum steps to take and whether or n
ot shard servers should
* also be upgraded, in the sharded deployment case.
*/ */
Status parseAuthSchemaUpgradeStepCommand(const BSONObj& cmdObj, Status parseAuthSchemaUpgradeStepCommand(const BSONObj& cmdObj,
const std::string& dbname, const std::string& dbname,
int* maxSteps,
bool* shouldUpgradeShards,
BSONObj* parsedWriteConcern); BSONObj* parsedWriteConcern);
/** /**
* Parses the privileges described in "privileges" into a vector of Pri vilege objects. * Parses the privileges described in "privileges" into a vector of Pri vilege objects.
* Returns Status::OK() upon successfully parsing all the elements of " privileges". * Returns Status::OK() upon successfully parsing all the elements of " privileges".
*/ */
Status parseAndValidatePrivilegeArray(const BSONArray& privileges, Status parseAndValidatePrivilegeArray(const BSONArray& privileges,
PrivilegeVector* parsedPrivileges ); PrivilegeVector* parsedPrivileges );
/** /**
* Takes a BSONArray of name,source pair documents, parses that array a nd returns (via the * Takes a BSONArray of name,db pair documents, parses that array and r eturns (via the
* output param parsedRoleNames) a list of the role names in the input array. * output param parsedRoleNames) a list of the role names in the input array.
* Performs syntactic validation of "rolesArray", only. * Performs syntactic validation of "rolesArray", only.
*/ */
Status parseRoleNamesFromBSONArray(const BSONArray& rolesArray, Status parseRoleNamesFromBSONArray(const BSONArray& rolesArray,
const StringData& dbname, const StringData& dbname,
std::vector<RoleName>* parsedRoleNam es); std::vector<RoleName>* parsedRoleNam es);
/**
* Takes a BSONArray of name,db pair documents, parses that array and r
eturns (via the
* output param parsedUserNames) a list of the usernames in the input a
rray.
* Performs syntactic validation of "usersArray", only.
*/
Status parseUserNamesFromBSONArray(const BSONArray& usersArray,
const StringData& dbname,
std::vector<UserName>* parsedUserNam
es);
} // namespace auth } // namespace auth
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
4 lines changed or deleted 22 lines changed or added


 user_name.h   user_name.h 
skipping to change at line 21 skipping to change at line 21
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli ed.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#pragma once #pragma once
#include <iosfwd> #include <iosfwd>
#include <string> #include <string>
#include <boost/scoped_ptr.hpp>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
namespace mongo { namespace mongo {
/** /**
* Representation of a name of a principal (authenticatable user) in a MongoDB system. * Representation of a name of a principal (authenticatable user) in a MongoDB system.
* *
* Consists of a "user name" part, and a "database name" part. * Consists of a "user name" part, and a "database name" part.
*/ */
class UserName { class UserName {
skipping to change at line 74 skipping to change at line 77
static inline bool operator!=(const UserName& lhs, const UserName& rhs) { static inline bool operator!=(const UserName& lhs, const UserName& rhs) {
return lhs.getFullName() != rhs.getFullName(); return lhs.getFullName() != rhs.getFullName();
} }
static inline bool operator<(const UserName& lhs, const UserName& rhs) { static inline bool operator<(const UserName& lhs, const UserName& rhs) {
return lhs.getFullName() < rhs.getFullName(); return lhs.getFullName() < rhs.getFullName();
} }
std::ostream& operator<<(std::ostream& os, const UserName& name); std::ostream& operator<<(std::ostream& os, const UserName& name);
/**
* Iterator over an unspecified container of UserName objects.
*/
class UserNameIterator {
public:
class Impl {
MONGO_DISALLOW_COPYING(Impl);
public:
Impl() {};
virtual ~Impl() {};
static Impl* clone(Impl* orig) { return orig ? orig->doClone():
NULL; }
virtual bool more() const = 0;
virtual const UserName& get() const = 0;
virtual const UserName& next() = 0;
private:
virtual Impl* doClone() const = 0;
};
UserNameIterator() : _impl(NULL) {}
UserNameIterator(const UserNameIterator& other) : _impl(Impl::clone
(other._impl.get())) {}
explicit UserNameIterator(Impl* impl) : _impl(impl) {}
UserNameIterator& operator=(const UserNameIterator& other) {
_impl.reset(Impl::clone(other._impl.get()));
return *this;
}
bool more() const { return _impl.get() && _impl->more(); }
const UserName& get() const { return _impl->get(); }
const UserName& next() { return _impl->next(); }
const UserName& operator*() const { return get(); }
const UserName* operator->() const { return &get(); }
private:
boost::scoped_ptr<Impl> _impl;
};
template <typename ContainerIterator>
class UserNameContainerIteratorImpl : public UserNameIterator::Impl {
MONGO_DISALLOW_COPYING(UserNameContainerIteratorImpl);
public:
UserNameContainerIteratorImpl(const ContainerIterator& begin,
const ContainerIterator& end) :
_curr(begin), _end(end) {}
virtual ~UserNameContainerIteratorImpl() {}
virtual bool more() const { return _curr != _end; }
virtual const UserName& next() { return *(_curr++); }
virtual const UserName& get() const { return *_curr; }
virtual UserNameIterator::Impl* doClone() const {
return new UserNameContainerIteratorImpl(_curr, _end);
}
private:
ContainerIterator _curr;
ContainerIterator _end;
};
template <typename ContainerIterator>
UserNameIterator makeUserNameIterator(const ContainerIterator& begin,
const ContainerIterator& end) {
return UserNameIterator( new UserNameContainerIteratorImpl<Containe
rIterator>(begin, end));
}
template <typename Container>
UserNameIterator makeUserNameIteratorForContainer(const Container& cont
ainer) {
return makeUserNameIterator(container.begin(), container.end());
}
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
0 lines changed or deleted 79 lines changed or added


 user_set.h   user_set.h 
skipping to change at line 39 skipping to change at line 39
/** /**
* A collection of authenticated users. * A collection of authenticated users.
* This class does not do any locking/synchronization, the consumer wil l be responsible for * This class does not do any locking/synchronization, the consumer wil l be responsible for
* synchronizing access. * synchronizing access.
*/ */
class UserSet { class UserSet {
MONGO_DISALLOW_COPYING(UserSet); MONGO_DISALLOW_COPYING(UserSet);
public: public:
typedef std::vector<User*>::const_iterator iterator; typedef std::vector<User*>::const_iterator iterator;
/**
* Forward iterator over the names of the users stored in a UserSet
.
*
* Instances are valid until the underlying vector<User*> is modifi
ed.
*
* more() must be the first method called after construction, and m
ust be checked
* after each call to next() before calling any other methods.
*/
class NameIterator {
public:
explicit NameIterator(iterator begin, iterator end) : _curr(beg
in), _end(end) {}
NameIterator() {}
bool more() { return _curr != _end; }
const UserName& next() {
const UserName& ret = get();
++_curr;
return ret;
}
const UserName& get() const { return (*_curr)->getName(); }
const UserName& operator*() const { return get(); }
const UserName* operator->() const { return &get(); }
private:
std::vector<User*>::const_iterator _curr;
std::vector<User*>::const_iterator _end;
};
UserSet(); UserSet();
~UserSet(); ~UserSet();
/** /**
* Adds a User to the UserSet. * Adds a User to the UserSet.
* *
* The UserSet does not take ownership of the User. * The UserSet does not take ownership of the User.
* *
* As there can only be one user per database in the UserSet, if a User already exists for * As there can only be one user per database in the UserSet, if a User already exists for
* the new User's database, the old user will be removed from the s et and returned. It is * the new User's database, the old user will be removed from the s et and returned. It is
skipping to change at line 123 skipping to change at line 92
// Gets the user whose authentication credentials came from dbname, or NULL if none // Gets the user whose authentication credentials came from dbname, or NULL if none
// exist. There should be at most one such user. // exist. There should be at most one such user.
User* lookupByDBName(const StringData& dbname) const; User* lookupByDBName(const StringData& dbname) const;
// Returns how many users are in the set. // Returns how many users are in the set.
size_t size() const { return _users.size(); }; size_t size() const { return _users.size(); };
// Gets an iterator over the names of the users stored in the set. The iterator is // Gets an iterator over the names of the users stored in the set. The iterator is
// valid until the next non-const method is called on the UserSet. // valid until the next non-const method is called on the UserSet.
NameIterator getNames() const { return NameIterator(begin(), end()) ; } UserNameIterator getNames() const;
iterator begin() const { return _users.begin(); } iterator begin() const { return _users.begin(); }
iterator end() const { return _usersEnd; } iterator end() const { return _usersEnd; }
private: private:
typedef std::vector<User*>::iterator mutable_iterator; typedef std::vector<User*>::iterator mutable_iterator;
mutable_iterator mbegin() { return _users.begin(); } mutable_iterator mbegin() { return _users.begin(); }
mutable_iterator mend() { return _usersEnd; } mutable_iterator mend() { return _usersEnd; }
 End of changes. 2 change blocks. 
36 lines changed or deleted 1 lines changed or added


 version.h   version.h 
skipping to change at line 34 skipping to change at line 34
* wish to do so, delete this exception statement from your version. If y ou * wish to do so, delete this exception statement from your version. If y ou
* delete this exception statement from all source files in the program, * delete this exception statement from all source files in the program,
* then also delete it in the license file. * then also delete it in the license file.
*/ */
#ifndef UTIL_VERSION_HEADER #ifndef UTIL_VERSION_HEADER
#define UTIL_VERSION_HEADER #define UTIL_VERSION_HEADER
#include <string> #include <string>
#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
namespace mongo { namespace mongo {
struct BSONArray; struct BSONArray;
// mongo version // mongo version
extern const char versionString[]; extern const char versionString[];
extern const BSONArray versionArray; extern const BSONArray versionArray;
std::string mongodVersion(); std::string mongodVersion();
// Convert a version string into a numeric array // Convert a version string into a numeric array
BSONArray toVersionArray(const char* version); BSONArray toVersionArray(const char* version);
// Checks whether another version is the same major version as us // Checks whether another version is the same major version as us
bool isSameMajorVersion(const char* version); bool isSameMajorVersion(const char* version);
void appendBuildInfo(BSONObjBuilder& result);
const char * gitVersion(); const char * gitVersion();
const char * compiledJSEngine(); const char * compiledJSEngine();
const char * allocator(); const char * allocator();
const char * loaderFlags(); const char * loaderFlags();
const char * compilerFlags(); const char * compilerFlags();
void printGitVersion();
const std::string openSSLVersion(const std::string &prefix = "", const
std::string &suffix = "");
void printOpenSSLVersion();
std::string sysInfo(); std::string sysInfo();
void printSysInfo();
void printTargetMinOS();
void printAllocator();
void show_warnings();
} // namespace mongo } // namespace mongo
#endif // UTIL_VERSION_HEADER #endif // UTIL_VERSION_HEADER
 End of changes. 4 change blocks. 
16 lines changed or deleted 1 lines changed or added


 working_set.h   working_set.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp>
#include <vector> #include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/db/diskloc.h" #include "mongo/db/diskloc.h"
#include "mongo/db/jsobj.h" #include "mongo/db/jsobj.h"
#include "mongo/platform/unordered_map.h" #include "mongo/platform/unordered_map.h"
namespace mongo { namespace mongo {
struct WorkingSetMember; class WorkingSetMember;
typedef long WorkingSetID; typedef size_t WorkingSetID;
/** /**
* All data in use by a query. Data is passed through the stage tree b y referencing the ID of * All data in use by a query. Data is passed through the stage tree b y referencing the ID of
* an element of the working set. Stages can add elements to the worki ng set, delete elements * an element of the working set. Stages can add elements to the worki ng set, delete elements
* from the working set, or mutate elements in the working set. * from the working set, or mutate elements in the working set.
*
* Concurrency Notes:
* flagForReview() can only be called with a write lock covering the co
llection this WorkingSet
* is for. All other methods should only be called by the thread owning
this WorkingSet while
* holding the read lock covering the collection.
*/ */
class WorkingSet { class WorkingSet {
MONGO_DISALLOW_COPYING(WorkingSet);
public: public:
static const WorkingSetID INVALID_ID; static const WorkingSetID INVALID_ID = WorkingSetID(-1);
WorkingSet(); WorkingSet();
~WorkingSet(); ~WorkingSet();
/** /**
* Allocate a new query result and return the ID used to get and fr ee it. * Allocate a new query result and return the ID used to get and fr ee it.
*/ */
WorkingSetID allocate(); WorkingSetID allocate();
/** /**
* Get the i-th mutable query result. * Get the i-th mutable query result. The pointer will be valid for
*/ this id until freed.
WorkingSetMember* get(const WorkingSetID& i); * Do not delete the returned pointer as the WorkingSet retains own
ership. Call free() to
* release it.
*/
WorkingSetMember* get(const WorkingSetID& i) {
dassert(i < _data.size()); // ID has been allocated.
dassert(_data[i].nextFreeOrSelf == i); // ID currently in use.
return _data[i].member;
}
/** /**
* Unallocate the i-th query result and release its resouces. * Deallocate the i-th query result and release its resources.
*/ */
void free(const WorkingSetID& i); void free(const WorkingSetID& i);
/** /**
* The DiskLoc in WSM 'i' was invalidated while being processed. A ny predicates over the * The DiskLoc in WSM 'i' was invalidated while being processed. A ny predicates over the
* WSM could not be fully evaluated, so the WSM may or may not sati sfy them. As such, if we * WSM could not be fully evaluated, so the WSM may or may not sati sfy them. As such, if we
* wish to output the WSM, we must do some clean-up work later. Ad ds the WSM with id 'i' to * wish to output the WSM, we must do some clean-up work later. Ad ds the WSM with id 'i' to
* the list of flagged WSIDs. * the list of flagged WSIDs.
* *
* The WSM must be in the state OWNED_OBJ. * The WSM must be in the state OWNED_OBJ.
*/ */
void flagForReview(const WorkingSetID& i); void flagForReview(const WorkingSetID& i);
/** /**
* Return a set of all WSIDs passed to flagForReview. * Return true if the provided ID is flagged.
*/ */
const unordered_set<WorkingSetID>& getFlagged() const; bool isFlagged(WorkingSetID id) const;
/** /**
* Return true if the provided ID is flagged. * Return the set of all WSIDs passed to flagForReview.
*/ */
bool isFlagged(WorkingSetID id) const; const unordered_set<WorkingSetID>& getFlagged() const;
private: private:
typedef unordered_map<WorkingSetID, WorkingSetMember*> DataMap; struct MemberHolder {
MemberHolder();
~MemberHolder();
DataMap _data; // Free list link if freed. Points to self if in use.
WorkingSetID nextFreeOrSelf;
// The WorkingSetID returned by the next call to allocate(). Shoul // Owning pointer
d refer to the next valid WorkingSetMember* member;
// ID. IDs allocated contiguously. Should never point at an in-us };
e ID.
WorkingSetID _nextId;
// All WSIDs invalidated during evaluation of a predicate (AND). // All WorkingSetIDs are indexes into this, except for INVALID_ID.
// Elements are added to _freeList rather than removed when freed.
vector<MemberHolder> _data;
// Index into _data, forming a linked-list using MemberHolder::next
FreeOrSelf as the next
// link. INVALID_ID is the list terminator since 0 is a valid index
.
// If _freeList == INVALID_ID, the free list is empty and all eleme
nts in _data are in use.
WorkingSetID _freeList;
// An insert-only set of WorkingSetIDs that have been flagged for r
eview.
unordered_set<WorkingSetID> _flagged; unordered_set<WorkingSetID> _flagged;
}; };
/** /**
* The key data extracted from an index. Keeps track of both the key ( currently a BSONObj) and * The key data extracted from an index. Keeps track of both the key ( currently a BSONObj) and
* the index that provided the key. The index key pattern is required to correctly interpret * the index that provided the key. The index key pattern is required to correctly interpret
* the key. * the key.
*/ */
struct IndexKeyDatum { struct IndexKeyDatum {
IndexKeyDatum(const BSONObj& keyPattern, const BSONObj& key) : inde xKeyPattern(keyPattern), IndexKeyDatum(const BSONObj& keyPattern, const BSONObj& key) : inde xKeyPattern(keyPattern),
skipping to change at line 122 skipping to change at line 149
BSONObj indexKeyPattern; BSONObj indexKeyPattern;
// This is the BSONObj for the key that we put into the index. Own ed by us. // This is the BSONObj for the key that we put into the index. Own ed by us.
BSONObj keyData; BSONObj keyData;
}; };
/** /**
* What types of computed data can we have? * What types of computed data can we have?
*/ */
enum WorkingSetComputedDataType { enum WorkingSetComputedDataType {
// What's the score of the document retrieved from a $text query?
WSM_COMPUTED_TEXT_SCORE = 0, WSM_COMPUTED_TEXT_SCORE = 0,
// What's the distance from a geoNear query point to the document?
WSM_COMPUTED_GEO_DISTANCE = 1, WSM_COMPUTED_GEO_DISTANCE = 1,
// The index key used to retrieve the document, for $returnKey quer
y option.
WSM_INDEX_KEY = 2,
// What point (of several possible points) was used to compute the
distance to the document
// via geoNear?
WSM_GEO_NEAR_POINT = 3,
// Must be last.
WSM_COMPUTED_NUM_TYPES,
}; };
/** /**
* Data that is a computed function of a WSM. * Data that is a computed function of a WSM.
*/ */
class WorkingSetComputedData { class WorkingSetComputedData {
MONGO_DISALLOW_COPYING(WorkingSetComputedData);
public: public:
WorkingSetComputedData(const WorkingSetComputedDataType type) : _ty pe(type) { } WorkingSetComputedData(const WorkingSetComputedDataType type) : _ty pe(type) { }
virtual ~WorkingSetComputedData() { } virtual ~WorkingSetComputedData() { }
WorkingSetComputedDataType type() const { return _type; } WorkingSetComputedDataType type() const { return _type; }
virtual WorkingSetComputedData* clone() const = 0; virtual WorkingSetComputedData* clone() const = 0;
private: private:
WorkingSetComputedDataType _type; WorkingSetComputedDataType _type;
skipping to change at line 151 skipping to change at line 192
/** /**
* The type of the data passed between query stages. In particular: * The type of the data passed between query stages. In particular:
* *
* Index scan stages return a WorkingSetMember in the LOC_AND_IDX state . * Index scan stages return a WorkingSetMember in the LOC_AND_IDX state .
* *
* Collection scan stages the LOC_AND_UNOWNED_OBJ state. * Collection scan stages the LOC_AND_UNOWNED_OBJ state.
* *
* A WorkingSetMember may have any of the data above. * A WorkingSetMember may have any of the data above.
*/ */
struct WorkingSetMember { class WorkingSetMember {
MONGO_DISALLOW_COPYING(WorkingSetMember);
public:
WorkingSetMember(); WorkingSetMember();
~WorkingSetMember(); ~WorkingSetMember();
/**
* Reset to an "empty" state.
*/
void clear();
enum MemberState { enum MemberState {
// Initial state. // Initial state.
INVALID, INVALID,
// Data is from 1 or more indices. // Data is from 1 or more indices.
LOC_AND_IDX, LOC_AND_IDX,
// Data is from a collection scan, or data is from an index sca n and was fetched. // Data is from a collection scan, or data is from an index sca n and was fetched.
LOC_AND_UNOWNED_OBJ, LOC_AND_UNOWNED_OBJ,
skipping to change at line 204 skipping to change at line 252
* name. * name.
* *
* Returns true if there is the element is in an index key or in an (owned or unowned) * Returns true if there is the element is in an index key or in an (owned or unowned)
* object. *out is set to the element if so. * object. *out is set to the element if so.
* *
* Returns false otherwise. Returning false indicates a query plan ning error. * Returns false otherwise. Returning false indicates a query plan ning error.
*/ */
bool getFieldDotted(const string& field, BSONElement* out) const; bool getFieldDotted(const string& field, BSONElement* out) const;
private: private:
unordered_map<size_t, WorkingSetComputedData*> _computed; boost::scoped_ptr<WorkingSetComputedData> _computed[WSM_COMPUTED_NU M_TYPES];
}; };
} // namespace mongo } // namespace mongo
 End of changes. 24 change blocks. 
21 lines changed or deleted 77 lines changed or added


 working_set_common.h   working_set_common.h 
skipping to change at line 33 skipping to change at line 33
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
namespace mongo { namespace mongo {
struct WorkingSetMember; class WorkingSetMember;
class WorkingSetCommon { class WorkingSetCommon {
public: public:
/** /**
* Get an owned copy of the BSONObj the WSM refers to. * Get an owned copy of the BSONObj the WSM refers to.
* Requires either a valid BSONObj or valid DiskLoc. * Requires either a valid BSONObj or valid DiskLoc.
* Returns true if the fetch and invalidate succeeded, false otherw ise. * Returns true if the fetch and invalidate succeeded, false otherw ise.
*/ */
static bool fetchAndInvalidateLoc(WorkingSetMember* member); static bool fetchAndInvalidateLoc(WorkingSetMember* member);
/**
* Initialize the fields in 'dest' from 'src', creating copies of o
wned objects as needed.
*/
static void initFrom(WorkingSetMember* dest, const WorkingSetMember
& src);
}; };
} // namespace mongo } // namespace mongo
 End of changes. 2 change blocks. 
1 lines changed or deleted 8 lines changed or added


 working_set_computed_data.h   working_set_computed_data.h 
skipping to change at line 53 skipping to change at line 53
virtual TextScoreComputedData* clone() const { virtual TextScoreComputedData* clone() const {
return new TextScoreComputedData(_score); return new TextScoreComputedData(_score);
} }
private: private:
double _score; double _score;
}; };
class GeoDistanceComputedData : public WorkingSetComputedData { class GeoDistanceComputedData : public WorkingSetComputedData {
public: public:
GeoDistanceComputedData(double score) GeoDistanceComputedData(double dist)
: WorkingSetComputedData(WSM_COMPUTED_GEO_DISTANCE), : WorkingSetComputedData(WSM_COMPUTED_GEO_DISTANCE),
_score(score) { } _dist(dist) { }
double getScore() const { return _score; } double getDist() const { return _dist; }
virtual GeoDistanceComputedData* clone() const { virtual GeoDistanceComputedData* clone() const {
return new GeoDistanceComputedData(_score); return new GeoDistanceComputedData(_dist);
} }
private: private:
double _score; double _dist;
};
class IndexKeyComputedData : public WorkingSetComputedData {
public:
IndexKeyComputedData(BSONObj key)
: WorkingSetComputedData(WSM_INDEX_KEY),
_key(key.getOwned()) { }
BSONObj getKey() const { return _key; }
virtual IndexKeyComputedData* clone() const {
return new IndexKeyComputedData(_key);
}
private:
BSONObj _key;
};
class GeoNearPointComputedData : public WorkingSetComputedData {
public:
GeoNearPointComputedData(BSONObj point)
: WorkingSetComputedData(WSM_GEO_NEAR_POINT),
_point(point.getOwned()) { }
BSONObj getPoint() const { return _point; }
virtual GeoNearPointComputedData* clone() const {
return new GeoNearPointComputedData(_point);
}
private:
BSONObj _point;
}; };
} // namespace mongo } // namespace mongo
 End of changes. 5 change blocks. 
5 lines changed or deleted 37 lines changed or added


 write_concern.h   write_concern.h 
skipping to change at line 31 skipping to change at line 31
* all of the code used other than as permitted herein. If you modify fi le(s) * all of the code used other than as permitted herein. If you modify fi le(s)
* with this exception, you may extend this exception to your version of the * with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so, * file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this * delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also d elete * exception statement from all source files in the program, then also d elete
* it in the license file. * it in the license file.
*/ */
#pragma once #pragma once
#include "mongo/db/write_concern_options.h"
namespace mongo { namespace mongo {
/** /**
* Helper method for commands to call. Blocks until write concern (as * Verifies that a WriteConcern is valid for this particular host.
specified in "cmdObj") */
* is satisfied. "err" should be set to true if the last operation suc Status validateWriteConcern( const WriteConcernOptions& writeConcern );
ceeded, otherwise false.
* "result" will be filled with write concern results. Returns false a struct WriteConcernResult {
nd sets "errmsg" on WriteConcernResult() {
* failure. reset();
}
void reset() {
syncMillis = -1;
fsyncFiles = -1;
wTimedOut = false;
wTime = -1;
err = "";
}
void appendTo( BSONObjBuilder* result ) const;
int syncMillis;
int fsyncFiles;
bool wTimedOut;
int wTime;
vector<BSONObj> writtenTo;
string err; // this is the old err field, should deprecate
};
/**
* Blocks until the database is sure the specified user write concern h
as been fulfilled, or
* returns an error status if the write concern fails. Does no validat
ion of the input write
* concern, it is an error to pass this function an invalid write conce
rn for the host.
*
* Takes a user write concern as well as the replication opTime the wri
te concern applies to -
* if this opTime.isNull() no replication-related write concern options
will be enforced.
*
* Returns result of the write concern if successful.
* Returns NotMaster if the host steps down while waiting for replicati
on
* Returns UnknownReplWriteConcern if the wMode specified was not enfor
ceable
*/ */
bool waitForWriteConcern(const BSONObj& cmdObj, Status waitForWriteConcern( const WriteConcernOptions& writeConcern,
bool err, const OpTime& replOpTime,
BSONObjBuilder* result, WriteConcernResult* result );
string* errmsg);
} // namespace mongo } // namespace mongo
 End of changes. 3 change blocks. 
11 lines changed or deleted 52 lines changed or added


 write_op.h   write_op.h 
skipping to change at line 37 skipping to change at line 37
*/ */
#pragma once #pragma once
#include <boost/scoped_ptr.hpp> #include <boost/scoped_ptr.hpp>
#include <vector> #include <vector>
#include "mongo/base/string_data.h" #include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj.h"
#include "mongo/s/ns_targeter.h" #include "mongo/s/ns_targeter.h"
#include "mongo/s/write_ops/batched_error_detail.h" #include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_request.h"
namespace mongo { namespace mongo {
struct TargetedWrite; struct TargetedWrite;
struct ChildWriteOp; struct ChildWriteOp;
enum WriteOpState { enum WriteOpState {
// Item is ready to be targeted // Item is ready to be targeted
skipping to change at line 112 skipping to change at line 112
/** /**
* Returns the op's current state. * Returns the op's current state.
*/ */
WriteOpState getWriteState() const; WriteOpState getWriteState() const;
/** /**
* Returns the op's error. * Returns the op's error.
* *
* Can only be used in state _Error * Can only be used in state _Error
*/ */
const BatchedErrorDetail& getOpError() const; const WriteErrorDetail& getOpError() const;
/** /**
* Creates TargetedWrite operations for every applicable shard, whi ch contain the * Creates TargetedWrite operations for every applicable shard, whi ch contain the
* information needed to send the child writes generated from this write item. * information needed to send the child writes generated from this write item.
* *
* The ShardTargeter determines the ShardEndpoints to send child wr ites to, but is not * The ShardTargeter determines the ShardEndpoints to send child wr ites to, but is not
* modified by this operation. * modified by this operation.
* *
* Returns !OK if the targeting process itself fails * Returns !OK if the targeting process itself fails
* (no TargetedWrites will be added, state unchanged) * (no TargetedWrites will be added, state unchanged)
*/ */
Status targetWrites( const NSTargeter& targeter, Status targetWrites( const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites ); std::vector<TargetedWrite*>* targetedWrites );
/** /**
* Resets the state of this write op to _Ready and stops waiting fo r any outstanding * Resets the state of this write op to _Ready and stops waiting fo r any outstanding
* TargetedWrites. Optional error can be provided for reporting. * TargetedWrites. Optional error can be provided for reporting.
* *
* Can only be called when state is _Pending and no TargetedWrites * Can only be called when state is _Pending, or is a no-op if call
have been noted, or is a ed when the state
* no-op if called when the state is still _Ready (and therefore no * is still _Ready (and therefore no writes are pending).
writes are pending).
*/ */
void cancelWrites( const BatchedErrorDetail* why ); void cancelWrites( const WriteErrorDetail* why );
/** /**
* Marks the targeted write as finished for this write op. * Marks the targeted write as finished for this write op.
* *
* One of noteWriteComplete or noteWriteError should be called exac tly once for every * One of noteWriteComplete or noteWriteError should be called exac tly once for every
* TargetedWrite. * TargetedWrite.
*/ */
void noteWriteComplete( const TargetedWrite& targetedWrite ); void noteWriteComplete( const TargetedWrite& targetedWrite );
/** /**
* Stores the error response of a TargetedWrite for later use, mark s the write as finished. * Stores the error response of a TargetedWrite for later use, mark s the write as finished.
* *
* As above, one of noteWriteComplete or noteWriteError should be c alled exactly once for * As above, one of noteWriteComplete or noteWriteError should be c alled exactly once for
* every TargetedWrite. * every TargetedWrite.
*/ */
void noteWriteError( const TargetedWrite& targetedWrite, const Batc hedErrorDetail& error ); void noteWriteError( const TargetedWrite& targetedWrite, const Writ eErrorDetail& error );
/** /**
* Sets the error for this write op directly, and forces the state to _Error. * Sets the error for this write op directly, and forces the state to _Error.
* *
* Should only be used when in state _Ready. * Should only be used when in state _Ready.
*/ */
void setOpError( const BatchedErrorDetail& error ); void setOpError( const WriteErrorDetail& error );
private: private:
/** /**
* Updates the op state after new information is received. * Updates the op state after new information is received.
*/ */
void updateOpState(); void updateOpState();
// Owned elsewhere, reference to a batch with a write item // Owned elsewhere, reference to a batch with a write item
const BatchItemRef _itemRef; const BatchItemRef _itemRef;
// What stage of the operation we are at // What stage of the operation we are at
WriteOpState _state; WriteOpState _state;
// filled when state == _Pending // filled when state == _Pending
std::vector<ChildWriteOp*> _childOps; std::vector<ChildWriteOp*> _childOps;
// filled when state == _Error // filled when state == _Error
scoped_ptr<BatchedErrorDetail> _error; scoped_ptr<WriteErrorDetail> _error;
// Finished child operations, for debugging // Finished child operations, for debugging
std::vector<ChildWriteOp*> _history; std::vector<ChildWriteOp*> _history;
}; };
/** /**
* State of a write in-progress (to a single shard) which is one part o f a larger write * State of a write in-progress (to a single shard) which is one part o f a larger write
* operation. * operation.
* *
* As above, the write op may finish in either a successful (_Completed ) or unsuccessful * As above, the write op may finish in either a successful (_Completed ) or unsuccessful
skipping to change at line 206 skipping to change at line 206
WriteOpState state; WriteOpState state;
// non-zero when state == _Pending // non-zero when state == _Pending
// Not owned here but tracked for reporting // Not owned here but tracked for reporting
TargetedWrite* pendingWrite; TargetedWrite* pendingWrite;
// filled when state > _Pending // filled when state > _Pending
scoped_ptr<ShardEndpoint> endpoint; scoped_ptr<ShardEndpoint> endpoint;
// filled when state == _Error or (optionally) when state == _Cance lled // filled when state == _Error or (optionally) when state == _Cance lled
scoped_ptr<BatchedErrorDetail> error; scoped_ptr<WriteErrorDetail> error;
}; };
// First value is write item index in the batch, second value is child write op index // First value is write item index in the batch, second value is child write op index
typedef pair<int, int> WriteOpRef; typedef pair<int, int> WriteOpRef;
/** /**
* A write with A) a request targeted at a particular shard endpoint, a nd B) a response targeted * A write with A) a request targeted at a particular shard endpoint, a nd B) a response targeted
* at a particular WriteOp. * at a particular WriteOp.
* *
* TargetedWrites are the link between the RPC layer and the in-progres s write * TargetedWrites are the link between the RPC layer and the in-progres s write
 End of changes. 8 change blocks. 
11 lines changed or deleted 10 lines changed or added


 writeback_listener.h   writeback_listener.h 
skipping to change at line 54 skipping to change at line 54
* The writeback listener takes back write attempts that were made agai nst a wrong shard. * The writeback listener takes back write attempts that were made agai nst a wrong shard.
* (Wrong here in the sense that the target chunk moved before this mon gos had a chance to * (Wrong here in the sense that the target chunk moved before this mon gos had a chance to
* learn so.) It is responsible for reapplying these writes to the corr ect shard. * learn so.) It is responsible for reapplying these writes to the corr ect shard.
* *
* Runs (instantiated) on mongos. * Runs (instantiated) on mongos.
* Currently, there is one writebacklistener per shard. * Currently, there is one writebacklistener per shard.
*/ */
class WriteBackListener : public BackgroundJob { class WriteBackListener : public BackgroundJob {
public: public:
class ConnectionIdent {
public:
ConnectionIdent( const string& ii , ConnectionId id )
: instanceIdent( ii ) , connectionId( id ) {
}
bool operator<(const ConnectionIdent& other) const {
if ( instanceIdent == other.instanceIdent )
return connectionId < other.connectionId;
return instanceIdent < other.instanceIdent;
}
string toString() const { return str::stream() << instanceIdent
<< ":" << connectionId; }
string instanceIdent;
ConnectionId connectionId;
};
static void init( DBClientBase& conn ); static void init( DBClientBase& conn );
static void init( const string& host ); static void init( const string& host );
static BSONObj waitFor( const ConnectionIdent& ident, const OID& oi
d );
protected: protected:
WriteBackListener( const string& addr ); WriteBackListener( const string& addr );
string name() const { return _name; } string name() const { return _name; }
void run(); void run();
private: private:
string _addr; string _addr;
string _name; string _name;
static mongo::mutex _cacheLock; // protects _cache static mongo::mutex _cacheLock; // protects _cache
static unordered_map<string,WriteBackListener*> _cache; // server t o listener static unordered_map<string,WriteBackListener*> _cache; // server t o listener
static unordered_set<string> _seenSets; // cache of set urls we've seen - note this is ever expanding for order, case, changes static unordered_set<string> _seenSets; // cache of set urls we've seen - note this is ever expanding for order, case, changes
struct WBStatus {
OID id;
BSONObj gle;
};
static mongo::mutex _seenWritebacksLock; // protects _seenWritback
s
static map<ConnectionIdent,WBStatus> _seenWritebacks; // connection
Id -> last write back GLE
}; };
void waitForWriteback( const OID& oid );
} // namespace mongo } // namespace mongo
 End of changes. 4 change blocks. 
34 lines changed or deleted 0 lines changed or added

This html diff was produced by rfcdiff 1.41. The latest version is available from http://tools.ietf.org/tools/rfcdiff/