| assert_util.h | | assert_util.h | |
| | | | |
| skipping to change at line 90 | | skipping to change at line 90 | |
| }; | | }; | |
| | | | |
| extern AssertionCount assertionCount; | | extern AssertionCount assertionCount; | |
| | | | |
| class DBException : public std::exception { | | class DBException : public std::exception { | |
| public: | | public: | |
| virtual const char* what() const throw() = 0; | | virtual const char* what() const throw() = 0; | |
| virtual string toString() const { | | virtual string toString() const { | |
| return what(); | | return what(); | |
| } | | } | |
|
| virtual int getCode() = 0; | | virtual int getCode() const = 0; | |
| operator string() const { return toString(); } | | operator string() const { stringstream ss; ss << getCode() << " " << what(); return ss.str(); } | |
| }; | | }; | |
| | | | |
| class AssertionException : public DBException { | | class AssertionException : public DBException { | |
| public: | | public: | |
| int code; | | int code; | |
| string msg; | | string msg; | |
| AssertionException() { code = 0; } | | AssertionException() { code = 0; } | |
| virtual ~AssertionException() throw() { } | | virtual ~AssertionException() throw() { } | |
| virtual bool severe() { | | virtual bool severe() { | |
| return true; | | return true; | |
| } | | } | |
| virtual bool isUserAssertion() { | | virtual bool isUserAssertion() { | |
| return false; | | return false; | |
| } | | } | |
|
| virtual int getCode(){ return code; } | | virtual int getCode() const { return code; } | |
| virtual const char* what() const throw() { return msg.c_str(); } | | virtual const char* what() const throw() { return msg.c_str(); } | |
| | | | |
| /* true if an interrupted exception - see KillCurrentOp */ | | /* true if an interrupted exception - see KillCurrentOp */ | |
| bool interrupted() { | | bool interrupted() { | |
| return code == 11600 || code == 11601; | | return code == 11600 || code == 11601; | |
| } | | } | |
| }; | | }; | |
| | | | |
| /* UserExceptions are valid errors that a user can cause, like out of disk space or duplicate key */ | | /* UserExceptions are valid errors that a user can cause, like out of disk space or duplicate key */ | |
| class UserException : public AssertionException { | | class UserException : public AssertionException { | |
| | | | |
| skipping to change at line 168 | | skipping to change at line 168 | |
| inline void msgasserted(int msgid, string msg) { msgasserted(msgid, msg.c_str()); } | | inline void msgasserted(int msgid, string msg) { msgasserted(msgid, msg.c_str()); } | |
| | | | |
| #ifdef assert | | #ifdef assert | |
| #undef assert | | #undef assert | |
| #endif | | #endif | |
| | | | |
| #define MONGO_assert(_Expression) (void)( (!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) ) | | #define MONGO_assert(_Expression) (void)( (!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) ) | |
| #define assert MONGO_assert | | #define assert MONGO_assert | |
| | | | |
| /* "user assert". if asserts, user did something wrong, not our code *
/ | | /* "user assert". if asserts, user did something wrong, not our code *
/ | |
|
| inline void uassert(unsigned msgid, string msg, bool expr) { | | #define MONGO_uassert(msgid, msg, expr) (void)( (!!(expr)) || (mongo::uasserted(msgid, msg), 0) ) | |
| if( !expr ) uasserted(msgid, msg.c_str()); | | #define uassert MONGO_uassert | |
| } | | | |
| | | | |
| template<class T> | | | |
| inline void uassert(unsigned msgid, string msg, const T&t , bool expr) | | | |
| { | | | |
| if( !expr ){ | | | |
| stringstream ss; | | | |
| ss << msg << " " << t.toString(); | | | |
| uasserted(msgid, ss.str()); | | | |
| } | | | |
| } | | | |
| | | | |
| inline void uassert(unsigned msgid, const char * msg, bool expr) { | | | |
| if( !expr ) uasserted(msgid, msg); | | | |
| } | | | |
| | | | |
| /* warning only - keeps going */ | | /* warning only - keeps going */ | |
| #define MONGO_wassert(_Expression) (void)( (!!(_Expression)) || (mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) ) | | #define MONGO_wassert(_Expression) (void)( (!!(_Expression)) || (mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) ) | |
| #define wassert MONGO_wassert | | #define wassert MONGO_wassert | |
| | | | |
| /* display a message, no context, and throw assertionexception | | /* display a message, no context, and throw assertionexception | |
| | | | |
| easy way to throw an exception and log something without our stack trace | | easy way to throw an exception and log something without our stack trace | |
| display happening. | | display happening. | |
| */ | | */ | |
|
| inline void massert(unsigned msgid, string msg, bool expr) { | | #define MONGO_massert(msgid, msg, expr) (void)( (!!(expr)) || (mongo::msgasserted(msgid, msg), 0) ) | |
| if( !expr) msgasserted(msgid, msg.c_str()); | | #define massert MONGO_massert | |
| } | | | |
| inline void massert(unsigned msgid, const char * msg, bool expr) { | | | |
| if( !expr) msgasserted(msgid, msg); | | | |
| } | | | |
| | | | |
| /* dassert is 'debug assert' -- might want to turn off for production as these | | /* dassert is 'debug assert' -- might want to turn off for production as these | |
| could be slow. | | could be slow. | |
| */ | | */ | |
| #if defined(_DEBUG) | | #if defined(_DEBUG) | |
| # define MONGO_dassert assert | | # define MONGO_dassert assert | |
| #else | | #else | |
| # define MONGO_dassert(x) | | # define MONGO_dassert(x) | |
| #endif | | #endif | |
| #define dassert MONGO_dassert | | #define dassert MONGO_dassert | |
| | | | |
End of changes. 4 change blocks. |
| 26 lines changed or deleted | | 10 lines changed or added | |
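A minimal standalone sketch (not MongoDB code) of the effect of the change above, where the inline uassert/massert helpers were replaced by the MONGO_uassert/MONGO_massert macros: because || short-circuits, the message expression is no longer evaluated unless the assertion actually fails. The uasserted() stub and expensiveMessage() below are invented for illustration.

    #include <iostream>
    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Stand-in for mongo::uasserted(): report the id and message by throwing.
    static void uasserted(int msgid, const std::string& msg) {
        std::ostringstream ss;
        ss << msgid << " " << msg;
        throw std::runtime_error(ss.str());
    }

    // Same shape as the new MONGO_uassert macro above.
    #define uassert(msgid, msg, expr) \
        (void)( (!!(expr)) || (uasserted(msgid, msg), 0) )

    static std::string expensiveMessage() {
        std::cout << "building message" << std::endl;    // only runs on failure
        return "assertion failed";
    }

    int main() {
        uassert(10000, expensiveMessage(), 1 + 1 == 2);  // passes: message never built
        try {
            uassert(10001, expensiveMessage(), false);   // fails: message built, then thrown
        } catch (const std::exception& e) {
            std::cout << "caught: " << e.what() << std::endl;
        }
        return 0;
    }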
|
| bsonobjbuilder.h | | bsonobjbuilder.h | |
| | | | |
| skipping to change at line 26 | | skipping to change at line 26 | |
| * Unless required by applicable law or agreed to in writing, software | | * Unless required by applicable law or agreed to in writing, software | |
| * distributed under the License is distributed on an "AS IS" BASIS, | | * distributed under the License is distributed on an "AS IS" BASIS, | |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
| * See the License for the specific language governing permissions and | | * See the License for the specific language governing permissions and | |
| * limitations under the License. | | * limitations under the License. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include <limits> | | #include <limits> | |
|
| | | #include <cmath> | |
| | | using namespace std; | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| #if defined(_WIN32) | | #if defined(_WIN32) | |
| // warning: 'this' : used in base member initializer list | | // warning: 'this' : used in base member initializer list | |
| #pragma warning( disable : 4355 ) | | #pragma warning( disable : 4355 ) | |
| #endif | | #endif | |
| | | | |
| /** Utility for creating a BSONObj. | | /** Utility for creating a BSONObj. | |
| See also the BSON() and BSON_ARRAY() macros. | | See also the BSON() and BSON_ARRAY() macros. | |
| | | | |
| skipping to change at line 102 | | skipping to change at line 104 | |
| the subobject's body */ | | the subobject's body */ | |
| BufBuilder &subobjStart(const char *fieldName) { | | BufBuilder &subobjStart(const char *fieldName) { | |
| _b.append((char) Object); | | _b.append((char) Object); | |
| _b.append(fieldName); | | _b.append(fieldName); | |
| return _b; | | return _b; | |
| } | | } | |
| | | | |
| /** add a subobject as a member with type Array. Thus arr object should have "0", "1", ... | | /** add a subobject as a member with type Array. Thus arr object should have "0", "1", ... | |
| style fields in it. | | style fields in it. | |
| */ | | */ | |
|
| BSONObjBuilder& appendArray(const char *fieldName, BSONObj subObj) { | | BSONObjBuilder& appendArray(const char *fieldName, const BSONObj &subObj) { | |
| _b.append((char) Array); | | _b.append((char) Array); | |
| _b.append(fieldName); | | _b.append(fieldName); | |
| _b.append((void *) subObj.objdata(), subObj.objsize()); | | _b.append((void *) subObj.objdata(), subObj.objsize()); | |
| return *this; | | return *this; | |
| } | | } | |
| BSONObjBuilder& append(const char *fieldName, BSONArray arr) { | | BSONObjBuilder& append(const char *fieldName, BSONArray arr) { | |
| return appendArray(fieldName, arr); | | return appendArray(fieldName, arr); | |
| } | | } | |
| | | | |
| /** add header for a new subarray and return bufbuilder for writing to | | /** add header for a new subarray and return bufbuilder for writing to | |
| | | | |
| skipping to change at line 441 | | skipping to change at line 443 | |
| } | | } | |
| | | | |
| /** | | /** | |
| these are the min/max when comparing, not strict min/max elements for a given type | | these are the min/max when comparing, not strict min/max elements for a given type | |
| */ | | */ | |
| void appendMinForType( const string& field , int type ); | | void appendMinForType( const string& field , int type ); | |
| void appendMaxForType( const string& field , int type ); | | void appendMaxForType( const string& field , int type ); | |
| | | | |
| /** Append an array of values. */ | | /** Append an array of values. */ | |
| template < class T > | | template < class T > | |
|
| BSONObjBuilder& append( const char *fieldName, const vector< T >& vals ) { | | BSONObjBuilder& append( const char *fieldName, const vector< T >& vals ); | |
| BSONObjBuilder arrBuilder; | | | |
| for ( unsigned int i = 0; i < vals.size(); ++i ) | | | |
| arrBuilder.append( numStr( i ).c_str(), vals[ i ] ); | | | |
| marshalArray( fieldName, arrBuilder.done() ); | | | |
| return *this; | | | |
| } | | | |
| | | | |
|
| /* Append an array of ints | | template < class T > | |
| void appendArray( const char *fieldName, const vector< int >& vals ) { | | BSONObjBuilder& append( const char *fieldName, const list< T >& vals ); | |
| BSONObjBuilder arrBuilder; | | | |
| for ( unsigned i = 0; i < vals.size(); ++i ) | | | |
| arrBuilder.append( numStr( i ).c_str(), vals[ i ] ); | | | |
| marshalArray( fieldName, arrBuilder.done() ); | | | |
| }*/ | | | |
| | | | |
| /** The returned BSONObj will free the buffer when it is finished. */ | | /** The returned BSONObj will free the buffer when it is finished. */ | |
| BSONObj obj() { | | BSONObj obj() { | |
| bool own = owned(); | | bool own = owned(); | |
| massert( 10335 , "builder does not own memory", own ); | | massert( 10335 , "builder does not own memory", own ); | |
| int l; | | int l; | |
| return BSONObj(decouple(l), true); | | return BSONObj(decouple(l), true); | |
| } | | } | |
| | | | |
| /** Fetch the object we have built. | | /** Fetch the object we have built. | |
| | | | |
| skipping to change at line 539 | | skipping to change at line 529 | |
| massert( 10336 , "No subobject started", _s.subobjStarted() ); | | massert( 10336 , "No subobject started", _s.subobjStarted() ); | |
| return _s << l; | | return _s << l; | |
| } | | } | |
| | | | |
| /** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */ | | /** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */ | |
| bool owned() const { return &_b == &_buf; } | | bool owned() const { return &_b == &_buf; } | |
| | | | |
| BSONObjIterator iterator() const ; | | BSONObjIterator iterator() const ; | |
| | | | |
| private: | | private: | |
|
| // Append the provided arr object as an array. | | | |
| void marshalArray( const char *fieldName, const BSONObj &arr ) { | | | |
| _b.append( (char) Array ); | | | |
| _b.append( fieldName ); | | | |
| _b.append( (void *) arr.objdata(), arr.objsize() ); | | | |
| } | | | |
| | | | |
| char* _done() { | | char* _done() { | |
| _s.endField(); | | _s.endField(); | |
| _b.append((char) EOO); | | _b.append((char) EOO); | |
| char *data = _b.buf() + _offset; | | char *data = _b.buf() + _offset; | |
| int size = _b.len() - _offset; | | int size = _b.len() - _offset; | |
| *((int*)data) = size; | | *((int*)data) = size; | |
| if ( _tracker ) | | if ( _tracker ) | |
| _tracker->got( size ); | | _tracker->got( size ); | |
| return data; | | return data; | |
| } | | } | |
| | | | |
| skipping to change at line 602 | | skipping to change at line 585 | |
| | | | |
| BSONObj done() { return _b.done(); } | | BSONObj done() { return _b.done(); } | |
| | | | |
| template <typename T> | | template <typename T> | |
| BSONArrayBuilder& append(const char *name, const T& x){ | | BSONArrayBuilder& append(const char *name, const T& x){ | |
| fill( name ); | | fill( name ); | |
| append( x ); | | append( x ); | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
|
| BufBuilder &subobjStart( const char *name ) { | | BufBuilder &subobjStart( const char *name = "0" ) { | |
| fill( name ); | | fill( name ); | |
| return _b.subobjStart( num().c_str() ); | | return _b.subobjStart( num().c_str() ); | |
| } | | } | |
| | | | |
| BufBuilder &subarrayStart( const char *name ) { | | BufBuilder &subarrayStart( const char *name ) { | |
| fill( name ); | | fill( name ); | |
| return _b.subarrayStart( num().c_str() ); | | return _b.subarrayStart( num().c_str() ); | |
| } | | } | |
| | | | |
| void appendArray( const char *name, BSONObj subObj ) { | | void appendArray( const char *name, BSONObj subObj ) { | |
| | | | |
| skipping to change at line 647 | | skipping to change at line 630 | |
| BSONObjBuilder _b; | | BSONObjBuilder _b; | |
| _b.appendNull( "" ); | | _b.appendNull( "" ); | |
| return _b.obj(); | | return _b.obj(); | |
| } | | } | |
| | | | |
| string num(){ return _b.numStr(_i++); } | | string num(){ return _b.numStr(_i++); } | |
| int _i; | | int _i; | |
| BSONObjBuilder _b; | | BSONObjBuilder _b; | |
| }; | | }; | |
| | | | |
|
| | | template < class T > | |
| | | inline BSONObjBuilder& BSONObjBuilder::append( const char *fieldName, const vector< T >& vals ) { | |
| | | BSONObjBuilder arrBuilder; | |
| | | for ( unsigned int i = 0; i < vals.size(); ++i ) | |
| | | arrBuilder.append( numStr( i ).c_str(), vals[ i ] ); | |
| | | appendArray( fieldName, arrBuilder.done() ); | |
| | | return *this; | |
| | | } | |
| | | | |
| | | template < class T > | |
| | | inline BSONObjBuilder& BSONObjBuilder::append( const char *fieldName, const list< T >& vals ) { | |
| | | BSONObjBuilder arrBuilder; | |
| | | int n = 0; | |
| | | for( typename list< T >::const_iterator i = vals.begin(); i != vals.end(); i++ ) | |
| | | arrBuilder.append( numStr(n++).c_str(), *i ); | |
| | | appendArray( fieldName, arrBuilder.done() ); | |
| | | return *this; | |
| | | } | |
| | | | |
| } | | } | |
| | | | |
End of changes. 7 change blocks. |
| 25 lines changed or deleted | | 31 lines changed or added | |
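A short usage sketch of the vector/list append overloads added on the right above. It assumes the mongo headers from this diff are on the include path (the include below is illustrative), so it is not runnable on its own; buildExample() is an invented helper.

    #include <list>
    #include <string>
    #include <vector>
    #include "bsonobjbuilder.h"   // illustrative include path

    mongo::BSONObj buildExample() {
        std::vector<int> scores;
        scores.push_back(3);
        scores.push_back(7);

        std::list<std::string> tags;
        tags.push_back("a");
        tags.push_back("b");

        mongo::BSONObjBuilder b;
        b.append("scores", scores);   // stored as the array { "0" : 3, "1" : 7 }
        b.append("tags", tags);       // list<> overload added in this version
        return b.obj();               // { scores: [ 3, 7 ], tags: [ "a", "b" ] }
    }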
|
| btree.h | | btree.h | |
| | | | |
| skipping to change at line 190 | | skipping to change at line 187 | |
| int n; // # of keys so far. | | int n; // # of keys so far. | |
| int reserved; | | int reserved; | |
| const _KeyNode& k(int i) const { | | const _KeyNode& k(int i) const { | |
| return ((_KeyNode*)data)[i]; | | return ((_KeyNode*)data)[i]; | |
| } | | } | |
| _KeyNode& k(int i) { | | _KeyNode& k(int i) { | |
| return ((_KeyNode*)data)[i]; | | return ((_KeyNode*)data)[i]; | |
| } | | } | |
| char data[4]; | | char data[4]; | |
| }; | | }; | |
|
| | | #pragma pack() | |
| | | | |
|
| | | #pragma pack(1) | |
| class BtreeBucket : public BucketBasics { | | class BtreeBucket : public BucketBasics { | |
| friend class BtreeCursor; | | friend class BtreeCursor; | |
| public: | | public: | |
| void dump(); | | void dump(); | |
| | | | |
| /* @return true if key exists in index | | /* @return true if key exists in index | |
| | | | |
| order - indicates order of keys in the index. this is basically the index's key pattern, e.g.: | | order - indicates order of keys in the index. this is basically the index's key pattern, e.g.: | |
| BSONObj order = ((IndexDetails&)idx).keyPattern(); | | BSONObj order = ((IndexDetails&)idx).keyPattern(); | |
| likewise below in bt_insert() etc. | | likewise below in bt_insert() etc. | |
| | | | |
| skipping to change at line 265 | | skipping to change at line 264 | |
| DiskLoc lchild, DiskLoc rchild, IndexDetails&); | | DiskLoc lchild, DiskLoc rchild, IndexDetails&); | |
| int _insert(DiskLoc thisLoc, DiskLoc recordLoc, | | int _insert(DiskLoc thisLoc, DiskLoc recordLoc, | |
| const BSONObj& key, const Ordering &order, bool dupsAllowed, | | const BSONObj& key, const Ordering &order, bool dupsAllowed, | |
| DiskLoc lChild, DiskLoc rChild, IndexDetails&); | | DiskLoc lChild, DiskLoc rChild, IndexDetails&); | |
| bool find(const IndexDetails& idx, const BSONObj& key, DiskLoc recordLoc, const Ordering &order, int& pos, bool assertIfDup); | | bool find(const IndexDetails& idx, const BSONObj& key, DiskLoc recordLoc, const Ordering &order, int& pos, bool assertIfDup); | |
| static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey); | | static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey); | |
| public: | | public: | |
| // simply builds and returns a dup key error message string | | // simply builds and returns a dup key error message string | |
| static string dupKeyError( const IndexDetails& idx , const BSONObj& key ); | | static string dupKeyError( const IndexDetails& idx , const BSONObj& key ); | |
| }; | | }; | |
|
| | | #pragma pack() | |
| | | | |
| class BtreeCursor : public Cursor { | | class BtreeCursor : public Cursor { | |
| public: | | public: | |
| BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction ); | | BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction ); | |
| | | | |
| BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const BoundList &_bounds, int _direction ); | | BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const BoundList &_bounds, int _direction ); | |
|
| | | ~BtreeCursor(){ | |
| | | } | |
| virtual bool ok() { | | virtual bool ok() { | |
| return !bucket.isNull(); | | return !bucket.isNull(); | |
| } | | } | |
| bool eof() { | | bool eof() { | |
| return !ok(); | | return !ok(); | |
| } | | } | |
| virtual bool advance(); | | virtual bool advance(); | |
| | | | |
| virtual void noteLocation(); // updates keyAtKeyOfs... | | virtual void noteLocation(); // updates keyAtKeyOfs... | |
| virtual void checkLocation(); | | virtual void checkLocation(); | |
| virtual bool supportGetMore() { return true; } | | virtual bool supportGetMore() { return true; } | |
| | | | |
| /* used for multikey index traversal to avoid sending back dups. see Matcher::matches(). | | /* used for multikey index traversal to avoid sending back dups. see Matcher::matches(). | |
| if a multikey index traversal: | | if a multikey index traversal: | |
| if loc has already been sent, returns true. | | if loc has already been sent, returns true. | |
| otherwise, marks loc as sent. | | otherwise, marks loc as sent. | |
| @return true if the loc has not been seen | | @return true if the loc has not been seen | |
| */ | | */ | |
|
| set<DiskLoc> dups; | | | |
| virtual bool getsetdup(DiskLoc loc) { | | virtual bool getsetdup(DiskLoc loc) { | |
| if( multikey ) { | | if( multikey ) { | |
| pair<set<DiskLoc>::iterator, bool> p = dups.insert(loc); | | pair<set<DiskLoc>::iterator, bool> p = dups.insert(loc); | |
| return !p.second; | | return !p.second; | |
| } | | } | |
| return false; | | return false; | |
| } | | } | |
| | | | |
| _KeyNode& _currKeyNode() { | | _KeyNode& _currKeyNode() { | |
| assert( !bucket.isNull() ); | | assert( !bucket.isNull() ); | |
| | | | |
| skipping to change at line 345 | | skipping to change at line 345 | |
| string s = string("BtreeCursor ") + indexDetails.indexName(); | | string s = string("BtreeCursor ") + indexDetails.indexName(); | |
| if ( direction < 0 ) s += " reverse"; | | if ( direction < 0 ) s += " reverse"; | |
| if ( bounds_.size() > 1 ) s += " multi"; | | if ( bounds_.size() > 1 ) s += " multi"; | |
| return s; | | return s; | |
| } | | } | |
| | | | |
| BSONObj prettyKey( const BSONObj &key ) const { | | BSONObj prettyKey( const BSONObj &key ) const { | |
| return key.replaceFieldNames( indexDetails.keyPattern() ).clientReadable(); | | return key.replaceFieldNames( indexDetails.keyPattern() ).clientReadable(); | |
| } | | } | |
| | | | |
|
| virtual BSONObj prettyIndexBounds() const { | | virtual BSONArray prettyIndexBounds() const { | |
| BSONArrayBuilder ba; | | BSONArrayBuilder ba; | |
| if ( bounds_.size() == 0 ) { | | if ( bounds_.size() == 0 ) { | |
| ba << BSON_ARRAY( prettyKey( startKey ) << prettyKey( endKey ) ); | | ba << BSON_ARRAY( prettyKey( startKey ) << prettyKey( endKey ) ); | |
| } else { | | } else { | |
| for( BoundList::const_iterator i = bounds_.begin(); i != bounds_.end(); ++i ) { | | for( BoundList::const_iterator i = bounds_.begin(); i != bounds_.end(); ++i ) { | |
| ba << BSON_ARRAY( prettyKey( i->first ) << prettyKey( i->second ) ); | | ba << BSON_ARRAY( prettyKey( i->first ) << prettyKey( i->second ) ); | |
| } | | } | |
| } | | } | |
| return ba.arr(); | | return ba.arr(); | |
| } | | } | |
| | | | |
| void forgetEndKey() { endKey = BSONObj(); } | | void forgetEndKey() { endKey = BSONObj(); } | |
| | | | |
|
| | | virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); } | |
| | | | |
| | | virtual void setMatcher( auto_ptr< CoveredIndexMatcher > matcher ) { | |
| | | _matcher = matcher; | |
| | | } | |
| | | | |
| private: | | private: | |
| /* Our btrees may (rarely) have "unused" keys when items are delete
d. | | /* Our btrees may (rarely) have "unused" keys when items are delete
d. | |
| Skip past them. | | Skip past them. | |
| */ | | */ | |
| void skipUnusedKeys(); | | void skipUnusedKeys(); | |
| | | | |
| /* Check if the current key is beyond endKey. */ | | /* Check if the current key is beyond endKey. */ | |
| void checkEnd(); | | void checkEnd(); | |
| | | | |
| // selective audits on construction | | // selective audits on construction | |
| void audit(); | | void audit(); | |
| | | | |
| // set initial bucket | | // set initial bucket | |
| void init(); | | void init(); | |
| | | | |
| // init start / end keys with a new range | | // init start / end keys with a new range | |
| void initInterval(); | | void initInterval(); | |
| | | | |
| friend class BtreeBucket; | | friend class BtreeBucket; | |
|
| | | set<DiskLoc> dups; | |
| NamespaceDetails *d; | | NamespaceDetails *d; | |
| int idxNo; | | int idxNo; | |
| BSONObj startKey; | | BSONObj startKey; | |
| BSONObj endKey; | | BSONObj endKey; | |
| bool endKeyInclusive_; | | bool endKeyInclusive_; | |
| bool multikey; // note this must be updated every getmore batch in case someone added a multikey... | | bool multikey; // note this must be updated every getmore batch in case someone added a multikey... | |
| | | | |
| const IndexDetails& indexDetails; | | const IndexDetails& indexDetails; | |
| BSONObj order; | | BSONObj order; | |
| DiskLoc bucket; | | DiskLoc bucket; | |
| int keyOfs; | | int keyOfs; | |
| int direction; // 1=fwd,-1=reverse | | int direction; // 1=fwd,-1=reverse | |
| BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call | | BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call | |
| DiskLoc locAtKeyOfs; | | DiskLoc locAtKeyOfs; | |
| BoundList bounds_; | | BoundList bounds_; | |
| unsigned boundIndex_; | | unsigned boundIndex_; | |
| const IndexSpec& _spec; | | const IndexSpec& _spec; | |
|
| | | auto_ptr< CoveredIndexMatcher > _matcher; | |
| }; | | }; | |
| | | | |
|
| #pragma pack() | | | |
| | | | |
| inline bool IndexDetails::hasKey(const BSONObj& key) { | | inline bool IndexDetails::hasKey(const BSONObj& key) { | |
| return head.btree()->exists(*this, head, key, Ordering::make(keyPattern())); | | return head.btree()->exists(*this, head, key, Ordering::make(keyPattern())); | |
| } | | } | |
| inline bool IndexDetails::wouldCreateDup(const BSONObj& key, DiskLoc self) { | | inline bool IndexDetails::wouldCreateDup(const BSONObj& key, DiskLoc self) { | |
| return head.btree()->wouldCreateDup(*this, head, key, Ordering::make(keyPattern()), self); | | return head.btree()->wouldCreateDup(*this, head, key, Ordering::make(keyPattern()), self); | |
| } | | } | |
| | | | |
| /* build btree from the bottom up */ | | /* build btree from the bottom up */ | |
| /* _ TODO dropDups */ | | /* _ TODO dropDups */ | |
| class BtreeBuilder { | | class BtreeBuilder { | |
| | | | |
End of changes. 10 change blocks. |
| 5 lines changed or deleted | | 16 lines changed or added | |
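A standalone sketch (not MongoDB code) of the getsetdup() idiom BtreeCursor uses above for multikey traversal: set::insert reports through the bool in its returned pair whether the location was already recorded, so each record is returned at most once. The simplified DiskLoc and DupTracker types below are invented for illustration.

    #include <cassert>
    #include <set>
    #include <utility>

    struct DiskLoc {                              // simplified stand-in
        int a, ofs;
        bool operator<(const DiskLoc& r) const {
            return a != r.a ? a < r.a : ofs < r.ofs;
        }
    };

    struct DupTracker {
        std::set<DiskLoc> dups;
        // true if loc was already sent; otherwise marks it as sent
        bool getsetdup(DiskLoc loc) {
            std::pair<std::set<DiskLoc>::iterator, bool> p = dups.insert(loc);
            return !p.second;
        }
    };

    int main() {
        DupTracker t;
        DiskLoc loc = { 1, 16 };
        assert( !t.getsetdup(loc) );              // first visit: not a dup
        assert( t.getsetdup(loc) );               // second visit: reported as dup
        return 0;
    }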
|
| chunk.h | | chunk.h | |
| | | | |
| skipping to change at line 32 | | skipping to change at line 32 | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include "../pch.h" | | #include "../pch.h" | |
| #include "../client/dbclient.h" | | #include "../client/dbclient.h" | |
| #include "../client/model.h" | | #include "../client/model.h" | |
| #include "../bson/util/atomic_int.h" | | #include "../bson/util/atomic_int.h" | |
| #include "shardkey.h" | | #include "shardkey.h" | |
| #include "shard.h" | | #include "shard.h" | |
|
| | | #include "config.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| class DBConfig; | | class DBConfig; | |
|
| | | class Chunk; | |
| | | class ChunkRange; | |
| class ChunkManager; | | class ChunkManager; | |
|
| | | class ChunkRangeMangager; | |
| class ChunkObjUnitTest; | | class ChunkObjUnitTest; | |
| | | | |
| typedef unsigned long long ShardChunkVersion; | | typedef unsigned long long ShardChunkVersion; | |
|
| | | typedef shared_ptr<Chunk> ChunkPtr; | |
| | | | |
| | | // key is max for each Chunk or ChunkRange | |
| | | typedef map<BSONObj,ChunkPtr,BSONObjCmp> ChunkMap; | |
| | | typedef map<BSONObj,shared_ptr<ChunkRange>,BSONObjCmp> ChunkRangeMap; | |
| | | | |
| /** | | /** | |
| config.chunks | | config.chunks | |
| { ns : "alleyinsider.fs.chunks" , min : {} , max : {} , server : "lo
calhost:30001" } | | { ns : "alleyinsider.fs.chunks" , min : {} , max : {} , server : "lo
calhost:30001" } | |
| | | | |
| x is in a shard iff | | x is in a shard iff | |
| min <= x < max | | min <= x < max | |
| */ | | */ | |
| class Chunk : public Model , boost::noncopyable { | | class Chunk : public Model , boost::noncopyable { | |
| public: | | public: | |
| | | | |
| skipping to change at line 83 | | skipping to change at line 92 | |
| | | | |
| bool operator==(const Chunk& s) const; | | bool operator==(const Chunk& s) const; | |
| | | | |
| bool operator!=(const Chunk& s) const{ | | bool operator!=(const Chunk& s) const{ | |
| return ! ( *this == s ); | | return ! ( *this == s ); | |
| } | | } | |
| | | | |
| void getFilter( BSONObjBuilder& b ) const; | | void getFilter( BSONObjBuilder& b ) const; | |
| BSONObj getFilter() const{ BSONObjBuilder b; getFilter( b ); return
b.obj(); } | | BSONObj getFilter() const{ BSONObjBuilder b; getFilter( b ); return
b.obj(); } | |
| | | | |
|
| | | // if min/max key is pos/neg infinity | |
| | | bool minIsInf() const; | |
| | | bool maxIsInf() const; | |
| | | | |
| BSONObj pickSplitPoint() const; | | BSONObj pickSplitPoint() const; | |
|
| Chunk * split(); | | ChunkPtr split(); | |
| Chunk * split( const BSONObj& middle ); | | ChunkPtr split( const BSONObj& middle ); | |
| | | | |
| /** | | /** | |
| * @return size of shard in bytes | | * @return size of shard in bytes | |
| * talks to mongod to do this | | * talks to mongod to do this | |
| */ | | */ | |
| long getPhysicalSize() const; | | long getPhysicalSize() const; | |
| | | | |
| long countObjects( const BSONObj& filter = BSONObj() ) const; | | long countObjects( const BSONObj& filter = BSONObj() ) const; | |
| | | | |
| /** | | /** | |
| * if the amount of data written nears the max size of a shard | | * if the amount of data written nears the max size of a shard | |
| * then we check the real size, and if its too big, we split | | * then we check the real size, and if its too big, we split | |
| */ | | */ | |
| bool splitIfShould( long dataWritten ); | | bool splitIfShould( long dataWritten ); | |
| | | | |
| /* | | /* | |
| * moves either this shard or newShard if it makes sense too | | * moves either this shard or newShard if it makes sense too | |
| * @return whether or not a shard was moved | | * @return whether or not a shard was moved | |
| */ | | */ | |
|
| bool moveIfShould( Chunk * newShard = 0 ); | | bool moveIfShould( ChunkPtr newShard = ChunkPtr() ); | |
| | | | |
| bool moveAndCommit( const Shard& to , string& errmsg ); | | bool moveAndCommit( const Shard& to , string& errmsg ); | |
| | | | |
| virtual const char * getNS(){ return "config.chunks"; } | | virtual const char * getNS(){ return "config.chunks"; } | |
| virtual void serialize(BSONObjBuilder& to); | | virtual void serialize(BSONObjBuilder& to); | |
| virtual void unserialize(const BSONObj& from); | | virtual void unserialize(const BSONObj& from); | |
| virtual string modelServer(); | | virtual string modelServer(); | |
| | | | |
| void appendShortVersion( const char * name , BSONObjBuilder& b ); | | void appendShortVersion( const char * name , BSONObjBuilder& b ); | |
| | | | |
| virtual void save( bool check=false ); | | virtual void save( bool check=false ); | |
| | | | |
| void ensureIndex(); | | void ensureIndex(); | |
| | | | |
| void _markModified(); | | void _markModified(); | |
| | | | |
| static int MaxChunkSize; | | static int MaxChunkSize; | |
| | | | |
| static string genID( const string& ns , const BSONObj& min ); | | static string genID( const string& ns , const BSONObj& min ); | |
| | | | |
|
| | | const ChunkManager* getManager() const { return _manager; } | |
| | | | |
| private: | | private: | |
| | | | |
| // main shard info | | // main shard info | |
| | | | |
| ChunkManager * _manager; | | ChunkManager * _manager; | |
| ShardKeyPattern skey() const; | | ShardKeyPattern skey() const; | |
| | | | |
| string _ns; | | string _ns; | |
| BSONObj _min; | | BSONObj _min; | |
| BSONObj _max; | | BSONObj _max; | |
| Shard _shard; | | Shard _shard; | |
| ShardChunkVersion _lastmod; | | ShardChunkVersion _lastmod; | |
| | | | |
| bool _modified; | | bool _modified; | |
| | | | |
| // transient stuff | | // transient stuff | |
| | | | |
| long _dataWritten; | | long _dataWritten; | |
| | | | |
|
| | | ChunkPtr _this; | |
| | | | |
| // methods, etc.. | | // methods, etc.. | |
| | | | |
| void _split( BSONObj& middle ); | | void _split( BSONObj& middle ); | |
| | | | |
| friend class ChunkManager; | | friend class ChunkManager; | |
| friend class ShardObjUnitTest; | | friend class ShardObjUnitTest; | |
| }; | | }; | |
| | | | |
|
| | | class ChunkRange{ | |
| | | public: | |
| | | const ChunkManager* getManager() const{ return _manager; } | |
| | | Shard getShard() const{ return _shard; } | |
| | | | |
| | | const BSONObj& getMin() const { return _min; } | |
| | | const BSONObj& getMax() const { return _max; } | |
| | | | |
| | | // clones of Chunk methods | |
| | | bool contains(const BSONObj& obj) const; | |
| | | void getFilter( BSONObjBuilder& b ) const; | |
| | | BSONObj getFilter() const{ BSONObjBuilder b; getFilter( b ); return b.obj(); } | |
| | | long countObjects( const BSONObj& filter = BSONObj() ) const; | |
| | | | |
| | | ChunkRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end) | |
| | | : _manager(begin->second->getManager()) | |
| | | , _shard(begin->second->getShard()) | |
| | | , _min(begin->second->getMin()) | |
| | | , _max(prior(end)->second->getMax()) | |
| | | { | |
| | | assert( begin != end ); | |
| | | | |
| | | DEV while (begin != end){ | |
| | | assert(begin->second->getManager() == _manager); | |
| | | assert(begin->second->getShard() == _shard); | |
| | | ++begin; | |
| | | } | |
| | | } | |
| | | | |
| | | // Merge min and max (must be adjacent ranges) | |
| | | ChunkRange(const ChunkRange& min, const ChunkRange& max) | |
| | | : _manager(min.getManager()) | |
| | | , _shard(min.getShard()) | |
| | | , _min(min.getMin()) | |
| | | , _max(max.getMax()) | |
| | | { | |
| | | assert(min.getShard() == max.getShard()); | |
| | | assert(min.getManager() == max.getManager()); | |
| | | assert(min.getMax() == max.getMin()); | |
| | | } | |
| | | | |
| | | friend ostream& operator<<(ostream& out, const ChunkRange& cr){ | |
| | | return (out << "ChunkRange(min=" << cr._min << ", max=" << cr._ | |
| | | max << ", shard=" << cr._shard <<")"); | |
| | | } | |
| | | | |
| | | private: | |
| | | const ChunkManager* _manager; | |
| | | const Shard _shard; | |
| | | const BSONObj _min; | |
| | | const BSONObj _max; | |
| | | }; | |
| | | | |
| | | class ChunkRangeManager { | |
| | | public: | |
| | | const ChunkRangeMap& ranges() const { return _ranges; } | |
| | | | |
| | | void clear() { _ranges.clear(); } | |
| | | | |
| | | void reloadAll(const ChunkMap& chunks); | |
| | | void reloadRange(const ChunkMap& chunks, const BSONObj& min, const BSONObj& max); | |
| | | | |
| | | // Slow operation -- wrap with DEV | |
| | | void assertValid() const; | |
| | | | |
| | | ChunkRangeMap::const_iterator upper_bound(const BSONObj& o) const { return _ranges.upper_bound(o); } | |
| | | ChunkRangeMap::const_iterator lower_bound(const BSONObj& o) const { return _ranges.lower_bound(o); } | |
| | | | |
| | | private: | |
| | | // assumes nothing in this range exists in _ranges | |
| | | void _insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end); | |
| | | | |
| | | ChunkRangeMap _ranges; | |
| | | }; | |
| | | | |
| /* config.sharding | | /* config.sharding | |
| { ns: 'alleyinsider.fs.chunks' , | | { ns: 'alleyinsider.fs.chunks' , | |
| key: { ts : 1 } , | | key: { ts : 1 } , | |
| shards: [ { min: 1, max: 100, server: a } , { min: 101, max: 200, server : b } ] | | shards: [ { min: 1, max: 100, server: a } , { min: 101, max: 200, server : b } ] | |
| } | | } | |
| */ | | */ | |
| class ChunkManager { | | class ChunkManager { | |
| public: | | public: | |
| | | | |
| ChunkManager( DBConfig * config , string ns , ShardKeyPattern pattern , bool unique ); | | ChunkManager( DBConfig * config , string ns , ShardKeyPattern pattern , bool unique ); | |
| virtual ~ChunkManager(); | | virtual ~ChunkManager(); | |
| | | | |
|
| string getns(){ | | string getns() const { | |
| return _ns; | | return _ns; | |
| } | | } | |
| | | | |
| int numChunks(){ rwlock lk( _lock , false ); return _chunks.size(); } | | int numChunks(){ rwlock lk( _lock , false ); return _chunks.size(); } | |
|
| Chunk* getChunk( int i ){ rwlock lk( _lock , false ); return _chunks[i]; } | | ChunkPtr getChunk( int i ){ rwlock lk( _lock , false ); return _chunks[i]; } | |
| bool hasShardKey( const BSONObj& obj ); | | bool hasShardKey( const BSONObj& obj ); | |
| | | | |
|
| Chunk& findChunk( const BSONObj& obj , bool retry = false ); | | ChunkPtr findChunk( const BSONObj& obj , bool retry = false ); | |
| Chunk* findChunkOnServer( const Shard& shard ) const; | | ChunkPtr findChunkOnServer( const Shard& shard ) const; | |
| | | | |
| ShardKeyPattern& getShardKey(){ return _key; } | | ShardKeyPattern& getShardKey(){ return _key; } | |
|
| | | const ShardKeyPattern& getShardKey() const { return _key; } | |
| bool isUnique(){ return _unique; } | | bool isUnique(){ return _unique; } | |
| | | | |
| /** | | /** | |
| * makes sure the shard index is on all servers | | * makes sure the shard index is on all servers | |
| */ | | */ | |
| void ensureIndex(); | | void ensureIndex(); | |
| | | | |
| /** | | /** | |
| * @return number of Chunk added to the vector | | * @return number of Chunk added to the vector | |
| */ | | */ | |
|
| int getChunksForQuery( vector<Chunk*>& chunks , const BSONObj& query ); | | int getChunksForQuery( vector<shared_ptr<ChunkRange> >& chunks , const BSONObj& query ); | |
| | | | |
| /** | | /** | |
| * @return number of Shards added to the set | | * @return number of Shards added to the set | |
| */ | | */ | |
| int getShardsForQuery( set<Shard>& shards , const BSONObj& query ); | | int getShardsForQuery( set<Shard>& shards , const BSONObj& query ); | |
| | | | |
| void getAllShards( set<Shard>& all ); | | void getAllShards( set<Shard>& all ); | |
| | | | |
| void save(); | | void save(); | |
| | | | |
| | | | |
| skipping to change at line 211 | | skipping to change at line 303 | |
| ShardChunkVersion getVersion( const Shard& shard ) const; | | ShardChunkVersion getVersion( const Shard& shard ) const; | |
| ShardChunkVersion getVersion() const; | | ShardChunkVersion getVersion() const; | |
| | | | |
| /** | | /** | |
| * this is just an increasing number of how many ChunkManagers we have so we know if something has been updated | | * this is just an increasing number of how many ChunkManagers we have so we know if something has been updated | |
| */ | | */ | |
| unsigned long long getSequenceNumber(){ | | unsigned long long getSequenceNumber(){ | |
| return _sequenceNumber; | | return _sequenceNumber; | |
| } | | } | |
| | | | |
|
| void drop(); | | /** | |
| | | * @param me - so i don't get deleted before i'm done | |
| | | */ | |
| | | void drop( ChunkManagerPtr me ); | |
| | | | |
| private: | | private: | |
| | | | |
| void _reload(); | | void _reload(); | |
| void _load(); | | void _load(); | |
| | | | |
| DBConfig * _config; | | DBConfig * _config; | |
| string _ns; | | string _ns; | |
| ShardKeyPattern _key; | | ShardKeyPattern _key; | |
| bool _unique; | | bool _unique; | |
| | | | |
|
| vector<Chunk*> _chunks; | | vector<ChunkPtr> _chunks; | |
| map<string,unsigned long long> _maxMarkers; | | map<string,unsigned long long> _maxMarkers; | |
| | | | |
|
| typedef map<BSONObj,Chunk*,BSONObjCmp> ChunkMap; | | ChunkMap _chunkMap; | |
| ChunkMap _chunkMap; // max -> Chunk | | ChunkRangeManager _chunkRanges; | |
| | | | |
| unsigned long long _sequenceNumber; | | unsigned long long _sequenceNumber; | |
| | | | |
| RWLock _lock; | | RWLock _lock; | |
| | | | |
|
| | | // This should only be called from Chunk after it has been migrated | |
| | | void _migrationNotification(Chunk* c); | |
| | | | |
| friend class Chunk; | | friend class Chunk; | |
|
| | | friend class ChunkRangeManager; // only needed for CRM::assertValid() | |
| static AtomicUInt NextSequenceNumber; | | static AtomicUInt NextSequenceNumber; | |
| | | | |
|
| | | bool _isValid() const; | |
| | | void _printChunks() const; | |
| | | | |
| /** | | /** | |
| * @return number of Chunk matching the query or -1 for all chunks. | | * @return number of Chunk matching the query or -1 for all chunks. | |
| */ | | */ | |
|
| int _getChunksForQuery( vector<Chunk*>& chunks , const BSONObj& query ); | | int _getChunksForQuery( vector<shared_ptr<ChunkRange> >& chunks , const BSONObj& query ); | |
| }; | | }; | |
| | | | |
| // like BSONObjCmp. for use as an STL comparison functor | | // like BSONObjCmp. for use as an STL comparison functor | |
| // key-order in "order" argument must match key-order in shardkey | | // key-order in "order" argument must match key-order in shardkey | |
| class ChunkCmp { | | class ChunkCmp { | |
| public: | | public: | |
| ChunkCmp( const BSONObj &order = BSONObj() ) : _cmp( order ) {} | | ChunkCmp( const BSONObj &order = BSONObj() ) : _cmp( order ) {} | |
| bool operator()( const Chunk &l, const Chunk &r ) const { | | bool operator()( const Chunk &l, const Chunk &r ) const { | |
| return _cmp(l.getMin(), r.getMin()); | | return _cmp(l.getMin(), r.getMin()); | |
| } | | } | |
|
| | | bool operator()( const ptr<Chunk> l, const ptr<Chunk> r ) const { | |
| | | return operator()(*l, *r); | |
| | | } | |
| | | | |
|
| bool operator()( const Chunk *l, const Chunk *r ) const { | | // Also support ChunkRanges | |
| | | bool operator()( const ChunkRange &l, const ChunkRange &r ) const { | |
| | | return _cmp(l.getMin(), r.getMin()); | |
| | | } | |
| | | bool operator()( const shared_ptr<ChunkRange> l, const shared_ptr<ChunkRange> r ) const { | |
| return operator()(*l, *r); | | return operator()(*l, *r); | |
| } | | } | |
| private: | | private: | |
| BSONObjCmp _cmp; | | BSONObjCmp _cmp; | |
| }; | | }; | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
End of changes. 24 change blocks. |
| 14 lines changed or deleted | | 131 lines changed or added | |
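A standalone sketch (not MongoDB code) of the lookup idea behind the ChunkMap/ChunkRangeMap typedefs above, which are keyed by each chunk's max: because the key is the exclusive upper bound, map::upper_bound(x) lands on the entry whose [min, max) range contains x. The Range struct and shard names below are invented for illustration.

    #include <cassert>
    #include <map>
    #include <string>

    struct Range { int min; int max; std::string shard; };   // stand-in for a Chunk

    int main() {
        std::map<int, Range> byMax;                  // keyed by each range's max
        Range a = { 0, 100, "shard-a" };
        Range b = { 100, 200, "shard-b" };
        byMax[a.max] = a;
        byMax[b.max] = b;

        // 150 falls in [100, 200), so upper_bound(150) finds the shard-b entry.
        std::map<int, Range>::const_iterator it = byMax.upper_bound(150);
        assert( it != byMax.end() );
        assert( it->second.shard == "shard-b" );
        return 0;
    }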
|
| clientcursor.h | | clientcursor.h | |
| | | | |
| skipping to change at line 42 | | skipping to change at line 42 | |
| #include "diskloc.h" | | #include "diskloc.h" | |
| #include "dbhelpers.h" | | #include "dbhelpers.h" | |
| #include "matcher.h" | | #include "matcher.h" | |
| #include "../client/dbclient.h" | | #include "../client/dbclient.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| typedef long long CursorId; /* passed to the client so it can send back on getMore */ | | typedef long long CursorId; /* passed to the client so it can send back on getMore */ | |
| class Cursor; /* internal server cursor base class */ | | class Cursor; /* internal server cursor base class */ | |
| class ClientCursor; | | class ClientCursor; | |
|
| | | class ParsedQuery; | |
| | | | |
| /* todo: make this map be per connection. this will prevent cursor hijacking security attacks perhaps. | | /* todo: make this map be per connection. this will prevent cursor hijacking security attacks perhaps. | |
| */ | | */ | |
| typedef map<CursorId, ClientCursor*> CCById; | | typedef map<CursorId, ClientCursor*> CCById; | |
| | | | |
| typedef multimap<DiskLoc, ClientCursor*> CCByLoc; | | typedef multimap<DiskLoc, ClientCursor*> CCByLoc; | |
| | | | |
| extern BSONObj id_obj; | | extern BSONObj id_obj; | |
| | | | |
| class ClientCursor { | | class ClientCursor { | |
| | | | |
| skipping to change at line 105 | | skipping to change at line 106 | |
| _c->_pinValue += 100; | | _c->_pinValue += 100; | |
| } | | } | |
| } | | } | |
| ~Pointer() { | | ~Pointer() { | |
| release(); | | release(); | |
| } | | } | |
| }; | | }; | |
| | | | |
| /*const*/ CursorId cursorid; | | /*const*/ CursorId cursorid; | |
| string ns; | | string ns; | |
|
| auto_ptr<CoveredIndexMatcher> matcher; | | shared_ptr<Cursor> c; | |
| auto_ptr<Cursor> c; | | | |
| int pos; // # objects into the cursor so far | | int pos; // # objects into the cursor so far | |
| BSONObj query; | | BSONObj query; | |
| int _queryOptions; | | int _queryOptions; | |
| OpTime _slaveReadTill; | | OpTime _slaveReadTill; | |
| | | | |
|
| ClientCursor(int queryOptions, auto_ptr<Cursor>& _c, const char *_ns) : | | ClientCursor(int queryOptions, shared_ptr<Cursor>& _c, const char *_ns) : | |
| _idleAgeMillis(0), _pinValue(0), | | _idleAgeMillis(0), _pinValue(0), | |
| _doingDeletes(false), | | _doingDeletes(false), | |
| ns(_ns), c(_c), | | ns(_ns), c(_c), | |
| pos(0), _queryOptions(queryOptions) | | pos(0), _queryOptions(queryOptions) | |
| { | | { | |
| if( queryOptions & QueryOption_NoCursorTimeout ) | | if( queryOptions & QueryOption_NoCursorTimeout ) | |
| noTimeout(); | | noTimeout(); | |
| recursive_scoped_lock lock(ccmutex); | | recursive_scoped_lock lock(ccmutex); | |
| cursorid = allocCursorId_inlock(); | | cursorid = allocCursorId_inlock(); | |
| clientCursorsById.insert( make_pair(cursorid, this) ); | | clientCursorsById.insert( make_pair(cursorid, this) ); | |
| } | | } | |
| ~ClientCursor(); | | ~ClientCursor(); | |
| | | | |
| DiskLoc lastLoc() const { | | DiskLoc lastLoc() const { | |
| return _lastLoc; | | return _lastLoc; | |
| } | | } | |
| | | | |
|
| | | shared_ptr< ParsedQuery > pq; | |
| shared_ptr< FieldMatcher > fields; // which fields query wants returned | | shared_ptr< FieldMatcher > fields; // which fields query wants returned | |
| Message originalMessage; // this is effectively an auto ptr for data the matcher points to | | Message originalMessage; // this is effectively an auto ptr for data the matcher points to | |
| | | | |
| /* Get rid of cursors for namespaces that begin with nsprefix. | | /* Get rid of cursors for namespaces that begin with nsprefix. | |
| Used by drop, dropIndexes, dropDatabase. | | Used by drop, dropIndexes, dropDatabase. | |
| */ | | */ | |
| static void invalidate(const char *nsPrefix); | | static void invalidate(const char *nsPrefix); | |
| | | | |
| /** | | /** | |
| * do a dbtemprelease | | * do a dbtemprelease | |
| * note: caller should check matcher.docMatcher().atomic() first and not yield if atomic - | | * note: caller should check matcher.docMatcher().atomic() first and not yield if atomic - | |
| * we don't do herein as this->matcher (above) is only initialized for true queries/getmore. | | * we don't do herein as this->matcher (above) is only initialized for true queries/getmore. | |
| * (ie not set for remote/update) | | * (ie not set for remote/update) | |
| * @return if the cursor is still valid. | | * @return if the cursor is still valid. | |
| * if false is returned, then this ClientCursor should be considered deleted - | | * if false is returned, then this ClientCursor should be considered deleted - | |
| * in fact, the whole database could be gone. | | * in fact, the whole database could be gone. | |
| */ | | */ | |
| bool yield(); | | bool yield(); | |
| | | | |
|
| struct YieldLock { | | struct YieldLock : boost::noncopyable { | |
| YieldLock( ClientCursor * cc ) | | explicit YieldLock( ptr<ClientCursor> cc ) | |
| : _cc( cc ) , _id( cc->cursorid ) , _doingDeletes( cc->_doingDeletes ) { | | : _cc( cc ) , _id( cc->cursorid ) , _doingDeletes( cc->_doingDeletes ) { | |
| cc->updateLocation(); | | cc->updateLocation(); | |
|
| _unlock = new dbtempreleasecond(); | | _unlock.reset(new dbtempreleasecond()); | |
| } | | } | |
| ~YieldLock(){ | | ~YieldLock(){ | |
| assert( ! _unlock ); | | assert( ! _unlock ); | |
| } | | } | |
| | | | |
| bool stillOk(){ | | bool stillOk(){ | |
|
| delete _unlock; | | relock(); | |
| _unlock = 0; | | | |
| | | | |
| if ( ClientCursor::find( _id , false ) == 0 ){ | | if ( ClientCursor::find( _id , false ) == 0 ){ | |
| // i was deleted | | // i was deleted | |
| return false; | | return false; | |
| } | | } | |
| | | | |
| _cc->_doingDeletes = _doingDeletes; | | _cc->_doingDeletes = _doingDeletes; | |
| return true; | | return true; | |
| } | | } | |
| | | | |
|
| | | void relock(){ | |
| | | _unlock.reset(); | |
| | | } | |
| | | | |
| | | private: | |
| ClientCursor * _cc; | | ClientCursor * _cc; | |
| CursorId _id; | | CursorId _id; | |
| bool _doingDeletes; | | bool _doingDeletes; | |
| | | | |
|
| dbtempreleasecond * _unlock; | | scoped_ptr<dbtempreleasecond> _unlock; | |
| | | | |
| }; | | }; | |
| | | | |
|
| YieldLock yieldHold(){ | | | |
| return YieldLock( this ); | | | |
| } | | | |
| | | | |
| // --- some pass through helpers for Cursor --- | | // --- some pass through helpers for Cursor --- | |
| | | | |
| BSONObj indexKeyPattern() { | | BSONObj indexKeyPattern() { | |
| return c->indexKeyPattern(); | | return c->indexKeyPattern(); | |
| } | | } | |
| | | | |
| bool ok(){ | | bool ok(){ | |
| return c->ok(); | | return c->ok(); | |
| } | | } | |
| | | | |
| bool advance(){ | | bool advance(){ | |
| return c->advance(); | | return c->advance(); | |
| } | | } | |
| | | | |
| bool currentMatches(){ | | bool currentMatches(){ | |
|
| if ( ! matcher.get() ) | | if ( ! c->matcher() ) | |
| return true; | | return true; | |
|
| return matcher->matchesCurrent( c.get() ); | | return c->matcher()->matchesCurrent( c.get() ); | |
| } | | } | |
| | | | |
| BSONObj current(){ | | BSONObj current(){ | |
| return c->current(); | | return c->current(); | |
| } | | } | |
| | | | |
| private: | | private: | |
| void setLastLoc_inlock(DiskLoc); | | void setLastLoc_inlock(DiskLoc); | |
| | | | |
| static ClientCursor* find_inlock(CursorId id, bool warn = true) { | | static ClientCursor* find_inlock(CursorId id, bool warn = true) { | |
| | | | |
| skipping to change at line 247 | | skipping to change at line 248 | |
| } | | } | |
| return false; | | return false; | |
| } | | } | |
| | | | |
| /* call when cursor's location changes so that we can update the | | /* call when cursor's location changes so that we can update the | |
| cursorsbylocation map. if you are locked and internally iterating, only | | cursorsbylocation map. if you are locked and internally iterating, only | |
| need to call when you are ready to "unlock". | | need to call when you are ready to "unlock". | |
| */ | | */ | |
| void updateLocation(); | | void updateLocation(); | |
| | | | |
|
| void cleanupByLocation(DiskLoc loc); | | | |
| | | | |
| void mayUpgradeStorage() { | | void mayUpgradeStorage() { | |
| /* if ( !ids_.get() ) | | /* if ( !ids_.get() ) | |
| return; | | return; | |
| stringstream ss; | | stringstream ss; | |
| ss << ns << "." << cursorid; | | ss << ns << "." << cursorid; | |
| ids_->mayUpgradeStorage( ss.str() );*/ | | ids_->mayUpgradeStorage( ss.str() );*/ | |
| } | | } | |
| | | | |
| /** | | /** | |
| * @param millis amount of idle passed time since last call | | * @param millis amount of idle passed time since last call | |
| | | | |
| skipping to change at line 293 | | skipping to change at line 292 | |
| | | | |
| static unsigned byLocSize(); // just for diagnostics | | static unsigned byLocSize(); // just for diagnostics | |
| | | | |
| static void informAboutToDeleteBucket(const DiskLoc& b); | | static void informAboutToDeleteBucket(const DiskLoc& b); | |
| static void aboutToDelete(const DiskLoc& dl); | | static void aboutToDelete(const DiskLoc& dl); | |
| }; | | }; | |
| | | | |
| class ClientCursorMonitor : public BackgroundJob { | | class ClientCursorMonitor : public BackgroundJob { | |
| public: | | public: | |
| void run(); | | void run(); | |
|
| | | string name() { return "ClientCursorMonitor"; } | |
| }; | | }; | |
| | | | |
| extern ClientCursorMonitor clientCursorMonitor; | | extern ClientCursorMonitor clientCursorMonitor; | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
End of changes. 14 change blocks. |
| 17 lines changed or deleted | | 17 lines changed or added | |
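A standalone sketch (not MongoDB code) of the YieldLock pattern shown above: an RAII helper releases the lock while the cursor yields, and stillOk() re-acquires it and then checks whether the cursor survived before the caller touches it again. TempRelease, YieldGuard, and the g_cursors registry are invented stand-ins for dbtempreleasecond, YieldLock, and ClientCursor::find().

    #include <iostream>
    #include <map>

    struct Cursor { long long id; };

    static std::map<long long, Cursor*> g_cursors;   // stand-in for the cursor map

    struct TempRelease {                             // stand-in for dbtempreleasecond
        TempRelease()  { std::cout << "lock released" << std::endl; }
        ~TempRelease() { std::cout << "lock re-acquired" << std::endl; }
    };

    class YieldGuard {
    public:
        explicit YieldGuard(Cursor* c) : _id(c->id), _unlock(new TempRelease()) {}
        ~YieldGuard() { delete _unlock; }            // harmless if already relocked
        bool stillOk() {
            delete _unlock;                          // TempRelease dtor re-acquires the lock
            _unlock = 0;
            return g_cursors.count(_id) != 0;        // cursor may have been deleted meanwhile
        }
    private:
        long long _id;
        TempRelease* _unlock;
    };

    int main() {
        Cursor c = { 42 };
        g_cursors[c.id] = &c;
        YieldGuard guard(&c);
        // ... other work may run here while the lock is released ...
        if ( guard.stillOk() )
            std::cout << "cursor " << c.id << " is still valid" << std::endl;
        return 0;
    }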
|
| cursor.h | | cursor.h | |
| | | | |
| skipping to change at line 23 | | skipping to change at line 23 | |
| * You should have received a copy of the GNU Affero General Public License | | * You should have received a copy of the GNU Affero General Public License | |
| * along with this program. If not, see <http://www.gnu.org/licenses/>. | | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include "../pch.h" | | #include "../pch.h" | |
| | | | |
| #include "jsobj.h" | | #include "jsobj.h" | |
| #include "diskloc.h" | | #include "diskloc.h" | |
|
| | | #include "matcher.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| class Record; | | class Record; | |
|
| | | class CoveredIndexMatcher; | |
| | | | |
| /* Query cursors, base class. This is for our internal cursors. "Clie
ntCursor" is a separate | | /* Query cursors, base class. This is for our internal cursors. "Clie
ntCursor" is a separate | |
| concept and is for the user's cursor. | | concept and is for the user's cursor. | |
| | | | |
| WARNING concurrency: the vfunctions below are called back from within a | | WARNING concurrency: the vfunctions below are called back from within a | |
| ClientCursor::ccmutex. Don't cause a deadlock, you've been warned. | | ClientCursor::ccmutex. Don't cause a deadlock, you've been warned. | |
| */ | | */ | |
|
| class Cursor { | | class Cursor : boost::noncopyable { | |
| public: | | public: | |
| virtual ~Cursor() {} | | virtual ~Cursor() {} | |
| virtual bool ok() = 0; | | virtual bool ok() = 0; | |
|
| bool eof() { | | bool eof() { return !ok(); } | |
| return !ok(); | | | |
| } | | | |
| virtual Record* _current() = 0; | | virtual Record* _current() = 0; | |
| virtual BSONObj current() = 0; | | virtual BSONObj current() = 0; | |
| virtual DiskLoc currLoc() = 0; | | virtual DiskLoc currLoc() = 0; | |
| virtual bool advance() = 0; /*true=ok*/ | | virtual bool advance() = 0; /*true=ok*/ | |
| virtual BSONObj currKey() const { return BSONObj(); } | | virtual BSONObj currKey() const { return BSONObj(); } | |
| | | | |
| // DiskLoc the cursor requires for continued operation. Before this | | // DiskLoc the cursor requires for continued operation. Before this | |
| // DiskLoc is deleted, the cursor must be incremented or destroyed. | | // DiskLoc is deleted, the cursor must be incremented or destroyed. | |
| virtual DiskLoc refLoc() = 0; | | virtual DiskLoc refLoc() = 0; | |
| | | | |
| | | | |
| skipping to change at line 82 | | skipping to change at line 82 | |
| /* called after every query block is iterated -- i.e. between getMore() blocks | | /* called after every query block is iterated -- i.e. between getMore() blocks | |
| so you can note where we are, if necessary. | | so you can note where we are, if necessary. | |
| */ | | */ | |
| virtual void noteLocation() { } | | virtual void noteLocation() { } | |
| | | | |
| /* called before query getmore block is iterated */ | | /* called before query getmore block is iterated */ | |
| virtual void checkLocation() { } | | virtual void checkLocation() { } | |
| | | | |
| virtual bool supportGetMore() = 0; | | virtual bool supportGetMore() = 0; | |
| | | | |
|
| virtual string toString() { | | virtual string toString() { return "abstract?"; } | |
| return "abstract?"; | | | |
| } | | | |
| | | | |
| /* used for multikey index traversal to avoid sending back dups. see Matcher::matches(). | | /* used for multikey index traversal to avoid sending back dups. see Matcher::matches(). | |
| if a multikey index traversal: | | if a multikey index traversal: | |
| if loc has already been sent, returns true. | | if loc has already been sent, returns true. | |
| otherwise, marks loc as sent. | | otherwise, marks loc as sent. | |
| @param deep - match was against an array, so we know it is multikey. this is legacy and kept | | @param deep - match was against an array, so we know it is multikey. this is legacy and kept | |
| for backwards datafile compatibility. 'deep' can be eliminated next time we | | for backwards datafile compatibility. 'deep' can be eliminated next time we | |
| force a data file conversion. 7Jul09 | | force a data file conversion. 7Jul09 | |
| */ | | */ | |
| virtual bool getsetdup(DiskLoc loc) = 0; | | virtual bool getsetdup(DiskLoc loc) = 0; | |
| | | | |
|
| virtual BSONObj prettyIndexBounds() const { return BSONObj(); } | | virtual BSONArray prettyIndexBounds() const { return BSONArray(); } | |
| | | | |
| virtual bool capped() const { return false; } | | virtual bool capped() const { return false; } | |
| | | | |
|
| | | // The implementation may return different matchers depending on the | |
| | | // position of the cursor. If matcher() is nonzero at the start, | |
| | | // matcher() should be checked each time advance() is called. | |
| | | virtual CoveredIndexMatcher *matcher() const { return 0; } | |
| | | | |
| | | // A convenience function for setting the value of matcher() manually | |
| | | // so it may be accessed later. Implementations which must generate | |
| | | // their own matcher() should assert here. | |
| | | virtual void setMatcher( auto_ptr< CoveredIndexMatcher > matcher ) | |
| | | { | |
| | | massert( 13285, "manual matcher config not allowed", false ); | |
| | | } | |
| }; | | }; | |
| | | | |
| // strategy object implementing direction of traversal. | | // strategy object implementing direction of traversal. | |
| class AdvanceStrategy { | | class AdvanceStrategy { | |
| public: | | public: | |
| virtual ~AdvanceStrategy() { } | | virtual ~AdvanceStrategy() { } | |
| virtual DiskLoc next( const DiskLoc &prev ) const = 0; | | virtual DiskLoc next( const DiskLoc &prev ) const = 0; | |
| }; | | }; | |
| | | | |
| const AdvanceStrategy *forward(); | | const AdvanceStrategy *forward(); | |
| const AdvanceStrategy *reverse(); | | const AdvanceStrategy *reverse(); | |
| | | | |
| /* table-scan style cursor */ | | /* table-scan style cursor */ | |
| class BasicCursor : public Cursor { | | class BasicCursor : public Cursor { | |
| protected: | | protected: | |
| DiskLoc curr, last; | | DiskLoc curr, last; | |
| const AdvanceStrategy *s; | | const AdvanceStrategy *s; | |
| | | | |
| private: | | private: | |
| bool tailable_; | | bool tailable_; | |
|
| | | auto_ptr< CoveredIndexMatcher > _matcher; | |
| void init() { | | void init() { | |
| tailable_ = false; | | tailable_ = false; | |
| } | | } | |
| public: | | public: | |
| bool ok() { | | bool ok() { | |
| return !curr.isNull(); | | return !curr.isNull(); | |
| } | | } | |
| Record* _current() { | | Record* _current() { | |
| assert( ok() ); | | assert( ok() ); | |
| return curr.rec(); | | return curr.rec(); | |
| | | | |
| skipping to change at line 164 | | skipping to change at line 174 | |
| virtual void setTailable() { | | virtual void setTailable() { | |
| if ( !curr.isNull() || !last.isNull() ) | | if ( !curr.isNull() || !last.isNull() ) | |
| tailable_ = true; | | tailable_ = true; | |
| } | | } | |
| virtual bool tailable() { | | virtual bool tailable() { | |
| return tailable_; | | return tailable_; | |
| } | | } | |
| virtual bool getsetdup(DiskLoc loc) { return false; } | | virtual bool getsetdup(DiskLoc loc) { return false; } | |
| | | | |
| virtual bool supportGetMore() { return true; } | | virtual bool supportGetMore() { return true; } | |
|
| | | | |
| | | virtual CoveredIndexMatcher *matcher() const { return _matcher.get( | |
| | | ); } | |
| | | | |
| | | virtual void setMatcher( auto_ptr< CoveredIndexMatcher > matcher ) | |
| | | { | |
| | | _matcher = matcher; | |
| | | } | |
| | | | |
| }; | | }; | |
| | | | |
| /* used for order { $natural: -1 } */ | | /* used for order { $natural: -1 } */ | |
| class ReverseCursor : public BasicCursor { | | class ReverseCursor : public BasicCursor { | |
| public: | | public: | |
| ReverseCursor(DiskLoc dl) : BasicCursor( dl, reverse() ) { } | | ReverseCursor(DiskLoc dl) : BasicCursor( dl, reverse() ) { } | |
| ReverseCursor() : BasicCursor( reverse() ) { } | | ReverseCursor() : BasicCursor( reverse() ) { } | |
| virtual string toString() { | | virtual string toString() { | |
| return "ReverseCursor"; | | return "ReverseCursor"; | |
| } | | } | |
| | | | |
End of changes. 9 change blocks. |
| 8 lines changed or deleted | | 30 lines changed or added | |
|
| goodies.h | | goodies.h | |
|
| // goodies.h | | // @file goodies.h | |
| // miscellaneous junk | | // miscellaneous junk | |
| | | | |
| /* Copyright 2009 10gen Inc. | | /* Copyright 2009 10gen Inc. | |
| * | | * | |
| * Licensed under the Apache License, Version 2.0 (the "License"); | | * Licensed under the Apache License, Version 2.0 (the "License"); | |
| * you may not use this file except in compliance with the License. | | * you may not use this file except in compliance with the License. | |
| * You may obtain a copy of the License at | | * You may obtain a copy of the License at | |
| * | | * | |
| * http://www.apache.org/licenses/LICENSE-2.0 | | * http://www.apache.org/licenses/LICENSE-2.0 | |
| * | | * | |
| * Unless required by applicable law or agreed to in writing, software | | * Unless required by applicable law or agreed to in writing, software | |
| * distributed under the License is distributed on an "AS IS" BASIS, | | * distributed under the License is distributed on an "AS IS" BASIS, | |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | |
| * See the License for the specific language governing permissions and | | * See the License for the specific language governing permissions and | |
| * limitations under the License. | | * limitations under the License. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include "../bson/util/misc.h" | | #include "../bson/util/misc.h" | |
|
| | | #include "concurrency/mutex.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
|
| | | void setThreadName(const char * name); | |
| | | | |
| | | template<class T> | |
| | | inline string ToString(const T& t) { | |
| | | stringstream s; | |
| | | s << t; | |
| | | return s.str(); | |
| | | } | |
| | | | |
| #if !defined(_WIN32) && !defined(NOEXECINFO) && !defined(__freebsd__) && !d
efined(__sun__) | | #if !defined(_WIN32) && !defined(NOEXECINFO) && !defined(__freebsd__) && !d
efined(__sun__) | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
| #include <pthread.h> | | #include <pthread.h> | |
| #include <execinfo.h> | | #include <execinfo.h> | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| inline pthread_t GetCurrentThreadId() { | | inline pthread_t GetCurrentThreadId() { | |
| | | | |
| skipping to change at line 138 | | skipping to change at line 148 | |
| bool operator<=(WrappingInt r) { | | bool operator<=(WrappingInt r) { | |
| // platform dependent | | // platform dependent | |
| int df = (r.x - x); | | int df = (r.x - x); | |
| return df >= 0; | | return df >= 0; | |
| } | | } | |
| bool operator>(WrappingInt r) { | | bool operator>(WrappingInt r) { | |
| return !(r<=*this); | | return !(r<=*this); | |
| } | | } | |
| }; | | }; | |
| | | | |
|
| } // namespace mongo | | | |
| | | | |
| #include <ctime> | | | |
| | | | |
| namespace mongo { | | | |
| | | | |
| inline void time_t_to_Struct(time_t t, struct tm * buf , bool local = f
alse ) { | | inline void time_t_to_Struct(time_t t, struct tm * buf , bool local = f
alse ) { | |
| #if defined(_WIN32) | | #if defined(_WIN32) | |
| if ( local ) | | if ( local ) | |
| localtime_s( buf , &t ); | | localtime_s( buf , &t ); | |
| else | | else | |
| gmtime_s(buf, &t); | | gmtime_s(buf, &t); | |
| #else | | #else | |
| if ( local ) | | if ( local ) | |
| localtime_r(&t, buf); | | localtime_r(&t, buf); | |
| else | | else | |
| | | | |
| skipping to change at line 186 | | skipping to change at line 190 | |
| #define MONGO_ctime _ctime_is_not_threadsafe_ | | #define MONGO_ctime _ctime_is_not_threadsafe_ | |
| #define ctime MONGO_ctime | | #define ctime MONGO_ctime | |
| | | | |
| #if defined(_WIN32) || defined(__sunos__) | | #if defined(_WIN32) || defined(__sunos__) | |
| inline void sleepsecs(int s) { | | inline void sleepsecs(int s) { | |
| boost::xtime xt; | | boost::xtime xt; | |
| boost::xtime_get(&xt, boost::TIME_UTC); | | boost::xtime_get(&xt, boost::TIME_UTC); | |
| xt.sec += s; | | xt.sec += s; | |
| boost::thread::sleep(xt); | | boost::thread::sleep(xt); | |
| } | | } | |
|
| inline void sleepmillis(int s) { | | inline void sleepmillis(long long s) { | |
| boost::xtime xt; | | boost::xtime xt; | |
| boost::xtime_get(&xt, boost::TIME_UTC); | | boost::xtime_get(&xt, boost::TIME_UTC); | |
|
| xt.sec += ( s / 1000 ); | | xt.sec += (int)( s / 1000 ); | |
| xt.nsec += ( s % 1000 ) * 1000000; | | xt.nsec += (int)(( s % 1000 ) * 1000000); | |
| if ( xt.nsec >= 1000000000 ) { | | if ( xt.nsec >= 1000000000 ) { | |
| xt.nsec -= 1000000000; | | xt.nsec -= 1000000000; | |
| xt.sec++; | | xt.sec++; | |
| } | | } | |
| boost::thread::sleep(xt); | | boost::thread::sleep(xt); | |
| } | | } | |
|
| inline void sleepmicros(int s) { | | inline void sleepmicros(long long s) { | |
| if ( s <= 0 ) | | if ( s <= 0 ) | |
| return; | | return; | |
| boost::xtime xt; | | boost::xtime xt; | |
| boost::xtime_get(&xt, boost::TIME_UTC); | | boost::xtime_get(&xt, boost::TIME_UTC); | |
|
| xt.sec += ( s / 1000000 ); | | xt.sec += (int)( s / 1000000 ); | |
| xt.nsec += ( s % 1000000 ) * 1000; | | xt.nsec += (int)(( s % 1000000 ) * 1000); | |
| if ( xt.nsec >= 1000000000 ) { | | if ( xt.nsec >= 1000000000 ) { | |
| xt.nsec -= 1000000000; | | xt.nsec -= 1000000000; | |
| xt.sec++; | | xt.sec++; | |
| } | | } | |
| boost::thread::sleep(xt); | | boost::thread::sleep(xt); | |
| } | | } | |
| #else | | #else | |
| inline void sleepsecs(int s) { | | inline void sleepsecs(int s) { | |
| struct timespec t; | | struct timespec t; | |
| t.tv_sec = s; | | t.tv_sec = s; | |
| t.tv_nsec = 0; | | t.tv_nsec = 0; | |
| if ( nanosleep( &t , 0 ) ){ | | if ( nanosleep( &t , 0 ) ){ | |
| cout << "nanosleep failed" << endl; | | cout << "nanosleep failed" << endl; | |
| } | | } | |
| } | | } | |
|
| inline void sleepmicros(int s) { | | inline void sleepmicros(long long s) { | |
| if ( s <= 0 ) | | if ( s <= 0 ) | |
| return; | | return; | |
| struct timespec t; | | struct timespec t; | |
| t.tv_sec = (int)(s / 1000000); | | t.tv_sec = (int)(s / 1000000); | |
| t.tv_nsec = 1000 * ( s % 1000000 ); | | t.tv_nsec = 1000 * ( s % 1000000 ); | |
| struct timespec out; | | struct timespec out; | |
| if ( nanosleep( &t , &out ) ){ | | if ( nanosleep( &t , &out ) ){ | |
| cout << "nanosleep failed" << endl; | | cout << "nanosleep failed" << endl; | |
| } | | } | |
| } | | } | |
|
| inline void sleepmillis(int s) { | | inline void sleepmillis(long long s) { | |
| sleepmicros( s * 1000 ); | | sleepmicros( s * 1000 ); | |
| } | | } | |
| #endif | | #endif | |
| | | | |
| // note this wraps | | // note this wraps | |
| inline int tdiff(unsigned told, unsigned tnew) { | | inline int tdiff(unsigned told, unsigned tnew) { | |
| return WrappingInt::diff(tnew, told); | | return WrappingInt::diff(tnew, told); | |
| } | | } | |
| inline unsigned curTimeMillis() { | | inline unsigned curTimeMillis() { | |
| boost::xtime xt; | | boost::xtime xt; | |
| | | | |
| skipping to change at line 268 | | skipping to change at line 272 | |
| } | | } | |
| | | | |
| // measures up to 1024 seconds. or, 512 seconds with tdiff that is... | | // measures up to 1024 seconds. or, 512 seconds with tdiff that is... | |
| inline unsigned curTimeMicros() { | | inline unsigned curTimeMicros() { | |
| boost::xtime xt; | | boost::xtime xt; | |
| boost::xtime_get(&xt, boost::TIME_UTC); | | boost::xtime_get(&xt, boost::TIME_UTC); | |
| unsigned t = xt.nsec / 1000; | | unsigned t = xt.nsec / 1000; | |
| unsigned secs = xt.sec % 1024; | | unsigned secs = xt.sec % 1024; | |
| return secs*1000000 + t; | | return secs*1000000 + t; | |
| } | | } | |
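Editor's note: because these counters deliberately wrap, elapsed time should go through tdiff() rather than plain subtraction. An illustrative pattern (doSomething() is a placeholder):

    unsigned start = curTimeMicros();
    doSomething();                                   // placeholder for the work being timed
    int elapsed = tdiff( start, curTimeMicros() );   // wrap-aware difference in microseconds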
|
| using namespace boost; | | | |
| | | | |
| extern bool __destroyingStatics; | | | |
| | | | |
| // If you create a local static instance of this class, that instance w | | | |
| ill be destroyed | | | |
| // before all global static objects are destroyed, so __destroyingStati | | | |
| cs will be set | | | |
| // to true before the global static variables are destroyed. | | | |
| class StaticObserver : boost::noncopyable { | | | |
| public: | | | |
| ~StaticObserver() { __destroyingStatics = true; } | | | |
| }; | | | |
| | | | |
| // On pthread systems, it is an error to destroy a mutex while held. S | | | |
| tatic global | | | |
| // mutexes may be held upon shutdown in our implementation, and this wa | | | |
| y we avoid | | | |
| // destroying them. | | | |
| class mutex : boost::noncopyable { | | | |
| public: | | | |
| mutex() { _m = new boost::mutex(); } | | | |
| ~mutex() { | | | |
| if( !__destroyingStatics ) { | | | |
| delete _m; | | | |
| } | | | |
| } | | | |
| class scoped_lock : boost::noncopyable { | | | |
| public: | | | |
| scoped_lock( mongo::mutex &m ) : _l( m.boost() ) {} | | | |
| boost::mutex::scoped_lock &boost() { return _l; } | | | |
| private: | | | |
| boost::mutex::scoped_lock _l; | | | |
| }; | | | |
| private: | | | |
| boost::mutex &boost() { return *_m; } | | | |
| boost::mutex *_m; | | | |
| }; | | | |
| | | | |
| typedef mongo::mutex::scoped_lock scoped_lock; | | | |
| typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock; | | | |
| | | | |
| // simple scoped timer | | // simple scoped timer | |
| class Timer { | | class Timer { | |
| public: | | public: | |
| Timer() { | | Timer() { | |
| reset(); | | reset(); | |
| } | | } | |
| Timer( unsigned long long start ) { | | Timer( unsigned long long start ) { | |
| old = start; | | old = start; | |
| } | | } | |
| | | | |
| skipping to change at line 368 | | skipping to change at line 335 | |
| } | | } | |
| inline bool startsWith(string s, string p) { return startsWith(s.c_str(
), p.c_str()); } | | inline bool startsWith(string s, string p) { return startsWith(s.c_str(
), p.c_str()); } | |
| | | | |
| inline bool endsWith(const char *p, const char *suffix) { | | inline bool endsWith(const char *p, const char *suffix) { | |
| size_t a = strlen(p); | | size_t a = strlen(p); | |
| size_t b = strlen(suffix); | | size_t b = strlen(suffix); | |
| if ( b > a ) return false; | | if ( b > a ) return false; | |
| return strcmp(p + a - b, suffix) == 0; | | return strcmp(p + a - b, suffix) == 0; | |
| } | | } | |
| | | | |
|
| } // namespace mongo | | | |
| | | | |
| #include "boost/detail/endian.hpp" | | | |
| | | | |
| namespace mongo { | | | |
| | | | |
| inline unsigned long swapEndian(unsigned long x) { | | inline unsigned long swapEndian(unsigned long x) { | |
| return | | return | |
| ((x & 0xff) << 24) | | | ((x & 0xff) << 24) | | |
| ((x & 0xff00) << 8) | | | ((x & 0xff00) << 8) | | |
| ((x & 0xff0000) >> 8) | | | ((x & 0xff0000) >> 8) | | |
| ((x & 0xff000000) >> 24); | | ((x & 0xff000000) >> 24); | |
| } | | } | |
| | | | |
| #if defined(BOOST_LITTLE_ENDIAN) | | #if defined(BOOST_LITTLE_ENDIAN) | |
| inline unsigned long fixEndian(unsigned long x) { | | inline unsigned long fixEndian(unsigned long x) { | |
| | | | |
| skipping to change at line 442 | | skipping to change at line 403 | |
| } | | } | |
| v = new T(i); | | v = new T(i); | |
| _val.reset( v ); | | _val.reset( v ); | |
| } | | } | |
| | | | |
| private: | | private: | |
| T _default; | | T _default; | |
| boost::thread_specific_ptr<T> _val; | | boost::thread_specific_ptr<T> _val; | |
| }; | | }; | |
| | | | |
|
| class ProgressMeter { | | class ProgressMeter : boost::noncopyable { | |
| public: | | public: | |
| ProgressMeter( long long total , int secondsBetween = 3 , int check
Interval = 100 ){ | | ProgressMeter( long long total , int secondsBetween = 3 , int check
Interval = 100 ){ | |
| reset( total , secondsBetween , checkInterval ); | | reset( total , secondsBetween , checkInterval ); | |
| } | | } | |
| | | | |
| ProgressMeter(){ | | ProgressMeter(){ | |
| _active = 0; | | _active = 0; | |
| } | | } | |
| | | | |
| void reset( long long total , int secondsBetween = 3 , int checkInt
erval = 100 ){ | | void reset( long long total , int secondsBetween = 3 , int checkInt
erval = 100 ){ | |
| | | | |
| skipping to change at line 509 | | skipping to change at line 470 | |
| return _hits; | | return _hits; | |
| } | | } | |
| | | | |
| string toString() const { | | string toString() const { | |
| if ( ! _active ) | | if ( ! _active ) | |
| return ""; | | return ""; | |
| stringstream buf; | | stringstream buf; | |
| buf << _done << "/" << _total << " " << (_done*100)/_total << "
%"; | | buf << _done << "/" << _total << " " << (_done*100)/_total << "
%"; | |
| return buf.str(); | | return buf.str(); | |
| } | | } | |
|
| | | | |
| | | bool operator==( const ProgressMeter& other ) const { | |
| | | return this == &other; | |
| | | } | |
| private: | | private: | |
| | | | |
| bool _active; | | bool _active; | |
| | | | |
| long long _total; | | long long _total; | |
| int _secondsBetween; | | int _secondsBetween; | |
| int _checkInterval; | | int _checkInterval; | |
| | | | |
| long long _done; | | long long _done; | |
| long long _hits; | | long long _hits; | |
| int _lastTime; | | int _lastTime; | |
| }; | | }; | |
| | | | |
|
| | | class ProgressMeterHolder : boost::noncopyable { | |
| | | public: | |
| | | ProgressMeterHolder( ProgressMeter& pm ) | |
| | | : _pm( pm ){ | |
| | | } | |
| | | | |
| | | ~ProgressMeterHolder(){ | |
| | | _pm.finished(); | |
| | | } | |
| | | | |
| | | ProgressMeter* operator->(){ | |
| | | return &_pm; | |
| | | } | |
| | | | |
| | | bool hit( int n = 1 ){ | |
| | | return _pm.hit( n ); | |
| | | } | |
| | | | |
| | | void finished(){ | |
| | | _pm.finished(); | |
| | | } | |
| | | | |
| | | bool operator==( const ProgressMeter& other ){ | |
| | | return _pm == other; | |
| | | } | |
| | | | |
| | | private: | |
| | | ProgressMeter& _pm; | |
| | | }; | |
| | | | |
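Editor's note: an illustrative use of the holder added above, assuming a long scan over `nDocs` items; `nDocs` and `processItem()` are placeholders. The holder guarantees finished() runs even on early return or exception:

    ProgressMeter pm( nDocs, 3 /* seconds between reports */ );
    ProgressMeterHolder holder( pm );
    for ( long long i = 0; i < nDocs; i++ ) {
        processItem( i );        // placeholder for real work
        holder.hit();            // periodically reports "done/total percent"
    }
    // holder's destructor calls pm.finished()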
| class TicketHolder { | | class TicketHolder { | |
| public: | | public: | |
|
| TicketHolder( int num ){ | | TicketHolder( int num ) : _mutex("TicketHolder") { | |
| _outof = num; | | _outof = num; | |
| _num = num; | | _num = num; | |
| } | | } | |
| | | | |
| bool tryAcquire(){ | | bool tryAcquire(){ | |
| scoped_lock lk( _mutex ); | | scoped_lock lk( _mutex ); | |
| if ( _num <= 0 ){ | | if ( _num <= 0 ){ | |
| if ( _num < 0 ){ | | if ( _num < 0 ){ | |
| cerr << "DISASTER! in TicketHolder" << endl; | | cerr << "DISASTER! in TicketHolder" << endl; | |
| } | | } | |
| | | | |
| skipping to change at line 713 | | skipping to change at line 708 | |
| s1++; s2++; | | s1++; s2++; | |
| } | | } | |
| | | | |
| if ( *s1 ) | | if ( *s1 ) | |
| return 1; | | return 1; | |
| if ( *s2 ) | | if ( *s2 ) | |
| return -1; | | return -1; | |
| return 0; | | return 0; | |
| } | | } | |
| | | | |
|
| | | /** A generic pointer type for function arguments. | |
| | | * It will convert from any pointer type except auto_ptr. | |
| | | * Semantics are the same as passing the pointer returned from get() | |
| | | * const ptr<T> => T * const | |
| | | * ptr<const T> => T const * or const T* | |
| | | */ | |
| | | template <typename T> | |
| | | struct ptr{ | |
| | | | |
| | | ptr() : _p(NULL) {} | |
| | | | |
| | | // convert to ptr<T> | |
| | | ptr(T* p) : _p(p) {} // needed for NULL | |
| | | template<typename U> ptr(U* p) : _p(p) {} | |
| | | template<typename U> ptr(const ptr<U>& p) : _p(p) {} | |
| | | template<typename U> ptr(const boost::shared_ptr<U>& p) : _p(p.get( | |
| | | )) {} | |
| | | template<typename U> ptr(const boost::scoped_ptr<U>& p) : _p(p.get( | |
| | | )) {} | |
| | | //template<typename U> ptr(const auto_ptr<U>& p) : _p(p.get()) {} | |
| | | | |
| | | // assign to ptr<T> | |
| | | ptr& operator= (T* p) { _p = p; return *this; } // needed for NULL | |
| | | template<typename U> ptr& operator= (U* p) { _p = p; return *this; | |
| | | } | |
| | | template<typename U> ptr& operator= (const ptr<U>& p) { _p = p; ret | |
| | | urn *this; } | |
| | | template<typename U> ptr& operator= (const boost::shared_ptr<U>& p) | |
| | | { _p = p.get(); return *this; } | |
| | | template<typename U> ptr& operator= (const boost::scoped_ptr<U>& p) | |
| | | { _p = p.get(); return *this; } | |
| | | //template<typename U> ptr& operator= (const auto_ptr<U>& p) { _p = | |
| | | p.get(); return *this; } | |
| | | | |
| | | // use | |
| | | T* operator->() const { return _p; } | |
| | | T& operator*() const { return *_p; } | |
| | | | |
| | | // convert from ptr<T> | |
| | | operator T* () const { return _p; } | |
| | | | |
| | | private: | |
| | | T* _p; | |
| | | }; | |
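Editor's note: a sketch of the calling convention ptr<T> is meant to enable, with a hypothetical dump() function; any of the pointer flavours below converts implicitly, so a callee needs only one overload:

    void dump( ptr<const BSONObj> obj );       // hypothetical callee taking ptr<T> by value

    BSONObj plain;
    boost::shared_ptr<BSONObj> sp( new BSONObj() );
    boost::scoped_ptr<BSONObj> scp( new BSONObj() );

    dump( &plain );   // raw pointer
    dump( sp );       // shared_ptr
    dump( scp );      // scoped_ptr (auto_ptr is deliberately excluded)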
| | | | |
| | | /** Hmmmm */ | |
| | | using namespace boost; | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
End of changes. 17 change blocks. |
| 64 lines changed or deleted | | 103 lines changed or added | |
|
| hostandport.h | | hostandport.h | |
| | | | |
| skipping to change at line 22 | | skipping to change at line 22 | |
| * distributed under the License is distributed on an "AS IS" BASIS, | | * distributed under the License is distributed on an "AS IS" BASIS, | |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | |
| * See the License for the specific language governing permissions and | | * See the License for the specific language governing permissions and | |
| * limitations under the License. | | * limitations under the License. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include "sock.h" | | #include "sock.h" | |
| #include "../db/cmdline.h" | | #include "../db/cmdline.h" | |
|
| | | #include "mongoutils/str.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
|
| | | using namespace mongoutils; | |
| | | | |
| /** helper for manipulating host:port connection endpoints. | | /** helper for manipulating host:port connection endpoints. | |
| */ | | */ | |
| struct HostAndPort { | | struct HostAndPort { | |
| HostAndPort() : _port(-1) { } | | HostAndPort() : _port(-1) { } | |
| | | | |
|
| HostAndPort(string h, int p = -1) : _host(h), _port(p) { } | | /** From a string hostname[:portnumber] | |
| | | Throws user assertion if bad config string or bad port #. | |
| | | */ | |
| | | HostAndPort(string s); | |
| | | | |
| | | /** @param p port number. -1 is ok to use default. */ | |
| | | HostAndPort(string h, int p /*= -1*/) : _host(h), _port(p) { } | |
| | | | |
| HostAndPort(const SockAddr& sock ) | | HostAndPort(const SockAddr& sock ) | |
| : _host( sock.getAddr() ) , _port( sock.getPort() ){ | | : _host( sock.getAddr() ) , _port( sock.getPort() ){ | |
| } | | } | |
| | | | |
| static HostAndPort me() { | | static HostAndPort me() { | |
| return HostAndPort("localhost", cmdLine.port); | | return HostAndPort("localhost", cmdLine.port); | |
| } | | } | |
| | | | |
|
| static HostAndPort fromString(string s) { | | | |
| const char *p = s.c_str(); | | | |
| uassert(13110, "HostAndPort: bad config string", *p); | | | |
| const char *colon = strchr(p, ':'); | | | |
| HostAndPort m; | | | |
| if( colon ) { | | | |
| int port = atoi(colon+1); | | | |
| uassert(13095, "HostAndPort: bad port #", port > 0); | | | |
| return HostAndPort(string(p,colon-p),port); | | | |
| } | | | |
| // no port specified. | | | |
| return HostAndPort(p); | | | |
| } | | | |
| | | | |
| bool operator<(const HostAndPort& r) const { return _host < r._host
|| (_host==r._host&&_port<r._port); } | | bool operator<(const HostAndPort& r) const { return _host < r._host
|| (_host==r._host&&_port<r._port); } | |
| | | | |
| /* returns true if the host/port combo identifies this process inst
ance. */ | | /* returns true if the host/port combo identifies this process inst
ance. */ | |
| bool isSelf() const; | | bool isSelf() const; | |
| | | | |
| bool isLocalHost() const; | | bool isLocalHost() const; | |
| | | | |
| // @returns host:port | | // @returns host:port | |
| string toString() const; | | string toString() const; | |
| | | | |
| | | | |
| skipping to change at line 65 | | skipping to change at line 60 | |
| | | | |
| /* returns true if the host/port combo identifies this process inst
ance. */ | | /* returns true if the host/port combo identifies this process inst
ance. */ | |
| bool isSelf() const; | | bool isSelf() const; | |
| | | | |
| bool isLocalHost() const; | | bool isLocalHost() const; | |
| | | | |
| // @returns host:port | | // @returns host:port | |
| string toString() const; | | string toString() const; | |
| | | | |
| string host() const { return _host; } | | string host() const { return _host; } | |
|
| | | | |
| int port() const { return _port >= 0 ? _port : cmdLine.port; } | | int port() const { return _port >= 0 ? _port : cmdLine.port; } | |
| | | | |
| private: | | private: | |
| // invariant (except full obj assignment): | | // invariant (except full obj assignment): | |
| string _host; | | string _host; | |
| int _port; // -1 indicates unspecified | | int _port; // -1 indicates unspecified | |
| }; | | }; | |
| | | | |
|
| /** returns true if strings share a common starting prefix */ | | /** returns true if strings seem to be the same hostname. | |
| inline bool sameStart(const char *p, const char *q) { | | "nyc1" and "nyc1.acme.com" are treated as the same. | |
| while( 1 ) { | | in fact "nyc1.foo.com" and "nyc1.acme.com" are treated the same - | |
| if( *p == 0 || *q == 0 ) | | we only look up to the first period. | |
| return true; | | */ | |
| if( *p != *q ) | | inline bool sameHostname(const string& a, const string& b) { | |
| break; | | return str::before(a, '.') == str::before(b, '.'); | |
| p++; q++; | | | |
| } | | | |
| return false; | | | |
| } | | } | |
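Editor's note: concretely, only the text before the first '.' is compared, so (illustrative assertions):

    assert( sameHostname( "nyc1", "nyc1.acme.com" ) );
    assert( sameHostname( "nyc1.foo.com", "nyc1.acme.com" ) );   // domains are ignored
    assert( ! sameHostname( "nyc1", "nyc2" ) );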
| | | | |
| inline bool HostAndPort::isSelf() const { | | inline bool HostAndPort::isSelf() const { | |
| int p = _port == -1 ? CmdLine::DefaultDBPort : _port; | | int p = _port == -1 ? CmdLine::DefaultDBPort : _port; | |
| if( p != cmdLine.port ) | | if( p != cmdLine.port ) | |
| return false; | | return false; | |
|
| assert( _host != "localhost" && _host != "127.0.0.1" ); | | | |
| return sameStart(getHostName().c_str(), _host.c_str()); | | return sameHostname(getHostName(), _host) || isLocalHost(); | |
| } | | } | |
| | | | |
| inline string HostAndPort::toString() const { | | inline string HostAndPort::toString() const { | |
| stringstream ss; | | stringstream ss; | |
| ss << _host; | | ss << _host; | |
| if( _port != -1 ) ss << ':' << _port; | | if( _port != -1 ) ss << ':' << _port; | |
| return ss.str(); | | return ss.str(); | |
| } | | } | |
| | | | |
| inline bool HostAndPort::isLocalHost() const { | | inline bool HostAndPort::isLocalHost() const { | |
|
| return _host == "localhost" || _host == "127.0.0.1"; | | return _host == "localhost" || _host == "127.0.0.1" || _host == ":: | |
| | | 1"; | |
| | | } | |
| | | | |
| | | inline HostAndPort::HostAndPort(string s) { | |
| | | const char *p = s.c_str(); | |
| | | uassert(13110, "HostAndPort: bad config string", *p); | |
| | | const char *colon = strrchr(p, ':'); | |
| | | if( colon ) { | |
| | | int port = atoi(colon+1); | |
| | | uassert(13095, "HostAndPort: bad port #", port > 0); | |
| | | _host = string(p,colon-p); | |
| | | _port = port; | |
| | | } | |
| | | else { | |
| | | // no port specified. | |
| | | _host = p; | |
| | | _port = -1; | |
| | | } | |
| } | | } | |
| | | | |
| } | | } | |
| | | | |
End of changes. 8 change blocks. |
| 28 lines changed or deleted | | 39 lines changed or added | |
|
| log.h | | log.h | |
| | | | |
| skipping to change at line 22 | | skipping to change at line 22 | |
| * distributed under the License is distributed on an "AS IS" BASIS, | | * distributed under the License is distributed on an "AS IS" BASIS, | |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | |
| * See the License for the specific language governing permissions and | | * See the License for the specific language governing permissions and | |
| * limitations under the License. | | * limitations under the License. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include <string.h> | | #include <string.h> | |
| #include <errno.h> | | #include <errno.h> | |
|
| | | #include "../bson/util/builder.h" | |
| | | | |
| | | #ifndef _WIN32 | |
| | | //#include <syslog.h> | |
| | | #endif | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| using boost::shared_ptr; | | using boost::shared_ptr; | |
| | | | |
| // Utility interface for stringifying object only when val() called. | | // Utility interface for stringifying object only when val() called. | |
| class LazyString { | | class LazyString { | |
| public: | | public: | |
| virtual ~LazyString() {} | | virtual ~LazyString() {} | |
| virtual string val() const = 0; | | virtual string val() const = 0; | |
| | | | |
| skipping to change at line 44 | | skipping to change at line 49 | |
| // Utility class for stringifying object only when val() called. | | // Utility class for stringifying object only when val() called. | |
| template< class T > | | template< class T > | |
| class LazyStringImpl : public LazyString { | | class LazyStringImpl : public LazyString { | |
| public: | | public: | |
| LazyStringImpl( const T &t ) : t_( t ) {} | | LazyStringImpl( const T &t ) : t_( t ) {} | |
| virtual string val() const { return (string)t_; } | | virtual string val() const { return (string)t_; } | |
| private: | | private: | |
| const T& t_; | | const T& t_; | |
| }; | | }; | |
| | | | |
|
| | | class Tee { | |
| | | public: | |
| | | virtual ~Tee(){} | |
| | | virtual void write(const string& str) = 0; | |
| | | }; | |
| | | | |
| class Nullstream { | | class Nullstream { | |
| public: | | public: | |
|
| | | virtual Nullstream& operator<< (Tee* tee) { | |
| | | return *this; | |
| | | } | |
| virtual ~Nullstream() {} | | virtual ~Nullstream() {} | |
| virtual Nullstream& operator<<(const char *) { | | virtual Nullstream& operator<<(const char *) { | |
| return *this; | | return *this; | |
| } | | } | |
| virtual Nullstream& operator<<(char *) { | | virtual Nullstream& operator<<(char *) { | |
| return *this; | | return *this; | |
| } | | } | |
| virtual Nullstream& operator<<(char) { | | virtual Nullstream& operator<<(char) { | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| skipping to change at line 114 | | skipping to change at line 128 | |
| template< class T > | | template< class T > | |
| Nullstream& operator<<(const T &t) { | | Nullstream& operator<<(const T &t) { | |
| return operator<<( static_cast<const LazyString&>( LazyStringIm
pl< T >( t ) ) ); | | return operator<<( static_cast<const LazyString&>( LazyStringIm
pl< T >( t ) ) ); | |
| } | | } | |
| virtual Nullstream& operator<< (ostream& ( *endl )(ostream&)) { | | virtual Nullstream& operator<< (ostream& ( *endl )(ostream&)) { | |
| return *this; | | return *this; | |
| } | | } | |
| virtual Nullstream& operator<< (ios_base& (*hex)(ios_base&)) { | | virtual Nullstream& operator<< (ios_base& (*hex)(ios_base&)) { | |
| return *this; | | return *this; | |
| } | | } | |
|
| virtual void flush(){} | | virtual void flush(Tee *t = 0) {} | |
| }; | | }; | |
| extern Nullstream nullstream; | | extern Nullstream nullstream; | |
| | | | |
| class Logstream : public Nullstream { | | class Logstream : public Nullstream { | |
| static mongo::mutex mutex; | | static mongo::mutex mutex; | |
| static int doneSetup; | | static int doneSetup; | |
| stringstream ss; | | stringstream ss; | |
| public: | | public: | |
| static int magicNumber(){ | | static int magicNumber(){ | |
| return 1717; | | return 1717; | |
| } | | } | |
|
| void flush() { | | void flush(Tee *t = 0) { | |
| // this ensures things are sane | | // this ensures things are sane | |
|
| if ( doneSetup == 1717 ){ | | if ( doneSetup == 1717 ) { | |
| | | BufBuilder b(512); | |
| | | time_t_to_String( time(0) , b.grow(20) ); | |
| | | b.append( ss.str() ); | |
| | | const char *s = b.buf(); | |
| | | | |
| scoped_lock lk(mutex); | | scoped_lock lk(mutex); | |
|
| cout << ss.str(); | | | |
| | | if( t ) t->write(s); | |
| | | #ifndef _WIN32 | |
| | | //syslog( LOG_INFO , "%s" , cc ); | |
| | | #endif | |
| | | cout << s; | |
| cout.flush(); | | cout.flush(); | |
| } | | } | |
|
| ss.str(""); | | _init(); | |
| } | | } | |
| | | | |
| /** note these are virtual */ | | /** note these are virtual */ | |
| Logstream& operator<<(const char *x) { ss << x; return *this; } | | Logstream& operator<<(const char *x) { ss << x; return *this; } | |
| Logstream& operator<<(char *x) { ss << x; return *this; } | | Logstream& operator<<(char *x) { ss << x; return *this; } | |
| Logstream& operator<<(char x) { ss << x; return *this; } | | Logstream& operator<<(char x) { ss << x; return *this; } | |
| Logstream& operator<<(int x) { ss << x; return *this; } | | Logstream& operator<<(int x) { ss << x; return *this; } | |
| Logstream& operator<<(ExitCode x) { ss << x; return *this; } | | Logstream& operator<<(ExitCode x) { ss << x; return *this; } | |
| Logstream& operator<<(long x) { ss << x; return *this; } | | Logstream& operator<<(long x) { ss << x; return *this; } | |
| Logstream& operator<<(unsigned long x) { ss << x; return *this; } | | Logstream& operator<<(unsigned long x) { ss << x; return *this; } | |
| | | | |
| skipping to change at line 156 | | skipping to change at line 180 | |
| Logstream& operator<<(void *x) { ss << x; return *this; } | | Logstream& operator<<(void *x) { ss << x; return *this; } | |
| Logstream& operator<<(const void *x) { ss << x; return *this; } | | Logstream& operator<<(const void *x) { ss << x; return *this; } | |
| Logstream& operator<<(long long x) { ss << x; return *this; } | | Logstream& operator<<(long long x) { ss << x; return *this; } | |
| Logstream& operator<<(unsigned long long x) { ss << x; return *this
; } | | Logstream& operator<<(unsigned long long x) { ss << x; return *this
; } | |
| Logstream& operator<<(bool x) { ss << x; return *this
; } | | Logstream& operator<<(bool x) { ss << x; return *this
; } | |
| | | | |
| Logstream& operator<<(const LazyString& x) { | | Logstream& operator<<(const LazyString& x) { | |
| ss << x.val(); | | ss << x.val(); | |
| return *this; | | return *this; | |
| } | | } | |
|
| | | Nullstream& operator<< (Tee* tee) { | |
| | | ss << '\n'; | |
| | | flush(tee); | |
| | | return *this; | |
| | | } | |
| Logstream& operator<< (ostream& ( *_endl )(ostream&)) { | | Logstream& operator<< (ostream& ( *_endl )(ostream&)) { | |
| ss << '\n'; | | ss << '\n'; | |
|
| flush(); | | flush(0); | |
| return *this; | | return *this; | |
| } | | } | |
| Logstream& operator<< (ios_base& (*_hex)(ios_base&)) { | | Logstream& operator<< (ios_base& (*_hex)(ios_base&)) { | |
| ss << _hex; | | ss << _hex; | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| template< class T > | | template< class T > | |
| Nullstream& operator<<(const shared_ptr<T> p ){ | | Nullstream& operator<<(const shared_ptr<T> p ){ | |
| T * t = p.get(); | | T * t = p.get(); | |
| if ( ! t ) | | if ( ! t ) | |
| *this << "null"; | | *this << "null"; | |
| else | | else | |
|
| *this << t; | | *this << *t; | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| Logstream& prolog() { | | Logstream& prolog() { | |
|
| char now[64]; | | | |
| time_t_to_String(time(0), now); | | | |
| now[20] = 0; | | | |
| ss << now; | | | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| private: | | private: | |
| static thread_specific_ptr<Logstream> tsp; | | static thread_specific_ptr<Logstream> tsp; | |
|
| | | Logstream(){ | |
| | | _init(); | |
| | | } | |
| | | void _init(){ | |
| | | ss.str(""); | |
| | | } | |
| public: | | public: | |
| static Logstream& get() { | | static Logstream& get() { | |
| Logstream *p = tsp.get(); | | Logstream *p = tsp.get(); | |
| if( p == 0 ) | | if( p == 0 ) | |
| tsp.reset( p = new Logstream() ); | | tsp.reset( p = new Logstream() ); | |
| return *p; | | return *p; | |
| } | | } | |
| }; | | }; | |
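Editor's note: the Tee hook added above copies a finished log line to a second sink when a Tee* is streamed. A minimal hypothetical sink, not one that exists in the tree:

    class LastLineTee : public Tee {
    public:
        void write( const string& str ) { _last = str; }   // keep the most recent line
        string _last;
    };

    static LastLineTee lastLine;
    // Streaming the pointer flushes the current line to cout and to the tee:
    //     log() << "something noteworthy" << &lastLine;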
| | | | |
| extern int logLevel; | | extern int logLevel; | |
|
| | | extern int tlogLevel; | |
| | | | |
| inline Nullstream& out( int level = 0 ) { | | inline Nullstream& out( int level = 0 ) { | |
| if ( level > logLevel ) | | if ( level > logLevel ) | |
| return nullstream; | | return nullstream; | |
| return Logstream::get(); | | return Logstream::get(); | |
| } | | } | |
| | | | |
| /* flush the log stream if the log level is | | /* flush the log stream if the log level is | |
| at the specified level or higher. */ | | at the specified level or higher. */ | |
| inline void logflush(int level = 0) { | | inline void logflush(int level = 0) { | |
| if( level > logLevel ) | | if( level > logLevel ) | |
|
| Logstream::get().flush(); | | Logstream::get().flush(0); | |
| } | | } | |
| | | | |
| /* without prolog */ | | /* without prolog */ | |
| inline Nullstream& _log( int level = 0 ){ | | inline Nullstream& _log( int level = 0 ){ | |
| if ( level > logLevel ) | | if ( level > logLevel ) | |
| return nullstream; | | return nullstream; | |
| return Logstream::get(); | | return Logstream::get(); | |
| } | | } | |
| | | | |
|
| inline Nullstream& log( int level = 0 ) { | | /** logging which we may not want during unit test runs. | |
| | | set tlogLevel to -1 to suppress tlog() output in a test program. */ | |
| | | inline Nullstream& tlog( int level = 0 ) { | |
| | | if ( level > tlogLevel || level > logLevel ) | |
| | | return nullstream; | |
| | | return Logstream::get().prolog(); | |
| | | } | |
| | | | |
| | | inline Nullstream& log( int level ) { | |
| if ( level > logLevel ) | | if ( level > logLevel ) | |
| return nullstream; | | return nullstream; | |
| return Logstream::get().prolog(); | | return Logstream::get().prolog(); | |
| } | | } | |
| | | | |
|
| | | inline Nullstream& log() { | |
| | | return Logstream::get().prolog(); | |
| | | } | |
| | | | |
| /* TODOCONCURRENCY */ | | /* TODOCONCURRENCY */ | |
| inline ostream& stdcout() { | | inline ostream& stdcout() { | |
| return cout; | | return cout; | |
| } | | } | |
| | | | |
| /* default impl returns "" -- mongod overrides */ | | /* default impl returns "" -- mongod overrides */ | |
| extern const char * (*getcurns)(); | | extern const char * (*getcurns)(); | |
| | | | |
| inline Nullstream& problem( int level = 0 ) { | | inline Nullstream& problem( int level = 0 ) { | |
| if ( level > logLevel ) | | if ( level > logLevel ) | |
| | | | |
End of changes. 17 change blocks. |
| 13 lines changed or deleted | | 57 lines changed or added | |
|
| matcher.h | | matcher.h | |
| | | | |
| skipping to change at line 155 | | skipping to change at line 155 | |
| bool keyMatch() const { return !all && !haveSize && !hasArray && !h
aveNeg && _orMatchers.size() == 0; } | | bool keyMatch() const { return !all && !haveSize && !hasArray && !h
aveNeg && _orMatchers.size() == 0; } | |
| | | | |
| bool atomic() const { return _atomic; } | | bool atomic() const { return _atomic; } | |
| | | | |
| bool hasType( BSONObj::MatchType type ) const; | | bool hasType( BSONObj::MatchType type ) const; | |
| | | | |
| string toString() const { | | string toString() const { | |
| return jsobj.toString(); | | return jsobj.toString(); | |
| } | | } | |
| | | | |
|
| | | // void popOr() { | |
| | | // massert( 13261, "no or to pop", !_orMatchers.empty() ); | |
| | | // _norMatchers.push_back( _orMatchers.front() ); | |
| | | // _orMatchers.pop_front(); | |
| | | // } | |
| | | | |
| private: | | private: | |
| void addBasic(const BSONElement &e, int c, bool isNot) { | | void addBasic(const BSONElement &e, int c, bool isNot) { | |
| // TODO May want to selectively ignore these element types base
d on op type. | | // TODO May want to selectively ignore these element types base
d on op type. | |
| if ( e.type() == MinKey || e.type() == MaxKey ) | | if ( e.type() == MinKey || e.type() == MaxKey ) | |
| return; | | return; | |
| basics.push_back( ElementMatcher( e , c, isNot ) ); | | basics.push_back( ElementMatcher( e , c, isNot ) ); | |
| } | | } | |
| | | | |
| void addRegex(const char *fieldName, const char *regex, const char
*flags, bool isNot = false); | | void addRegex(const char *fieldName, const char *regex, const char
*flags, bool isNot = false); | |
| bool addOp( const BSONElement &e, const BSONElement &fe, bool isNot
, const char *& regex, const char *&flags ); | | bool addOp( const BSONElement &e, const BSONElement &fe, bool isNot
, const char *& regex, const char *&flags ); | |
| | | | |
| int valuesMatch(const BSONElement& l, const BSONElement& r, int op,
const ElementMatcher& bm); | | int valuesMatch(const BSONElement& l, const BSONElement& r, int op,
const ElementMatcher& bm); | |
| | | | |
| bool parseOrNor( const BSONElement &e, bool subMatcher ); | | bool parseOrNor( const BSONElement &e, bool subMatcher ); | |
|
| void parseOr( const BSONElement &e, bool subMatcher, vector< shared
_ptr< Matcher > > &matchers ); | | void parseOr( const BSONElement &e, bool subMatcher, list< shared_p
tr< Matcher > > &matchers ); | |
| | | | |
| Where *where; // set if query uses $where | | Where *where; // set if query uses $where | |
| BSONObj jsobj; // the query pattern. e.g., { name
: "joe" } | | BSONObj jsobj; // the query pattern. e.g., { name
: "joe" } | |
| BSONObj constrainIndexKey_; | | BSONObj constrainIndexKey_; | |
| vector<ElementMatcher> basics; | | vector<ElementMatcher> basics; | |
| bool haveSize; | | bool haveSize; | |
| bool all; | | bool all; | |
| bool hasArray; | | bool hasArray; | |
| bool haveNeg; | | bool haveNeg; | |
| | | | |
| | | | |
| skipping to change at line 192 | | skipping to change at line 198 | |
| i.e. we stay locked the whole time. | | i.e. we stay locked the whole time. | |
| http://www.mongodb.org/display/DOCS/Removing[ | | http://www.mongodb.org/display/DOCS/Removing[ | |
| */ | | */ | |
| bool _atomic; | | bool _atomic; | |
| | | | |
| RegexMatcher regexs[4]; | | RegexMatcher regexs[4]; | |
| int nRegex; | | int nRegex; | |
| | | | |
| // so we delete the mem when we're done: | | // so we delete the mem when we're done: | |
| vector< shared_ptr< BSONObjBuilder > > _builders; | | vector< shared_ptr< BSONObjBuilder > > _builders; | |
|
| vector< shared_ptr< Matcher > > _orMatchers; | | list< shared_ptr< Matcher > > _orMatchers; | |
| vector< shared_ptr< Matcher > > _norMatchers; | | list< shared_ptr< Matcher > > _norMatchers; | |
| | | | |
| friend class CoveredIndexMatcher; | | friend class CoveredIndexMatcher; | |
| }; | | }; | |
| | | | |
| // If match succeeds on index key, then attempt to match full document. | | // If match succeeds on index key, then attempt to match full document. | |
| class CoveredIndexMatcher : boost::noncopyable { | | class CoveredIndexMatcher : boost::noncopyable { | |
| public: | | public: | |
|
| CoveredIndexMatcher(const BSONObj &pattern, const BSONObj &indexKey
Pattern); | | CoveredIndexMatcher(const BSONObj &pattern, const BSONObj &indexKey
Pattern , bool alwaysUseRecord=false ); | |
| bool matches(const BSONObj &o){ return _docMatcher.matches( o ); } | | bool matches(const BSONObj &o){ return _docMatcher.matches( o ); } | |
| bool matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetai
ls * details = 0 ); | | bool matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetai
ls * details = 0 ); | |
| bool matchesCurrent( Cursor * cursor , MatchDetails * details = 0 )
; | | bool matchesCurrent( Cursor * cursor , MatchDetails * details = 0 )
; | |
| bool needRecord(){ return _needRecord; } | | bool needRecord(){ return _needRecord; } | |
| | | | |
| Matcher& docMatcher() { return _docMatcher; } | | Matcher& docMatcher() { return _docMatcher; } | |
| private: | | private: | |
| Matcher _keyMatcher; | | Matcher _keyMatcher; | |
| Matcher _docMatcher; | | Matcher _docMatcher; | |
| bool _needRecord; | | bool _needRecord; | |
| | | | |
End of changes. 4 change blocks. |
| 4 lines changed or deleted | | 10 lines changed or added | |
|
| message.h | | message.h | |
| | | | |
| skipping to change at line 93 | | skipping to change at line 93 | |
| bool call(Message& toSend, Message& response); | | bool call(Message& toSend, Message& response); | |
| void say(Message& toSend, int responseTo = -1); | | void say(Message& toSend, int responseTo = -1); | |
| | | | |
| void piggyBack( Message& toSend , int responseTo = -1 ); | | void piggyBack( Message& toSend , int responseTo = -1 ); | |
| | | | |
| virtual unsigned remotePort() const; | | virtual unsigned remotePort() const; | |
| virtual HostAndPort remote() const; | | virtual HostAndPort remote() const; | |
| | | | |
| // send len or throw SocketException | | // send len or throw SocketException | |
| void send( const char * data , int len, const char *context ); | | void send( const char * data , int len, const char *context ); | |
|
| | | void send( const vector< pair< char *, int > > &data, const char *c | |
| | | ontext ); | |
| | | | |
| // recv len or throw SocketException | | // recv len or throw SocketException | |
| void recv( char * data , int len ); | | void recv( char * data , int len ); | |
| | | | |
| int unsafe_recv( char *buf, int max ); | | int unsafe_recv( char *buf, int max ); | |
| private: | | private: | |
| int sock; | | int sock; | |
| PiggyBackData * piggyBackData; | | PiggyBackData * piggyBackData; | |
| public: | | public: | |
| SockAddr farEnd; | | SockAddr farEnd; | |
| int _timeout; | | int _timeout; | |
| | | | |
| skipping to change at line 179 | | skipping to change at line 181 | |
| }; | | }; | |
| const int MsgDataHeaderSize = sizeof(MsgData) - 4; | | const int MsgDataHeaderSize = sizeof(MsgData) - 4; | |
| inline int MsgData::dataLen() { | | inline int MsgData::dataLen() { | |
| return len - MsgDataHeaderSize; | | return len - MsgDataHeaderSize; | |
| } | | } | |
| | | | |
| #pragma pack() | | #pragma pack() | |
| | | | |
| class Message { | | class Message { | |
| public: | | public: | |
|
| Message() { | | // we assume here that a vector with initial size 0 does no allocat | |
| data = 0; | | ion (0 is the default, but wanted to make it explicit). | |
| freeIt = false; | | Message() : _buf( 0 ), _data( 0 ), _freeIt( false ) {} | |
| } | | Message( void * data , bool freeIt ) : | |
| Message( void * _data , bool _freeIt ) { | | _buf( 0 ), _data( 0 ), _freeIt( false ) { | |
| data = (MsgData*)_data; | | _setData( reinterpret_cast< MsgData* >( data ), freeIt ); | |
| freeIt = _freeIt; | | | |
| }; | | }; | |
|
| | | Message(Message& r) : _buf( 0 ), _data( 0 ), _freeIt( false ) { | |
| | | *this = r; | |
| | | } | |
| ~Message() { | | ~Message() { | |
| reset(); | | reset(); | |
| } | | } | |
| | | | |
|
| SockAddr from; | | SockAddr _from; | |
| MsgData *data; | | | |
| | | | |
|
| int operation() const { | | MsgData *header() const { | |
| return data->operation(); | | assert( !empty() ); | |
| | | return _buf ? _buf : reinterpret_cast< MsgData* > ( _data[ 0 ]. | |
| | | first ); | |
| | | } | |
| | | int operation() const { return header()->operation(); } | |
| | | | |
| | | MsgData *singleData() const { | |
| | | massert( 13273, "single data buffer expected", _buf ); | |
| | | return header(); | |
| } | | } | |
| | | | |
|
| | | bool empty() const { return !_buf && _data.empty(); } | |
| | | | |
| | | // concat multiple buffers - noop if <2 buffers already, otherwise | |
| | | can be expensive copy | |
| | | // can get rid of this if we make response handling smarter | |
| | | void concat() { | |
| | | if ( _buf || empty() ) { | |
| | | return; | |
| | | } | |
| | | | |
| | | assert( _freeIt ); | |
| | | int totalSize = 0; | |
| | | for( vector< pair< char *, int > >::const_iterator i = _data.be | |
| | | gin(); i != _data.end(); ++i ) { | |
| | | totalSize += i->second; | |
| | | } | |
| | | char *buf = (char*)malloc( totalSize ); | |
| | | char *p = buf; | |
| | | for( vector< pair< char *, int > >::const_iterator i = _data.be | |
| | | gin(); i != _data.end(); ++i ) { | |
| | | memcpy( p, i->first, i->second ); | |
| | | p += i->second; | |
| | | } | |
| | | reset(); | |
| | | _setData( (MsgData*)buf, true ); | |
| | | } | |
| | | | |
| | | // vector swap() so this is fast | |
| Message& operator=(Message& r) { | | Message& operator=(Message& r) { | |
|
| assert( data == 0 ); | | assert( empty() ); | |
| data = r.data; | | assert( r._freeIt ); | |
| assert( r.freeIt ); | | _buf = r._buf; | |
| r.freeIt = false; | | r._buf = 0; | |
| r.data = 0; | | if ( r._data.size() > 0 ) { | |
| freeIt = true; | | _data.swap( r._data ); | |
| | | } | |
| | | r._freeIt = false; | |
| | | _freeIt = true; | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| void reset() { | | void reset() { | |
|
| if ( freeIt && data ) | | if ( _freeIt ) { | |
| free(data); | | if ( _buf ) { | |
| data = 0; | | free( _buf ); | |
| freeIt = false; | | } | |
| | | for( vector< pair< char *, int > >::const_iterator i = _dat | |
| | | a.begin(); i != _data.end(); ++i ) { | |
| | | free(i->first); | |
| | | } | |
| | | } | |
| | | _buf = 0; | |
| | | _data.clear(); | |
| | | _freeIt = false; | |
| } | | } | |
| | | | |
|
| void setData(MsgData *d, bool _freeIt) { | | // use to add a buffer | |
| assert( data == 0 ); | | // assumes message will free everything | |
| freeIt = _freeIt; | | void appendData(char *d, int size) { | |
| data = d; | | if ( size <= 0 ) { | |
| | | return; | |
| | | } | |
| | | if ( empty() ) { | |
| | | MsgData *md = (MsgData*)d; | |
| | | md->len = size; // can be updated later if more buffers add | |
| | | ed | |
| | | _setData( md, true ); | |
| | | return; | |
| | | } | |
| | | assert( _freeIt ); | |
| | | if ( _buf ) { | |
| | | _data.push_back( make_pair( (char*)_buf, _buf->len ) ); | |
| | | _buf = 0; | |
| | | } | |
| | | _data.push_back( make_pair( d, size ) ); | |
| | | header()->len += size; | |
| | | } | |
| | | | |
| | | // use to set first buffer if empty | |
| | | void setData(MsgData *d, bool freeIt) { | |
| | | assert( empty() ); | |
| | | _setData( d, freeIt ); | |
| } | | } | |
| void setData(int operation, const char *msgtxt) { | | void setData(int operation, const char *msgtxt) { | |
| setData(operation, msgtxt, strlen(msgtxt)+1); | | setData(operation, msgtxt, strlen(msgtxt)+1); | |
| } | | } | |
| void setData(int operation, const char *msgdata, size_t len) { | | void setData(int operation, const char *msgdata, size_t len) { | |
|
| assert(data == 0); | | assert( empty() ); | |
| size_t dataLen = len + sizeof(MsgData) - 4; | | size_t dataLen = len + sizeof(MsgData) - 4; | |
| MsgData *d = (MsgData *) malloc(dataLen); | | MsgData *d = (MsgData *) malloc(dataLen); | |
| memcpy(d->_data, msgdata, len); | | memcpy(d->_data, msgdata, len); | |
| d->len = fixEndian(dataLen); | | d->len = fixEndian(dataLen); | |
| d->setOperation(operation); | | d->setOperation(operation); | |
|
| freeIt= true; | | _setData( d, true ); | |
| data = d; | | | |
| } | | } | |
| | | | |
| bool doIFreeIt() { | | bool doIFreeIt() { | |
|
| return freeIt; | | return _freeIt; | |
| | | } | |
| | | | |
| | | void send( MessagingPort &p, const char *context ) { | |
| | | if ( empty() ) { | |
| | | return; | |
| | | } | |
| | | if ( _buf != 0 ) { | |
| | | p.send( (char*)_buf, _buf->len, context ); | |
| | | } else { | |
| | | p.send( _data, context ); | |
| | | } | |
| } | | } | |
| | | | |
| private: | | private: | |
|
| bool freeIt; | | void _setData( MsgData *d, bool freeIt ) { | |
| | | _freeIt = freeIt; | |
| | | _buf = d; | |
| | | } | |
| | | // if just one buffer, keep it in _buf, otherwise keep a sequence o | |
| | | f buffers in _data | |
| | | MsgData * _buf; | |
| | | // byte buffer(s) - the first must contain at least a full MsgData | |
| | | unless using _buf for storage instead | |
| | | vector< pair< char*, int > > _data; | |
| | | bool _freeIt; | |
| }; | | }; | |
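Editor's note: a sketch of the multi-buffer flow introduced above; the buffers, sizes and surrounding code are placeholders. The first buffer must begin with a MsgData header, and the Message takes ownership of everything passed to appendData():

    Message m;
    m.appendData( headerBuf, headerLen );    // first buffer: stored as the single _buf
    m.appendData( payloadBuf, payloadLen );  // second buffer: switches to the vector form
    m.concat();                              // copy both pieces into one contiguous buffer
    MsgData *md = m.singleData();            // valid again now that only one buffer remains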
| | | | |
| class SocketException : public DBException { | | class SocketException : public DBException { | |
| public: | | public: | |
| virtual const char* what() const throw() { return "socket exception
"; } | | virtual const char* what() const throw() { return "socket exception
"; } | |
|
| virtual int getCode(){ return 9001; } | | virtual int getCode() const { return 9001; } | |
| }; | | }; | |
| | | | |
| MSGID nextMessageId(); | | MSGID nextMessageId(); | |
| | | | |
| void setClientId( int id ); | | void setClientId( int id ); | |
| int getClientId(); | | int getClientId(); | |
| | | | |
| extern TicketHolder connTicketHolder; | | extern TicketHolder connTicketHolder; | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
End of changes. 14 change blocks. |
| 31 lines changed or deleted | | 124 lines changed or added | |
|
| mmap.h | | mmap.h | |
| | | | |
| skipping to change at line 23 | | skipping to change at line 23 | |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impli
ed. | |
| * See the License for the specific language governing permissions and | | * See the License for the specific language governing permissions and | |
| * limitations under the License. | | * limitations under the License. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| /* the administrative-ish stuff here */ | | /* the administrative-ish stuff here */ | |
|
| class MongoFile { | | class MongoFile : boost::noncopyable { | |
| protected: | | protected: | |
| virtual void close() = 0; | | virtual void close() = 0; | |
| virtual void flush(bool sync) = 0; | | virtual void flush(bool sync) = 0; | |
| | | | |
| void created(); /* subclass must call after create */ | | void created(); /* subclass must call after create */ | |
| void destroyed(); /* subclass must call in destructor */ | | void destroyed(); /* subclass must call in destructor */ | |
| public: | | public: | |
|
| | | virtual ~MongoFile() {} | |
| virtual long length() = 0; | | virtual long length() = 0; | |
| | | | |
| enum Options { | | enum Options { | |
| SEQUENTIAL = 1 // hint - e.g. FILE_FLAG_SEQUENTIAL_SCAN on wind
ows | | SEQUENTIAL = 1 // hint - e.g. FILE_FLAG_SEQUENTIAL_SCAN on wind
ows | |
| }; | | }; | |
| | | | |
|
| virtual ~MongoFile() {} | | | |
| | | | |
| static int flushAll( bool sync ); // returns n flushed | | static int flushAll( bool sync ); // returns n flushed | |
| static long long totalMappedLength(); | | static long long totalMappedLength(); | |
| static void closeAllFiles( stringstream &message ); | | static void closeAllFiles( stringstream &message ); | |
| | | | |
| /* can be "overridden" if necessary */ | | /* can be "overridden" if necessary */ | |
| static bool exists(boost::filesystem::path p) { | | static bool exists(boost::filesystem::path p) { | |
| return boost::filesystem::exists(p); | | return boost::filesystem::exists(p); | |
| } | | } | |
| }; | | }; | |
| | | | |
|
| class MFTemplate : public MongoFile { | | /** template for what a new storage engine's class definition must impl | |
| | | ement | |
| | | PRELIMINARY - subject to change. | |
| | | */ | |
| | | class StorageContainerTemplate : public MongoFile { | |
| protected: | | protected: | |
| virtual void close(); | | virtual void close(); | |
| virtual void flush(bool sync); | | virtual void flush(bool sync); | |
| public: | | public: | |
| virtual long length(); | | virtual long length(); | |
| | | | |
|
| | | /** pointer to a range of space in this storage unit */ | |
| class Pointer { | | class Pointer { | |
| public: | | public: | |
|
| | | /** returns address of buffer at offset 'offset' within the st | |
| | | orage unit. returned range is a contiguous | |
| | | buffer reflecting what is in storage. caller will not read | |
| | | or write past 'len'. | |
| | | | |
| | | note calls may be received that are at different points in | |
| | | a range and different lengths. however | |
| | | for now assume that on writes, if a call is made, previousl | |
| | | y returned addresses are no longer valid. i.e. | |
| | | p = at(10000, 500); | |
| | | q = at(10000, 600); | |
| | | after the second call it is ok if p is invalid. | |
| | | */ | |
| void* at(int offset, int len); | | void* at(int offset, int len); | |
|
| void grow(int offset, int len); | | | |
| | | /** indicate that we wrote to the range (from a previous at() c | |
| | | all) and that it needs | |
| | | flushing to disk. | |
| | | */ | |
| | | void written(int offset, int len); | |
| | | | |
| bool isNull() const; | | bool isNull() const; | |
| }; | | }; | |
| | | | |
|
| Pointer map( const char *filename ); | | /** commit written() calls from above. */ | |
| Pointer map(const char *_filename, long &length, int options=0); | | void commit(); | |
| | | | |
| | | Pointer open(const char *filename); | |
| | | Pointer open(const char *_filename, long &length, int options=0); | |
| }; | | }; | |
| | | | |
| class MemoryMappedFile : public MongoFile { | | class MemoryMappedFile : public MongoFile { | |
| public: | | public: | |
| class Pointer { | | class Pointer { | |
| char *_base; | | char *_base; | |
| public: | | public: | |
| Pointer() : _base(0) { } | | Pointer() : _base(0) { } | |
| Pointer(void *p) : _base((char*) p) { } | | Pointer(void *p) : _base((char*) p) { } | |
| void* at(int offset, int maxLen) { return _base + offset; } | | void* at(int offset, int maxLen) { return _base + offset; } | |
| | | | |
| skipping to change at line 88 | | skipping to change at line 108 | |
| | | | |
| MemoryMappedFile(); | | MemoryMappedFile(); | |
| ~MemoryMappedFile() { | | ~MemoryMappedFile() { | |
| destroyed(); | | destroyed(); | |
| close(); | | close(); | |
| } | | } | |
| void close(); | | void close(); | |
| | | | |
| // Throws exception if file doesn't exist. (dm may2010: not sure if
this is always true?) | | // Throws exception if file doesn't exist. (dm may2010: not sure if
this is always true?) | |
| void* map( const char *filename ); | | void* map( const char *filename ); | |
|
| /*Pointer pmap( const char *filename ) { | | | |
| | | /*To replace map(): | |
| | | | |
| | | Pointer open( const char *filename ) { | |
| void *p = map(filename); | | void *p = map(filename); | |
| uassert(13077, "couldn't open/map file", p); | | uassert(13077, "couldn't open/map file", p); | |
| return Pointer(p); | | return Pointer(p); | |
| }*/ | | }*/ | |
| | | | |
| /* Creates with length if DNE, otherwise uses existing file length, | | /* Creates with length if DNE, otherwise uses existing file length, | |
| passed length. | | passed length. | |
| */ | | */ | |
| void* map(const char *filename, long &length, int options = 0 ); | | void* map(const char *filename, long &length, int options = 0 ); | |
| | | | |
| | | | |
| skipping to change at line 116 | | skipping to change at line 139 | |
| return len; | | return len; | |
| } | | } | |
| | | | |
| private: | | private: | |
| static void updateLength( const char *filename, long &length ); | | static void updateLength( const char *filename, long &length ); | |
| | | | |
| HANDLE fd; | | HANDLE fd; | |
| HANDLE maphandle; | | HANDLE maphandle; | |
| void *view; | | void *view; | |
| long len; | | long len; | |
|
| | | string _filename; | |
| }; | | }; | |
| | | | |
| void printMemInfo( const char * where ); | | void printMemInfo( const char * where ); | |
| | | | |
| #include "ramstore.h" | | #include "ramstore.h" | |
| | | | |
| //#define _RAMSTORE | | //#define _RAMSTORE | |
| #if defined(_RAMSTORE) | | #if defined(_RAMSTORE) | |
| typedef RamStoreFile MMF; | | typedef RamStoreFile MMF; | |
| #else | | #else | |
| | | | |
End of changes. 10 change blocks. |
| 8 lines changed or deleted | | 38 lines changed or added | |
|
| namespace.h | | namespace.h | |
| | | | |
| skipping to change at line 30 | | skipping to change at line 30 | |
| | | | |
| #include "../pch.h" | | #include "../pch.h" | |
| #include "jsobj.h" | | #include "jsobj.h" | |
| #include "queryutil.h" | | #include "queryutil.h" | |
| #include "diskloc.h" | | #include "diskloc.h" | |
| #include "../util/hashtab.h" | | #include "../util/hashtab.h" | |
| #include "../util/mmap.h" | | #include "../util/mmap.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
|
| class Cursor; | | | |
| | | | |
| #pragma pack(1) | | | |
| | | | |
| /* in the mongo source code, "client" means "database". */ | | /* in the mongo source code, "client" means "database". */ | |
| | | | |
| const int MaxDatabaseLen = 256; // max str len for the db name, includi
ng null char | | const int MaxDatabaseLen = 256; // max str len for the db name, includi
ng null char | |
| | | | |
| // "database.a.b.c" -> "database" | | // "database.a.b.c" -> "database" | |
| inline void nsToDatabase(const char *ns, char *database) { | | inline void nsToDatabase(const char *ns, char *database) { | |
| const char *p = ns; | | const char *p = ns; | |
| char *q = database; | | char *q = database; | |
| while ( *p != '.' ) { | | while ( *p != '.' ) { | |
| if ( *p == 0 ) | | if ( *p == 0 ) | |
| | | | |
| skipping to change at line 58 | | skipping to change at line 54 | |
| if (q-database>=MaxDatabaseLen) { | | if (q-database>=MaxDatabaseLen) { | |
| log() << "nsToDatabase: ns too long. terminating, buf overrun c
ondition" << endl; | | log() << "nsToDatabase: ns too long. terminating, buf overrun c
ondition" << endl; | |
| dbexit( EXIT_POSSIBLE_CORRUPTION ); | | dbexit( EXIT_POSSIBLE_CORRUPTION ); | |
| } | | } | |
| } | | } | |
| inline string nsToDatabase(const char *ns) { | | inline string nsToDatabase(const char *ns) { | |
| char buf[MaxDatabaseLen]; | | char buf[MaxDatabaseLen]; | |
| nsToDatabase(ns, buf); | | nsToDatabase(ns, buf); | |
| return buf; | | return buf; | |
| } | | } | |
|
| | | inline string nsToDatabase(const string& ns) { | |
| | | size_t i = ns.find( '.' ); | |
| | | if ( i == string::npos ) | |
| | | return ns; | |
| | | return ns.substr( 0 , i ); | |
| | | } | |
| | | | |
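Both nsToDatabase() overloads simply keep everything before the first '.', so "database.a.b.c" maps to "database". A few illustrative calls, with made-up namespace values, assuming the inline helpers above are in scope:

    // illustrative only -- hypothetical namespaces
    void nsToDatabaseExamples() {
        string a = nsToDatabase( string("acme.orders") );         // "acme"
        string b = nsToDatabase( string("acme.system.indexes") ); // still "acme": only the first '.' counts
        string c = nsToDatabase( "admin" );                       // "admin" -- no '.', the whole name is the db
    }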
| /* e.g. | | /* e.g. | |
| NamespaceString ns("acme.orders"); | | NamespaceString ns("acme.orders"); | |
| cout << ns.coll; // "orders" | | cout << ns.coll; // "orders" | |
| */ | | */ | |
| class NamespaceString { | | class NamespaceString { | |
| public: | | public: | |
| string db; | | string db; | |
| string coll; // note collection names can have periods in them for
organizing purposes (e.g. "system.indexes") | | string coll; // note collection names can have periods in them for
organizing purposes (e.g. "system.indexes") | |
| private: | | private: | |
| | | | |
| skipping to change at line 87 | | skipping to change at line 89 | |
| | | | |
| string ns() const { | | string ns() const { | |
| return db + '.' + coll; | | return db + '.' + coll; | |
| } | | } | |
| | | | |
| bool isSystem() { | | bool isSystem() { | |
| return strncmp(coll.c_str(), "system.", 7) == 0; | | return strncmp(coll.c_str(), "system.", 7) == 0; | |
| } | | } | |
| }; | | }; | |
| | | | |
|
| | | #pragma pack(1) | |
| /* This helper class is used to make the HashMap below in NamespaceD
etails */ | | /* This helper class is used to make the HashMap below in NamespaceD
etails */ | |
| class Namespace { | | class Namespace { | |
| public: | | public: | |
| enum MaxNsLenValue { MaxNsLen = 128 }; | | enum MaxNsLenValue { MaxNsLen = 128 }; | |
| Namespace(const char *ns) { | | Namespace(const char *ns) { | |
| *this = ns; | | *this = ns; | |
| } | | } | |
| Namespace& operator=(const char *ns) { | | Namespace& operator=(const char *ns) { | |
| uassert( 10080 , "ns name too long, max size is 128", strlen(ns
) < MaxNsLen); | | uassert( 10080 , "ns name too long, max size is 128", strlen(ns
) < MaxNsLen); | |
| //memset(buf, 0, MaxNsLen); /* this is just to keep stuff clean
in the files for easy dumping and reading */ | | //memset(buf, 0, MaxNsLen); /* this is just to keep stuff clean
in the files for easy dumping and reading */ | |
| strcpy_s(buf, MaxNsLen, ns); | | strcpy_s(buf, MaxNsLen, ns); | |
| return *this; | | return *this; | |
| } | | } | |
| | | | |
| /* for more than 10 indexes -- see NamespaceDetails::Extra */ | | /* for more than 10 indexes -- see NamespaceDetails::Extra */ | |
|
| string extraName() { | | string extraName(int i) { | |
| string s = string(buf) + "$extra"; | | char ex[] = "$extra"; | |
| massert( 10348 , "ns name too long", s.size() < MaxNsLen); | | ex[5] += i; | |
| | | string s = string(buf) + ex; | |
| | | massert( 10348 , "$extra: ns name too long", s.size() < MaxNsLe | |
| | | n); | |
| return s; | | return s; | |
| } | | } | |
| bool isExtra() const { | | bool isExtra() const { | |
|
| const char *p = strstr(buf, "$extra"); | | const char *p = strstr(buf, "$extr"); | |
| return p && p[6] == 0; //==0 important in case an index uses na | | return p && p[5] && p[6] == 0; //==0 important in case an index | |
| me "$extra_1" for example | | uses name "$extra_1" for example | |
| } | | | |
| bool hasDollarSign() const { | | | |
| return strstr( buf , "$" ) > 0; | | | |
| } | | | |
| | | | |
| void kill() { | | | |
| buf[0] = 0x7f; | | | |
| } | | | |
| | | | |
| bool operator==(const char *r) { | | | |
| return strcmp(buf, r) == 0; | | | |
| } | | | |
| bool operator==(const Namespace& r) { | | | |
| return strcmp(buf, r.buf) == 0; | | | |
| } | | } | |
|
| | | bool hasDollarSign() const { return strchr( buf , '$' ) > 0; } | |
| | | void kill() { buf[0] = 0x7f; } | |
| | | bool operator==(const char *r) const { return strcmp(buf, r) == 0; | |
| | | } | |
| | | bool operator==(const Namespace& r) const { return strcmp(buf, r.bu | |
| | | f) == 0; } | |
| int hash() const { | | int hash() const { | |
| unsigned x = 0; | | unsigned x = 0; | |
| const char *p = buf; | | const char *p = buf; | |
| while ( *p ) { | | while ( *p ) { | |
| x = x * 131 + *p; | | x = x * 131 + *p; | |
| p++; | | p++; | |
| } | | } | |
| return (x & 0x7fffffff) | 0x8000000; // must be > 0 | | return (x & 0x7fffffff) | 0x8000000; // must be > 0 | |
| } | | } | |
| | | | |
| | | | |
| skipping to change at line 153 | | skipping to change at line 148 | |
| old = old.substr( 0 , old.find( "." ) ); | | old = old.substr( 0 , old.find( "." ) ); | |
| return old + "." + local; | | return old + "." + local; | |
| } | | } | |
| | | | |
| operator string() const { | | operator string() const { | |
| return (string)buf; | | return (string)buf; | |
| } | | } | |
| | | | |
| char buf[MaxNsLen]; | | char buf[MaxNsLen]; | |
| }; | | }; | |
|
| | | #pragma pack() | |
| | | | |
|
| } | | } // namespace mongo | |
| | | | |
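The new extraName(int i) builds a distinct hashtable key for each chained $extra block by bumping the last character of "$extra", and isExtra() now accepts any of those keys. A small illustration, with a hypothetical collection name:

    // illustrative only -- hypothetical collection name
    void extraNameExample() {
        Namespace n("test.foo");
        string e0 = n.extraName(0);     // "test.foo$extra"
        string e1 = n.extraName(1);     // "test.foo$extrb"  ('a' + 1)
        Namespace x(e0.c_str());
        assert( x.isExtra() );          // "$extr" plus one more char, then end of string
        assert( !n.isExtra() );         // ordinary collection names don't match
    }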
| #include "index.h" | | #include "index.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
|
| /** | | /** @return true if a client can modify this namespace | |
| @return true if a client can modify this namespace | | things like *.system.users */ | |
| things like *.system.users | | | |
| */ | | | |
| bool legalClientSystemNS( const string& ns , bool write ); | | bool legalClientSystemNS( const string& ns , bool write ); | |
| | | | |
| /* deleted lists -- linked lists of deleted records -- are placed in 'b
uckets' of various sizes | | /* deleted lists -- linked lists of deleted records -- are placed in 'b
uckets' of various sizes | |
| so you can look for a DeletedRecord of about the right size. | | so you can look for a DeletedRecord of about the right size. | |
| */ | | */ | |
| const int Buckets = 19; | | const int Buckets = 19; | |
| const int MaxBucket = 18; | | const int MaxBucket = 18; | |
| | | | |
| extern int bucketSizes[]; | | extern int bucketSizes[]; | |
| | | | |
|
| | | #pragma pack(1) | |
| /* this is the "header" for a collection that has all its details. in
the .ns file. | | /* this is the "header" for a collection that has all its details. in
the .ns file. | |
| */ | | */ | |
| class NamespaceDetails { | | class NamespaceDetails { | |
| friend class NamespaceIndex; | | friend class NamespaceIndex; | |
| enum { NIndexesExtra = 30, | | enum { NIndexesExtra = 30, | |
| NIndexesBase = 10 | | NIndexesBase = 10 | |
| }; | | }; | |
|
| struct Extra { | | public: | |
| | | struct ExtraOld { | |
| // note we could use this field for more chaining later, so don
't waste it: | | // note we could use this field for more chaining later, so don
't waste it: | |
| unsigned long long reserved1; | | unsigned long long reserved1; | |
| IndexDetails details[NIndexesExtra]; | | IndexDetails details[NIndexesExtra]; | |
| unsigned reserved2; | | unsigned reserved2; | |
| unsigned reserved3; | | unsigned reserved3; | |
| }; | | }; | |
|
| | | class Extra { | |
| | | long long _next; | |
| | | public: | |
| | | IndexDetails details[NIndexesExtra]; | |
| | | private: | |
| | | unsigned reserved2; | |
| | | unsigned reserved3; | |
| | | Extra(const Extra&) { assert(false); } | |
| | | Extra& operator=(const Extra& r) { assert(false); re | |
| | | turn *this; } | |
| | | public: | |
| | | Extra() { } | |
| | | long ofsFrom(NamespaceDetails *d) { | |
| | | return ((char *) this) - ((char *) d); | |
| | | } | |
| | | void init() { memset(this, 0, sizeof(Extra)); } | |
| | | Extra* next(NamespaceDetails *d) { | |
| | | if( _next == 0 ) return 0; | |
| | | return (Extra*) (((char *) d) + _next); | |
| | | } | |
| | | void setNext(long ofs) { _next = ofs; } | |
| | | void copy(NamespaceDetails *d, const Extra& e) { | |
| | | memcpy(this, &e, sizeof(Extra)); | |
| | | _next = 0; | |
| | | } | |
| | | }; // Extra | |
| | | | |
| Extra* extra() { | | Extra* extra() { | |
|
| assert( extraOffset ); | | if( extraOffset == 0 ) return 0; | |
| return (Extra *) (((char *) this) + extraOffset); | | return (Extra *) (((char *) this) + extraOffset); | |
| } | | } | |
|
| | | | |
| public: | | public: | |
|
| | | /* add extra space for indexes when more than 10 */ | |
| | | Extra* allocExtra(const char *ns, int nindexessofar); | |
| | | | |
| void copyingFrom(const char *thisns, NamespaceDetails *src); // mus
t be called when renaming a NS to fix up extra | | void copyingFrom(const char *thisns, NamespaceDetails *src); // mus
t be called when renaming a NS to fix up extra | |
| | | | |
|
| enum { NIndexesMax = 40 }; | | enum { NIndexesMax = 64 }; | |
| | | | |
|
| BOOST_STATIC_ASSERT( NIndexesMax == NIndexesBase + NIndexesExtra ); | | BOOST_STATIC_ASSERT( NIndexesMax <= NIndexesBase + NIndexesExtra*2 | |
| | | ); | |
| | | BOOST_STATIC_ASSERT( NIndexesMax <= 64 ); // multiKey bits | |
| | | BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::ExtraOld) == 4 | |
| | | 96 ); | |
| | | BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) == 496 | |
| | | ); | |
| | | | |
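The limits asserted above work out as follows: the inline array holds NIndexesBase = 10 IndexDetails and each chained Extra block holds NIndexesExtra = 30 more, so two Extra blocks give 70 raw slots, yet NIndexesMax is 64 because multiKeyIndexBits is a single unsigned long long used as a per-index bitmask. Restated:

    // slot arithmetic behind the asserts above
    //   raw capacity  = NIndexesBase + 2 * NIndexesExtra = 10 + 2*30 = 70
    //   bitmask limit = 8 * sizeof(unsigned long long)   = 64   (multiKeyIndexBits)
    //   NIndexesMax   = 64, the smaller of the two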
| /* called when loaded from disk */ | | /* called when loaded from disk */ | |
| void onLoad(const Namespace& k); | | void onLoad(const Namespace& k); | |
| | | | |
|
| NamespaceDetails( const DiskLoc &loc, bool _capped ) { | | NamespaceDetails( const DiskLoc &loc, bool _capped ); | |
| /* be sure to initialize new fields here -- doesn't default to | | | |
| zeroes the way we use it */ | | | |
| firstExtent = lastExtent = capExtent = loc; | | | |
| datasize = nrecords = 0; | | | |
| lastExtentSize = 0; | | | |
| nIndexes = 0; | | | |
| capped = _capped; | | | |
| max = 0x7fffffff; | | | |
| paddingFactor = 1.0; | | | |
| flags = 0; | | | |
| capFirstNewRecord = DiskLoc(); | | | |
| // Signal that we are on first allocation iteration through ext | | | |
| ents. | | | |
| capFirstNewRecord.setInvalid(); | | | |
| // For capped case, signal that we are doing initial extent all | | | |
| ocation. | | | |
| if ( capped ) | | | |
| deletedList[ 1 ].setInvalid(); | | | |
| assert( sizeof(dataFileVersion) == 2 ); | | | |
| dataFileVersion = 0; | | | |
| indexFileVersion = 0; | | | |
| multiKeyIndexBits = 0; | | | |
| reservedA = 0; | | | |
| extraOffset = 0; | | | |
| backgroundIndexBuildInProgress = 0; | | | |
| memset(reserved, 0, sizeof(reserved)); | | | |
| } | | | |
| DiskLoc firstExtent; | | DiskLoc firstExtent; | |
| DiskLoc lastExtent; | | DiskLoc lastExtent; | |
| | | | |
| /* NOTE: capped collections override the meaning of deleted list. | | /* NOTE: capped collections override the meaning of deleted list. | |
| deletedList[0] points to a list of free records (DeletedRe
cord's) for all extents in | | deletedList[0] points to a list of free records (DeletedRe
cord's) for all extents in | |
| the namespace. | | the namespace. | |
| deletedList[1] points to the last record in the prev exten
t. When the "current extent" | | deletedList[1] points to the last record in the prev exten
t. When the "current extent" | |
| changes, this value is updated. !deletedList[1].isValid()
when this value is not | | changes, this value is updated. !deletedList[1].isValid()
when this value is not | |
| yet computed. | | yet computed. | |
| */ | | */ | |
| | | | |
| skipping to change at line 270 | | skipping to change at line 276 | |
| private: | | private: | |
| unsigned long long reservedA; | | unsigned long long reservedA; | |
| long long extraOffset; // where the $extra info is located (bytes r
elative to this) | | long long extraOffset; // where the $extra info is located (bytes r
elative to this) | |
| public: | | public: | |
| int backgroundIndexBuildInProgress; // 1 if in prog | | int backgroundIndexBuildInProgress; // 1 if in prog | |
| char reserved[76]; | | char reserved[76]; | |
| | | | |
| /* when a background index build is in progress, we don't count the
index in nIndexes until | | /* when a background index build is in progress, we don't count the
index in nIndexes until | |
| complete, yet need to still use it in _indexRecord() - thus we u
se this function for that. | | complete, yet need to still use it in _indexRecord() - thus we u
se this function for that. | |
| */ | | */ | |
|
| int nIndexesBeingBuilt() const { | | int nIndexesBeingBuilt() const { return nIndexes + backgroundIndexB | |
| return nIndexes + backgroundIndexBuildInProgress; | | uildInProgress; } | |
| } | | | |
| | | | |
| /* NOTE: be careful with flags. are we manipulating them in read l
ocks? if so, | | /* NOTE: be careful with flags. are we manipulating them in read l
ocks? if so, | |
| this isn't thread safe. TODO | | this isn't thread safe. TODO | |
| */ | | */ | |
| enum NamespaceFlags { | | enum NamespaceFlags { | |
| Flag_HaveIdIndex = 1 << 0, // set when we have _id index (ONLY
if ensureIdIndex was called -- 0 if that has never been called) | | Flag_HaveIdIndex = 1 << 0, // set when we have _id index (ONLY
if ensureIdIndex was called -- 0 if that has never been called) | |
| Flag_CappedDisallowDelete = 1 << 1 // set when deletes not allo
wed during capped table allocation. | | Flag_CappedDisallowDelete = 1 << 1 // set when deletes not allo
wed during capped table allocation. | |
| }; | | }; | |
| | | | |
| IndexDetails& idx(int idxNo) { | | IndexDetails& idx(int idxNo) { | |
| if( idxNo < NIndexesBase ) | | if( idxNo < NIndexesBase ) | |
| return _indexes[idxNo]; | | return _indexes[idxNo]; | |
|
| return extra()->details[idxNo-NIndexesBase]; | | Extra *e = extra(); | |
| | | massert(13282, "missing Extra", e); | |
| | | int i = idxNo - NIndexesBase; | |
| | | if( i >= NIndexesExtra ) { | |
| | | e = e->next(this); | |
| | | massert(13283, "missing Extra", e); | |
| | | i -= NIndexesExtra; | |
| | | } | |
| | | return e->details[i]; | |
| } | | } | |
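With the chained Extra blocks, an index number now resolves through up to three regions, and the chain is addressed by byte offsets relative to the NamespaceDetails record (ofsFrom()/setNext()) rather than raw pointers, so it stays valid inside the memory-mapped .ns file. The sketch below restates the dispatch that idx() performs; the lookup() wrapper is hypothetical and exists only to show the call.

    // where a given idxNo lives under the new layout (restating idx() above):
    //   idxNo  0..9  -> _indexes[idxNo]                        (inline in NamespaceDetails)
    //   idxNo 10..39 -> extra()->details[idxNo - 10]           (first $extra block)
    //   idxNo 40..63 -> extra()->next(d)->details[idxNo - 40]  (second $extra block)
    IndexDetails& lookup( NamespaceDetails *d, int idxNo ) {    // hypothetical helper
        return d->idx( idxNo );
    }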
| IndexDetails& backgroundIdx() { | | IndexDetails& backgroundIdx() { | |
| DEV assert(backgroundIndexBuildInProgress); | | DEV assert(backgroundIndexBuildInProgress); | |
| return idx(nIndexes); | | return idx(nIndexes); | |
| } | | } | |
| | | | |
| class IndexIterator { | | class IndexIterator { | |
| friend class NamespaceDetails; | | friend class NamespaceDetails; | |
| int i; | | int i; | |
| int n; | | int n; | |
| NamespaceDetails *d; | | NamespaceDetails *d; | |
|
| Extra *e; | | | |
| IndexIterator(NamespaceDetails *_d) { | | IndexIterator(NamespaceDetails *_d) { | |
| d = _d; | | d = _d; | |
| i = 0; | | i = 0; | |
| n = d->nIndexes; | | n = d->nIndexes; | |
|
| if( n > NIndexesBase ) | | | |
| e = d->extra(); | | | |
| } | | } | |
| public: | | public: | |
| int pos() { return i; } // note this is the next one to come | | int pos() { return i; } // note this is the next one to come | |
| bool more() { return i < n; } | | bool more() { return i < n; } | |
|
| IndexDetails& next() { | | IndexDetails& next() { return d->idx(i++); } | |
| int k = i; | | }; // IndexIterator | |
| i++; | | | |
| return k < NIndexesBase ? d->_indexes[k] : | | | |
| e->details[k-10]; | | | |
| } | | | |
| }; | | | |
| | | | |
|
| IndexIterator ii() { | | IndexIterator ii() { return IndexIterator(this); } | |
| return IndexIterator(this); | | | |
| } | | | |
| | | | |
| /* hackish - find our index # in the indexes array | | /* hackish - find our index # in the indexes array | |
| */ | | */ | |
| int idxNo(IndexDetails& idx) { | | int idxNo(IndexDetails& idx) { | |
| IndexIterator i = ii(); | | IndexIterator i = ii(); | |
| while( i.more() ) { | | while( i.more() ) { | |
| if( &i.next() == &idx ) | | if( &i.next() == &idx ) | |
| return i.pos()-1; | | return i.pos()-1; | |
| } | | } | |
| massert( 10349 , "E12000 idxNo fails", false); | | massert( 10349 , "E12000 idxNo fails", false); | |
| | | | |
| skipping to change at line 353 | | skipping to change at line 355 | |
| void clearIndexIsMultikey(int i) { | | void clearIndexIsMultikey(int i) { | |
| dassert( i < NIndexesMax ); | | dassert( i < NIndexesMax ); | |
| multiKeyIndexBits &= ~(((unsigned long long) 1) << i); | | multiKeyIndexBits &= ~(((unsigned long long) 1) << i); | |
| } | | } | |
| | | | |
| /* add a new index. does not add to system.indexes etc. - just to
NamespaceDetails. | | /* add a new index. does not add to system.indexes etc. - just to
NamespaceDetails. | |
| caller must populate returned object. | | caller must populate returned object. | |
| */ | | */ | |
| IndexDetails& addIndex(const char *thisns, bool resetTransient=true
); | | IndexDetails& addIndex(const char *thisns, bool resetTransient=true
); | |
| | | | |
|
| void aboutToDeleteAnIndex() { | | void aboutToDeleteAnIndex() { flags &= ~Flag_HaveIdIndex; } | |
| flags &= ~Flag_HaveIdIndex; | | | |
| } | | | |
| | | | |
|
| void cappedDisallowDelete() { | | void cappedDisallowDelete() { flags |= Flag_CappedDisallowDelete; } | |
| flags |= Flag_CappedDisallowDelete; | | | |
| } | | | |
| | | | |
| /* returns index of the first index in which the field is present.
-1 if not present. */ | | /* returns index of the first index in which the field is present.
-1 if not present. */ | |
| int fieldIsIndexed(const char *fieldName); | | int fieldIsIndexed(const char *fieldName); | |
| | | | |
| void paddingFits() { | | void paddingFits() { | |
| double x = paddingFactor - 0.01; | | double x = paddingFactor - 0.01; | |
| if ( x >= 1.0 ) | | if ( x >= 1.0 ) | |
| paddingFactor = x; | | paddingFactor = x; | |
| } | | } | |
| void paddingTooSmall() { | | void paddingTooSmall() { | |
| | | | |
| skipping to change at line 422 | | skipping to change at line 420 | |
| return Buckets-1; | | return Buckets-1; | |
| } | | } | |
| | | | |
| /* allocate a new record. lenToAlloc includes headers. */ | | /* allocate a new record. lenToAlloc includes headers. */ | |
| DiskLoc alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc); | | DiskLoc alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc); | |
| | | | |
| /* add a given record to the deleted chains for this NS */ | | /* add a given record to the deleted chains for this NS */ | |
| void addDeletedRec(DeletedRecord *d, DiskLoc dloc); | | void addDeletedRec(DeletedRecord *d, DiskLoc dloc); | |
| | | | |
| void dumpDeleted(set<DiskLoc> *extents = 0); | | void dumpDeleted(set<DiskLoc> *extents = 0); | |
|
| | | bool capLooped() const { return capped && capFirstNewRecord.isValid | |
| bool capLooped() const { | | (); } | |
| return capped && capFirstNewRecord.isValid(); | | | |
| } | | | |
| | | | |
| // Start from firstExtent by default. | | // Start from firstExtent by default. | |
| DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const
; | | DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const
; | |
| | | | |
| // Start from lastExtent by default. | | // Start from lastExtent by default. | |
| DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const; | | DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const; | |
| | | | |
| bool inCapExtent( const DiskLoc &dl ) const; | | bool inCapExtent( const DiskLoc &dl ) const; | |
|
| | | | |
| void checkMigrate(); | | void checkMigrate(); | |
|
| | | | |
| long long storageSize( int * numExtents = 0 ); | | long long storageSize( int * numExtents = 0 ); | |
| | | | |
| private: | | private: | |
|
| bool cappedMayDelete() const { | | bool cappedMayDelete() const { return !( flags & Flag_CappedDisallo | |
| return !( flags & Flag_CappedDisallowDelete ); | | wDelete ); } | |
| } | | Extent *theCapExtent() const { return capExtent.ext(); } | |
| Extent *theCapExtent() const { | | | |
| return capExtent.ext(); | | | |
| } | | | |
| void advanceCapExtent( const char *ns ); | | void advanceCapExtent( const char *ns ); | |
| void maybeComplain( const char *ns, int len ) const; | | void maybeComplain( const char *ns, int len ) const; | |
| DiskLoc __stdAlloc(int len); | | DiskLoc __stdAlloc(int len); | |
| DiskLoc __capAlloc(int len); | | DiskLoc __capAlloc(int len); | |
| DiskLoc _alloc(const char *ns, int len); | | DiskLoc _alloc(const char *ns, int len); | |
| void compact(); // combine adjacent deleted records | | void compact(); // combine adjacent deleted records | |
|
| | | | |
| DiskLoc &firstDeletedInCapExtent(); | | DiskLoc &firstDeletedInCapExtent(); | |
| bool nextIsInCapExtent( const DiskLoc &dl ) const; | | bool nextIsInCapExtent( const DiskLoc &dl ) const; | |
|
| }; | | }; // NamespaceDetails | |
| | | | |
| #pragma pack() | | #pragma pack() | |
| | | | |
|
| /* these are things we know / compute about a namespace that are transi | | /* NamespaceDetailsTransient | |
| ent -- things | | | |
| | | these are things we know / compute about a namespace that are transi | |
| | | ent -- things | |
| we don't actually store in the .ns file. so mainly caching of frequ
ently used | | we don't actually store in the .ns file. so mainly caching of frequ
ently used | |
| information. | | information. | |
| | | | |
| CAUTION: Are you maintaining this properly on a collection drop()?
A dropdatabase()? Be careful. | | CAUTION: Are you maintaining this properly on a collection drop()?
A dropdatabase()? Be careful. | |
| The current field "allIndexKeys" may have too many keys in
it on such an occurrence; | | The current field "allIndexKeys" may have too many keys in
it on such an occurrence; | |
| as currently used that does not cause anything terrible to
happen. | | as currently used that does not cause anything terrible to
happen. | |
| | | | |
| todo: cleanup code, need abstractions and separation | | todo: cleanup code, need abstractions and separation | |
| */ | | */ | |
| class NamespaceDetailsTransient : boost::noncopyable { | | class NamespaceDetailsTransient : boost::noncopyable { | |
|
| | | BOOST_STATIC_ASSERT( sizeof(NamespaceDetails) == 496 ); | |
| | | | |
| /* general --------------------------------------------------------
----- */ | | /* general --------------------------------------------------------
----- */ | |
| private: | | private: | |
| string _ns; | | string _ns; | |
| void reset(); | | void reset(); | |
| static std::map< string, shared_ptr< NamespaceDetailsTransient > >
_map; | | static std::map< string, shared_ptr< NamespaceDetailsTransient > >
_map; | |
| public: | | public: | |
| NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(
false), _qcWriteCount(), _cll_enabled() { } | | NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(
false), _qcWriteCount(), _cll_enabled() { } | |
| /* _get() is not threadsafe -- see get_inlock() comments */ | | /* _get() is not threadsafe -- see get_inlock() comments */ | |
| static NamespaceDetailsTransient& _get(const char *ns); | | static NamespaceDetailsTransient& _get(const char *ns); | |
| /* use get_w() when doing write operations */ | | /* use get_w() when doing write operations */ | |
| | | | |
| skipping to change at line 583 | | skipping to change at line 574 | |
| if ( t.get() == 0 ) | | if ( t.get() == 0 ) | |
| t.reset( new NamespaceDetailsTransient(ns) ); | | t.reset( new NamespaceDetailsTransient(ns) ); | |
| return *t; | | return *t; | |
| } | | } | |
| | | | |
| /* NamespaceIndex is the ".ns" file you see in the data directory. It
is the "system catalog" | | /* NamespaceIndex is the ".ns" file you see in the data directory. It
is the "system catalog" | |
| if you will: at least the core parts. (Additional info in system.*
collections.) | | if you will: at least the core parts. (Additional info in system.*
collections.) | |
| */ | | */ | |
| class NamespaceIndex { | | class NamespaceIndex { | |
| friend class NamespaceCursor; | | friend class NamespaceCursor; | |
|
| BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(Name | | | |
| spaceDetails) ); | | | |
| public: | | | |
| | | | |
|
| | | public: | |
| NamespaceIndex(const string &dir, const string &database) : | | NamespaceIndex(const string &dir, const string &database) : | |
|
| ht( 0 ), | | ht( 0 ), dir_( dir ), database_( database ) {} | |
| dir_( dir ), | | | |
| database_( database ) {} | | | |
| | | | |
| /* returns true if new db will be created if we init lazily */ | | /* returns true if new db will be created if we init lazily */ | |
| bool exists() const; | | bool exists() const; | |
| | | | |
| void init(); | | void init(); | |
| | | | |
| void add_ns(const char *ns, DiskLoc& loc, bool capped) { | | void add_ns(const char *ns, DiskLoc& loc, bool capped) { | |
| NamespaceDetails details( loc, capped ); | | NamespaceDetails details( loc, capped ); | |
| add_ns( ns, details ); | | add_ns( ns, details ); | |
| } | | } | |
| | | | |
| skipping to change at line 614 | | skipping to change at line 601 | |
| uassert( 10081 , "too many namespaces/collections", ht->put(n,
details)); | | uassert( 10081 , "too many namespaces/collections", ht->put(n,
details)); | |
| } | | } | |
| | | | |
| /* just for diagnostics */ | | /* just for diagnostics */ | |
| /*size_t detailsOffset(NamespaceDetails *d) { | | /*size_t detailsOffset(NamespaceDetails *d) { | |
| if ( !ht ) | | if ( !ht ) | |
| return -1; | | return -1; | |
| return ((char *) d) - (char *) ht->nodes; | | return ((char *) d) - (char *) ht->nodes; | |
| }*/ | | }*/ | |
| | | | |
|
| /* extra space for indexes when more than 10 */ | | | |
| NamespaceDetails::Extra* allocExtra(const char *ns) { | | | |
| Namespace n(ns); | | | |
| Namespace extra(n.extraName().c_str()); // throws userexception | | | |
| if ns name too long | | | |
| NamespaceDetails *d = details(ns); | | | |
| massert( 10350 , "allocExtra: base ns missing?", d ); | | | |
| assert( d->extraOffset == 0 ); | | | |
| massert( 10351 , "allocExtra: extra already exists", ht->get(e | | | |
| xtra) == 0 ); | | | |
| NamespaceDetails::Extra temp; | | | |
| memset(&temp, 0, sizeof(temp)); | | | |
| uassert( 10082 , "allocExtra: too many namespaces/collections" | | | |
| , ht->put(extra, (NamespaceDetails&) temp)); | | | |
| NamespaceDetails::Extra *e = (NamespaceDetails::Extra *) ht->ge | | | |
| t(extra); | | | |
| d->extraOffset = ((char *) e) - ((char *) d); | | | |
| assert( d->extra() == e ); | | | |
| return e; | | | |
| } | | | |
| | | | |
| NamespaceDetails* details(const char *ns) { | | NamespaceDetails* details(const char *ns) { | |
| if ( !ht ) | | if ( !ht ) | |
| return 0; | | return 0; | |
| Namespace n(ns); | | Namespace n(ns); | |
| NamespaceDetails *d = ht->get(n); | | NamespaceDetails *d = ht->get(n); | |
| if ( d ) | | if ( d ) | |
| d->checkMigrate(); | | d->checkMigrate(); | |
| return d; | | return d; | |
| } | | } | |
| | | | |
| void kill_ns(const char *ns) { | | void kill_ns(const char *ns) { | |
| if ( !ht ) | | if ( !ht ) | |
| return; | | return; | |
| Namespace n(ns); | | Namespace n(ns); | |
| ht->kill(n); | | ht->kill(n); | |
| | | | |
|
| try { | | for( int i = 0; i<=1; i++ ) { | |
| Namespace extra(n.extraName().c_str()); | | try { | |
| ht->kill(extra); | | Namespace extra(n.extraName(i).c_str()); | |
| | | ht->kill(extra); | |
| | | } | |
| | | catch(DBException&) { } | |
| } | | } | |
|
| catch(DBException&) { } | | | |
| } | | } | |
| | | | |
| bool find(const char *ns, DiskLoc& loc) { | | bool find(const char *ns, DiskLoc& loc) { | |
| NamespaceDetails *l = details(ns); | | NamespaceDetails *l = details(ns); | |
| if ( l ) { | | if ( l ) { | |
| loc = l->firstExtent; | | loc = l->firstExtent; | |
| return true; | | return true; | |
| } | | } | |
| return false; | | return false; | |
| } | | } | |
| | | | |
| bool allocated() const { | | bool allocated() const { | |
| return ht != 0; | | return ht != 0; | |
| } | | } | |
| | | | |
| void getNamespaces( list<string>& tofill , bool onlyCollections = t
rue ) const; | | void getNamespaces( list<string>& tofill , bool onlyCollections = t
rue ) const; | |
| | | | |
|
| | | NamespaceDetails::Extra* newExtra(const char *ns, int n, NamespaceD | |
| | | etails *d); | |
| | | | |
| private: | | private: | |
| boost::filesystem::path path() const; | | boost::filesystem::path path() const; | |
| void maybeMkdir() const; | | void maybeMkdir() const; | |
| | | | |
| MMF f; | | MMF f; | |
| HashTable<Namespace,NamespaceDetails,MMF::Pointer> *ht; | | HashTable<Namespace,NamespaceDetails,MMF::Pointer> *ht; | |
| string dir_; | | string dir_; | |
| string database_; | | string database_; | |
| }; | | }; | |
| | | | |
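Since NamespaceIndex wraps the memory-mapped .ns hashtable, looking up a collection's metadata is a plain details() call. A hypothetical usage sketch; the directory, database and collection names are made up and error handling is omitted:

    // hypothetical lookup, illustration only
    void printIndexCount() {
        NamespaceIndex ni( "/data/db/", "test" );
        ni.init();                                        // maps the test.ns file
        NamespaceDetails *d = ni.details( "test.foo" );   // 0 if the collection doesn't exist
        if ( d )
            cout << "test.foo: " << d->nIndexesBeingBuilt() << " indexes (incl. in-progress)" << endl;
    }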
| | | | |
End of changes. 41 change blocks. |
| 132 lines changed or deleted | | 110 lines changed or added | |
|
| oplog.h | | oplog.h | |
| | | | |
| skipping to change at line 65 | | skipping to change at line 65 | |
| class FindingStartCursor { | | class FindingStartCursor { | |
| public: | | public: | |
| FindingStartCursor( const QueryPlan & qp ) : | | FindingStartCursor( const QueryPlan & qp ) : | |
| _qp( qp ), | | _qp( qp ), | |
| _findingStart( true ), | | _findingStart( true ), | |
| _findingStartMode(), | | _findingStartMode(), | |
| _findingStartTimer( 0 ), | | _findingStartTimer( 0 ), | |
| _findingStartCursor( 0 ) | | _findingStartCursor( 0 ) | |
| { init(); } | | { init(); } | |
| bool done() const { return !_findingStart; } | | bool done() const { return !_findingStart; } | |
|
| auto_ptr< Cursor > cRelease() { return _c; } | | shared_ptr<Cursor> cRelease() { return _c; } | |
| void next() { | | void next() { | |
| if ( !_findingStartCursor || !_findingStartCursor->c->ok() ) { | | if ( !_findingStartCursor || !_findingStartCursor->c->ok() ) { | |
| _findingStart = false; | | _findingStart = false; | |
| _c = _qp.newCursor(); // on error, start from beginning | | _c = _qp.newCursor(); // on error, start from beginning | |
| destroyClientCursor(); | | destroyClientCursor(); | |
| return; | | return; | |
| } | | } | |
| switch( _findingStartMode ) { | | switch( _findingStartMode ) { | |
| case Initial: { | | case Initial: { | |
| if ( !_matcher->matches( _findingStartCursor->c->currKe
y(), _findingStartCursor->c->currLoc() ) ) { | | if ( !_matcher->matches( _findingStartCursor->c->currKe
y(), _findingStartCursor->c->currLoc() ) ) { | |
| | | | |
| skipping to change at line 133 | | skipping to change at line 133 | |
| } | | } | |
| } | | } | |
| private: | | private: | |
| enum FindingStartMode { Initial, FindExtent, InExtent }; | | enum FindingStartMode { Initial, FindExtent, InExtent }; | |
| const QueryPlan &_qp; | | const QueryPlan &_qp; | |
| bool _findingStart; | | bool _findingStart; | |
| FindingStartMode _findingStartMode; | | FindingStartMode _findingStartMode; | |
| auto_ptr< CoveredIndexMatcher > _matcher; | | auto_ptr< CoveredIndexMatcher > _matcher; | |
| Timer _findingStartTimer; | | Timer _findingStartTimer; | |
| ClientCursor * _findingStartCursor; | | ClientCursor * _findingStartCursor; | |
|
| auto_ptr< Cursor > _c; | | shared_ptr<Cursor> _c; | |
| DiskLoc startLoc( const DiskLoc &rec ) { | | DiskLoc startLoc( const DiskLoc &rec ) { | |
| Extent *e = rec.rec()->myExtent( rec ); | | Extent *e = rec.rec()->myExtent( rec ); | |
|
| if ( e->myLoc != _qp.nsd()->capExtent ) | | if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExt
ent ) ) | |
| return e->firstRecord; | | return e->firstRecord; | |
| // Likely we are on the fresh side of capExtent, so return firs
t fresh record. | | // Likely we are on the fresh side of capExtent, so return firs
t fresh record. | |
| // If we are on the stale side of capExtent, then the collectio
n is small and it | | // If we are on the stale side of capExtent, then the collectio
n is small and it | |
| // doesn't matter if we start the extent scan with capFirstNewR
ecord. | | // doesn't matter if we start the extent scan with capFirstNewR
ecord. | |
| return _qp.nsd()->capFirstNewRecord; | | return _qp.nsd()->capFirstNewRecord; | |
| } | | } | |
| | | | |
|
| | | // should never have an empty extent in the oplog, so don't worry a
bout that case | |
| DiskLoc prevLoc( const DiskLoc &rec ) { | | DiskLoc prevLoc( const DiskLoc &rec ) { | |
| Extent *e = rec.rec()->myExtent( rec ); | | Extent *e = rec.rec()->myExtent( rec ); | |
|
| if ( e->xprev.isNull() ) | | if ( _qp.nsd()->capLooped() ) { | |
| e = _qp.nsd()->lastExtent.ext(); | | if ( e->xprev.isNull() ) | |
| else | | e = _qp.nsd()->lastExtent.ext(); | |
| e = e->xprev.ext(); | | else | |
| if ( e->myLoc != _qp.nsd()->capExtent ) | | e = e->xprev.ext(); | |
| return e->firstRecord; | | if ( e->myLoc != _qp.nsd()->capExtent ) | |
| | | return e->firstRecord; | |
| | | } else { | |
| | | if ( !e->xprev.isNull() ) { | |
| | | e = e->xprev.ext(); | |
| | | return e->firstRecord; | |
| | | } | |
| | | } | |
| return DiskLoc(); // reached beginning of collection | | return DiskLoc(); // reached beginning of collection | |
| } | | } | |
| void createClientCursor( const DiskLoc &startLoc = DiskLoc() ) { | | void createClientCursor( const DiskLoc &startLoc = DiskLoc() ) { | |
|
| auto_ptr<Cursor> c = _qp.newCursor( startLoc ); | | shared_ptr<Cursor> c = _qp.newCursor( startLoc ); | |
| _findingStartCursor = new ClientCursor(QueryOption_NoCursorTime
out, c, _qp.ns()); | | _findingStartCursor = new ClientCursor(QueryOption_NoCursorTime
out, c, _qp.ns()); | |
| } | | } | |
| void destroyClientCursor() { | | void destroyClientCursor() { | |
| if ( _findingStartCursor ) { | | if ( _findingStartCursor ) { | |
| ClientCursor::erase( _findingStartCursor->cursorid ); | | ClientCursor::erase( _findingStartCursor->cursorid ); | |
| _findingStartCursor = 0; | | _findingStartCursor = 0; | |
| } | | } | |
| } | | } | |
| void maybeRelease() { | | void maybeRelease() { | |
| RARELY { | | RARELY { | |
| | | | |
| skipping to change at line 177 | | skipping to change at line 185 | |
| _findingStartCursor->updateLocation(); | | _findingStartCursor->updateLocation(); | |
| { | | { | |
| dbtemprelease t; | | dbtemprelease t; | |
| } | | } | |
| _findingStartCursor = ClientCursor::find( id, false ); | | _findingStartCursor = ClientCursor::find( id, false ); | |
| } | | } | |
| } | | } | |
| void init() { | | void init() { | |
| // Use a ClientCursor here so we can release db mutex while sca
nning | | // Use a ClientCursor here so we can release db mutex while sca
nning | |
| // oplog (can take quite a while with large oplogs). | | // oplog (can take quite a while with large oplogs). | |
|
| auto_ptr<Cursor> c = _qp.newReverseCursor(); | | shared_ptr<Cursor> c = _qp.newReverseCursor(); | |
| _findingStartCursor = new ClientCursor(QueryOption_NoCursorTime
out, c, _qp.ns()); | | _findingStartCursor = new ClientCursor(QueryOption_NoCursorTime
out, c, _qp.ns()); | |
| _findingStartTimer.reset(); | | _findingStartTimer.reset(); | |
| _findingStartMode = Initial; | | _findingStartMode = Initial; | |
| BSONElement tsElt = _qp.query()[ "ts" ]; | | BSONElement tsElt = _qp.query()[ "ts" ]; | |
| massert( 13044, "no ts field in query", !tsElt.eoo() ); | | massert( 13044, "no ts field in query", !tsElt.eoo() ); | |
| BSONObjBuilder b; | | BSONObjBuilder b; | |
| b.append( tsElt ); | | b.append( tsElt ); | |
| BSONObj tsQuery = b.obj(); | | BSONObj tsQuery = b.obj(); | |
| _matcher.reset(new CoveredIndexMatcher(tsQuery, _qp.indexKey())
); | | _matcher.reset(new CoveredIndexMatcher(tsQuery, _qp.indexKey())
); | |
| } | | } | |
| | | | |
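FindingStartCursor is driven by calling next() until done(), after which cRelease() hands back the forward cursor (now a shared_ptr under this change). A hypothetical driver loop, purely to show the call sequence; the real callers live in the replication and query code, not in this header:

    // hypothetical driver loop, illustration only
    shared_ptr<Cursor> findOplogStart( const QueryPlan &qp ) {
        FindingStartCursor fsc( qp );   // init() requires a "ts" predicate in qp.query()
        while ( !fsc.done() )
            fsc.next();                 // walks backwards, extent by extent
        return fsc.cRelease();          // forward cursor positioned at/before the first match
    }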
End of changes. 7 change blocks. |
| 11 lines changed or deleted | | 19 lines changed or added | |
|
| parallel.h | | parallel.h | |
| | | | |
| skipping to change at line 32 | | skipping to change at line 32 | |
| #include "../pch.h" | | #include "../pch.h" | |
| #include "dbclient.h" | | #include "dbclient.h" | |
| #include "redef_macros.h" | | #include "redef_macros.h" | |
| | | | |
| #include "../db/dbmessage.h" | | #include "../db/dbmessage.h" | |
| #include "../db/matcher.h" | | #include "../db/matcher.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| /** | | /** | |
|
| * this is a cursor that works over a set of servers | | | |
| * can be used in serial/parallel as controlled by subclasses | | | |
| */ | | | |
| class ClusteredCursor { | | | |
| public: | | | |
| ClusteredCursor( QueryMessage& q ); | | | |
| ClusteredCursor( const string& ns , const BSONObj& q , int options= | | | |
| 0 , const BSONObj& fields=BSONObj() ); | | | |
| virtual ~ClusteredCursor(); | | | |
| | | | |
| virtual bool more() = 0; | | | |
| virtual BSONObj next() = 0; | | | |
| | | | |
| static BSONObj concatQuery( const BSONObj& query , const BSONObj& e | | | |
| xtraFilter ); | | | |
| | | | |
| virtual string type() const = 0; | | | |
| | | | |
| protected: | | | |
| auto_ptr<DBClientCursor> query( const string& server , int num = 0 | | | |
| , BSONObj extraFilter = BSONObj() ); | | | |
| | | | |
| static BSONObj _concatFilter( const BSONObj& filter , const BSONObj | | | |
| & extraFilter ); | | | |
| | | | |
| string _ns; | | | |
| BSONObj _query; | | | |
| int _options; | | | |
| BSONObj _fields; | | | |
| | | | |
| bool _done; | | | |
| }; | | | |
| | | | |
| /** | | | |
| * holder for a server address and a query to run | | * holder for a server address and a query to run | |
| */ | | */ | |
| class ServerAndQuery { | | class ServerAndQuery { | |
| public: | | public: | |
| ServerAndQuery( const string& server , BSONObj extra = BSONObj() ,
BSONObj orderObject = BSONObj() ) : | | ServerAndQuery( const string& server , BSONObj extra = BSONObj() ,
BSONObj orderObject = BSONObj() ) : | |
| _server( server ) , _extra( extra.getOwned() ) , _orderObject(
orderObject.getOwned() ){ | | _server( server ) , _extra( extra.getOwned() ) , _orderObject(
orderObject.getOwned() ){ | |
| } | | } | |
| | | | |
| bool operator<( const ServerAndQuery& other ) const{ | | bool operator<( const ServerAndQuery& other ) const{ | |
| if ( ! _orderObject.isEmpty() ) | | if ( ! _orderObject.isEmpty() ) | |
| | | | |
| skipping to change at line 96 | | skipping to change at line 66 | |
| | | | |
| operator string() const { | | operator string() const { | |
| return toString(); | | return toString(); | |
| } | | } | |
| | | | |
| string _server; | | string _server; | |
| BSONObj _extra; | | BSONObj _extra; | |
| BSONObj _orderObject; | | BSONObj _orderObject; | |
| }; | | }; | |
| | | | |
|
| | | /** | |
| | | * this is a cursor that works over a set of servers | |
| | | * can be used in serial/parallel as controlled by subclasses | |
| | | */ | |
| | | class ClusteredCursor { | |
| | | public: | |
| | | ClusteredCursor( QueryMessage& q ); | |
| | | ClusteredCursor( const string& ns , const BSONObj& q , int options= | |
| | | 0 , const BSONObj& fields=BSONObj() ); | |
| | | virtual ~ClusteredCursor(); | |
| | | | |
| | | virtual bool more() = 0; | |
| | | virtual BSONObj next() = 0; | |
| | | | |
| | | static BSONObj concatQuery( const BSONObj& query , const BSONObj& e | |
| | | xtraFilter ); | |
| | | | |
| | | virtual string type() const = 0; | |
| | | | |
| | | virtual BSONObj explain(); | |
| | | | |
| | | protected: | |
| | | auto_ptr<DBClientCursor> query( const string& server , int num = 0 | |
| | | , BSONObj extraFilter = BSONObj() ); | |
| | | BSONObj explain( const string& server , BSONObj extraFilter = BSONO | |
| | | bj() ); | |
| | | | |
| | | static BSONObj _concatFilter( const BSONObj& filter , const BSONObj | |
| | | & extraFilter ); | |
| | | | |
| | | virtual void _explain( map< string,list<BSONObj> >& out ) = 0; | |
| | | | |
| | | string _ns; | |
| | | BSONObj _query; | |
| | | int _options; | |
| | | BSONObj _fields; | |
| | | | |
| | | bool _done; | |
| | | }; | |
| | | | |
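concatQuery() is what lets one ClusteredCursor send the user's query to many servers with a per-server restriction (for example a chunk range) folded in. The snippet below is illustrative only: the field names and values are made up, and the exact shape of the combined object is an implementation detail of _concatFilter().

    // illustrative only -- hypothetical filter values
    void concatQueryExample() {
        BSONObj userQuery   = BSON( "status" << "open" );
        BSONObj chunkFilter = BSON( "x" << GTE << 0 << LT << 100 );
        BSONObj combined    = ClusteredCursor::concatQuery( userQuery, chunkFilter );
        // 'combined' matches only documents satisfying both inputs; a subclass would pass
        // something like chunkFilter as the extraFilter argument of query().
    }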
| class FilteringClientCursor { | | class FilteringClientCursor { | |
| public: | | public: | |
| FilteringClientCursor( const BSONObj filter = BSONObj() ); | | FilteringClientCursor( const BSONObj filter = BSONObj() ); | |
| FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSON
Obj filter = BSONObj() ); | | FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSON
Obj filter = BSONObj() ); | |
| ~FilteringClientCursor(); | | ~FilteringClientCursor(); | |
| | | | |
| void reset( auto_ptr<DBClientCursor> cursor ); | | void reset( auto_ptr<DBClientCursor> cursor ); | |
| | | | |
| bool more(); | | bool more(); | |
| BSONObj next(); | | BSONObj next(); | |
| | | | |
| skipping to change at line 180 | | skipping to change at line 185 | |
| /** | | /** | |
| * runs a query in serial across any number of servers | | * runs a query in serial across any number of servers | |
| * returns all results from 1 server, then the next, etc... | | * returns all results from 1 server, then the next, etc... | |
| */ | | */ | |
| class SerialServerClusteredCursor : public ClusteredCursor { | | class SerialServerClusteredCursor : public ClusteredCursor { | |
| public: | | public: | |
| SerialServerClusteredCursor( const set<ServerAndQuery>& servers , Q
ueryMessage& q , int sortOrder=0); | | SerialServerClusteredCursor( const set<ServerAndQuery>& servers , Q
ueryMessage& q , int sortOrder=0); | |
| virtual bool more(); | | virtual bool more(); | |
| virtual BSONObj next(); | | virtual BSONObj next(); | |
| virtual string type() const { return "SerialServer"; } | | virtual string type() const { return "SerialServer"; } | |
|
| | | | |
| private: | | private: | |
|
| | | virtual void _explain( map< string,list<BSONObj> >& out ); | |
| | | | |
| vector<ServerAndQuery> _servers; | | vector<ServerAndQuery> _servers; | |
| unsigned _serverIndex; | | unsigned _serverIndex; | |
| | | | |
| FilteringClientCursor _current; | | FilteringClientCursor _current; | |
| | | | |
| int _needToSkip; | | int _needToSkip; | |
| }; | | }; | |
| | | | |
| /** | | /** | |
| * runs a query in parallel across N servers | | * runs a query in parallel across N servers | |
| | | | |
| skipping to change at line 205 | | skipping to change at line 213 | |
| ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , Q
ueryMessage& q , const BSONObj& sortKey ); | | ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , Q
ueryMessage& q , const BSONObj& sortKey ); | |
| ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , c
onst string& ns , | | ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , c
onst string& ns , | |
| const Query& q , int options=0, const
BSONObj& fields=BSONObj() ); | | const Query& q , int options=0, const
BSONObj& fields=BSONObj() ); | |
| virtual ~ParallelSortClusteredCursor(); | | virtual ~ParallelSortClusteredCursor(); | |
| virtual bool more(); | | virtual bool more(); | |
| virtual BSONObj next(); | | virtual BSONObj next(); | |
| virtual string type() const { return "ParallelSort"; } | | virtual string type() const { return "ParallelSort"; } | |
| private: | | private: | |
| void _init(); | | void _init(); | |
| | | | |
|
| | | virtual void _explain( map< string,list<BSONObj> >& out ); | |
| | | | |
| int _numServers; | | int _numServers; | |
| set<ServerAndQuery> _servers; | | set<ServerAndQuery> _servers; | |
| BSONObj _sortKey; | | BSONObj _sortKey; | |
| | | | |
| FilteringClientCursor * _cursors; | | FilteringClientCursor * _cursors; | |
| int _needToSkip; | | int _needToSkip; | |
| }; | | }; | |
| | | | |
| /** | | /** | |
| * tools for doing asynchronous operations | | * tools for doing asynchronous operations | |
| | | | |
End of changes. 5 change blocks. |
| 34 lines changed or deleted | | 45 lines changed or added | |
|
| query.h | | query.h | |
| | | | |
| skipping to change at line 117 | | skipping to change at line 117 | |
| god - allow access to system namespaces | | god - allow access to system namespaces | |
| */ | | */ | |
| UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BS
ONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug ); | | UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BS
ONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug ); | |
| UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& up
dateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& d
ebug ); | | UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& up
dateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& d
ebug ); | |
| | | | |
| // If justOne is true, deletedId is set to the id of the deleted object
. | | // If justOne is true, deletedId is set to the id of the deleted object
. | |
| long long deleteObjects(const char *ns, BSONObj pattern, bool justOne,
bool logop = false, bool god=false); | | long long deleteObjects(const char *ns, BSONObj pattern, bool justOne,
bool logop = false, bool god=false); | |
| | | | |
| long long runCount(const char *ns, const BSONObj& cmd, string& err); | | long long runCount(const char *ns, const BSONObj& cmd, string& err); | |
| | | | |
|
| auto_ptr< QueryResult > runQuery(Message& m, QueryMessage& q, CurOp& cu
rop ); | | void runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &resul
t); | |
| | | | |
| /* This is for languages whose "objects" are not well ordered (JSON is
well ordered). | | /* This is for languages whose "objects" are not well ordered (JSON is
well ordered). | |
| [ { a : ... } , { b : ... } ] -> { a : ..., b : ... } | | [ { a : ... } , { b : ... } ] -> { a : ..., b : ... } | |
| */ | | */ | |
| inline BSONObj transformOrderFromArrayFormat(BSONObj order) { | | inline BSONObj transformOrderFromArrayFormat(BSONObj order) { | |
| /* note: this is slow, but that is ok as order will have very few p
ieces */ | | /* note: this is slow, but that is ok as order will have very few p
ieces */ | |
| BSONObjBuilder b; | | BSONObjBuilder b; | |
| char p[2] = "0"; | | char p[2] = "0"; | |
| | | | |
| while ( 1 ) { | | while ( 1 ) { | |
| | | | |
| skipping to change at line 178 | | skipping to change at line 178 | |
| | | | |
| int getSkip() const { return _ntoskip; } | | int getSkip() const { return _ntoskip; } | |
| int getNumToReturn() const { return _ntoreturn; } | | int getNumToReturn() const { return _ntoreturn; } | |
| bool wantMore() const { return _wantMore; } | | bool wantMore() const { return _wantMore; } | |
| int getOptions() const { return _options; } | | int getOptions() const { return _options; } | |
| bool hasOption( int x ) const { return x & _options; } | | bool hasOption( int x ) const { return x & _options; } | |
| | | | |
| bool isExplain() const { return _explain; } | | bool isExplain() const { return _explain; } | |
| bool isSnapshot() const { return _snapshot; } | | bool isSnapshot() const { return _snapshot; } | |
| bool returnKey() const { return _returnKey; } | | bool returnKey() const { return _returnKey; } | |
|
| | | bool showDiskLoc() const { return _showDiskLoc; } | |
| | | | |
| const BSONObj& getMin() const { return _min; } | | const BSONObj& getMin() const { return _min; } | |
| const BSONObj& getMax() const { return _max; } | | const BSONObj& getMax() const { return _max; } | |
| const BSONObj& getOrder() const { return _order; } | | const BSONObj& getOrder() const { return _order; } | |
| const BSONElement& getHint() const { return _hint; } | | const BSONElement& getHint() const { return _hint; } | |
| int getMaxScan() const { return _maxScan; } | | int getMaxScan() const { return _maxScan; } | |
| | | | |
| bool couldBeCommand() const { | | bool couldBeCommand() const { | |
| /* we assume you are using findOne() for running a cmd... */ | | /* we assume you are using findOne() for running a cmd... */ | |
| return _ntoreturn == 1 && strstr( _ns , ".$cmd" ); | | return _ntoreturn == 1 && strstr( _ns , ".$cmd" ); | |
| | | | |
| skipping to change at line 245 | | skipping to change at line 246 | |
| else { | | else { | |
| _filter = q; | | _filter = q; | |
| } | | } | |
| } | | } | |
| | | | |
| void _reset(){ | | void _reset(){ | |
| _wantMore = true; | | _wantMore = true; | |
| _explain = false; | | _explain = false; | |
| _snapshot = false; | | _snapshot = false; | |
| _returnKey = false; | | _returnKey = false; | |
|
| | | _showDiskLoc = false; | |
| _maxScan = 0; | | _maxScan = 0; | |
| } | | } | |
| | | | |
| void _initTop( const BSONObj& top ){ | | void _initTop( const BSONObj& top ){ | |
| BSONObjIterator i( top ); | | BSONObjIterator i( top ); | |
| while ( i.more() ){ | | while ( i.more() ){ | |
| BSONElement e = i.next(); | | BSONElement e = i.next(); | |
| const char * name = e.fieldName(); | | const char * name = e.fieldName(); | |
| | | | |
| if ( strcmp( "$orderby" , name ) == 0 || | | if ( strcmp( "$orderby" , name ) == 0 || | |
| | | | |
| skipping to change at line 277 | | skipping to change at line 279 | |
| else if ( strcmp( "$min" , name ) == 0 ) | | else if ( strcmp( "$min" , name ) == 0 ) | |
| _min = e.embeddedObject(); | | _min = e.embeddedObject(); | |
| else if ( strcmp( "$max" , name ) == 0 ) | | else if ( strcmp( "$max" , name ) == 0 ) | |
| _max = e.embeddedObject(); | | _max = e.embeddedObject(); | |
| else if ( strcmp( "$hint" , name ) == 0 ) | | else if ( strcmp( "$hint" , name ) == 0 ) | |
| _hint = e; | | _hint = e; | |
| else if ( strcmp( "$returnKey" , name ) == 0 ) | | else if ( strcmp( "$returnKey" , name ) == 0 ) | |
| _returnKey = e.trueValue(); | | _returnKey = e.trueValue(); | |
| else if ( strcmp( "$maxScan" , name ) == 0 ) | | else if ( strcmp( "$maxScan" , name ) == 0 ) | |
| _maxScan = e.numberInt(); | | _maxScan = e.numberInt(); | |
|
| | | else if ( strcmp( "$showDiskLoc" , name ) == 0 ) | |
| | | _showDiskLoc = e.trueValue(); | |
| | | | |
| } | | } | |
| | | | |
| if ( _snapshot ){ | | if ( _snapshot ){ | |
| uassert( 12001 , "E12001 can't sort with $snapshot", _order
.isEmpty() ); | | uassert( 12001 , "E12001 can't sort with $snapshot", _order
.isEmpty() ); | |
| uassert( 12002 , "E12002 can't use hint with $snapshot", _h
int.eoo() ); | | uassert( 12002 , "E12002 can't use hint with $snapshot", _h
int.eoo() ); | |
| } | | } | |
| | | | |
| } | | } | |
| | | | |
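_initTop() parses the wrapped query form, where the real filter sits under $query next to dollar-prefixed modifiers; $showDiskLoc is the modifier added in this change. A representative wrapped query, with made-up field names and values:

    // illustrative only -- a wrapped query as a client might send it
    void wrappedQueryExample() {
        BSONObj wrapped = BSON( "$query"       << BSON( "status" << "open" )
                             << "$orderby"     << BSON( "ts" << -1 )
                             << "$maxScan"     << 1000
                             << "$showDiskLoc" << true );
        // _initTop() sets _order, _maxScan and _showDiskLoc from these $-fields;
        // the object under $query supplies the actual filter.
    }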
| | | | |
| skipping to change at line 311 | | skipping to change at line 315 | |
| int _options; | | int _options; | |
| | | | |
| BSONObj _filter; | | BSONObj _filter; | |
| shared_ptr< FieldMatcher > _fields; | | shared_ptr< FieldMatcher > _fields; | |
| | | | |
| bool _wantMore; | | bool _wantMore; | |
| | | | |
| bool _explain; | | bool _explain; | |
| bool _snapshot; | | bool _snapshot; | |
| bool _returnKey; | | bool _returnKey; | |
|
| | | bool _showDiskLoc; | |
| BSONObj _min; | | BSONObj _min; | |
| BSONObj _max; | | BSONObj _max; | |
| BSONElement _hint; | | BSONElement _hint; | |
| BSONObj _order; | | BSONObj _order; | |
| int _maxScan; | | int _maxScan; | |
| }; | | }; | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
| #include "clientcursor.h" | | #include "clientcursor.h" | |
| | | | |
End of changes. 5 change blocks. |
| 1 lines changed or deleted | | 6 lines changed or added | |
|
| queryoptimizer.h | | queryoptimizer.h | |
| | | | |
| skipping to change at line 24 | | skipping to change at line 24 | |
| * | | * | |
| * You should have received a copy of the GNU Affero General Public Licen
se | | * You should have received a copy of the GNU Affero General Public Licen
se | |
| * along with this program. If not, see <http://www.gnu.org/licenses/>. | | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include "cursor.h" | | #include "cursor.h" | |
| #include "jsobj.h" | | #include "jsobj.h" | |
| #include "queryutil.h" | | #include "queryutil.h" | |
|
| | | #include "matcher.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| class IndexDetails; | | class IndexDetails; | |
| class IndexType; | | class IndexType; | |
| | | | |
| class QueryPlan : boost::noncopyable { | | class QueryPlan : boost::noncopyable { | |
| public: | | public: | |
| QueryPlan(NamespaceDetails *_d, | | QueryPlan(NamespaceDetails *_d, | |
| int _idxNo, // -1 = no index | | int _idxNo, // -1 = no index | |
| | | | |
| skipping to change at line 52 | | skipping to change at line 53 | |
| /* ScanAndOrder processing will be required if true */ | | /* ScanAndOrder processing will be required if true */ | |
| bool scanAndOrderRequired() const { return scanAndOrderRequired_; } | | bool scanAndOrderRequired() const { return scanAndOrderRequired_; } | |
| /* When true, the index we are using has keys such that it can comp
letely resolve the | | /* When true, the index we are using has keys such that it can comp
letely resolve the | |
| query expression to match by itself without ever checking the main
object. | | query expression to match by itself without ever checking the main
object. | |
| */ | | */ | |
| bool exactKeyMatch() const { return exactKeyMatch_; } | | bool exactKeyMatch() const { return exactKeyMatch_; } | |
| /* If true, the startKey and endKey are unhelpful and the index ord
er doesn't match the | | /* If true, the startKey and endKey are unhelpful and the index ord
er doesn't match the | |
| requested sort order */ | | requested sort order */ | |
| bool unhelpful() const { return unhelpful_; } | | bool unhelpful() const { return unhelpful_; } | |
| int direction() const { return direction_; } | | int direction() const { return direction_; } | |
|
| auto_ptr< Cursor > newCursor( const DiskLoc &startLoc = DiskLoc() , | | shared_ptr<Cursor> newCursor( const DiskLoc &startLoc = DiskLoc() , | |
| int numWanted=0 ) const; | | int numWanted=0 ) const; | |
| auto_ptr< Cursor > newReverseCursor() const; | | shared_ptr<Cursor> newReverseCursor() const; | |
| BSONObj indexKey() const; | | BSONObj indexKey() const; | |
| const char *ns() const { return fbs_.ns(); } | | const char *ns() const { return fbs_.ns(); } | |
| NamespaceDetails *nsd() const { return d; } | | NamespaceDetails *nsd() const { return d; } | |
| BSONObj query() const { return fbs_.query(); } | | BSONObj query() const { return fbs_.query(); } | |
| BSONObj simplifiedQuery( const BSONObj& fields = BSONObj() ) const
{ return fbs_.simplifiedQuery( fields ); } | | BSONObj simplifiedQuery( const BSONObj& fields = BSONObj() ) const
{ return fbs_.simplifiedQuery( fields ); } | |
| const FieldRange &range( const char *fieldName ) const { return fbs
_.range( fieldName ); } | | const FieldRange &range( const char *fieldName ) const { return fbs
_.range( fieldName ); } | |
| void registerSelf( long long nScanned ) const; | | void registerSelf( long long nScanned ) const; | |
| // just for testing | | // just for testing | |
| BoundList indexBounds() const { return indexBounds_; } | | BoundList indexBounds() const { return indexBounds_; } | |
| private: | | private: | |
| | | | |
| skipping to change at line 85 | | skipping to change at line 86 | |
| bool unhelpful_; | | bool unhelpful_; | |
| string _special; | | string _special; | |
| IndexType * _type; | | IndexType * _type; | |
| }; | | }; | |
| | | | |
| // Inherit from this interface to implement a new query operation. | | // Inherit from this interface to implement a new query operation. | |
| // The query optimizer will clone the QueryOp that is provided, giving | | // The query optimizer will clone the QueryOp that is provided, giving | |
| // each clone its own query plan. | | // each clone its own query plan. | |
| class QueryOp { | | class QueryOp { | |
| public: | | public: | |
|
| QueryOp() : complete_(), qp_(), error_() {} | | QueryOp() : _complete(), _stopRequested(), _qp(), _error() {} | |
| virtual ~QueryOp() {} | | virtual ~QueryOp() {} | |
| | | | |
| /** this gets called after a query plan is set? ERH 2/16/10 */ | | /** this gets called after a query plan is set? ERH 2/16/10 */ | |
| virtual void init() = 0; | | virtual void init() = 0; | |
| virtual void next() = 0; | | virtual void next() = 0; | |
| virtual bool mayRecordPlan() const = 0; | | virtual bool mayRecordPlan() const = 0; | |
| | | | |
| /** @return a copy of the inheriting class, which will be run with
its own | | /** @return a copy of the inheriting class, which will be run with
its own | |
|
| query plan. | | query plan. If multiple plan sets are required for an | |
| | | $or query, | |
| | | the QueryOp of the winning plan from a given set will b | |
| | | e cloned | |
| | | to generate QueryOps for the subsequent plan set. | |
| */ | | */ | |
| virtual QueryOp *clone() const = 0; | | virtual QueryOp *clone() const = 0; | |
|
| bool complete() const { return complete_; } | | bool complete() const { return _complete; } | |
| bool error() const { return error_; } | | bool error() const { return _error; } | |
| string exceptionMessage() const { return exceptionMessage_; } | | bool stopRequested() const { return _stopRequested; } | |
| const QueryPlan &qp() const { return *qp_; } | | string exceptionMessage() const { return _exceptionMessage; } | |
| | | const QueryPlan &qp() const { return *_qp; } | |
| // To be called by QueryPlanSet::Runner only. | | // To be called by QueryPlanSet::Runner only. | |
|
| void setQueryPlan( const QueryPlan *qp ) { qp_ = qp; } | | void setQueryPlan( const QueryPlan *qp ) { _qp = qp; } | |
| void setExceptionMessage( const string &exceptionMessage ) { | | void setExceptionMessage( const string &exceptionMessage ) { | |
|
| error_ = true; | | _error = true; | |
| exceptionMessage_ = exceptionMessage; | | _exceptionMessage = exceptionMessage; | |
| } | | } | |
| protected: | | protected: | |
|
| void setComplete() { complete_ = true; } | | void setComplete() { _complete = true; } | |
| | | void setStop() { setComplete(); _stopRequested = true; } | |
| private: | | private: | |
|
| bool complete_; | | bool _complete; | |
| string exceptionMessage_; | | bool _stopRequested; | |
| const QueryPlan *qp_; | | string _exceptionMessage; | |
| bool error_; | | const QueryPlan *_qp; | |
| | | bool _error; | |
| }; | | }; | |
| | | | |
| // Set of candidate query plans for a particular query. Used for runni
ng | | // Set of candidate query plans for a particular query. Used for runni
ng | |
| // a QueryOp on these plans. | | // a QueryOp on these plans. | |
| class QueryPlanSet { | | class QueryPlanSet { | |
| public: | | public: | |
| | | | |
| typedef boost::shared_ptr< QueryPlan > PlanPtr; | | typedef boost::shared_ptr< QueryPlan > PlanPtr; | |
| typedef vector< PlanPtr > PlanSet; | | typedef vector< PlanPtr > PlanSet; | |
| | | | |
| | | | |
| skipping to change at line 137 | | skipping to change at line 143 | |
| const BSONElement *hint = 0, | | const BSONElement *hint = 0, | |
| bool honorRecordedPlan = true, | | bool honorRecordedPlan = true, | |
| const BSONObj &min = BSONObj(), | | const BSONObj &min = BSONObj(), | |
| const BSONObj &max = BSONObj() ); | | const BSONObj &max = BSONObj() ); | |
| int nPlans() const { return plans_.size(); } | | int nPlans() const { return plans_.size(); } | |
| shared_ptr< QueryOp > runOp( QueryOp &op ); | | shared_ptr< QueryOp > runOp( QueryOp &op ); | |
| template< class T > | | template< class T > | |
| shared_ptr< T > runOp( T &op ) { | | shared_ptr< T > runOp( T &op ) { | |
| return dynamic_pointer_cast< T >( runOp( static_cast< QueryOp&
>( op ) ) ); | | return dynamic_pointer_cast< T >( runOp( static_cast< QueryOp&
>( op ) ) ); | |
| } | | } | |
|
| const FieldRangeSet &fbs() const { return fbs_; } | | | |
| BSONObj explain() const; | | BSONObj explain() const; | |
| bool usingPrerecordedPlan() const { return usingPrerecordedPlan_; } | | bool usingPrerecordedPlan() const { return usingPrerecordedPlan_; } | |
| PlanPtr getBestGuess() const; | | PlanPtr getBestGuess() const; | |
|
| | | void setBestGuessOnly() { _bestGuessOnly = true; } | |
| | | //for testing | |
| | | const FieldRangeSet &fbs() const { return fbs_; } | |
| private: | | private: | |
| void addOtherPlans( bool checkFirst ); | | void addOtherPlans( bool checkFirst ); | |
| void addPlan( PlanPtr plan, bool checkFirst ) { | | void addPlan( PlanPtr plan, bool checkFirst ) { | |
| if ( checkFirst && plan->indexKey().woCompare( plans_[ 0 ]->ind
exKey() ) == 0 ) | | if ( checkFirst && plan->indexKey().woCompare( plans_[ 0 ]->ind
exKey() ) == 0 ) | |
| return; | | return; | |
| plans_.push_back( plan ); | | plans_.push_back( plan ); | |
| } | | } | |
| void init(); | | void init(); | |
| void addHint( IndexDetails &id ); | | void addHint( IndexDetails &id ); | |
| struct Runner { | | struct Runner { | |
| | | | |
| skipping to change at line 171 | | skipping to change at line 179 | |
| PlanSet plans_; | | PlanSet plans_; | |
| bool mayRecordPlan_; | | bool mayRecordPlan_; | |
| bool usingPrerecordedPlan_; | | bool usingPrerecordedPlan_; | |
| BSONObj hint_; | | BSONObj hint_; | |
| BSONObj order_; | | BSONObj order_; | |
| long long oldNScanned_; | | long long oldNScanned_; | |
| bool honorRecordedPlan_; | | bool honorRecordedPlan_; | |
| BSONObj min_; | | BSONObj min_; | |
| BSONObj max_; | | BSONObj max_; | |
| string _special; | | string _special; | |
|
| | | bool _bestGuessOnly; | |
| | | }; | |
| | | | |
| | | // Handles $or type queries by generating a QueryPlanSet for each $or c | |
| | | lause | |
| | | // NOTE on our $or implementation: In our current qo implementation we | |
| | | don't | |
| | | // keep statistics on our data, but we can conceptualize the problem of | |
| | | // selecting an index when statistics exist for all index ranges. The | |
| | | // d-hitting set problem on k sets and n elements can be reduced to the | |
| | | // problem of index selection on k $or clauses and n index ranges (wher | |
| | | e | |
| | | // d is the max number of indexes, and the number of ranges n is unboun | |
| | | ded). | |
| | | // In light of the fact that d-hitting set is np complete, and we don't | |
| | | even | |
| | | // track statistics (so cost calculations are expensive) our first | |
| | | // implementation uses the following greedy approach: We take one $or c | |
| | | lause | |
| | | // at a time and treat each as a separate query for index selection pur | |
| | | poses. | |
| | | // But if an index range is scanned for a particular $or clause, we eli | |
| | | minate | |
| | | // that range from all subsequent clauses. One could imagine an opposi | |
| | | te | |
| | | // implementation where we select indexes based on the union of index r | |
| | | anges | |
| | | // for all $or clauses, but this can have much poorer worst case behavi | |
| | | or. | |
| | | // (An index range that suits one $or clause may not suit another, and | |
| | | this | |
| | | // is worse than the typical case of index range choice staleness becau | |
| | | se | |
| | | // with $or the clauses may likely be logically distinct.) The greedy | |
| | | // implementation won't do any worse than all the $or clauses individua | |
| | | lly, | |
| | | // and it can often do better. In the first cut we are intentionally u | |
| | | sing | |
| | | // QueryPattern tracking to record successful plans on $or queries for | |
| | | use by | |
| | | // subsequent $or queries, even though there may be a significant aggre | |
| | | gate | |
| | | // $nor component that would not be represented in QueryPattern. | |
| | | class MultiPlanScanner { | |
| | | public: | |
| | | MultiPlanScanner( const char *ns, | |
| | | const BSONObj &query, | |
| | | const BSONObj &order, | |
| | | const BSONElement *hint = 0, | |
| | | bool honorRecordedPlan = true, | |
| | | const BSONObj &min = BSONObj(), | |
| | | const BSONObj &max = BSONObj() ); | |
| | | shared_ptr< QueryOp > runOp( QueryOp &op ); | |
| | | template< class T > | |
| | | shared_ptr< T > runOp( T &op ) { | |
| | | return dynamic_pointer_cast< T >( runOp( static_cast< QueryOp& | |
| | | >( op ) ) ); | |
| | | } | |
| | | shared_ptr< QueryOp > runOpOnce( QueryOp &op ); | |
| | | template< class T > | |
| | | shared_ptr< T > runOpOnce( T &op ) { | |
| | | return dynamic_pointer_cast< T >( runOpOnce( static_cast< Query | |
| | | Op& >( op ) ) ); | |
| | | } | |
| | | bool mayRunMore() const { return _i < _n; } | |
| | | BSONObj oldExplain() const { assertNotOr(); return _currentQps->exp | |
| | | lain(); } | |
| | | // just report this when only one query op | |
| | | bool usingPrerecordedPlan() const { | |
| | | return !_or && _currentQps->usingPrerecordedPlan(); | |
| | | } | |
| | | void setBestGuessOnly() { _bestGuessOnly = true; } | |
| | | private: | |
| | | //temp | |
| | | void assertNotOr() const { | |
| | | massert( 13266, "not implemented for $or query", !_or ); | |
| | | } | |
| | | // temp (and yucky) | |
| | | BSONObj nextSimpleQuery() { | |
| | | massert( 13267, "only generate simple query if $or", _or ); | |
| | | massert( 13270, "no more simple queries", mayRunMore() ); | |
| | | BSONObjBuilder b; | |
| | | BSONArrayBuilder norb; | |
| | | BSONObjIterator i( _query ); | |
| | | while( i.more() ) { | |
| | | BSONElement e = i.next(); | |
| | | if ( strcmp( e.fieldName(), "$nor" ) == 0 ) { | |
| | | massert( 13269, "$nor must be array", e.type() == Array | |
| | | ); | |
| | | BSONObjIterator j( e.embeddedObject() ); | |
| | | while( j.more() ) { | |
| | | norb << j.next(); | |
| | | } | |
| | | } else if ( strcmp( e.fieldName(), "$or" ) == 0 ) { | |
| | | BSONObjIterator j( e.embeddedObject() ); | |
| | | for( int k = 0; k < _i; ++k ) { | |
| | | norb << j.next(); | |
| | | } | |
| | | b << "$or" << BSON_ARRAY( j.next() ); | |
| | | } else { | |
| | | b.append( e ); | |
| | | } | |
| | | } | |
| | | BSONArray nor = norb.arr(); | |
| | | if ( !nor.isEmpty() ) { | |
| | | b << "$nor" << nor; | |
| | | } | |
| | | ++_i; | |
| | | BSONObj ret = b.obj(); | |
| | | return ret; | |
| | | } | |
| | | const char * _ns; | |
| | | bool _or; | |
| | | BSONObj _query; | |
| | | // FieldRangeOrSet _fros; | |
| | | auto_ptr< QueryPlanSet > _currentQps; | |
| | | int _i; | |
| | | int _n; | |
| | | bool _honorRecordedPlan; | |
| | | bool _bestGuessOnly; | |
| | | }; | |
| | | | |
| | | class MultiCursor : public Cursor { | |
| | | public: | |
| | | class CursorOp : public QueryOp { | |
| | | public: | |
| | | virtual shared_ptr< Cursor > newCursor() const = 0; | |
| | | virtual auto_ptr< CoveredIndexMatcher > newMatcher() const = 0; | |
| | | }; | |
| | | // takes ownership of 'op' | |
| | | MultiCursor( const char *ns, const BSONObj &pattern, const BSONObj | |
| | | &order, auto_ptr< CursorOp > op = auto_ptr< CursorOp >( 0 ) ) | |
| | | : _mps( new MultiPlanScanner( ns, pattern, order ) ) { | |
| | | if ( op.get() ) { | |
| | | _op = op; | |
| | | } else { | |
| | | _op.reset( new NoOp() ); | |
| | | _mps->setBestGuessOnly(); | |
| | | } | |
| | | if ( _mps->mayRunMore() ) { | |
| | | nextClause(); | |
| | | if ( !ok() ) { | |
| | | advance(); | |
| | | } | |
| | | } else { | |
| | | _c.reset( new BasicCursor( DiskLoc() ) ); | |
| | | } | |
| | | } | |
| | | // used to handoff a query to a getMore() | |
| | | MultiCursor( auto_ptr< MultiPlanScanner > mps, const shared_ptr< Cu | |
| | | rsor > &c, auto_ptr< CoveredIndexMatcher > matcher ) | |
| | | : _op( new NoOp() ), _c( c ), _mps( mps ), _matcher( matcher ) { | |
| | | _mps->setBestGuessOnly(); | |
| | | } | |
| | | virtual bool ok() { return _c->ok(); } | |
| | | virtual Record* _current() { return _c->_current(); } | |
| | | virtual BSONObj current() { return _c->current(); } | |
| | | virtual DiskLoc currLoc() { return _c->currLoc(); } | |
| | | virtual bool advance() { | |
| | | _c->advance(); | |
| | | while( !ok() && _mps->mayRunMore() ) { | |
| | | nextClause(); | |
| | | } | |
| | | return ok(); | |
| | | } | |
| | | virtual BSONObj currKey() const { return _c->currKey(); } | |
| | | virtual DiskLoc refLoc() { return _c->refLoc(); } | |
| | | virtual void noteLocation() { _c->noteLocation(); } | |
| | | virtual void checkLocation() { | |
| | | _c->checkLocation(); | |
| | | if ( !ok() ) { | |
| | | advance(); | |
| | | } | |
| | | } | |
| | | virtual bool supportGetMore() { return true; } | |
| | | // with update we could potentially get the same document on multip | |
| | | le | |
| | | // indexes, but update appears to already handle this with seenObje | |
| | | cts | |
| | | // so we don't have to do anything special here. | |
| | | virtual bool getsetdup(DiskLoc loc) { | |
| | | return _c->getsetdup( loc ); | |
| | | } | |
| | | virtual CoveredIndexMatcher *matcher() const { return _matcher.get( | |
| | | ); } | |
| | | private: | |
| | | class NoOp : public CursorOp { | |
| | | virtual void init() { setComplete(); } | |
| | | virtual void next() {} | |
| | | virtual bool mayRecordPlan() const { return false; } | |
| | | virtual QueryOp *clone() const { return new NoOp(); } | |
| | | virtual shared_ptr< Cursor > newCursor() const { return qp().ne | |
| | | wCursor(); } | |
| | | virtual auto_ptr< CoveredIndexMatcher > newMatcher() const { | |
| | | return auto_ptr< CoveredIndexMatcher >( new CoveredIndexMat | |
| | | cher( qp().query(), qp().indexKey() ) ); | |
| | | } | |
| | | }; | |
| | | void nextClause() { | |
| | | shared_ptr< CursorOp > best = _mps->runOpOnce( *_op ); | |
| | | massert( 10401 , best->exceptionMessage(), best->complete() ); | |
| | | _c = best->newCursor(); | |
| | | _matcher = best->newMatcher(); | |
| | | } | |
| | | auto_ptr< CursorOp > _op; | |
| | | shared_ptr< Cursor > _c; | |
| | | auto_ptr< MultiPlanScanner > _mps; | |
| | | auto_ptr< CoveredIndexMatcher > _matcher; | |
| }; | | }; | |
| | | | |
| // NOTE min, max, and keyPattern will be updated to be consistent with
the selected index. | | // NOTE min, max, and keyPattern will be updated to be consistent with
the selected index. | |
| IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSO
NObj &min, BSONObj &max, BSONObj &keyPattern ); | | IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSO
NObj &min, BSONObj &max, BSONObj &keyPattern ); | |
| | | | |
| inline bool isSimpleIdQuery( const BSONObj& query ){ | | inline bool isSimpleIdQuery( const BSONObj& query ){ | |
| return | | return | |
| strcmp( query.firstElement().fieldName() , "_id" ) == 0 && | | strcmp( query.firstElement().fieldName() , "_id" ) == 0 && | |
| query.nFields() == 1 && | | query.nFields() == 1 && | |
| query.firstElement().isSimpleType(); | | query.firstElement().isSimpleType(); | |
| } | | } | |
| | | | |
|
| | | // matcher() will always work on the returned cursor | |
| | | inline shared_ptr< Cursor > bestGuessCursor( const char *ns, const BSON | |
| | | Obj &query, const BSONObj &sort ) { | |
| | | if( !query.getField( "$or" ).eoo() ) { | |
| | | return shared_ptr< Cursor >( new MultiCursor( ns, query, sort ) | |
| | | ); | |
| | | } else { | |
| | | shared_ptr< Cursor > ret = QueryPlanSet( ns, query, sort ).getB | |
| | | estGuess()->newCursor(); | |
| | | if ( !query.isEmpty() ) { | |
| | | auto_ptr< CoveredIndexMatcher > matcher( new CoveredIndexMa | |
| | | tcher( query, ret->indexKeyPattern() ) ); | |
| | | ret->setMatcher( matcher ); | |
| | | } | |
| | | return ret; | |
| | | } | |
| | | } | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
End of changes. 13 change blocks. |
| 18 lines changed or deleted | | 254 lines changed or added | |
|
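Note on the $or handling added above: the greedy approach described in the MultiPlanScanner comment is carried out by nextSimpleQuery(), which runs one $or clause at a time and folds the clauses already run into a $nor so their documents are not returned twice. Below is a small standalone C++ sketch of that rewrite; the string-built queries are illustrative only (not the real BSONObjBuilder API) and the sketch omits carrying over other top-level fields and merging a pre-existing $nor.

    // Sketch of the per-clause query produced for pass i of an {$or:[...]} query:
    // clause i is kept, clauses 0..i-1 are moved into a $nor so already-scanned
    // documents are excluded on later passes.
    #include <iostream>
    #include <string>
    #include <vector>

    std::string nextSimpleQuery(const std::vector<std::string>& orClauses, std::size_t i) {
        std::string q = "{ $or: [ " + orClauses[i] + " ]";
        if (i > 0) {                       // exclude everything matched by earlier clauses
            q += ", $nor: [ ";
            for (std::size_t k = 0; k < i; ++k) {
                if (k) q += ", ";
                q += orClauses[k];
            }
            q += " ]";
        }
        return q + " }";
    }

    int main() {
        std::vector<std::string> clauses = { "{a:1}", "{b:{$gt:5}}", "{c:7}" };
        for (std::size_t i = 0; i < clauses.size(); ++i)
            std::cout << nextSimpleQuery(clauses, i) << "\n";
        // { $or: [ {a:1} ] }
        // { $or: [ {b:{$gt:5}} ], $nor: [ {a:1} ] }
        // { $or: [ {c:7} ], $nor: [ {a:1}, {b:{$gt:5}} ] }
    }

Running the clauses this way never does worse than running them individually, at the cost of the growing $nor filter on later passes, which matches the trade-off discussed in the NOTE.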
| queryutil.h | | queryutil.h | |
| | | | |
| skipping to change at line 25 | | skipping to change at line 25 | |
| * limitations under the License. | | * limitations under the License. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include "jsobj.h" | | #include "jsobj.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| struct FieldBound { | | struct FieldBound { | |
|
| BSONElement bound_; | | BSONElement _bound; | |
| bool inclusive_; | | bool _inclusive; | |
| bool operator==( const FieldBound &other ) const { | | bool operator==( const FieldBound &other ) const { | |
|
| return bound_.woCompare( other.bound_ ) == 0 && | | return _bound.woCompare( other._bound ) == 0 && | |
| inclusive_ == other.inclusive_; | | _inclusive == other._inclusive; | |
| } | | } | |
| }; | | }; | |
| | | | |
| struct FieldInterval { | | struct FieldInterval { | |
| FieldInterval(){} | | FieldInterval(){} | |
| FieldInterval( const BSONElement& e ){ | | FieldInterval( const BSONElement& e ){ | |
|
| lower_.bound_ = upper_.bound_ = e; | | _lower._bound = _upper._bound = e; | |
| lower_.inclusive_ = upper_.inclusive_ = true; | | _lower._inclusive = _upper._inclusive = true; | |
| } | | } | |
|
| FieldBound lower_; | | FieldBound _lower; | |
| FieldBound upper_; | | FieldBound _upper; | |
| bool valid() const { | | bool valid() const { | |
|
| int cmp = lower_.bound_.woCompare( upper_.bound_, false ); | | int cmp = _lower._bound.woCompare( _upper._bound, false ); | |
| return ( cmp < 0 || ( cmp == 0 && lower_.inclusive_ && upper_.i | | return ( cmp < 0 || ( cmp == 0 && _lower._inclusive && _upper._ | |
| nclusive_ ) ); | | inclusive ) ); | |
| } | | } | |
| }; | | }; | |
| | | | |
| // range of a field's value that may be determined from query -- used t
o | | // range of a field's value that may be determined from query -- used t
o | |
| // determine index limits | | // determine index limits | |
| class FieldRange { | | class FieldRange { | |
| public: | | public: | |
| FieldRange( const BSONElement &e = BSONObj().firstElement() , bool
isNot=false , bool optimize=true ); | | FieldRange( const BSONElement &e = BSONObj().firstElement() , bool
isNot=false , bool optimize=true ); | |
| const FieldRange &operator&=( const FieldRange &other ); | | const FieldRange &operator&=( const FieldRange &other ); | |
|
| const FieldRange &operator|=( const FieldRange &other ); | | const FieldRange &operator|=( const FieldRange &other ); | |
| BSONElement min() const { assert( !empty() ); return intervals_[ 0 | | // does not remove fully contained ranges (eg [1,3] - [2,2] doesn't | |
| ].lower_.bound_; } | | remove anything) | |
| BSONElement max() const { assert( !empty() ); return intervals_[ in | | // in future we can change so that an or on $in:[3] combined with $ | |
| tervals_.size() - 1 ].upper_.bound_; } | | in:{$gt:2} doesn't scan 3 a second time | |
| bool minInclusive() const { assert( !empty() ); return intervals_[ | | const FieldRange &operator-=( const FieldRange &other ); | |
| 0 ].lower_.inclusive_; } | | BSONElement min() const { assert( !empty() ); return _intervals[ 0 | |
| bool maxInclusive() const { assert( !empty() ); return intervals_[ | | ]._lower._bound; } | |
| intervals_.size() - 1 ].upper_.inclusive_; } | | BSONElement max() const { assert( !empty() ); return _intervals[ _i | |
| | | ntervals.size() - 1 ]._upper._bound; } | |
| | | bool minInclusive() const { assert( !empty() ); return _intervals[ | |
| | | 0 ]._lower._inclusive; } | |
| | | bool maxInclusive() const { assert( !empty() ); return _intervals[ | |
| | | _intervals.size() - 1 ]._upper._inclusive; } | |
| bool equality() const { | | bool equality() const { | |
| return | | return | |
| !empty() && | | !empty() && | |
| min().woCompare( max(), false ) == 0 && | | min().woCompare( max(), false ) == 0 && | |
| maxInclusive() && | | maxInclusive() && | |
| minInclusive(); | | minInclusive(); | |
| } | | } | |
| bool nontrivial() const { | | bool nontrivial() const { | |
| return | | return | |
| ! empty() && | | ! empty() && | |
| ( minKey.firstElement().woCompare( min(), false ) != 0 || | | ( minKey.firstElement().woCompare( min(), false ) != 0 || | |
| maxKey.firstElement().woCompare( max(), false ) != 0 ); | | maxKey.firstElement().woCompare( max(), false ) != 0 ); | |
| } | | } | |
|
| bool empty() const { return intervals_.empty(); } | | bool empty() const { return _intervals.empty(); } | |
| const vector< FieldInterval > &intervals() const { return in | | const vector< FieldInterval > &intervals() const { return _i | |
| tervals_; } | | ntervals; } | |
| string getSpecial() const { return _special; } | | string getSpecial() const { return _special; } | |
| | | | |
| private: | | private: | |
| BSONObj addObj( const BSONObj &o ); | | BSONObj addObj( const BSONObj &o ); | |
|
| vector< FieldInterval > intervals_; | | void finishOperation( const vector< FieldInterval > &newIntervals, | |
| vector< BSONObj > objData_; | | const FieldRange &other ); | |
| | | vector< FieldInterval > _intervals; | |
| | | vector< BSONObj > _objData; | |
| string _special; | | string _special; | |
| }; | | }; | |
| | | | |
| // implements query pattern matching, used to determine if a query is | | // implements query pattern matching, used to determine if a query is | |
| // similar to an earlier query and should use the same plan | | // similar to an earlier query and should use the same plan | |
| class QueryPattern { | | class QueryPattern { | |
| public: | | public: | |
| friend class FieldRangeSet; | | friend class FieldRangeSet; | |
| enum Type { | | enum Type { | |
| Equality, | | Equality, | |
| | | | |
| skipping to change at line 104 | | skipping to change at line 108 | |
| bool operator==( const QueryPattern &other ) const { | | bool operator==( const QueryPattern &other ) const { | |
| bool less = operator<( other ); | | bool less = operator<( other ); | |
| bool more = other.operator<( *this ); | | bool more = other.operator<( *this ); | |
| assert( !( less && more ) ); | | assert( !( less && more ) ); | |
| return !( less || more ); | | return !( less || more ); | |
| } | | } | |
| bool operator!=( const QueryPattern &other ) const { | | bool operator!=( const QueryPattern &other ) const { | |
| return !operator==( other ); | | return !operator==( other ); | |
| } | | } | |
| bool operator<( const QueryPattern &other ) const { | | bool operator<( const QueryPattern &other ) const { | |
|
| map< string, Type >::const_iterator i = fieldTypes_.begin(); | | map< string, Type >::const_iterator i = _fieldTypes.begin(); | |
| map< string, Type >::const_iterator j = other.fieldTypes_.begin | | map< string, Type >::const_iterator j = other._fieldTypes.begin | |
| (); | | (); | |
| while( i != fieldTypes_.end() ) { | | while( i != _fieldTypes.end() ) { | |
| if ( j == other.fieldTypes_.end() ) | | if ( j == other._fieldTypes.end() ) | |
| return false; | | return false; | |
| if ( i->first < j->first ) | | if ( i->first < j->first ) | |
| return true; | | return true; | |
| else if ( i->first > j->first ) | | else if ( i->first > j->first ) | |
| return false; | | return false; | |
| if ( i->second < j->second ) | | if ( i->second < j->second ) | |
| return true; | | return true; | |
| else if ( i->second > j->second ) | | else if ( i->second > j->second ) | |
| return false; | | return false; | |
| ++i; | | ++i; | |
| ++j; | | ++j; | |
| } | | } | |
|
| if ( j != other.fieldTypes_.end() ) | | if ( j != other._fieldTypes.end() ) | |
| return true; | | return true; | |
|
| return sort_.woCompare( other.sort_ ) < 0; | | return _sort.woCompare( other._sort ) < 0; | |
| } | | } | |
| private: | | private: | |
| QueryPattern() {} | | QueryPattern() {} | |
| void setSort( const BSONObj sort ) { | | void setSort( const BSONObj sort ) { | |
|
| sort_ = normalizeSort( sort ); | | _sort = normalizeSort( sort ); | |
| } | | } | |
| BSONObj static normalizeSort( const BSONObj &spec ) { | | BSONObj static normalizeSort( const BSONObj &spec ) { | |
| if ( spec.isEmpty() ) | | if ( spec.isEmpty() ) | |
| return spec; | | return spec; | |
| int direction = ( spec.firstElement().number() >= 0 ) ? 1 : -1; | | int direction = ( spec.firstElement().number() >= 0 ) ? 1 : -1; | |
| BSONObjIterator i( spec ); | | BSONObjIterator i( spec ); | |
| BSONObjBuilder b; | | BSONObjBuilder b; | |
| while( i.moreWithEOO() ) { | | while( i.moreWithEOO() ) { | |
| BSONElement e = i.next(); | | BSONElement e = i.next(); | |
| if ( e.eoo() ) | | if ( e.eoo() ) | |
| break; | | break; | |
| b.append( e.fieldName(), direction * ( ( e.number() >= 0 )
? -1 : 1 ) ); | | b.append( e.fieldName(), direction * ( ( e.number() >= 0 )
? -1 : 1 ) ); | |
| } | | } | |
| return b.obj(); | | return b.obj(); | |
| } | | } | |
|
| map< string, Type > fieldTypes_; | | map< string, Type > _fieldTypes; | |
| BSONObj sort_; | | BSONObj _sort; | |
| }; | | }; | |
| | | | |
| // a BoundList contains intervals specified by inclusive start | | // a BoundList contains intervals specified by inclusive start | |
| // and end bounds. The intervals should be nonoverlapping and occur in | | // and end bounds. The intervals should be nonoverlapping and occur in | |
| // the specified direction of traversal. For example, given a simple i
ndex {i:1} | | // the specified direction of traversal. For example, given a simple i
ndex {i:1} | |
| // and direction +1, one valid BoundList is: (1, 2); (4, 6). The same
BoundList | | // and direction +1, one valid BoundList is: (1, 2); (4, 6). The same
BoundList | |
| // would be valid for index {i:-1} with direction -1. | | // would be valid for index {i:-1} with direction -1. | |
| typedef vector< pair< BSONObj, BSONObj > > BoundList; | | typedef vector< pair< BSONObj, BSONObj > > BoundList; | |
| | | | |
| // ranges of fields' value that may be determined from query -- used to | | // ranges of fields' value that may be determined from query -- used to | |
| // determine index limits | | // determine index limits | |
| class FieldRangeSet { | | class FieldRangeSet { | |
| public: | | public: | |
|
| | | friend class FieldRangeOrSet; | |
| FieldRangeSet( const char *ns, const BSONObj &query , bool optimize
=true ); | | FieldRangeSet( const char *ns, const BSONObj &query , bool optimize
=true ); | |
| const FieldRange &range( const char *fieldName ) const { | | const FieldRange &range( const char *fieldName ) const { | |
|
| map< string, FieldRange >::const_iterator f = ranges_.find( fie | | map< string, FieldRange >::const_iterator f = _ranges.find( fie | |
| ldName ); | | ldName ); | |
| if ( f == ranges_.end() ) | | if ( f == _ranges.end() ) | |
| return trivialRange(); | | return trivialRange(); | |
| return f->second; | | return f->second; | |
| } | | } | |
| int nNontrivialRanges() const { | | int nNontrivialRanges() const { | |
| int count = 0; | | int count = 0; | |
|
| for( map< string, FieldRange >::const_iterator i = ranges_.begi
n(); i != ranges_.end(); ++i ) | | for( map< string, FieldRange >::const_iterator i = _ranges.begi
n(); i != _ranges.end(); ++i ) | |
| if ( i->second.nontrivial() ) | | if ( i->second.nontrivial() ) | |
| ++count; | | ++count; | |
| return count; | | return count; | |
| } | | } | |
|
| const char *ns() const { return ns_; } | | const char *ns() const { return _ns; } | |
| BSONObj query() const { return query_; } | | | |
| // if fields is specified, order fields of returned object to match
those of 'fields' | | // if fields is specified, order fields of returned object to match
those of 'fields' | |
| BSONObj simplifiedQuery( const BSONObj &fields = BSONObj() ) const; | | BSONObj simplifiedQuery( const BSONObj &fields = BSONObj() ) const; | |
| bool matchPossible() const { | | bool matchPossible() const { | |
|
| for( map< string, FieldRange >::const_iterator i = ranges_.begi
n(); i != ranges_.end(); ++i ) | | for( map< string, FieldRange >::const_iterator i = _ranges.begi
n(); i != _ranges.end(); ++i ) | |
| if ( i->second.empty() ) | | if ( i->second.empty() ) | |
| return false; | | return false; | |
| return true; | | return true; | |
| } | | } | |
| QueryPattern pattern( const BSONObj &sort = BSONObj() ) const; | | QueryPattern pattern( const BSONObj &sort = BSONObj() ) const; | |
| BoundList indexBounds( const BSONObj &keyPattern, int direction ) c
onst; | | BoundList indexBounds( const BSONObj &keyPattern, int direction ) c
onst; | |
| string getSpecial() const; | | string getSpecial() const; | |
|
| | | // intended to handle sets without _orSets | |
| | | const FieldRangeSet &operator-=( const FieldRangeSet &other ) { | |
| | | for( map< string, FieldRange >::const_iterator i = other._range | |
| | | s.begin(); | |
| | | i != other._ranges.end(); ++i ) { | |
| | | map< string, FieldRange >::iterator f = _ranges.find( i->fi | |
| | | rst.c_str() ); | |
| | | if ( f != _ranges.end() ) | |
| | | f->second -= i->second; | |
| | | } | |
| | | return *this; | |
| | | } | |
| | | BSONObj query() const { return _query; } | |
| private: | | private: | |
|
| | | void processQueryField( const BSONElement &e, bool optimize ); | |
| void processOpElement( const char *fieldName, const BSONElement &f,
bool isNot, bool optimize ); | | void processOpElement( const char *fieldName, const BSONElement &f,
bool isNot, bool optimize ); | |
| static FieldRange *trivialRange_; | | static FieldRange *trivialRange_; | |
| static FieldRange &trivialRange(); | | static FieldRange &trivialRange(); | |
|
| mutable map< string, FieldRange > ranges_; | | mutable map< string, FieldRange > _ranges; | |
| const char *ns_; | | const char *_ns; | |
| BSONObj query_; | | BSONObj _query; | |
| }; | | }; | |
| | | | |
|
| | | // generages FieldRangeSet objects, accounting for or clauses | |
| | | // generates FieldRangeSet objects, accounting for or clauses | |
| | | // class FieldRangeOrSet { | |
| | | // public: | |
| | | // FieldRangeOrSet( const char *ns, const BSONObj &query , bool opti | |
| | | mize=true ); | |
| | | // // if there's a trivial or clause, we won't use or ranges to help | |
| | | with scanning | |
| | | // bool trivialOr() const { | |
| | | // for( list< FieldRangeSet >::const_iterator i = _orSets.begin( | |
| | | ); i != _orSets.end(); ++i ) { | |
| | | // if ( i->nNontrivialRanges() == 0 ) { | |
| | | // return true; | |
| | | // } | |
| | | // } | |
| | | // return false; | |
| | | // } | |
| | | // bool orFinished() const { return _orFound && _orSets.empty(); } | |
| | | // // removes first or clause, and removes the field ranges it cover | |
| | | s from all subsequent or clauses | |
| | | // void popOrClause() { | |
| | | // massert( 13274, "no or clause to pop", !orFinished() ); | |
| | | // const FieldRangeSet &toPop = _orSets.front(); | |
| | | // list< FieldRangeSet >::iterator i = _orSets.begin(); | |
| | | // ++i; | |
| | | // while( i != _orSets.end() ) { | |
| | | // *i -= toPop; | |
| | | // if( !i->matchPossible() ) { | |
| | | // i = _orSets.erase( i ); | |
| | | // } else { | |
| | | // ++i; | |
| | | // } | |
| | | // } | |
| | | // _orSets.pop_front(); | |
| | | // } | |
| | | // private: | |
| | | // FieldRangeSet _baseSet; | |
| | | // list< FieldRangeSet > _orSets; | |
| | | // bool _orFound; | |
| | | // }; | |
| | | | |
| /** | | /** | |
| used for doing field limiting | | used for doing field limiting | |
| */ | | */ | |
| class FieldMatcher { | | class FieldMatcher { | |
| public: | | public: | |
| FieldMatcher() | | FieldMatcher() | |
| : _include(true) | | : _include(true) | |
| , _special(false) | | , _special(false) | |
| , _includeID(true) | | , _includeID(true) | |
| , _skip(0) | | , _skip(0) | |
| | | | |
| skipping to change at line 217 | | skipping to change at line 269 | |
| void add( const BSONObj& o ); | | void add( const BSONObj& o ); | |
| | | | |
| void append( BSONObjBuilder& b , const BSONElement& e ) const; | | void append( BSONObjBuilder& b , const BSONElement& e ) const; | |
| | | | |
| BSONObj getSpec() const; | | BSONObj getSpec() const; | |
| bool includeID() { return _includeID; } | | bool includeID() { return _includeID; } | |
| private: | | private: | |
| | | | |
| void add( const string& field, bool include ); | | void add( const string& field, bool include ); | |
| void add( const string& field, int skip, int limit ); | | void add( const string& field, int skip, int limit ); | |
|
| void appendArray( BSONObjBuilder& b , const BSONObj& a ) const; | | void appendArray( BSONObjBuilder& b , const BSONObj& a , bool neste
d=false) const; | |
| | | | |
| bool _include; // true if default at this level is to include | | bool _include; // true if default at this level is to include | |
| bool _special; // true if this level can't be skipped or included w
ithout recursing | | bool _special; // true if this level can't be skipped or included w
ithout recursing | |
| //TODO: benchmark vector<pair> vs map | | //TODO: benchmark vector<pair> vs map | |
| typedef map<string, boost::shared_ptr<FieldMatcher> > FieldMap; | | typedef map<string, boost::shared_ptr<FieldMatcher> > FieldMap; | |
| FieldMap _fields; | | FieldMap _fields; | |
| BSONObj _source; | | BSONObj _source; | |
| bool _includeID; | | bool _includeID; | |
| | | | |
| // used for $slice operator | | // used for $slice operator | |
| | | | |
End of changes. 23 change blocks. |
| 46 lines changed or deleted | | 107 lines changed or added | |
|
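For readers new to FieldRange: each queried field is reduced to a list of non-overlapping intervals, and combining constraints (operator&= and the new operator-= above) is interval arithmetic over that list. Below is a minimal numeric sketch of the intersection case, with plain doubles standing in for BSONElement bounds and both ends treated as inclusive; it is a conceptual illustration, not the real implementation.

    // A field constraint as a sorted list of non-overlapping [lower, upper] intervals,
    // and the pairwise intersection of two such lists (the idea behind operator&=).
    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct Interval { double lower, upper; };   // both bounds inclusive in this sketch

    std::vector<Interval> intersect(const std::vector<Interval>& a,
                                    const std::vector<Interval>& b) {
        std::vector<Interval> out;
        std::size_t i = 0, j = 0;
        while (i < a.size() && j < b.size()) {
            double lo = std::max(a[i].lower, b[j].lower);
            double hi = std::min(a[i].upper, b[j].upper);
            if (lo <= hi) out.push_back({lo, hi});   // keep only valid overlaps
            // advance whichever interval ends first
            (a[i].upper < b[j].upper) ? ++i : ++j;
        }
        return out;
    }

    int main() {
        // {x: {$gte:1, $lte:5}} intersected with {x: {$in: [2, 4, 9]}}
        std::vector<Interval> range = {{1, 5}};
        std::vector<Interval> inSet = {{2, 2}, {4, 4}, {9, 9}};
        for (const Interval& iv : intersect(range, inSet))
            std::cout << "[" << iv.lower << ", " << iv.upper << "]\n";   // [2, 2] and [4, 4]
    }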
| replset.h | | replset.h | |
| | | | |
| skipping to change at line 23 | | skipping to change at line 23 | |
| * GNU Affero General Public License for more details. | | * GNU Affero General Public License for more details. | |
| * | | * | |
| * You should have received a copy of the GNU Affero General Public Licen
se | | * You should have received a copy of the GNU Affero General Public Licen
se | |
| * along with this program. If not, see <http://www.gnu.org/licenses/>. | | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include "../../util/concurrency/list.h" | | #include "../../util/concurrency/list.h" | |
| #include "../../util/concurrency/value.h" | | #include "../../util/concurrency/value.h" | |
|
| | | #include "../../util/concurrency/msg.h" | |
| #include "../../util/hostandport.h" | | #include "../../util/hostandport.h" | |
|
| | | #include "../commands.h" | |
| | | #include "rstime.h" | |
| | | #include "rsmember.h" | |
| | | #include "rs_config.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
|
| | | struct Target; | |
| extern bool replSet; // true if using repl sets | | extern bool replSet; // true if using repl sets | |
| extern class ReplSet *theReplSet; // null until initialized | | extern class ReplSet *theReplSet; // null until initialized | |
|
| | | extern Tee *rsLog; | |
| | | | |
| | | /** most operations on a ReplSet object should be done while locked. */ | |
| | | class RSBase : boost::noncopyable { | |
| | | private: | |
| | | mutex m; | |
| | | int _locked; | |
| | | protected: | |
| | | RSBase() : m("RSBase"), _locked(0) { } | |
| | | class lock : scoped_lock { | |
| | | RSBase& _b; | |
| | | public: | |
| | | lock(RSBase* b) : scoped_lock(b->m), _b(*b) { b->_locked++; } | |
| | | ~lock() { _b._locked--; } | |
| | | }; | |
| | | bool locked() const { return _locked; } | |
| | | }; | |
| | | | |
| /* information about the entire repl set, such as the various servers i
n the set, and their state */ | | /* information about the entire repl set, such as the various servers i
n the set, and their state */ | |
| /* note: We currently do not free mem when the set goes away - it is as
sumed the replset is a | | /* note: We currently do not free mem when the set goes away - it is as
sumed the replset is a | |
| singleton and long lived. | | singleton and long lived. | |
| */ | | */ | |
|
| class ReplSet { | | class ReplSet : RSBase { | |
| public: | | public: | |
|
| bool isMaster(const char *client) { | | /** info on our state if the replset isn't yet "up". for example, | |
| //zzz | | if we are pre-initiation. */ | |
| return false; | | enum StartupStatus { | |
| } | | PRESTART=0, LOADINGCONFIG=1, BADCONFIG=2, EMPTYCONFIG=3, | |
| void fillIsMaster(BSONObjBuilder&); | | EMPTYUNREACHABLE=4, STARTED=5, SOON=6 | |
| | | }; | |
| static enum StartupStatus { PRESTART=0, LOADINGCONFIG=1, BADCONFIG= | | static StartupStatus startupStatus; | |
| 2, EMPTYCONFIG=3, EMPTYUNREACHABLE=4, FINISHME=5 } startupStatus; | | | |
| static string startupStatusMsg; | | static string startupStatusMsg; | |
|
| bool fatal; | | | |
| | | | |
|
| bool ok() const { return !fatal; } | | void fatal(); | |
| | | bool isMaster(const char *client); | |
| | | void fillIsMaster(BSONObjBuilder&); | |
| | | bool ok() const { return _myState != FATAL; } | |
| | | MemberState state() const { return _myState; } | |
| | | string name() const { return _name; } /* @return replica set's logi | |
| | | cal name */ | |
| | | | |
|
| /* @return replica set's logical name */ | | void relinquish(); | |
| string getName() const { return _name; } | | void assumePrimary(); | |
| | | | |
| /* cfgString format is | | /* cfgString format is | |
| replsetname/host1,host2:port,... | | replsetname/host1,host2:port,... | |
| where :port is optional. | | where :port is optional. | |
|
| | | throws exception if a problem initializing. */ | |
| throws exception if a problem initializing. | | | |
| */ | | | |
| ReplSet(string cfgString); | | ReplSet(string cfgString); | |
| | | | |
|
| | | /* call after constructing to start - returns fairly quickly after | |
| | | launching its threads */ | |
| | | void go() { _myState = STARTUP2; startThreads(); } | |
| | | | |
| // for replSetGetStatus command | | // for replSetGetStatus command | |
| void summarizeStatus(BSONObjBuilder&) const; | | void summarizeStatus(BSONObjBuilder&) const; | |
|
| | | void summarizeAsHtml(stringstream&) const; | |
| | | const ReplSetConfig& config() { return *_cfg; } | |
| | | | |
| private: | | private: | |
|
| | | MemberState _myState; | |
| string _name; | | string _name; | |
| const vector<HostAndPort> *_seeds; | | const vector<HostAndPort> *_seeds; | |
|
| | | ReplSetConfig *_cfg; | |
| | | | |
| /** load our configuration from admin.replset. try seed machines t
oo. | | /** load our configuration from admin.replset. try seed machines t
oo. | |
| throws exception if a problem. | | throws exception if a problem. | |
| */ | | */ | |
|
| | | void _loadConfigFinish(vector<ReplSetConfig>& v); | |
| void loadConfig(); | | void loadConfig(); | |
|
| | | void initFromConfig(ReplSetConfig& c);//, bool save); | |
| | | | |
|
| // void addMemberIfMissing(const HostAndPort& p); | | class Consensus { | |
| | | ReplSet &rs; | |
| | | struct LastYea { | |
| | | LastYea() : when(0), who(0xffffffff) { } | |
| | | time_t when; | |
| | | unsigned who; | |
| | | }; | |
| | | Atomic<LastYea> ly; | |
| | | unsigned yea(unsigned memberId); // throws VoteException | |
| | | void _electSelf(); | |
| | | bool weAreFreshest(); | |
| | | public: | |
| | | Consensus(ReplSet *t) : rs(*t) { } | |
| | | int totalVotes() const; | |
| | | bool aMajoritySeemsToBeUp() const; | |
| | | void electSelf(); | |
| | | void electCmdReceived(BSONObj, BSONObjBuilder*); | |
| | | } elect; | |
| | | | |
|
| struct MemberInfo : public List1<MemberInfo>::Base { | | public: | |
| MemberInfo(string h, int p) : _port(p), _host(h) { | | class Member : public List1<Member>::Base { | |
| _dead = false; | | public: | |
| _lastHeartbeat = 0; | | Member(HostAndPort h, unsigned ord, const ReplSetConfig::Member | |
| _upSince = 0; | | Cfg *c); | |
| _health = -1.0; | | | |
| } | | string fullName() const { return h().toString(); } | |
| string fullName() const { | | const ReplSetConfig::MemberCfg& config() const { return *_confi | |
| if( _port < 0 ) return _host; | | g; } | |
| stringstream ss; | | const HeartbeatInfo& hbinfo() const { return _hbinfo; } | |
| ss << _host << ':' << _port; | | string lhb() { return _hbinfo.lastHeartbeatMsg; } | |
| return ss.str(); | | MemberState state() const { return _hbinfo.hbstate; } | |
| } | | const HostAndPort& h() const { return _h; } | |
| double health() const { return _health; } | | unsigned id() const { return _hbinfo.id(); } | |
| time_t upSince() const { return _upSince; } | | | |
| time_t lastHeartbeat() const { return _lastHeartbeat; } | | void summarizeAsHtml(stringstream& s) const; | |
| | | friend class ReplSet; | |
| private: | | private: | |
|
| friend class FeedbackThread; // feedbackthread is the primary w | | const ReplSetConfig::MemberCfg *_config; /* todo: when this cha | |
| riter to these objects | | nges??? */ | |
| | | HostAndPort _h; | |
| | | HeartbeatInfo _hbinfo; | |
| | | }; | |
| | | list<HostAndPort> memberHostnames() const; | |
| | | const Member* currentPrimary() const { return _currentPrimary; } | |
| | | bool primary() const { return _myState == PRIMARY; } | |
| | | const ReplSetConfig::MemberCfg& myConfig() const { return _self->co | |
| | | nfig(); } | |
| | | void msgUpdateHBInfo(HeartbeatInfo); | |
| | | | |
|
| bool _dead; | | private: | |
| const int _port; | | const Member *_currentPrimary; | |
| const string _host; | | Member *_self; | |
| double _health; | | List1<Member> _members; /* all members of the set EXCEPT self. */ | |
| time_t _lastHeartbeat; | | | |
| time_t _upSince; | | public: | |
| | | class Manager : public task::Server { | |
| | | bool got(const any&); | |
| | | ReplSet *rs; | |
| | | int _primary; | |
| | | const Member* findOtherPrimary(); | |
| | | void noteARemoteIsPrimary(const Member *); | |
| public: | | public: | |
|
| DiagStr _lastHeartbeatErrMsg; | | Manager(ReplSet *rs); | |
| | | void msgReceivedNewConfig(BSONObj) { assert(false); } | |
| | | void msgCheckNewState(); | |
| }; | | }; | |
|
| /* all members of the set EXCEPT SELF. */ | | shared_ptr<Manager> mgr; | |
| List1<MemberInfo> _members; | | | |
| | | | |
|
| void startHealthThreads(); | | private: | |
| | | Member* head() const { return _members.head(); } | |
| | | void getTargets(list<Target>&); | |
| | | static string stateAsStr(MemberState state); | |
| | | static string stateAsHtml(MemberState state); | |
| | | void startThreads(); | |
| friend class FeedbackThread; | | friend class FeedbackThread; | |
|
| | | friend class CmdReplSetElect; | |
| | | }; | |
| | | | |
| | | inline void ReplSet::fatal() | |
| | | { | |
| | | lock l(this); | |
| | | _myState = FATAL; | |
| | | log() << "replSet error fatal error, stopping replication" << rsLog | |
| | | ; | |
| | | } | |
| | | | |
| | | inline ReplSet::Member::Member(HostAndPort h, unsigned ord, const ReplS | |
| | | etConfig::MemberCfg *c) : | |
| | | _config(c), _h(h), _hbinfo(ord) { } | |
| | | | |
| | | inline bool ReplSet::isMaster(const char *client) { | |
| | | /* todo replset */ | |
| | | return false; | |
| | | } | |
| | | | |
| | | class ReplSetCommand : public Command { | |
| | | protected: | |
| | | ReplSetCommand(const char * s) : Command(s) { } | |
| | | virtual bool slaveOk() const { return true; } | |
| | | virtual bool adminOnly() const { return true; } | |
| | | virtual bool logTheOp() { return false; } | |
| | | virtual LockType locktype() const { return NONE; } | |
| | | virtual void help( stringstream &help ) const { help << "internal"; | |
| | | } | |
| | | bool check(string& errmsg, BSONObjBuilder& result) { | |
| | | if( !replSet ) { | |
| | | errmsg = "not running with --replSet"; | |
| | | return false; | |
| | | } | |
| | | if( theReplSet == 0 ) { | |
| | | result.append("startupStatus", ReplSet::startupStatus); | |
| | | errmsg = ReplSet::startupStatusMsg.empty() ? "replset unkno | |
| | | wn error 2" : ReplSet::startupStatusMsg; | |
| | | return false; | |
| | | } | |
| | | return true; | |
| | | } | |
| }; | | }; | |
| | | | |
| } | | } | |
| | | | |
End of changes. 24 change blocks. |
| 45 lines changed or deleted | | 161 lines changed or added | |
|
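The new RSBase base class above packages the set-wide mutex with an RAII lock that also counts nesting, so member functions can cheaply assert they are called under the lock. A standalone sketch of the same idiom, using std::mutex in place of mongo::mutex/scoped_lock:

    #include <cassert>
    #include <iostream>
    #include <mutex>

    class RSBaseSketch {
        std::mutex m;
        int _locked = 0;
    protected:
        // RAII lock: holds the mutex for the scope and bumps the nesting counter.
        class lock {
            std::lock_guard<std::mutex> _guard;
            RSBaseSketch& _b;
        public:
            explicit lock(RSBaseSketch* b) : _guard(b->m), _b(*b) { ++_b._locked; }
            ~lock() { --_b._locked; }
        };
        bool locked() const { return _locked > 0; }
    public:
        void doSomethingThatNeedsTheLock() {
            lock lk(this);              // take the set-wide lock for this scope
            mutateSharedState();
        }
    private:
        void mutateSharedState() {
            assert(locked());           // cheap sanity check that the caller holds the lock
            std::cout << "state changed under lock\n";
        }
    };

    int main() {
        RSBaseSketch rs;
        rs.doSomethingThatNeedsTheLock();
    }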
| syncclusterconnection.h | | syncclusterconnection.h | |
|
| // syncclusterconnection.h | | // @file syncclusterconnection.h | |
| | | | |
| /* | | /* | |
| * Copyright 2010 10gen Inc. | | * Copyright 2010 10gen Inc. | |
| * | | * | |
| * Licensed under the Apache License, Version 2.0 (the "License"); | | * Licensed under the Apache License, Version 2.0 (the "License"); | |
| * you may not use this file except in compliance with the License. | | * you may not use this file except in compliance with the License. | |
| * You may obtain a copy of the License at | | * You may obtain a copy of the License at | |
| * | | * | |
| * http://www.apache.org/licenses/LICENSE-2.0 | | * http://www.apache.org/licenses/LICENSE-2.0 | |
| * | | * | |
| * Unless required by applicable law or agreed to in writing, software | | * Unless required by applicable law or agreed to in writing, software | |
| | | | |
| skipping to change at line 25 | | skipping to change at line 26 | |
| * limitations under the License. | | * limitations under the License. | |
| */ | | */ | |
| | | | |
| #include "../pch.h" | | #include "../pch.h" | |
| #include "dbclient.h" | | #include "dbclient.h" | |
| #include "redef_macros.h" | | #include "redef_macros.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| /** | | /** | |
|
| * this is a connection to a cluster of servers that operate as one | | * This is a connection to a cluster of servers that operate as one | |
| * for super high durability | | * for super high durability. | |
| | | * | |
| | | * Write operations are two-phase. First, all nodes are asked to fsync | |
| | | . If successful | |
| | | * everywhere, the write is sent everywhere and then followed by an fsy | |
| | | nc. There is no | |
| | | * rollback if a problem occurs during the second phase. Naturally, wi | |
| | | th all these fsyncs, | |
| | | * these operations will be quite slow -- use sparingly. | |
| | | * | |
| | | * Read operations are sent to a single random node. | |
| | | * | |
| | | * The class checks if a command is read or write style, and sends to a | |
| | | single | |
| | | * node if a read lock command and to all in two phases with a write st | |
| | | yle command. | |
| */ | | */ | |
| class SyncClusterConnection : public DBClientBase { | | class SyncClusterConnection : public DBClientBase { | |
| public: | | public: | |
| /** | | /** | |
|
| * @param commaSeperated should be 3 hosts comma seperated | | * @param commaSeparated should be 3 hosts comma separated | |
| */ | | */ | |
|
| SyncClusterConnection( string commaSeperated ); | | SyncClusterConnection( const list<HostAndPort> & ); | |
| | | SyncClusterConnection( string commaSeparated ); | |
| SyncClusterConnection( string a , string b , string c ); | | SyncClusterConnection( string a , string b , string c ); | |
| ~SyncClusterConnection(); | | ~SyncClusterConnection(); | |
| | | | |
| /** | | /** | |
| * @return true if all servers are up and ready for writes | | * @return true if all servers are up and ready for writes | |
| */ | | */ | |
| bool prepare( string& errmsg ); | | bool prepare( string& errmsg ); | |
| | | | |
| /** | | /** | |
| * runs fsync on all servers | | * runs fsync on all servers | |
| | | | |
| skipping to change at line 64 | | skipping to change at line 76 | |
| virtual auto_ptr<DBClientCursor> getMore( const string &ns, long lo
ng cursorId, int nToReturn, int options ); | | virtual auto_ptr<DBClientCursor> getMore( const string &ns, long lo
ng cursorId, int nToReturn, int options ); | |
| | | | |
| virtual void insert( const string &ns, BSONObj obj ); | | virtual void insert( const string &ns, BSONObj obj ); | |
| | | | |
| virtual void insert( const string &ns, const vector< BSONObj >& v )
; | | virtual void insert( const string &ns, const vector< BSONObj >& v )
; | |
| | | | |
| virtual void remove( const string &ns , Query query, bool justOne )
; | | virtual void remove( const string &ns , Query query, bool justOne )
; | |
| | | | |
| virtual void update( const string &ns , Query query , BSONObj obj ,
bool upsert , bool multi ); | | virtual void update( const string &ns , Query query , BSONObj obj ,
bool upsert , bool multi ); | |
| | | | |
|
| virtual string toString(){ | | | |
| return _toString(); | | | |
| } | | | |
| | | | |
| virtual bool call( Message &toSend, Message &response, bool assertO
k ); | | virtual bool call( Message &toSend, Message &response, bool assertO
k ); | |
| virtual void say( Message &toSend ); | | virtual void say( Message &toSend ); | |
| virtual void sayPiggyBack( Message &toSend ); | | virtual void sayPiggyBack( Message &toSend ); | |
| | | | |
| virtual string getServerAddress() const { return _address; } | | virtual string getServerAddress() const { return _address; } | |
|
| | | virtual bool isFailed() const { return false; } | |
| virtual bool isFailed() const { | | virtual string toString() { return _toString(); } | |
| return false; | | | |
| } | | | |
| | | | |
| private: | | private: | |
|
| | | | |
| SyncClusterConnection( SyncClusterConnection& prev ); | | SyncClusterConnection( SyncClusterConnection& prev ); | |
|
| | | | |
| string _toString() const; | | string _toString() const; | |
|
| | | | |
| bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSO
NObj &info, int options=0); | | bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSO
NObj &info, int options=0); | |
|
| | | | |
| auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query que
ry, int nToReturn, int nToSkip, | | auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query que
ry, int nToReturn, int nToSkip, | |
| const BSONObj *fieldsToRetu
rn, int queryOptions, int batchSize ); | | const BSONObj *fieldsToRetu
rn, int queryOptions, int batchSize ); | |
|
| | | | |
| int _lockType( const string& name ); | | int _lockType( const string& name ); | |
|
| | | | |
| void _checkLast(); | | void _checkLast(); | |
|
| | | | |
| void _connect( string host ); | | void _connect( string host ); | |
| | | | |
| string _address; | | string _address; | |
| vector<DBClientConnection*> _conns; | | vector<DBClientConnection*> _conns; | |
|
| | | | |
| map<string,int> _lockTypes; | | map<string,int> _lockTypes; | |
| mongo::mutex _mutex; | | mongo::mutex _mutex; | |
| }; | | }; | |
| | | | |
| }; | | }; | |
| | | | |
| #include "undef_macros.h" | | #include "undef_macros.h" | |
| | | | |
End of changes. 14 change blocks. |
| 21 lines changed or deleted | | 24 lines changed or added | |
|
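The two-phase write described in the SyncClusterConnection comment (fsync every node first; only if all succeed, send the write everywhere, with no rollback in the second phase) can be sketched as follows. Node is a hypothetical stand-in for DBClientConnection and the error handling is simplified.

    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    struct Node {
        std::string name;
        bool fsync()                      { std::cout << "fsync  " << name << "\n"; return true; }
        bool write(const std::string& op) { std::cout << "write  " << name << ": " << op << "\n"; return true; }
    };

    void clusterWrite(std::vector<Node>& nodes, const std::string& op) {
        // Phase 1: make sure every node is reachable and flushed before touching data.
        for (Node& n : nodes)
            if (!n.fsync())
                throw std::runtime_error("prepare failed on " + n.name + ", write not attempted");

        // Phase 2: apply the write everywhere, then verify each node accepted it.
        // There is no rollback if a node fails here, matching the class comment.
        for (Node& n : nodes)
            if (!n.write(op))
                throw std::runtime_error("write failed on " + n.name + " (no rollback)");
    }

    int main() {
        std::vector<Node> nodes = {{"a:27018"}, {"b:27018"}, {"c:27018"}};
        clusterWrite(nodes, "insert {_id: 1}");
    }

Reads, by contrast, go to a single node, which is why the class distinguishes read-style from write-style commands before deciding whether to fan out.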
| update.h | | update.h | |
| | | | |
| skipping to change at line 103 | | skipping to change at line 103 | |
| switch (op){ | | switch (op){ | |
| case PUSH: | | case PUSH: | |
| case PUSH_ALL: | | case PUSH_ALL: | |
| case POP: | | case POP: | |
| return true; | | return true; | |
| default: | | default: | |
| return false; | | return false; | |
| } | | } | |
| } | | } | |
| | | | |
|
| bool isIndexed( const set<string>& idxKeys ) const { | | static bool isIndexed( const string& fullName , const set<string>& | |
| | | idxKeys ){ | |
| | | const char * fieldName = fullName.c_str(); | |
| // check if there is an index key that is a parent of mod | | // check if there is an index key that is a parent of mod | |
| for( const char *dot = strchr( fieldName, '.' ); dot; dot = str
chr( dot + 1, '.' ) ) | | for( const char *dot = strchr( fieldName, '.' ); dot; dot = str
chr( dot + 1, '.' ) ) | |
| if ( idxKeys.count( string( fieldName, dot - fieldName ) )
) | | if ( idxKeys.count( string( fieldName, dot - fieldName ) )
) | |
| return true; | | return true; | |
|
| string fullName = fieldName; | | | |
| // check if there is an index key equal to mod | | // check if there is an index key equal to mod | |
| if ( idxKeys.count(fullName) ) | | if ( idxKeys.count(fullName) ) | |
| return true; | | return true; | |
| // check if there is an index key that is a child of mod | | // check if there is an index key that is a child of mod | |
| set< string >::const_iterator j = idxKeys.upper_bound( fullName
); | | set< string >::const_iterator j = idxKeys.upper_bound( fullName
); | |
| if ( j != idxKeys.end() && j->find( fullName ) == 0 && (*j)[ful
lName.size()] == '.' ) | | if ( j != idxKeys.end() && j->find( fullName ) == 0 && (*j)[ful
lName.size()] == '.' ) | |
| return true; | | return true; | |
|
| | | | |
| | | return false; | |
| | | } | |
| | | | |
| | | bool isIndexed( const set<string>& idxKeys ) const { | |
| | | string fullName = fieldName; | |
| | | | |
| | | if ( isIndexed( fullName , idxKeys ) ) | |
| | | return true; | |
| | | | |
| | | if ( strstr( fieldName , "." ) ){ | |
| | | // check for a.0.1 | |
| | | StringBuilder buf( fullName.size() + 1 ); | |
| | | for ( size_t i=0; i<fullName.size(); i++ ){ | |
| | | char c = fullName[i]; | |
| | | buf << c; | |
| | | | |
| | | if ( c != '.' ) | |
| | | continue; | |
| | | | |
| | | if ( ! isdigit( fullName[i+1] ) ) | |
| | | continue; | |
| | | | |
| | | bool possible = true; | |
| | | size_t j=i+2; | |
| | | for ( ; j<fullName.size(); j++ ){ | |
| | | char d = fullName[j]; | |
| | | if ( d == '.' ) | |
| | | break; | |
| | | if ( isdigit( d ) ) | |
| | | continue; | |
| | | possible = false; | |
| | | break; | |
| | | } | |
| | | | |
| | | if ( possible ) | |
| | | i = j; | |
| | | } | |
| | | string x = buf.str(); | |
| | | if ( isIndexed( x , idxKeys ) ) | |
| | | return true; | |
| | | } | |
| | | | |
| return false; | | return false; | |
| } | | } | |
| | | | |
| template< class Builder > | | template< class Builder > | |
| void apply( Builder& b , BSONElement in , ModState& ms ) const; | | void apply( Builder& b , BSONElement in , ModState& ms ) const; | |
| | | | |
| /** | | /** | |
| * @return true iff toMatch should be removed from the array | | * @return true iff toMatch should be removed from the array | |
| */ | | */ | |
| bool _pullElementMatch( BSONElement& toMatch ) const; | | bool _pullElementMatch( BSONElement& toMatch ) const; | |
| | | | |
| skipping to change at line 368 | | skipping to change at line 412 | |
| case Mod::BIT: | | case Mod::BIT: | |
| case Mod::BITAND: | | case Mod::BITAND: | |
| case Mod::BITOR: | | case Mod::BITOR: | |
| // TODO: should we convert this to $set? | | // TODO: should we convert this to $set? | |
| return false; | | return false; | |
| default: | | default: | |
| return false; | | return false; | |
| } | | } | |
| } | | } | |
| | | | |
|
| void appendForOpLog( BSONObjBuilder& b ) const { | | void appendForOpLog( BSONObjBuilder& b ) const; | |
| if ( incType ){ | | | |
| BSONObjBuilder bb( b.subobjStart( "$set" ) ); | | | |
| appendIncValue( bb ); | | | |
| bb.done(); | | | |
| return; | | | |
| } | | | |
| | | | |
| const char * name = fixedOpName ? fixedOpName : Mod::modNames[o | | | |
| p()]; | | | |
| | | | |
| BSONObjBuilder bb( b.subobjStart( name ) ); | | | |
| if ( fixed ) | | | |
| bb.appendAs( *fixed , m->fieldName ); | | | |
| else | | | |
| bb.appendAs( m->elt , m->fieldName ); | | | |
| bb.done(); | | | |
| } | | | |
| | | | |
| template< class Builder > | | template< class Builder > | |
| void apply( Builder& b , BSONElement in ){ | | void apply( Builder& b , BSONElement in ){ | |
| m->apply( b , in , *this ); | | m->apply( b , in , *this ); | |
| } | | } | |
| | | | |
| template< class Builder > | | template< class Builder > | |
|
| void appendIncValue( Builder& b ) const { | | void appendIncValue( Builder& b , bool useFullName ) const { | |
| | | const char * n = useFullName ? m->fieldName : m->shortFieldName | |
| | | ; | |
| | | | |
| switch ( incType ){ | | switch ( incType ){ | |
| case NumberDouble: | | case NumberDouble: | |
|
| b.append( m->shortFieldName , incdouble ); break; | | b.append( n , incdouble ); break; | |
| case NumberLong: | | case NumberLong: | |
|
| b.append( m->shortFieldName , inclong ); break; | | b.append( n , inclong ); break; | |
| case NumberInt: | | case NumberInt: | |
|
| b.append( m->shortFieldName , incint ); break; | | b.append( n , incint ); break; | |
| default: | | default: | |
| assert(0); | | assert(0); | |
| } | | } | |
| } | | } | |
|
| | | | |
| | | string toString() const; | |
| }; | | }; | |
| | | | |
| /** | | /** | |
| * this is used to hold state, meta data while applying a ModSet to a B
SONObj | | * this is used to hold state, meta data while applying a ModSet to a B
SONObj | |
| * the goal is to make ModSet const so its re-usable | | * the goal is to make ModSet const so its re-usable | |
| */ | | */ | |
| class ModSetState : boost::noncopyable { | | class ModSetState : boost::noncopyable { | |
| struct FieldCmp { | | struct FieldCmp { | |
| bool operator()( const string &l, const string &r ) const { | | bool operator()( const string &l, const string &r ) const { | |
| return lexNumCmp( l.c_str(), r.c_str() ) < 0; | | return lexNumCmp( l.c_str(), r.c_str() ) < 0; | |
| | | | |
| skipping to change at line 534 | | skipping to change at line 566 | |
| const ModState& m = i->second; | | const ModState& m = i->second; | |
| if ( m.m->arrayDep() ){ | | if ( m.m->arrayDep() ){ | |
| if ( m.pushStartSize == -1 ) | | if ( m.pushStartSize == -1 ) | |
| b.appendNull( m.fieldName() ); | | b.appendNull( m.fieldName() ); | |
| else | | else | |
| b << m.fieldName() << BSON( "$size" << m.pushStartS
ize ); | | b << m.fieldName() << BSON( "$size" << m.pushStartS
ize ); | |
| } | | } | |
| } | | } | |
| } | | } | |
| | | | |
|
| | | string toString() const; | |
| | | | |
| friend class ModSet; | | friend class ModSet; | |
| }; | | }; | |
| | | | |
| } | | } | |
| | | | |
End of changes. 10 change blocks. |
| 24 lines changed or deleted | | 59 lines changed or added | |
|
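The new code in Mod::isIndexed above handles modifiers that address array positions: a mod on "a.0.b" must be treated as touching an index on "a.b". A simplified standalone sketch of that check, dropping purely numeric path components before the usual parent/equal/child comparisons (helper names here are illustrative, not the real ones):

    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>

    // Remove dotted components that are entirely digits: "a.0.b" -> "a.b", "a.10.c.3" -> "a.c".
    std::string stripArrayPositions(const std::string& field) {
        std::istringstream in(field);
        std::string part, out;
        while (std::getline(in, part, '.')) {
            bool allDigits = !part.empty() &&
                part.find_first_not_of("0123456789") == std::string::npos;
            if (allDigits) continue;
            if (!out.empty()) out += '.';
            out += part;
        }
        return out;
    }

    bool touchesIndexedField(const std::string& modField, const std::set<std::string>& idxKeys) {
        std::string f = stripArrayPositions(modField);
        if (idxKeys.count(f)) return true;                        // exact match on an index key
        for (const std::string& k : idxKeys) {
            // index key is a child of the mod field, or the mod field is a child of an index key
            if (k.size() > f.size() && k.compare(0, f.size(), f) == 0 && k[f.size()] == '.') return true;
            if (f.size() > k.size() && f.compare(0, k.size(), k) == 0 && f[k.size()] == '.') return true;
        }
        return false;
    }

    int main() {
        std::set<std::string> idxKeys = { "a.b" };
        std::cout << touchesIndexedField("a.0.b", idxKeys) << "\n";   // 1: index on a.b is affected
        std::cout << touchesIndexedField("a.b.c", idxKeys) << "\n";   // 1: child of an indexed field
        std::cout << touchesIndexedField("a.0.c", idxKeys) << "\n";   // 0: not covered by the index
    }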