| compiler.h | | compiler.h | |
|
| // Copyright 2012 the V8 project authors. All rights reserved. | | /* | |
| // Redistribution and use in source and binary forms, with or without | | * Copyright 2012 10gen Inc. | |
| // modification, are permitted provided that the following conditions are | | * | |
| // met: | | * Licensed under the Apache License, Version 2.0 (the "License"); | |
| // | | * you may not use this file except in compliance with the License. | |
| // * Redistributions of source code must retain the above copyright | | * You may obtain a copy of the License at | |
| // notice, this list of conditions and the following disclaimer. | | * | |
| // * Redistributions in binary form must reproduce the above | | * http://www.apache.org/licenses/LICENSE-2.0 | |
| // copyright notice, this list of conditions and the following | | * | |
| // disclaimer in the documentation and/or other materials provided | | * Unless required by applicable law or agreed to in writing, software | |
| // with the distribution. | | * distributed under the License is distributed on an "AS IS" BASIS, | |
| // * Neither the name of Google Inc. nor the names of its | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
| // contributors may be used to endorse or promote products derived | | * See the License for the specific language governing permissions and | |
| // from this software without specific prior written permission. | | * limitations under the License. | |
| // | | */ | |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | | | |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | | #pragma once | |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | | | |
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | | /** | |
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | | * Include "mongo/platform/compiler.h" to get compiler-targeted macro definitions and utilities. | |
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | | | |
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | * | |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | * The following macros are provided in all compiler environments: | |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | * | |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | | * | |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | * MONGO_COMPILER_NORETURN | |
| | | * | |
| #ifndef V8_COMPILER_H_ | | * Instructs the compiler that the decorated function will not return through the normal return | |
| #define V8_COMPILER_H_ | | | |
| | | * path. | |
| #include "allocation.h" | | * | |
| #include "ast.h" | | * Correct: MONGO_COMPILER_NORETURN void myAbortFunction(); | |
| #include "zone.h" | | * | |
| | | * | |
| namespace v8 { | | * MONGO_COMPILER_VARIABLE_UNUSED | |
| namespace internal { | | * | |
| | | * Instructs the compiler not to warn if it detects no use of the decorated variable. | |
| class ScriptDataImpl; | | * Typically only useful for variables that are always declared but only used in | |
| | | * conditionally-compiled code. | |
| // CompilationInfo encapsulates some information known at compile time. It | | | |
| // is constructed based on the resources available at compile-time. | | | |
| class CompilationInfo { | | * | |
| public: | | * Correct: MONGO_COMPILER_VARIABLE_UNUSED int ignored; | |
| CompilationInfo(Handle<Script> script, Zone* zone); | | * | |
| CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone); | | * | |
| CompilationInfo(Handle<JSFunction> closure, Zone* zone); | | * MONGO_COMPILER_ALIGN_TYPE(ALIGNMENT) | |
| | | * | |
| virtual ~CompilationInfo(); | | * Instructs the compiler to use the given minimum alignment for the decorated type. | |
| | | * | |
| Isolate* isolate() { | | * Alignments should probably always be powers of two. Also, note that most allocators will not | |
| ASSERT(Isolate::Current() == isolate_); | | * be able to guarantee better than 16- or 32-byte alignment. | |
| return isolate_; | | | |
| } | | | |
| Zone* zone() { | | * | |
| return zone_; | | * Correct: | |
| } | | * class MONGO_COMPILER_ALIGN_TYPE(16) MyClass {...}; | |
| bool is_lazy() const { return IsLazy::decode(flags_); } | | * | |
| bool is_eval() const { return IsEval::decode(flags_); } | | * Incorrect: | |
| bool is_global() const { return IsGlobal::decode(flags_); } | | * MONGO_COMPILER_ALIGN_TYPE(16) class MyClass {...}; | |
| bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; } | | * class MyClass{...} MONGO_COMPILER_ALIGN_TYPE(16); | |
| bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; } | | * | |
| | | * | |
| LanguageMode language_mode() const { | | * MONGO_COMPILER_ALIGN_VARIABLE(ALIGNMENT) | |
| return LanguageModeField::decode(flags_); | | * | |
| } | | * Instructs the compiler to use the given minimum alignment for the decorated variable. | |
| bool is_in_loop() const { return IsInLoop::decode(flags_); } | | * | |
| FunctionLiteral* function() const { return function_; } | | * Note that most allocators will not allow heap allocated alignments that are better than 16- or | |
| Scope* scope() const { return scope_; } | | * 32-byte aligned. Stack allocators may only guarantee up to the natural word length worth of | |
| Scope* global_scope() const { return global_scope_; } | | * alignment. | |
| Handle<Code> code() const { return code_; } | | | |
| Handle<JSFunction> closure() const { return closure_; } | | | |
| Handle<SharedFunctionInfo> shared_info() const { return shared_info_; } | | | |
| Handle<Script> script() const { return script_; } | | * | |
| v8::Extension* extension() const { return extension_; } | | * Correct: | |
| ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; } | | * class MyClass { | |
| Handle<Context> calling_context() const { return calling_context_; } | | * MONGO_COMPILER_ALIGN_VARIABLE(8) char a; | |
| int osr_ast_id() const { return osr_ast_id_; } | | * }; | |
| | | * | |
| void MarkAsEval() { | | * MONGO_COMPILER_ALIGN_VARIABLE(8) class MyClass {...} singletonInstance; | |
| ASSERT(!is_lazy()); | | | |
| flags_ |= IsEval::encode(true); | | * | |
| } | | * Incorrect: | |
| void MarkAsGlobal() { | | * int MONGO_COMPILER_ALIGN_VARIABLE(16) a, b; | |
| ASSERT(!is_lazy()); | | * | |
| flags_ |= IsGlobal::encode(true); | | * | |
| } | | * MONGO_COMPILER_API_EXPORT | |
| void SetLanguageMode(LanguageMode language_mode) { | | * | |
| ASSERT(this->language_mode() == CLASSIC_MODE || | | * Instructs the compiler to label the given type, variable or function as part of the | |
| this->language_mode() == language_mode || | | * exported interface of the library object under construction. | |
| language_mode == EXTENDED_MODE); | | | |
| flags_ = LanguageModeField::update(flags_, language_mode); | | * | |
| } | | * Correct: | |
| void MarkAsInLoop() { | | * MONGO_COMPILER_API_EXPORT int globalSwitch; | |
| ASSERT(is_lazy()); | | * class MONGO_COMPILER_API_EXPORT ExportedType { ... }; | |
| flags_ |= IsInLoop::encode(true); | | * MONGO_COMPILER_API_EXPORT SomeType exportedFunction(...); | |
| } | | * | |
| void MarkAsNative() { | | * NOTE: Rather than using this macro directly, one typically declares another macro named for the | |
| flags_ |= IsNative::encode(true); | | * library, which is conditionally defined to either MONGO_COMPILER_API_EXPORT or | |
| } | | * MONGO_COMPILER_API_IMPORT based on whether the compiler is currently building the library or | |
| bool is_native() const { | | * building an object that depends on the library, respectively. For example, MONGO_CLIENT_API | |
| return IsNative::decode(flags_); | | * might be defined to MONGO_COMPILER_API_EXPORT when building the MongoDB shared library, and to | |
| } | | * MONGO_COMPILER_API_IMPORT when building an application that links against the shared library. | |
| void SetFunction(FunctionLiteral* literal) { | | | |
| ASSERT(function_ == NULL); | | | |
| function_ = literal; | | | |
| } | | | |
| void SetScope(Scope* scope) { | | | |
| ASSERT(scope_ == NULL); | | | |
| scope_ = scope; | | * | |
| } | | * | |
| void SetGlobalScope(Scope* global_scope) { | | * MONGO_COMPILER_API_IMPORT | |
| ASSERT(global_scope_ == NULL); | | * | |
| global_scope_ = global_scope; | | * Instructs the compiler to label the given type, variable or function as imported | |
| } | | * from another library, and not part of the library object under construction. | |
| void SetCode(Handle<Code> code) { code_ = code; } | | | |
| void SetExtension(v8::Extension* extension) { | | | |
| ASSERT(!is_lazy()); | | * | |
| extension_ = extension; | | * Same correct/incorrect usage as for MONGO_COMPILER_API_EXPORT. | |
| } | | * | |
| void SetPreParseData(ScriptDataImpl* pre_parse_data) { | | * | |
| ASSERT(!is_lazy()); | | * MONGO_COMPILER_API_CALLING_CONVENTION | |
| pre_parse_data_ = pre_parse_data; | | * | |
| } | | * Explicitly decorates a function declaration with the API calling convention used for | |
| void SetCallingContext(Handle<Context> context) { | | * shared libraries. | |
| ASSERT(is_eval()); | | | |
| calling_context_ = context; | | * | |
| } | | * Same correct/incorrect usage as for MONGO_COMPILER_API_EXPORT. | |
| void SetOsrAstId(int osr_ast_id) { | | */ | |
| ASSERT(IsOptimizing()); | | | |
| osr_ast_id_ = osr_ast_id; | | #if defined(_MSC_VER) | |
| } | | #include "mongo/platform/compiler_msvc.h" | |
| void MarkCompilingForDebugging(Handle<Code> current_code) { | | #elif defined(__GNUC__) | |
| ASSERT(mode_ != OPTIMIZE); | | #include "mongo/platform/compiler_gcc.h" | |
| ASSERT(current_code->kind() == Code::FUNCTION); | | #else | |
| flags_ |= IsCompilingForDebugging::encode(true); | | #error "Unsupported compiler family" | |
| if (current_code->is_compiled_optimizable()) { | | | |
| EnableDeoptimizationSupport(); | | | |
| } else { | | | |
| mode_ = CompilationInfo::NONOPT; | | | |
| } | | | |
| } | | | |
| bool IsCompilingForDebugging() { | | | |
| return IsCompilingForDebugging::decode(flags_); | | | |
| } | | | |
| | | | |
| bool has_global_object() const { | | | |
| return !closure().is_null() && (closure()->context()->global() != NULL); | | | |
| } | | | |
| | | | |
| GlobalObject* global_object() const { | | | |
| return has_global_object() ? closure()->context()->global() : NULL; | | | |
| } | | | |
| | | | |
| // Accessors for the different compilation modes. | | | |
| bool IsOptimizing() const { return mode_ == OPTIMIZE; } | | | |
| bool IsOptimizable() const { return mode_ == BASE; } | | | |
| void SetOptimizing(int osr_ast_id) { | | | |
| SetMode(OPTIMIZE); | | | |
| osr_ast_id_ = osr_ast_id; | | | |
| } | | | |
| void DisableOptimization(); | | | |
| | | | |
| // Deoptimization support. | | | |
| bool HasDeoptimizationSupport() const { | | | |
| return SupportsDeoptimization::decode(flags_); | | | |
| } | | | |
| void EnableDeoptimizationSupport() { | | | |
| ASSERT(IsOptimizable()); | | | |
| flags_ |= SupportsDeoptimization::encode(true); | | | |
| } | | | |
| | | | |
| // Determines whether or not to insert a self-optimization header. | | | |
| bool ShouldSelfOptimize(); | | | |
| | | | |
| // Disable all optimization attempts of this info for the rest of the | | | |
| // current compilation pipeline. | | | |
| void AbortOptimization(); | | | |
| | | | |
| void set_deferred_handles(DeferredHandles* deferred_handles) { | | | |
| ASSERT(deferred_handles_ == NULL); | | | |
| deferred_handles_ = deferred_handles; | | | |
| } | | | |
| | | | |
| void SaveHandles() { | | | |
| SaveHandle(&closure_); | | | |
| SaveHandle(&shared_info_); | | | |
| SaveHandle(&calling_context_); | | | |
| SaveHandle(&script_); | | | |
| } | | | |
| | | | |
| private: | | | |
| Isolate* isolate_; | | | |
| | | | |
| // Compilation mode. | | | |
| // BASE is generated by the full codegen, optionally prepared for bailouts. | | | |
| // OPTIMIZE is optimized code generated by the Hydrogen-based backend. | | | |
| // NONOPT is generated by the full codegen and is not prepared for | | | |
| // recompilation/bailouts. These functions are never recompiled. | | | |
| enum Mode { | | | |
| BASE, | | | |
| OPTIMIZE, | | | |
| NONOPT | | | |
| }; | | | |
| | | | |
| void Initialize(Mode mode) { | | | |
| mode_ = V8::UseCrankshaft() ? mode : NONOPT; | | | |
| ASSERT(!script_.is_null()); | | | |
| if (script_->type()->value() == Script::TYPE_NATIVE) { | | | |
| MarkAsNative(); | | | |
| } | | | |
| if (!shared_info_.is_null()) { | | | |
| ASSERT(language_mode() == CLASSIC_MODE); | | | |
| SetLanguageMode(shared_info_->language_mode()); | | | |
| } | | | |
| } | | | |
| | | | |
| void SetMode(Mode mode) { | | | |
| ASSERT(V8::UseCrankshaft()); | | | |
| mode_ = mode; | | | |
| } | | | |
| | | | |
| // Flags using template class BitField<type, start, length>. All are | | | |
| // false by default. | | | |
| // | | | |
| // Compilation is either eager or lazy. | | | |
| class IsLazy: public BitField<bool, 0, 1> {}; | | | |
| // Flags that can be set for eager compilation. | | | |
| class IsEval: public BitField<bool, 1, 1> {}; | | | |
| class IsGlobal: public BitField<bool, 2, 1> {}; | | | |
| // Flags that can be set for lazy compilation. | | | |
| class IsInLoop: public BitField<bool, 3, 1> {}; | | | |
| // Strict mode - used in eager compilation. | | | |
| class LanguageModeField: public BitField<LanguageMode, 4, 2> {}; | | | |
| // Is this a function from our natives. | | | |
| class IsNative: public BitField<bool, 6, 1> {}; | | | |
| // Is this code being compiled with support for deoptimization.. | | | |
| class SupportsDeoptimization: public BitField<bool, 7, 1> {}; | | | |
| // If compiling for debugging produce just full code matching the | | | |
| // initial mode setting. | | | |
| class IsCompilingForDebugging: public BitField<bool, 8, 1> {}; | | | |
| | | | |
| unsigned flags_; | | | |
| | | | |
| // Fields filled in by the compilation pipeline. | | | |
| // AST filled in by the parser. | | | |
| FunctionLiteral* function_; | | | |
| // The scope of the function literal as a convenience. Set to indicate | | | |
| // that scopes have been analyzed. | | | |
| Scope* scope_; | | | |
| // The global scope provided as a convenience. | | | |
| Scope* global_scope_; | | | |
| // The compiled code. | | | |
| Handle<Code> code_; | | | |
| | | | |
| // Possible initial inputs to the compilation process. | | | |
| Handle<JSFunction> closure_; | | | |
| Handle<SharedFunctionInfo> shared_info_; | | | |
| Handle<Script> script_; | | | |
| | | | |
| // Fields possibly needed for eager compilation, NULL by default. | | | |
| v8::Extension* extension_; | | | |
| ScriptDataImpl* pre_parse_data_; | | | |
| | | | |
| // The context of the caller is needed for eval code, and will be a null | | | |
| // handle otherwise. | | | |
| Handle<Context> calling_context_; | | | |
| | | | |
| // Compilation mode flag and whether deoptimization is allowed. | | | |
| Mode mode_; | | | |
| int osr_ast_id_; | | | |
| | | | |
| // The zone from which the compilation pipeline working on this | | | |
| // CompilationInfo allocates. | | | |
| Zone* zone_; | | | |
| | | | |
| DeferredHandles* deferred_handles_; | | | |
| | | | |
| template<typename T> | | | |
| void SaveHandle(Handle<T> *object) { | | | |
| if (!object->is_null()) { | | | |
| Handle<T> handle(*(*object)); | | | |
| *object = handle; | | | |
| } | | | |
| } | | | |
| | | | |
| DISALLOW_COPY_AND_ASSIGN(CompilationInfo); | | | |
| }; | | | |
| | | | |
| // Exactly like a CompilationInfo, except also creates and enters a | | | |
| // Zone on construction and deallocates it on exit. | | | |
| class CompilationInfoWithZone: public CompilationInfo { | | | |
| public: | | | |
| explicit CompilationInfoWithZone(Handle<Script> script) | | | |
| : CompilationInfo(script, &zone_), | | | |
| zone_(script->GetIsolate()), | | | |
| zone_scope_(&zone_, DELETE_ON_EXIT) {} | | | |
| explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info) | | | |
| : CompilationInfo(shared_info, &zone_), | | | |
| zone_(shared_info->GetIsolate()), | | | |
| zone_scope_(&zone_, DELETE_ON_EXIT) {} | | | |
| explicit CompilationInfoWithZone(Handle<JSFunction> closure) | | | |
| : CompilationInfo(closure, &zone_), | | | |
| zone_(closure->GetIsolate()), | | | |
| zone_scope_(&zone_, DELETE_ON_EXIT) {} | | | |
| | | | |
| private: | | | |
| Zone zone_; | | | |
| ZoneScope zone_scope_; | | | |
| }; | | | |
| | | | |
| // A wrapper around a CompilationInfo that detaches the Handles from | | | |
| // the underlying DeferredHandleScope and stores them in info_ on | | | |
| // destruction. | | | |
| class CompilationHandleScope BASE_EMBEDDED { | | | |
| public: | | | |
| explicit CompilationHandleScope(CompilationInfo* info) | | | |
| : deferred_(info->isolate()), info_(info) {} | | | |
| ~CompilationHandleScope() { | | | |
| info_->set_deferred_handles(deferred_.Detach()); | | | |
| } | | | |
| | | | |
| private: | | | |
| DeferredHandleScope deferred_; | | | |
| CompilationInfo* info_; | | | |
| }; | | | |
| | | | |
| class HGraph; | | | |
| class HGraphBuilder; | | | |
| class LChunk; | | | |
| | | | |
| // A helper class that calls the three compilation phases in | | | |
| // Crankshaft and keeps track of its state. The three phases | | | |
| // CreateGraph, OptimizeGraph and GenerateAndInstallCode can either | | | |
| // fail, bail-out to the full code generator or succeed. Apart from | | | |
| // their return value, the status of the phase last run can be checked | | | |
| // using last_status(). | | | |
| class OptimizingCompiler: public ZoneObject { | | | |
| public: | | | |
| explicit OptimizingCompiler(CompilationInfo* info) | | | |
| : info_(info), | | | |
| oracle_(NULL), | | | |
| graph_builder_(NULL), | | | |
| graph_(NULL), | | | |
| chunk_(NULL), | | | |
| time_taken_to_create_graph_(0), | | | |
| time_taken_to_optimize_(0), | | | |
| time_taken_to_codegen_(0), | | | |
| last_status_(FAILED) { } | | | |
| | | | |
| enum Status { | | | |
| FAILED, BAILED_OUT, SUCCEEDED | | | |
| }; | | | |
| | | | |
| MUST_USE_RESULT Status CreateGraph(); | | | |
| MUST_USE_RESULT Status OptimizeGraph(); | | | |
| MUST_USE_RESULT Status GenerateAndInstallCode(); | | | |
| | | | |
| Status last_status() const { return last_status_; } | | | |
| CompilationInfo* info() const { return info_; } | | | |
| | | | |
| MUST_USE_RESULT Status AbortOptimization() { | | | |
| info_->AbortOptimization(); | | | |
| info_->shared_info()->DisableOptimization(); | | | |
| return SetLastStatus(BAILED_OUT); | | | |
| } | | | |
| | | | |
| private: | | | |
| CompilationInfo* info_; | | | |
| TypeFeedbackOracle* oracle_; | | | |
| HGraphBuilder* graph_builder_; | | | |
| HGraph* graph_; | | | |
| LChunk* chunk_; | | | |
| int64_t time_taken_to_create_graph_; | | | |
| int64_t time_taken_to_optimize_; | | | |
| int64_t time_taken_to_codegen_; | | | |
| Status last_status_; | | | |
| | | | |
| MUST_USE_RESULT Status SetLastStatus(Status status) { | | | |
| last_status_ = status; | | | |
| return last_status_; | | | |
| } | | | |
| void RecordOptimizationStats(); | | | |
| | | | |
| struct Timer { | | | |
| Timer(OptimizingCompiler* compiler, int64_t* location) | | | |
| : compiler_(compiler), | | | |
| start_(OS::Ticks()), | | | |
| location_(location) { } | | | |
| | | | |
| ~Timer() { | | | |
| *location_ += (OS::Ticks() - start_); | | | |
| } | | | |
| | | | |
| OptimizingCompiler* compiler_; | | | |
| int64_t start_; | | | |
| int64_t* location_; | | | |
| }; | | | |
| }; | | | |
| | | | |
| // The V8 compiler | | | |
| // | | | |
| // General strategy: Source code is translated into an anonymous function w/o | | | |
| // parameters which then can be executed. If the source code contains other | | | |
| // functions, they will be compiled and allocated as part of the compilation | | | |
| // of the source code. | | | |
| | | | |
| // Please note this interface returns shared function infos. This means you | | | |
| // need to call Factory::NewFunctionFromSharedFunctionInfo before you have a | | | |
| // real function with a context. | | | |
| | | | |
| class Compiler : public AllStatic { | | | |
| public: | | | |
| // Default maximum number of function optimization attempts before we | | | |
| // give up. | | | |
| static const int kDefaultMaxOptCount = 10; | | | |
| | | | |
| static const int kMaxInliningLevels = 3; | | | |
| | | | |
| // Call count before primitive functions trigger their own optimization. | | | |
| static const int kCallsUntilPrimitiveOpt = 200; | | | |
| | | | |
| // All routines return a SharedFunctionInfo. | | | |
| // If an error occurs an exception is raised and the return handle | | | |
| // contains NULL. | | | |
| | | | |
| // Compile a String source within a context. | | | |
| static Handle<SharedFunctionInfo> Compile(Handle<String> source, | | | |
| Handle<Object> script_name, | | | |
| int line_offset, | | | |
| int column_offset, | | | |
| v8::Extension* extension, | | | |
| ScriptDataImpl* pre_data, | | | |
| Handle<Object> script_data, | | | |
| NativesFlag is_natives_code); | | | |
| | | | |
| // Compile a String source within a context for Eval. | | | |
| static Handle<SharedFunctionInfo> CompileEval(Handle<String> source, | | | |
| Handle<Context> context, | | | |
| bool is_global, | | | |
| LanguageMode language_mode, | | | |
| int scope_position); | | | |
| | | | |
| // Compile from function info (used for lazy compilation). Returns true on | | | |
| // success and false if the compilation resulted in a stack overflow. | | | |
| static bool CompileLazy(CompilationInfo* info); | | | |
| | | | |
| static void RecompileParallel(Handle<JSFunction> function); | | | |
| | | | |
| // Compile a shared function info object (the function is possibly lazily | | | |
| // compiled). | | | |
| static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node, | | | |
| Handle<Script> script); | | | |
| | | | |
| // Set the function info for a newly compiled function. | | | |
| static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info, | | | |
| FunctionLiteral* lit, | | | |
| bool is_toplevel, | | | |
| Handle<Script> script); | | | |
| | | | |
| static void InstallOptimizedCode(OptimizingCompiler* info); | | | |
| | | | |
| #ifdef ENABLE_DEBUGGER_SUPPORT | | | |
| static bool MakeCodeForLiveEdit(CompilationInfo* info); | | | |
| #endif | | #endif | |
|
| | | | |
| static void RecordFunctionCompilation(Logger::LogEventsAndTags tag, | | | |
| CompilationInfo* info, | | | |
| Handle<SharedFunctionInfo> shared); | | | |
| }; | | | |
| | | | |
| } } // namespace v8::internal | | | |
| | | | |
| #endif // V8_COMPILER_H_ | | | |
| | | | |
End of changes. 2 change blocks. |
| 472 lines changed or deleted | | 135 lines changed or added | |
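Editor's note: the new mongo/platform/compiler.h comment above describes wrapping MONGO_COMPILER_API_EXPORT / MONGO_COMPILER_API_IMPORT in a per-library macro. A minimal sketch of that pattern follows, assuming a hypothetical LIBFOO library; LIBFOO_BUILDING, LIBFOO_API and libfooAbort are illustrative names, not part of the header.

    // compiler.h supplies the MONGO_COMPILER_* decorations used below.
    #include "mongo/platform/compiler.h"

    // Hypothetical per-library switch: defined only by LIBFOO's own build.
    #if defined(LIBFOO_BUILDING)
    #define LIBFOO_API MONGO_COMPILER_API_EXPORT   // building the shared library itself
    #else
    #define LIBFOO_API MONGO_COMPILER_API_IMPORT   // building a consumer that links against it
    #endif

    // Exported type and function, following the header's "Correct" examples.
    class LIBFOO_API ExportedType {};
    LIBFOO_API int globalSwitch;

    // Never returns through the normal return path.
    MONGO_COMPILER_NORETURN void libfooAbort();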
|
| counters.h | | counters.h | |
|
| // Copyright 2012 the V8 project authors. All rights reserved. | | // counters.h | |
| // Redistribution and use in source and binary forms, with or without | | /* | |
| // modification, are permitted provided that the following conditions are | | * Copyright (C) 2010 10gen Inc. | |
| // met: | | * | |
| // | | * This program is free software: you can redistribute it and/or modify | |
| // * Redistributions of source code must retain the above copyright | | * it under the terms of the GNU Affero General Public License, version | |
| // notice, this list of conditions and the following disclaimer. | | 3, | |
| // * Redistributions in binary form must reproduce the above | | * as published by the Free Software Foundation. | |
| // copyright notice, this list of conditions and the following | | * | |
| // disclaimer in the documentation and/or other materials provided | | * This program is distributed in the hope that it will be useful, | |
| // with the distribution. | | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| // * Neither the name of Google Inc. nor the names of its | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
| // contributors may be used to endorse or promote products derived | | * GNU Affero General Public License for more details. | |
| // from this software without specific prior written permission. | | * | |
| // | | * You should have received a copy of the GNU Affero General Public Lice | |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | | nse | |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | | * | |
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | | * As a special exception, the copyright holders give permission to link | |
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | | the | |
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | | * code of portions of this program with the OpenSSL library under certa | |
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | in | |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | * conditions as described in each individual source file and distribute | |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | * linked combinations including the program with the OpenSSL library. Y | |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | | ou | |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | * must comply with the GNU Affero General Public License in all respect | |
| | | s for | |
| #ifndef V8_COUNTERS_H_ | | * all of the code used other than as permitted herein. If you modify fi | |
| #define V8_COUNTERS_H_ | | le(s) | |
| | | * with this exception, you may extend this exception to your version of | |
| #include "../include/v8.h" | | the | |
| #include "allocation.h" | | * file(s), but you are not obligated to do so. If you do not wish to do | |
| | | so, | |
| namespace v8 { | | * delete this exception statement from your version. If you delete this | |
| namespace internal { | | * exception statement from all source files in the program, then also d | |
| | | elete | |
| // StatsCounters is an interface for plugging into external | | * it in the license file. | |
| // counters for monitoring. Counters can be looked up and | | */ | |
| // manipulated by name. | | | |
| | | #pragma once | |
| class StatsTable { | | | |
| public: | | #include "mongo/pch.h" | |
| // Register an application-defined function where | | #include "mongo/db/jsobj.h" | |
| // counters can be looked up. | | #include "mongo/util/net/message.h" | |
| void SetCounterFunction(CounterLookupCallback f) { | | #include "mongo/util/processinfo.h" | |
| lookup_function_ = f; | | #include "mongo/util/concurrency/spin_lock.h" | |
| } | | #include "mongo/db/pdfile.h" | |
| | | | |
| // Register an application-defined function to create | | namespace mongo { | |
| // a histogram for passing to the AddHistogramSample function | | | |
| void SetCreateHistogramFunction(CreateHistogramCallback f) { | | /** | |
| create_histogram_function_ = f; | | * for storing operation counters | |
| } | | * note: not thread safe. ok with that for speed | |
| | | */ | |
| // Register an application-defined function to add a sample | | class OpCounters { | |
| // to a histogram created with CreateHistogram function | | public: | |
| void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) { | | | |
| add_histogram_sample_function_ = f; | | OpCounters(); | |
| } | | void incInsertInWriteLock(int n) { _insert.x += n; } | |
| | | void gotInsert() { _insert++; } | |
| bool HasCounterFunction() const { | | void gotQuery() { _query++; } | |
| return lookup_function_ != NULL; | | void gotUpdate() { _update++; } | |
| } | | void gotDelete() { _delete++; } | |
| | | void gotGetMore() { _getmore++; } | |
| // Lookup the location of a counter by name. If the lookup | | void gotCommand() { _command++; } | |
| // is successful, returns a non-NULL pointer for writing the | | | |
| // value of the counter. Each thread calling this function | | void gotOp( int op , bool isCommand ); | |
| // may receive a different location to store it's counter. | | | |
| // The return value must not be cached and re-used across | | BSONObj getObj() const; | |
| // threads, although a single thread is free to cache it. | | | |
| int* FindLocation(const char* name) { | | // these are used by snmp, and other things; do not remove | |
| if (!lookup_function_) return NULL; | | const AtomicUInt * getInsert() const { return &_insert; } | |
| return lookup_function_(name); | | const AtomicUInt * getQuery() const { return &_query; } | |
| } | | const AtomicUInt * getUpdate() const { return &_update; } | |
| | | const AtomicUInt * getDelete() const { return &_delete; } | |
| // Create a histogram by name. If the create is successful, | | const AtomicUInt * getGetMore() const { return &_getmore; } | |
| // returns a non-NULL pointer for use with AddHistogramSample | | const AtomicUInt * getCommand() const { return &_command; } | |
| // function. min and max define the expected minimum and maximum | | | |
| // sample values. buckets is the maximum number of buckets | | private: | |
| // that the samples will be grouped into. | | void _checkWrap(); | |
| void* CreateHistogram(const char* name, | | | |
| int min, | | // todo: there will be a lot of cache line contention on these. need to do something | |
| int max, | | | |
| size_t buckets) { | | // else eventually. | |
| if (!create_histogram_function_) return NULL; | | AtomicUInt _insert; | |
| return create_histogram_function_(name, min, max, buckets); | | AtomicUInt _query; | |
| } | | AtomicUInt _update; | |
| | | AtomicUInt _delete; | |
| // Add a sample to a histogram created with the CreateHistogram | | AtomicUInt _getmore; | |
| // function. | | AtomicUInt _command; | |
| void AddHistogramSample(void* histogram, int sample) { | | }; | |
| if (!add_histogram_sample_function_) return; | | | |
| return add_histogram_sample_function_(histogram, sample); | | extern OpCounters globalOpCounters; | |
| } | | extern OpCounters replOpCounters; | |
| | | | |
| private: | | class NetworkCounter { | |
| StatsTable(); | | public: | |
| | | NetworkCounter() : _bytesIn(0), _bytesOut(0), _requests(0), _overfl | |
| CounterLookupCallback lookup_function_; | | ows(0) {} | |
| CreateHistogramCallback create_histogram_function_; | | void hit( long long bytesIn , long long bytesOut ); | |
| AddHistogramSampleCallback add_histogram_sample_function_; | | void append( BSONObjBuilder& b ); | |
| | | private: | |
| friend class Isolate; | | long long _bytesIn; | |
| | | long long _bytesOut; | |
| DISALLOW_COPY_AND_ASSIGN(StatsTable); | | long long _requests; | |
| }; | | | |
| | | | |
| // StatsCounters are dynamically created values which can be tracked in | | | |
| // the StatsTable. They are designed to be lightweight to create and | | | |
| // easy to use. | | | |
| // | | | |
| // Internally, a counter represents a value in a row of a StatsTable. | | | |
| // The row has a 32bit value for each process/thread in the table and also | | | |
| // a name (stored in the table metadata). Since the storage location can be | | | |
| // thread-specific, this class cannot be shared across threads. | | | |
| // | | | |
| // This class is designed to be POD initialized. It will be registered with | | | |
| // the counter system on first use. For example: | | | |
| // StatsCounter c = { "c:myctr", NULL, false }; | | | |
| struct StatsCounter { | | | |
| const char* name_; | | | |
| int* ptr_; | | | |
| bool lookup_done_; | | | |
| | | | |
| // Sets the counter to a specific value. | | | |
| void Set(int value) { | | | |
| int* loc = GetPtr(); | | | |
| if (loc) *loc = value; | | | |
| } | | | |
| | | | |
| // Increments the counter. | | | |
| void Increment() { | | | |
| int* loc = GetPtr(); | | | |
| if (loc) (*loc)++; | | | |
| } | | | |
| | | | |
| void Increment(int value) { | | | |
| int* loc = GetPtr(); | | | |
| if (loc) | | | |
| (*loc) += value; | | | |
| } | | | |
| | | | |
| // Decrements the counter. | | | |
| void Decrement() { | | | |
| int* loc = GetPtr(); | | | |
| if (loc) (*loc)--; | | | |
| } | | | |
| | | | |
| void Decrement(int value) { | | | |
| int* loc = GetPtr(); | | | |
| if (loc) (*loc) -= value; | | | |
| } | | | |
| | | | |
| // Is this counter enabled? | | | |
| // Returns false if table is full. | | | |
| bool Enabled() { | | | |
| return GetPtr() != NULL; | | | |
| } | | | |
| | | | |
| // Get the internal pointer to the counter. This is used | | | |
| // by the code generator to emit code that manipulates a | | | |
| // given counter without calling the runtime system. | | | |
| int* GetInternalPointer() { | | | |
| int* loc = GetPtr(); | | | |
| ASSERT(loc != NULL); | | | |
| return loc; | | | |
| } | | | |
| | | | |
| protected: | | | |
| // Returns the cached address of this counter location. | | | |
| int* GetPtr() { | | | |
| if (lookup_done_) return ptr_; | | | |
| lookup_done_ = true; | | | |
| ptr_ = FindLocationInStatsTable(); | | | |
| return ptr_; | | | |
| } | | | |
| | | | |
| private: | | | |
| int* FindLocationInStatsTable() const; | | | |
| }; | | | |
| | | | |
| // StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 }; | | | |
| struct StatsCounterTimer { | | | |
| StatsCounter counter_; | | | |
| | | | |
| int64_t start_time_; | | | |
| int64_t stop_time_; | | | |
| | | | |
| // Start the timer. | | | |
| void Start(); | | | |
| | | | |
| // Stop the timer and record the results. | | | |
| void Stop(); | | | |
| | | | |
| // Returns true if the timer is running. | | | |
| bool Running() { | | | |
| return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0; | | | |
| } | | | |
| }; | | | |
| | | | |
| // A Histogram represents a dynamically created histogram in the StatsTable. | | | |
| // | | | |
| // This class is designed to be POD initialized. It will be registered with | | | |
| // the histogram system on first use. For example: | | | |
| // Histogram h = { "myhist", 0, 10000, 50, NULL, false }; | | | |
| struct Histogram { | | | |
| const char* name_; | | | |
| int min_; | | | |
| int max_; | | | |
| int num_buckets_; | | | |
| void* histogram_; | | | |
| bool lookup_done_; | | | |
| | | | |
| // Add a single sample to this histogram. | | | |
| void AddSample(int sample); | | | |
| | | | |
| // Returns true if this histogram is enabled. | | | |
| bool Enabled() { | | | |
| return GetHistogram() != NULL; | | | |
| } | | | |
| | | | |
| protected: | | | |
| // Returns the handle to the histogram. | | | |
| void* GetHistogram() { | | | |
| if (!lookup_done_) { | | | |
| lookup_done_ = true; | | | |
| histogram_ = CreateHistogram(); | | | |
| } | | | |
| return histogram_; | | | |
| } | | | |
| | | | |
| private: | | | |
| void* CreateHistogram() const; | | | |
| }; | | | |
| | | | |
| // A HistogramTimer allows distributions of results to be created | | | |
| // HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 }; | | | |
| struct HistogramTimer { | | | |
| Histogram histogram_; | | | |
| | | | |
| int64_t start_time_; | | | |
| int64_t stop_time_; | | | |
| | | | |
| // Start the timer. | | | |
| void Start(); | | | |
| | | | |
| // Stop the timer and record the results. | | | |
| void Stop(); | | | |
| | | | |
| // Returns true if the timer is running. | | | |
| bool Running() { | | | |
| return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0); | | | |
| } | | | |
| }; | | | |
| | | | |
| // Helper class for scoping a HistogramTimer. | | | |
| class HistogramTimerScope BASE_EMBEDDED { | | | |
| public: | | | |
| explicit HistogramTimerScope(HistogramTimer* timer) : | | | |
| timer_(timer) { | | | |
| timer_->Start(); | | | |
| } | | | |
| ~HistogramTimerScope() { | | | |
| timer_->Stop(); | | | |
| } | | | |
| private: | | | |
| HistogramTimer* timer_; | | | |
| }; | | | |
| | | | |
|
| } } // namespace v8::internal | | long long _overflows; | |
| | | | |
|
| #endif // V8_COUNTERS_H_ | | SpinLock _lock; | |
| | | }; | |
| | | | |
| | | extern NetworkCounter networkCounter; | |
| | | } | |
| | | | |
End of changes. 3 change blocks. |
| 273 lines changed or deleted | | 106 lines changed or added | |
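Editor's note: a short usage sketch for the OpCounters and NetworkCounter interfaces declared in the new counters.h above. The wrapper functions and the include path are assumptions for illustration; only OpCounters, globalOpCounters and networkCounter come from the header.

    #include "mongo/db/stats/counters.h"   // assumed location of the new counters.h

    namespace mongo {
        // Hypothetical call sites, shown only to illustrate where counters are bumped.
        void noteSingleInsert() {
            globalOpCounters.gotInsert();                   // one insert operation received
        }
        void noteBulkInsertInWriteLock(int nDocs) {
            globalOpCounters.incInsertInWriteLock(nDocs);   // batch path, bumped under the write lock
        }
        void noteMessage(long long bytesIn, long long bytesOut) {
            networkCounter.hit(bytesIn, bytesOut);          // aggregate traffic for serverStatus
        }
    }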
|
| expression_parser.h | | expression_parser.h | |
| | | | |
| skipping to change at line 95 | | skipping to change at line 95 | |
| */ | | */ | |
| static StatusWithMatchExpression _parse( const BSONObj& obj, int level ); | | static StatusWithMatchExpression _parse( const BSONObj& obj, int level ); | |
| | | | |
| /** | | /** | |
| * parses a field in a sub expression | | * parses a field in a sub expression | |
| * if the query is { x : { $gt : 5, $lt : 8 } } | | * if the query is { x : { $gt : 5, $lt : 8 } } | |
| * e is { $gt : 5, $lt : 8 } | | * e is { $gt : 5, $lt : 8 } | |
| */ | | */ | |
| static Status _parseSub( const char* name, | | static Status _parseSub( const char* name, | |
| const BSONObj& obj, | | const BSONObj& obj, | |
|
| AndMatchExpression* root ); | | AndMatchExpression* root, | |
| | | int level ); | |
| | | | |
| /** | | /** | |
| * parses a single field in a sub expression | | * parses a single field in a sub expression | |
| * if the query is { x : { $gt : 5, $lt : 8 } } | | * if the query is { x : { $gt : 5, $lt : 8 } } | |
| * e is $gt : 5 | | * e is $gt : 5 | |
| */ | | */ | |
| static StatusWithMatchExpression _parseSubField( const BSONObj& context, | | static StatusWithMatchExpression _parseSubField( const BSONObj& context, | |
| const AndMatchExpression* andSoFar, | | const AndMatchExpression* andSoFar, | |
| const char* name, | | const char* name, | |
|
| const BSONElement& | | const BSONElement& | |
| e ); | | e, | |
| | | int level ); | |
| | | | |
| static StatusWithMatchExpression _parseComparison( const char* name, | | static StatusWithMatchExpression _parseComparison( const char* name, | |
| ComparisonMatchExpression* cmp, | | ComparisonMatchExpression* cmp, | |
| const BSONElement& e ); | | const BSONElement& e ); | |
| | | | |
| static StatusWithMatchExpression _parseMOD( const char* name, | | static StatusWithMatchExpression _parseMOD( const char* name, | |
| const BSONElement& e ); | | const BSONElement& e ); | |
| | | | |
| static StatusWithMatchExpression _parseRegexElement( const char* name, | | static StatusWithMatchExpression _parseRegexElement( const char* name, | |
| const BSONElement& e ); | | const BSONElement& e ); | |
| | | | |
| static StatusWithMatchExpression _parseRegexDocument( const char* name, | | static StatusWithMatchExpression _parseRegexDocument( const char* name, | |
| const BSONObj& doc ); | | const BSONObj& doc ); | |
| | | | |
| static Status _parseArrayFilterEntries( ArrayFilterEntries* entries, | | static Status _parseArrayFilterEntries( ArrayFilterEntries* entries, | |
| const BSONObj& theArray ); | | const BSONObj& theArray ); | |
| | | | |
| // arrays | | // arrays | |
| | | | |
| static StatusWithMatchExpression _parseElemMatch( const char* name, | | static StatusWithMatchExpression _parseElemMatch( const char* name, | |
|
| const BSONElement& e ); | | const BSONElement& e, | |
| | | int level ); | |
| | | | |
| static StatusWithMatchExpression _parseAll( const char* name, | | static StatusWithMatchExpression _parseAll( const char* name, | |
|
| const BSONElement& e ); | | const BSONElement& e, | |
| | | int level ); | |
| | | | |
| // tree | | // tree | |
| | | | |
| static Status _parseTreeList( const BSONObj& arr, ListOfMatchExpres
sion* out, int level ); | | static Status _parseTreeList( const BSONObj& arr, ListOfMatchExpres
sion* out, int level ); | |
| | | | |
|
| static StatusWithMatchExpression _parseNot( const char* name, const | | static StatusWithMatchExpression _parseNot( const char* name, | |
| BSONElement& e ); | | const BSONElement& e, | |
| | | int level ); | |
| | | | |
| // The maximum allowed depth of a query tree. Just to guard against
stack overflow. | | // The maximum allowed depth of a query tree. Just to guard against
stack overflow. | |
| static const int kMaximumTreeDepth; | | static const int kMaximumTreeDepth; | |
| }; | | }; | |
| | | | |
| typedef boost::function<StatusWithMatchExpression(const char* name, int
type, const BSONObj& section)> MatchExpressionParserGeoCallback; | | typedef boost::function<StatusWithMatchExpression(const char* name, int
type, const BSONObj& section)> MatchExpressionParserGeoCallback; | |
| extern MatchExpressionParserGeoCallback expressionParserGeoCallback; | | extern MatchExpressionParserGeoCallback expressionParserGeoCallback; | |
| | | | |
| typedef boost::function<StatusWithMatchExpression(const BSONElement& wh
ere)> MatchExpressionParserWhereCallback; | | typedef boost::function<StatusWithMatchExpression(const BSONElement& wh
ere)> MatchExpressionParserWhereCallback; | |
| extern MatchExpressionParserWhereCallback expressionParserWhereCallback
; | | extern MatchExpressionParserWhereCallback expressionParserWhereCallback
; | |
| | | | |
End of changes. 5 change blocks. |
| 8 lines changed or deleted | | 13 lines changed or added | |
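Editor's note: the changes above thread an int level parameter through the recursive parse helpers so that recursion depth can be checked against kMaximumTreeDepth. A standalone sketch of that guard follows; it is not MongoDB code and the types are simplified.

    #include <string>

    namespace {
        const int kMaximumTreeDepth = 100;    // stands in for MatchExpressionParser::kMaximumTreeDepth

        // Each recursive helper receives the current depth and passes level + 1 downward,
        // failing once the fixed limit is exceeded instead of risking a stack overflow.
        bool parseNode(const std::string& node, int level) {
            if (level > kMaximumTreeDepth)
                return false;                 // the real parser returns a Status error here
            if (node.empty())
                return true;                  // leaf reached
            return parseNode(node.substr(1), level + 1);
        }
    }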
|
| plan_enumerator.h | | plan_enumerator.h | |
| | | | |
| skipping to change at line 275 | | skipping to change at line 275 | |
| * | | * | |
| * Returns false if the AND cannot be indexed. Otherwise returns true. | | * Returns false if the AND cannot be indexed. Otherwise returns true. | |
| */ | | */ | |
| bool partitionPreds(MatchExpression* node, | | bool partitionPreds(MatchExpression* node, | |
| PrepMemoContext context, | | PrepMemoContext context, | |
| vector<MatchExpression*>* indexOut, | | vector<MatchExpression*>* indexOut, | |
| vector<MemoID>* subnodesOut, | | vector<MemoID>* subnodesOut, | |
| vector<MemoID>* mandatorySubnodes); | | vector<MemoID>* mandatorySubnodes); | |
| | | | |
| /** | | /** | |
|
| * Finds a set of predicates that can be safely compounded with 'assigned', | | * Finds a set of predicates that can be safely compounded with the set | |
| * under the assumption that we are assignining predicates to a compound, | | * of predicates in 'assigned', under the assumption that we are assigning | |
| * multikey index. | | * predicates to a compound, multikey index. | |
| * | | * | |
| * The list of candidate predicates that we could compound is passed | | * The list of candidate predicates that we could compound is passed | |
| * in 'couldCompound'. A subset of these predicates that is safe to | | * in 'couldCompound'. A subset of these predicates that is safe to | |
| * combine by compounding is returned in the out-parameter 'out'. | | * combine by compounding is returned in the out-parameter 'out'. | |
| * | | * | |
| * Does not take ownership of its arguments. | | * Does not take ownership of its arguments. | |
| * | | * | |
| * The rules for when to compound for multikey indices are reasonably | | * The rules for when to compound for multikey indices are reasonably | |
| * complex, and are dependent on the structure of $elemMatch's used | | * complex, and are dependent on the structure of $elemMatch's used | |
| * in the query. Ignoring $elemMatch for the time being, the rule is this: | | * in the query. Ignoring $elemMatch for the time being, the rule is this: | |
| | | | |
| skipping to change at line 315 | | skipping to change at line 315 | |
| * $elemMatch part of the path. | | * $elemMatch part of the path. | |
| * | | * | |
| * A few more examples: | | * A few more examples: | |
| * 1) {'a.b': {$elemMatch: {c: {$gt: 1}, d: 5}}}. In this case, we can | | * 1) {'a.b': {$elemMatch: {c: {$gt: 1}, d: 5}}}. In this case, we can | |
| * compound, because the $elemMatch is applied to the shared part of | | * compound, because the $elemMatch is applied to the shared part of | |
| * the path 'a.b'. | | * the path 'a.b'. | |
| * | | * | |
| * 2) {'a.b': 1, a: {$elemMatch: {b: {$gt: 0}}}}. We cannot combine the | | * 2) {'a.b': 1, a: {$elemMatch: {b: {$gt: 0}}}}. We cannot combine the | |
| * bounds here because the prefix 'a' is shared by two predicates which | | * bounds here because the prefix 'a' is shared by two predicates which | |
| * are not joined together by an $elemMatch. | | * are not joined together by an $elemMatch. | |
|
| | | * | |
| | | * NOTE: | |
| | | * Usually 'assigned' has just one predicate. However, in order to support | |
| | | * mandatory predicate assignment (TEXT and GEO_NEAR), we allow multiple | |
| | | * already-assigned predicates to be passed. If a mandatory predicate is over | |
| | | * a trailing field in a multikey compound index, then we assign both a predicate | |
| | | * over the leading field as well as the mandatory predicate prior to calling | |
| | | * this function. | |
| | | * | |
| | | * Ex: | |
| | | * Say we have index {a: 1, b: 1, c: "2dsphere", d: 1} as well as a $near | |
| | | * predicate and a $within predicate over "c". The $near predicate is mandatory | |
| | | * and must be assigned. The $within predicate is not mandatory. Furthermore, | |
| | | * it cannot be assigned in addition to the $near predicate because the index | |
| | | * is multikey. | |
| | | * | |
| | | * In this case the enumerator must assign the $near predicate, and pass it in | |
| | | * in 'assigned'. Otherwise it would be possible to assign the $within predicate, | |
| | | * and then not assign the $near because the $within is already assigned (and | |
| | | * has the same path). | |
| */ | | */ | |
|
| void getMultikeyCompoundablePreds(const MatchExpression* assigned, | | void getMultikeyCompoundablePreds(const vector<MatchExpression*>& assigned, | |
| const vector<MatchExpression*>& couldCompound, | | const vector<MatchExpression*>& couldCompound, | |
| vector<MatchExpression*>* out); | | vector<MatchExpression*>* out); | |
| | | | |
| /** | | /** | |
| * 'andAssignment' contains assignments that we've already committed to outputting, | | * 'andAssignment' contains assignments that we've already committed to outputting, | |
| * including both single index assignments and ixisect assignments. | | * including both single index assignments and ixisect assignments. | |
| * | | * | |
| * 'ixisectAssigned' is a set of predicates that we are about to add to 'andAssignment' | | * 'ixisectAssigned' is a set of predicates that we are about to add to 'andAssignment' | |
| * as an index intersection assignment. | | * as an index intersection assignment. | |
| * | | * | |
| | | | |
| skipping to change at line 367 | | skipping to change at line 387 | |
| * Generate one-index-at-once assignments given the predicate/index structure in idxToFirst | | * Generate one-index-at-once assignments given the predicate/index structure in idxToFirst | |
| * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments into | | * and idxToNotFirst (and the sub-trees in 'subnodes'). Outputs the assignments into | |
| * 'andAssignment'. | | * 'andAssignment'. | |
| */ | | */ | |
| void enumerateOneIndex(const IndexToPredMap& idxToFirst, | | void enumerateOneIndex(const IndexToPredMap& idxToFirst, | |
| const IndexToPredMap& idxToNotFirst, | | const IndexToPredMap& idxToNotFirst, | |
| const vector<MemoID>& subnodes, | | const vector<MemoID>& subnodes, | |
| AndAssignment* andAssignment); | | AndAssignment* andAssignment); | |
| | | | |
| /** | | /** | |
|
| | | * Generate single-index assignments for queries which contain mandatory | |
| | | * predicates (TEXT and GEO_NEAR, which are required to use a compatible index). | |
| | | * Outputs these assignments into 'andAssignment'. | |
| | | */ | |
| | | void enumerateMandatoryIndex(const IndexToPredMap& idxToFirst, | |
| | | const IndexToPredMap& idxToNotFirst, | |
| | | MatchExpression* mandatoryPred, | |
| | | const set<IndexID>& mandatoryIndices, | |
| | | AndAssignment* andAssignment); | |
| | | | |
| | | /** | |
| * Try to assign predicates in 'tryCompound' to 'thisIndex' as compound assignments. | | * Try to assign predicates in 'tryCompound' to 'thisIndex' as compound assignments. | |
| * Output the assignments in 'assign'. | | * Output the assignments in 'assign'. | |
| */ | | */ | |
| void compound(const vector<MatchExpression*>& tryCompound, | | void compound(const vector<MatchExpression*>& tryCompound, | |
| const IndexEntry& thisIndex, | | const IndexEntry& thisIndex, | |
| OneIndexAssignment* assign); | | OneIndexAssignment* assign); | |
| | | | |
|
| | | /** | |
| | | * Return the memo entry for 'node'. Does some sanity checking to ensure that a memo entry | |
| | | * actually exists. | |
| | | */ | |
| | | MemoID memoIDForNode(MatchExpression* node); | |
| | | | |
| std::string dumpMemo(); | | std::string dumpMemo(); | |
| | | | |
| // Map from expression to its MemoID. | | // Map from expression to its MemoID. | |
| unordered_map<MatchExpression*, MemoID> _nodeToId; | | unordered_map<MatchExpression*, MemoID> _nodeToId; | |
| | | | |
| // Map from MemoID to its precomputed solution info. | | // Map from MemoID to its precomputed solution info. | |
| unordered_map<MemoID, NodeAssignment*> _memo; | | unordered_map<MemoID, NodeAssignment*> _memo; | |
| | | | |
| // If true, there are no further enumeration states, and getNext should return false. | | // If true, there are no further enumeration states, and getNext should return false. | |
| // We could be _done immediately after init if we're unable to output an indexed plan. | | // We could be _done immediately after init if we're unable to output an indexed plan. | |
| | | | |
End of changes. 5 change blocks. |
| 6 lines changed or deleted | | 58 lines changed or added | |
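Editor's note: a standalone sketch of the multikey compounding rule documented above (predicates may share a compound multikey index only when the path prefix they share is joined by the same $elemMatch). The Pred struct and canCompound helper are illustrative simplifications, not the enumerator's real types.

    #include <string>

    namespace {
        struct Pred {
            std::string path;            // e.g. "a.b"
            std::string elemMatchRoot;   // path the enclosing $elemMatch applies to, "" if none
        };

        // Mirrors examples 1) and 2) above: {'a.b': {$elemMatch: {c: ..., d: ...}}} may
        // compound because both predicates share the $elemMatch context over "a.b",
        // while {'a.b': 1, a: {$elemMatch: {b: ...}}} may not.
        bool canCompound(const Pred& assigned, const Pred& candidate) {
            // find the shared prefix of the two paths
            size_t i = 0;
            while (i < assigned.path.size() && i < candidate.path.size()
                   && assigned.path[i] == candidate.path[i]) {
                ++i;
            }
            if (0 == i) {
                return true;             // nothing shared, so no multikey conflict
            }
            // both predicates must be joined by the same $elemMatch over that shared prefix
            return !assigned.elemMatchRoot.empty()
                   && assigned.elemMatchRoot == candidate.elemMatchRoot;
        }
    }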
|
| planner_access.h | | planner_access.h | |
| | | | |
| skipping to change at line 98 | | skipping to change at line 98 | |
| * However, for the query {a: {$elemMatch: {b: 1, c: 1}} we can compound the | | * However, for the query {a: {$elemMatch: {b: 1, c: 1}} we can compound the | |
| * bounds because the $elemMatch is applied to the shared prefix "a". | | * bounds because the $elemMatch is applied to the shared prefix "a". | |
| */ | | */ | |
| | | | |
| /** | | /** | |
| * Methods for creating a QuerySolutionNode tree that accesses the data required by the query. | | * Methods for creating a QuerySolutionNode tree that accesses the data required by the query. | |
| */ | | */ | |
| class QueryPlannerAccess { | | class QueryPlannerAccess { | |
| public: | | public: | |
| /** | | /** | |
|
| | | * Building the leaves (i.e. the index scans) is done by looping through | |
| | | * predicates one at a time. During the process, there is a fair amount of state | |
| | | * information to keep track of, which we consolidate into this data structure. | |
| | | */ | |
| | | struct ScanBuildingState { | |
| | | | |
| | | ScanBuildingState(MatchExpression* theRoot, | |
| | | bool inArrayOp, | |
| | | const std::vector<IndexEntry>& indexList) | |
| | | : root(theRoot), | |
| | | inArrayOperator(inArrayOp), | |
| | | indices(indexList), | |
| | | currentScan(NULL), | |
| | | curChild(0), | |
| | | currentIndexNumber(IndexTag::kNoIndex), | |
| | | ixtag(NULL), | |
| | | tightness(IndexBoundsBuilder::INEXACT_FETCH), | |
| | | curOr(NULL), | |
| | | loosestBounds(IndexBoundsBuilder::EXACT) { | |
| | | } | |
| | | | |
| | | /** | |
| | | * Reset the scan building state in preparation for building a new scan. | |
| | | * | |
| | | * This always should be called prior to allocating a new 'currentScan'. | |
| | | */ | |
| | | void resetForNextScan(IndexTag* newTag) { | |
| | | currentScan.reset(NULL); | |
| | | currentIndexNumber = newTag->index; | |
| | | tightness = IndexBoundsBuilder::INEXACT_FETCH; | |
| | | loosestBounds = IndexBoundsBuilder::EXACT; | |
| | | | |
| | | if (MatchExpression::OR == root->matchType()) { | |
| | | curOr.reset(new OrMatchExpression()); | |
| | | } | |
| | | } | |
| | | | |
| | | // The root of the MatchExpression tree for which we are currently building index | |
| | | // scans. Should be either an AND node or an OR node. | |
| | | MatchExpression* root; | |
| | | | |
| | | // Are we inside an array operator such as $elemMatch or $all? | |
| | | bool inArrayOperator; | |
| | | | |
| | | // A list of relevant indices which 'root' may be tagged to use. | |
| | | const std::vector<IndexEntry>& indices; | |
| | | | |
| | | // The index access node that we are currently constructing. We may merge | |
| | | // multiple tagged predicates into a single index scan. | |
| | | std::auto_ptr<QuerySolutionNode> currentScan; | |
| | | | |
| | | // An index into the child vector of 'root'. Indicates the child MatchExpression | |
| | | // for which we are currently either constructing a new scan or which we are about | |
| | | // to merge with 'currentScan'. | |
| | | size_t curChild; | |
| | | | |
| | | // An index into the 'indices', so that 'indices[currentIndexNumber]' gives the | |
| | | // index used by 'currentScan'. If there is no currentScan, this should be set | |
| | | // to 'IndexTag::kNoIndex'. | |
| | | size_t currentIndexNumber; | |
| | | | |
| | | // The tag on 'curChild'. | |
| | | IndexTag* ixtag; | |
| | | | |
| | | // Whether the bounds for predicate 'curChild' are exact, inexact and covered by | |
| | | // the index, or inexact with a fetch required. | |
| | | IndexBoundsBuilder::BoundsTightness tightness; | |
| | | | |
| | | // If 'root' is an $or, the child predicates which are tagged with the same index are | |
| | | // detached from the original root and added here. 'curOr' may be attached as a filter | |
| | | // later on, or ignored and cleaned up by the auto_ptr. | |
| | | std::auto_ptr<MatchExpression> curOr; | |
| | | | |
| | | // The values of BoundsTightness range from loosest to tightest in this order: | |
| | | // | |
| | | // INEXACT_FETCH < INEXACT_COVERED < EXACT | |
| | | // | |
| | | // 'loosestBounds' stores the smallest of these three values encountered so far for | |
| | | // the current scan. If at least one of the child predicates assigned to the current | |
| | | // index is INEXACT_FETCH, then 'loosestBounds' is INEXACT_FETCH. If at least one of | |
| | | // the child predicates assigned to the current index is INEXACT_COVERED but none are | |
| | | // INEXACT_FETCH, then 'loosestBounds' is INEXACT_COVERED. | |
| | | IndexBoundsBuilder::BoundsTightness loosestBounds; | |
| | | | |
| | | private: | |
| | | // Default constructor is not allowed. | |
| | | ScanBuildingState(); | |
| | | }; | |
| | | | |
| | | /** | |
| * Return a CollectionScanNode that scans as requested in 'query'. | | * Return a CollectionScanNode that scans as requested in 'query'. | |
| */ | | */ | |
| static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query, | | static QuerySolutionNode* makeCollectionScan(const CanonicalQuery& query, | |
| bool tailable, | | bool tailable, | |
| const QueryPlannerParams& params); | | const QueryPlannerParams& params); | |
| | | | |
| /** | | /** | |
| * Return a plan that uses the provided index as a proxy for a coll
ection scan. | | * Return a plan that uses the provided index as a proxy for a coll
ection scan. | |
| */ | | */ | |
| static QuerySolutionNode* scanWholeIndex(const IndexEntry& index, | | static QuerySolutionNode* scanWholeIndex(const IndexEntry& index, | |
| | | | |
| skipping to change at line 166 | | skipping to change at line 256 | |
| static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& query, | | static QuerySolutionNode* buildIndexedOr(const CanonicalQuery& query, | |
| MatchExpression* root, | | MatchExpression* root, | |
| bool inArrayOperator, | | bool inArrayOperator, | |
| const vector<IndexEntry>& indices); | | const vector<IndexEntry>& indices); | |
| | | | |
| /** | | /** | |
| * Traverses the tree rooted at the $elemMatch expression 'node', | | * Traverses the tree rooted at the $elemMatch expression 'node', | |
| * finding all predicates that can use an index directly and returning | | * finding all predicates that can use an index directly and returning | |
| * them in the out-parameter vector 'out'. | | * them in the out-parameter vector 'out'. | |
| * | | * | |
|
| * Traverses only through $and and $elemMatch nodes, not through other | | * Traverses only through $and and array nodes like $all. | |
| * logical or array nodes like $or and $all. | | * | |
| | | * Other nodes (i.e. nodes which cannot use an index directly, and which are | |
| | | * neither $and nor array nodes) are returned in 'subnodesOut' if they are | |
| | | * tagged to use an index. | |
| */ | | */ | |
| static void findElemMatchChildren(const MatchExpression* node, | | static void findElemMatchChildren(const MatchExpression* node, | |
|
| vector<MatchExpression*>* out); | | vector<MatchExpression*>* out, | |
| | | vector<MatchExpression*>* subnodesOut); | |
| | | | |
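As a sketch of the traversal contract described above (descend only through $and and array nodes; hand anything else that is tagged back through 'subnodesOut'), here is a simplified stand-in, not the real MatchExpression API:

    #include <vector>

    // Simplified stand-ins for MatchExpression node kinds and index tags.
    enum NodeType { NODE_AND, NODE_ELEM_MATCH_OBJECT, NODE_ALL, NODE_OR, NODE_LEAF };

    struct Node {
        NodeType type;
        bool taggedWithIndex;            // set by the planner's tagging pass
        std::vector<Node*> children;
    };

    static bool isArrayNode(const Node* n) {
        return n->type == NODE_ELEM_MATCH_OBJECT || n->type == NODE_ALL;
    }

    // Collect indexable leaf predicates beneath an $elemMatch. Descend through
    // $and and array nodes only; other tagged nodes (e.g. a tagged $or) are
    // reported as subnodes instead of being traversed.
    void findElemMatchChildrenSketch(const Node* node,
                                     std::vector<Node*>* out,
                                     std::vector<Node*>* subnodesOut) {
        for (size_t i = 0; i < node->children.size(); ++i) {
            Node* child = node->children[i];
            if (child->type == NODE_LEAF && child->taggedWithIndex) {
                out->push_back(child);
            } else if (child->type == NODE_AND || isArrayNode(child)) {
                findElemMatchChildrenSketch(child, out, subnodesOut);
            } else if (child->taggedWithIndex) {
                subnodesOut->push_back(child);
            }
        }
    }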
| /** | | /** | |
| * Helper used by buildIndexedAnd and buildIndexedOr. | | * Helper used by buildIndexedAnd and buildIndexedOr. | |
| * | | * | |
| * The children of AND and OR nodes are sorted by the index that the subtree rooted at | | * The children of AND and OR nodes are sorted by the index that the subtree rooted at | |
| * that node uses. Child nodes that use the same index are adjacent to one another to | | * that node uses. Child nodes that use the same index are adjacent to one another to | |
| * facilitate grouping of index scans. As such, the processing for AND and OR is | | * facilitate grouping of index scans. As such, the processing for AND and OR is | |
| * almost identical. | | * almost identical. | |
| * | | * | |
| * See tagForSort and sortUsingTags in index_tag.h for details on ordering the children | | * See tagForSort and sortUsingTags in index_tag.h for details on ordering the children | |
| * of OR and AND. | | * of OR and AND. | |
| * | | * | |
| * Does not take ownership of 'root' but may remove children from it. | | * Does not take ownership of 'root' but may remove children from it. | |
| */ | | */ | |
| static bool processIndexScans(const CanonicalQuery& query, | | static bool processIndexScans(const CanonicalQuery& query, | |
| MatchExpression* root, | | MatchExpression* root, | |
| bool inArrayOperator, | | bool inArrayOperator, | |
| const vector<IndexEntry>& indices, | | const vector<IndexEntry>& indices, | |
| vector<QuerySolutionNode*>* out); | | vector<QuerySolutionNode*>* out); | |
| | | | |
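Because the children arrive pre-sorted by index, the grouping performed here amounts to starting a new scan whenever the tagged index number changes and merging predicates that share the current index. A hypothetical, heavily simplified sketch of just that grouping step (not the real planner code):

    #include <cstddef>
    #include <vector>

    // Stand-ins for a tagged child predicate and for the scan being built.
    struct TaggedChild { size_t indexNumber; };
    struct ScanGroup { size_t indexNumber; std::vector<size_t> childPositions; };

    // Children must already be sorted by indexNumber (see sortUsingTags).
    std::vector<ScanGroup> groupChildrenIntoScans(const std::vector<TaggedChild>& children) {
        const size_t kNoIndex = static_cast<size_t>(-1);
        std::vector<ScanGroup> scans;
        size_t currentIndexNumber = kNoIndex;

        for (size_t i = 0; i < children.size(); ++i) {
            if (children[i].indexNumber != currentIndexNumber) {
                // The tagged index changed: finish the previous scan, start a new one.
                currentIndexNumber = children[i].indexNumber;
                ScanGroup group;
                group.indexNumber = currentIndexNumber;
                scans.push_back(group);
            }
            // Same index as the scan under construction: merge this predicate into it.
            scans.back().childPositions.push_back(i);
        }
        return scans;
    }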
|
| | | /** | |
| | | * Used by processIndexScans(...) in order to recursively build a data access | |
| | | * plan for a "subnode", a node in the MatchExpression tree which is indexed by | |
| | | * virtue of its children. | |
| | | * | |
| | | * The resulting scans are outputted in the out-parameter 'out'. | |
| | | */ | |
| | | static bool processIndexScansSubnode(const CanonicalQuery& query, | |
| | | ScanBuildingState* scanState, | |
| | | std::vector<QuerySolutionNode*>* out); | |
| | | | |
| | | /** | |
| | | * Used by processIndexScansSubnode(...) to build the leaves of the solution tree for an | |
| | | * ELEM_MATCH_OBJECT node beneath an AND. | |
| | | * | |
| | | * The resulting scans are outputted in the out-parameter 'out'. | |
| | | */ | |
| | | static bool processIndexScansElemMatch(const CanonicalQuery& query, | |
| | | ScanBuildingState* scanState, | |
| | | std::vector<QuerySolutionNode*>* out); | |
| | | | |
| // | | // | |
| // Helpers for creating an index scan. | | // Helpers for creating an index scan. | |
| // | | // | |
| | | | |
| /** | | /** | |
| * Create a new data access node. | | * Create a new data access node. | |
| * | | * | |
| * If the node is an index scan, the bounds for 'expr' are computed and placed into the | | * If the node is an index scan, the bounds for 'expr' are computed and placed into the | |
| * first field's OIL position. The rest of the OILs are allocated but uninitialized. | | * first field's OIL position. The rest of the OILs are allocated but uninitialized. | |
| * | | * | |
| | | | |
| skipping to change at line 213 | | skipping to change at line 328 | |
| */ | | */ | |
| static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query, | | static QuerySolutionNode* makeLeafNode(const CanonicalQuery& query, | |
| const IndexEntry& index, | | const IndexEntry& index, | |
| size_t pos, | | size_t pos, | |
| MatchExpression* expr, | | MatchExpression* expr, | |
| IndexBoundsBuilder::BoundsTightness* tightnessOut); | | IndexBoundsBuilder::BoundsTightness* tightnessOut); | |
| | | | |
| /** | | /** | |
| * Merge the predicate 'expr' with the leaf node 'node'. | | * Merge the predicate 'expr' with the leaf node 'node'. | |
| */ | | */ | |
|
| static void mergeWithLeafNode(MatchExpression* expr, | | static void mergeWithLeafNode(MatchExpression* expr, ScanBuildingState* scanState); | |
| const IndexEntry& index, | | | |
| size_t pos, | | | |
| IndexBoundsBuilder::BoundsTightness* tightnessOut, | | | |
| QuerySolutionNode* node, | | | |
| MatchExpression::MatchType mergeType); | | | |
| | | | |
| /** | | /** | |
| * Determines whether it is safe to merge the expression 'expr' with | | * Determines whether it is safe to merge the expression 'expr' with | |
|
| * the leaf node of the query solution, 'node'. | | * the leaf node of the query solution contained in 'scanState'. | |
| * | | | |
| * 'index' provides information about the index used by 'node'. | | | |
| * 'pos' gives the position in the index (for compound indices) that | | | |
| * 'expr' needs to use. Finally, 'mergeType' indicates whether we | | | |
| * will try to merge using an AND or OR. | | | |
| * | | * | |
| * Does not take ownership of its arguments. | | * Does not take ownership of its arguments. | |
| */ | | */ | |
| static bool shouldMergeWithLeaf(const MatchExpression* expr, | | static bool shouldMergeWithLeaf(const MatchExpression* expr, | |
|
| const IndexEntry& index, | | const ScanBuildingState& scanState); | |
| size_t pos, | | | |
| QuerySolutionNode* node, | | | |
| MatchExpression::MatchType mergeType); | | | |
| | | | |
| /** | | /** | |
| * If index scan (regular or expression index), fill in any bounds that are missing in | | * If index scan (regular or expression index), fill in any bounds that are missing in | |
| * 'node' with the "all values for this field" interval. | | * 'node' with the "all values for this field" interval. | |
| * | | * | |
| * If geo, do nothing. | | * If geo, do nothing. | |
| * If text, punt to finishTextNode. | | * If text, punt to finishTextNode. | |
| */ | | */ | |
| static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index); | | static void finishLeafNode(QuerySolutionNode* node, const IndexEntry& index); | |
| | | | |
|
| | | /** | |
| | | * Fills in any missing bounds by calling finishLeafNode(...) for the scan contained in | |
| | | * 'scanState'. The resulting scan is outputted in the out-parameter 'out', transferring | |
| | | * ownership in the process. | |
| | | * | |
| | | * If 'scanState' is building an index scan for OR-related predicates, filters | |
| | | * may be affixed to the scan as necessary. | |
| | | */ | |
| | | static void finishAndOutputLeaf(ScanBuildingState* scanState, | |
| | | std::vector<QuerySolutionNode*>* out); | |
| | | | |
| | | /** | |
| | | * Returns true if the current scan in 'scanState' requires a FetchNode. | |
| | | */ | |
| | | static bool orNeedsFetch(const ScanBuildingState* scanState); | |
| | | | |
| static void finishTextNode(QuerySolutionNode* node, const IndexEntry& index); | | static void finishTextNode(QuerySolutionNode* node, const IndexEntry& index); | |
| | | | |
|
| private: | | | |
| /** | | /** | |
| * Add the filter 'match' to the query solution node 'node'. Takes | | * Add the filter 'match' to the query solution node 'node'. Takes | |
| * ownership of 'match'. | | * ownership of 'match'. | |
| * | | * | |
| * The MatchType, 'type', indicates whether 'match' is a child of an | | * The MatchType, 'type', indicates whether 'match' is a child of an | |
| * AND or an OR match expression. | | * AND or an OR match expression. | |
| */ | | */ | |
|
| static void _addFilterToSolutionNode(QuerySolutionNode* node, MatchExpression* match, | | static void addFilterToSolutionNode(QuerySolutionNode* node, MatchExpression* match, | |
| MatchExpression::MatchType type); | | MatchExpression::MatchType type); | |
| | | | |
| | | /** | |
| | | * Once a predicate is merged into the current scan, there are a few things we might | |
| | | * want to do with the filter: | |
| | | * 1) Detach the filter from its parent and delete it because the predicate is | |
| | | * answered by exact index bounds. | |
| | | * 2) Leave the filter alone so that it can be affixed as part of a fetch node later. | |
| | | * 3) Detach the filter from its parent and attach it directly to an index scan node. | |
| | | * We can sometimes do this for INEXACT_COVERED predicates which are not answered exactly | |
| | | * by the bounds, but can be answered by examining the data in the index key. | |
| | | * 4) Detach the filter from its parent and attach it as a child of a separate | |
| | | * MatchExpression tree. This is done for proper handling of inexact bounds for $or | |
| | | * queries. | |
| | | * | |
| | | * This executes one of the four options above, according to the data in 'scanState'. | |
| | | */ | |
| | | static void handleFilter(ScanBuildingState* scanState); | |
| | | | |
| | | /** | |
| | | * Implements handleFilter(...) for AND queries. | |
| | | */ | |
| | | static void handleFilterAnd(ScanBuildingState* scanState); | |
| | | | |
| | | /** | |
| | | * Implements handleFilter(...) for OR queries. | |
| | | */ | |
| | | static void handleFilterOr(ScanBuildingState* scanState); | |
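For the AND case, the choice among the filter dispositions listed above follows directly from the recorded bounds tightness; the $or path additionally detaches predicates into a separate tree. A hedged sketch of just the AND-side decision, with hypothetical enum names rather than the real planner types:

    // Stand-ins for the tightness values and the possible filter dispositions.
    enum Tightness { T_INEXACT_FETCH, T_INEXACT_COVERED, T_EXACT };

    enum FilterAction {
        DELETE_FILTER,         // option (1): exact bounds answer the predicate
        LEAVE_FOR_FETCH,       // option (2): affix later as part of a fetch node
        ATTACH_TO_INDEX_SCAN   // option (3): check the predicate against index keys
        // option (4), moving the filter into a separate tree, is the $or-specific path
    };

    FilterAction chooseFilterActionForAnd(Tightness tightness) {
        switch (tightness) {
            case T_EXACT:           return DELETE_FILTER;
            case T_INEXACT_COVERED: return ATTACH_TO_INDEX_SCAN;
            case T_INEXACT_FETCH:   return LEAVE_FOR_FETCH;
        }
        return LEAVE_FOR_FETCH;  // unreachable; silences missing-return warnings
    }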
| }; | | }; | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
End of changes. 10 change blocks. |
| 29 lines changed or deleted | | 213 lines changed or added | |
|
| subplan_runner.h | | subplan_runner.h | |
| | | | |
| skipping to change at line 33 | | skipping to change at line 33 | |
| * file(s), but you are not obligated to do so. If you do not wish to do so, | | * file(s), but you are not obligated to do so. If you do not wish to do so, | |
| * delete this exception statement from your version. If you delete this | | * delete this exception statement from your version. If you delete this | |
| * exception statement from all source files in the program, then also delete | | * exception statement from all source files in the program, then also delete | |
| * it in the license file. | | * it in the license file. | |
| */ | | */ | |
| | | | |
| #pragma once | | #pragma once | |
| | | | |
| #include <boost/scoped_ptr.hpp> | | #include <boost/scoped_ptr.hpp> | |
| #include <string> | | #include <string> | |
|
| | | #include <queue> | |
| | | | |
| #include "mongo/base/status.h" | | #include "mongo/base/status.h" | |
| #include "mongo/db/query/runner.h" | | #include "mongo/db/query/runner.h" | |
| #include "mongo/db/query/query_planner_params.h" | | #include "mongo/db/query/query_planner_params.h" | |
|
| | | #include "mongo/db/query/query_solution.h" | |
| | | | |
| namespace mongo { | | namespace mongo { | |
| | | | |
| class BSONObj; | | class BSONObj; | |
| class CanonicalQuery; | | class CanonicalQuery; | |
| class DiskLoc; | | class DiskLoc; | |
| class TypeExplain; | | class TypeExplain; | |
| struct PlanInfo; | | struct PlanInfo; | |
| | | | |
| class SubplanRunner : public Runner { | | class SubplanRunner : public Runner { | |
| public: | | public: | |
|
| SubplanRunner(Collection* collection, | | /** | |
| const QueryPlannerParams& params, | | * Used to create SubplanRunner instances. The caller owns the instance | |
| CanonicalQuery* cq); | | * returned through 'out'. | |
| | | * | |
| | | * 'out' is valid only if an OK status is returned. | |
| | | */ | |
| | | static Status make(Collection* collection, | |
| | | const QueryPlannerParams& params, | |
| | | CanonicalQuery* cq, | |
| | | SubplanRunner** out); | |
| | | | |
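A hedged usage sketch of the factory above, assuming only the declarations in this header: construction can now fail cleanly (for example when subquery planning fails), so the caller checks the Status before taking ownership. The wrapper function and the ErrorCodes::BadValue choice are illustrative, not part of the header.

    Status getSubplanRunner(Collection* collection,
                            const QueryPlannerParams& params,
                            CanonicalQuery* cq,
                            std::auto_ptr<SubplanRunner>* out) {
        if (!SubplanRunner::canUseSubplanRunner(*cq)) {
            return Status(ErrorCodes::BadValue, "query not eligible for subplanning");
        }
        SubplanRunner* runner = NULL;
        Status status = SubplanRunner::make(collection, params, cq, &runner);
        if (!status.isOK()) {
            return status;          // 'runner' is not valid when make() fails
        }
        out->reset(runner);         // caller owns the instance on success
        return Status::OK();
    }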
| static bool canUseSubplanRunner(const CanonicalQuery& query); | | static bool canUseSubplanRunner(const CanonicalQuery& query); | |
| | | | |
| virtual ~SubplanRunner(); | | virtual ~SubplanRunner(); | |
| | | | |
| virtual Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); | | virtual Runner::RunnerState getNext(BSONObj* objOut, DiskLoc* dlOut); | |
| | | | |
| virtual bool isEOF(); | | virtual bool isEOF(); | |
| | | | |
| virtual void saveState(); | | virtual void saveState(); | |
| | | | |
| skipping to change at line 79 | | skipping to change at line 88 | |
| | | | |
| virtual void kill(); | | virtual void kill(); | |
| | | | |
| virtual const Collection* collection() { | | virtual const Collection* collection() { | |
| return _collection; | | return _collection; | |
| } | | } | |
| | | | |
| virtual Status getInfo(TypeExplain** explain, | | virtual Status getInfo(TypeExplain** explain, | |
| PlanInfo** planInfo) const; | | PlanInfo** planInfo) const; | |
| | | | |
|
| | | /** | |
| | | * Plan each branch of the $or independently, and store the resulting | |
| | | * lists of query solutions in '_solutions'. | |
| | | * | |
| | | * Called from SubplanRunner::make so that getRunner can fail if | |
| | | * subquery planning fails, rather than returning a runner and failing | |
| | | * through getNext(...). | |
| | | */ | |
| | | Status planSubqueries(); | |
| | | | |
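The up-front planning described above boils down to: plan every $or branch, and either queue all of the per-branch solution lists or report the first failure. A simplified stand-in sketch of that control flow (not the real planner API; 'planBranch' is a hypothetical helper):

    #include <queue>
    #include <vector>

    struct Branch {};     // stand-in for one canonicalized child of the $or
    struct Solution {};   // stand-in for QuerySolution

    // Hypothetical helper: produce candidate solutions for one branch.
    bool planBranch(const Branch& branch, std::vector<Solution*>* out) {
        (void)branch; (void)out;
        return true;      // placeholder: real code would append candidate solutions
    }

    // Plan all branches up front; any failure is reported to the caller (make),
    // so no half-planned runner is ever handed back to getRunner.
    bool planAllBranches(const std::vector<Branch>& branches,
                         std::queue< std::vector<Solution*> >* solutions) {
        for (size_t i = 0; i < branches.size(); ++i) {
            std::vector<Solution*> branchSolutions;
            if (!planBranch(branches[i], &branchSolutions)) {
                return false;
            }
            solutions->push(branchSolutions);   // dequeued later, one branch at a time
        }
        return true;
    }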
| private: | | private: | |
|
| | | SubplanRunner(Collection* collection, | |
| | | const QueryPlannerParams& params, | |
| | | CanonicalQuery* cq); | |
| | | | |
| bool runSubplans(); | | bool runSubplans(); | |
| | | | |
| enum SubplanRunnerState { | | enum SubplanRunnerState { | |
| PLANNING, | | PLANNING, | |
| RUNNING, | | RUNNING, | |
| }; | | }; | |
| | | | |
| SubplanRunnerState _state; | | SubplanRunnerState _state; | |
| | | | |
| Collection* _collection; | | Collection* _collection; | |
| | | | |
| skipping to change at line 102 | | skipping to change at line 125 | |
| | | | |
| std::auto_ptr<CanonicalQuery> _query; | | std::auto_ptr<CanonicalQuery> _query; | |
| | | | |
| bool _killed; | | bool _killed; | |
| | | | |
| Runner::YieldPolicy _policy; | | Runner::YieldPolicy _policy; | |
| | | | |
| boost::scoped_ptr<Runner> _underlyingRunner; | | boost::scoped_ptr<Runner> _underlyingRunner; | |
| | | | |
| std::string _ns; | | std::string _ns; | |
|
| | | | |
| | | // We do the subquery planning up front, and keep the resulting | |
| | | // query solutions here. Lists of query solutions are dequeued | |
| | | // and ownership is transferred to the underlying runners one | |
| | | // at a time. | |
| | | std::queue< std::vector<QuerySolution*> > _solutions; | |
| | | | |
| | | // Holds the canonicalized subqueries. Ownership is transferred | |
| | | // to the underlying runners one at a time. | |
| | | std::queue<CanonicalQuery*> _cqs; | |
| | | | |
| | | // We need this to extract cache-friendly index data from the index assignments. | |
| | | map<BSONObj, size_t> _indexMap; | |
| }; | | }; | |
| | | | |
| } // namespace mongo | | } // namespace mongo | |
| | | | |
End of changes. 6 change blocks. |
| 3 lines changed or deleted | | 43 lines changed or added | |
|
| syncclusterconnection.h | | syncclusterconnection.h | |
| | | | |
| skipping to change at line 49 | | skipping to change at line 49 | |
| * The class checks if a command is read or write style, and sends to a single | | * The class checks if a command is read or write style, and sends to a single | |
| * node if a read lock command and to all in two phases with a write style command. | | * node if a read lock command and to all in two phases with a write style command. | |
| */ | | */ | |
| class MONGO_CLIENT_API SyncClusterConnection : public DBClientBase { | | class MONGO_CLIENT_API SyncClusterConnection : public DBClientBase { | |
| public: | | public: | |
| | | | |
| using DBClientBase::query; | | using DBClientBase::query; | |
| using DBClientBase::update; | | using DBClientBase::update; | |
| using DBClientBase::remove; | | using DBClientBase::remove; | |
| | | | |
|
| | | class QueryHandler; | |
| | | | |
| /** | | /** | |
| * @param commaSeparated should be 3 hosts comma separated | | * @param commaSeparated should be 3 hosts comma separated | |
| */ | | */ | |
| SyncClusterConnection( const list<HostAndPort> &, double socketTimeout = 0); | | SyncClusterConnection( const list<HostAndPort> &, double socketTimeout = 0); | |
| SyncClusterConnection( string commaSeparated, double socketTimeout = 0); | | SyncClusterConnection( string commaSeparated, double socketTimeout = 0); | |
| SyncClusterConnection( const std::string& a, | | SyncClusterConnection( const std::string& a, | |
| const std::string& b, | | const std::string& b, | |
| const std::string& c, | | const std::string& c, | |
| double socketTimeout = 0 ); | | double socketTimeout = 0 ); | |
| ~SyncClusterConnection(); | | ~SyncClusterConnection(); | |
| | | | |
| skipping to change at line 117 | | skipping to change at line 119 | |
| virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; } | | virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; } | |
| | | | |
| void setAllSoTimeouts( double socketTimeout ); | | void setAllSoTimeouts( double socketTimeout ); | |
| double getSoTimeout() const { return _socketTimeout; } | | double getSoTimeout() const { return _socketTimeout; } | |
| | | | |
| virtual bool lazySupported() const { return false; } | | virtual bool lazySupported() const { return false; } | |
| | | | |
| virtual void setRunCommandHook(DBClientWithCommands::RunCommandHookFunc func); | | virtual void setRunCommandHook(DBClientWithCommands::RunCommandHookFunc func); | |
| virtual void setPostRunCommandHook(DBClientWithCommands::PostRunCommandHookFunc func); | | virtual void setPostRunCommandHook(DBClientWithCommands::PostRunCommandHookFunc func); | |
| | | | |
|
| | | /** | |
| | | * Allow custom query processing through an external (e.g. mongos-only) service. | |
| | | * | |
| | | * Takes ownership of attached handler. | |
| | | */ | |
| | | void attachQueryHandler( QueryHandler* handler ); | |
| | | | |
| protected: | | protected: | |
| virtual void _auth(const BSONObj& params); | | virtual void _auth(const BSONObj& params); | |
| | | | |
| private: | | private: | |
| SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout = 0 ); | | SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout = 0 ); | |
| string _toString() const; | | string _toString() const; | |
| bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0); | | bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0); | |
| auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip, | | auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip, | |
| const BSONObj *fieldsToReturn, int queryOptions, int batchSize ); | | const BSONObj *fieldsToReturn, int queryOptions, int batchSize ); | |
| int _lockType( const string& name ); | | int _lockType( const string& name ); | |
| void _checkLast(); | | void _checkLast(); | |
| void _connect( const std::string& host ); | | void _connect( const std::string& host ); | |
| | | | |
| string _address; | | string _address; | |
| vector<string> _connAddresses; | | vector<string> _connAddresses; | |
| vector<DBClientConnection*> _conns; | | vector<DBClientConnection*> _conns; | |
|
| map<string,int> _lockTypes; | | | |
| mongo::mutex _mutex; | | | |
| | | | |
| vector<BSONObj> _lastErrors; | | vector<BSONObj> _lastErrors; | |
| | | | |
|
| | | // Optionally attached by user | |
| | | scoped_ptr<QueryHandler> _customQueryHandler; | |
| | | | |
| | | mongo::mutex _mutex; | |
| | | map<string,int> _lockTypes; | |
| | | // End mutex | |
| | | | |
| double _socketTimeout; | | double _socketTimeout; | |
| }; | | }; | |
| | | | |
|
| | | /** | |
| | | * Interface for custom query processing for the SCC. | |
| | | * Allows plugging different host query behaviors for different types of queries. | |
| | | */ | |
| | | class SyncClusterConnection::QueryHandler { | |
| | | public: | |
| | | | |
| | | virtual ~QueryHandler() {}; | |
| | | | |
| | | /** | |
| | | * Returns true if the query can be processed using this handler. | |
| | | */ | |
| | | virtual bool canHandleQuery( const string& ns, Query query ) = 0; | |
| | | | |
| | | /** | |
| | | * Returns a cursor on one of the hosts with the desired results for the query. | |
| | | * May throw or return an empty auto_ptr on failure. | |
| | | */ | |
| | | virtual auto_ptr<DBClientCursor> handleQuery( const vector<string>& hosts, | |
| | | const string &ns, | |
| | | Query query, | |
| | | int nToReturn, | |
| | | int nToSkip, | |
| | | const BSONObj *fieldsToReturn, | |
| | | int queryOptions, | |
| | | int batchSize ) = 0; | |
| | | }; | |
| | | | |
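A hedged sketch of a concrete handler for the interface above, assuming the mongo client headers and namespace: it claims queries against a single namespace and routes them to the first supplied host over its own connection. The class name and routing policy are illustrative assumptions; only the two overridden virtuals come from the interface.

    class FirstHostQueryHandler : public SyncClusterConnection::QueryHandler {
    public:
        explicit FirstHostQueryHandler( const string& ns ) : _ns( ns ) {}

        virtual bool canHandleQuery( const string& ns, Query query ) {
            return ns == _ns;   // only queries against the configured namespace
        }

        virtual auto_ptr<DBClientCursor> handleQuery( const vector<string>& hosts,
                                                      const string& ns,
                                                      Query query,
                                                      int nToReturn,
                                                      int nToSkip,
                                                      const BSONObj* fieldsToReturn,
                                                      int queryOptions,
                                                      int batchSize ) {
            if ( hosts.empty() ) {
                return auto_ptr<DBClientCursor>();   // empty on failure, per the contract
            }
            if ( !_conn.get() ) {
                _conn.reset( new DBClientConnection() );
                _conn->connect( hosts[0] );          // may throw, which is also allowed
            }
            return _conn->query( ns, query, nToReturn, nToSkip,
                                 fieldsToReturn, queryOptions, batchSize );
        }

    private:
        string _ns;
        scoped_ptr<DBClientConnection> _conn;   // kept alive for the cursor's lifetime
    };

A handler like this would be attached with attachQueryHandler, which takes ownership of the pointer, e.g. connection.attachQueryHandler( new FirstHostQueryHandler( "test.coll" ) ).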
| class MONGO_CLIENT_API UpdateNotTheSame : public UserException { | | class MONGO_CLIENT_API UpdateNotTheSame : public UserException { | |
| public: | | public: | |
| UpdateNotTheSame( int code , const string& msg , const vector<string>& addrs , const vector<BSONObj>& lastErrors ) | | UpdateNotTheSame( int code , const string& msg , const vector<string>& addrs , const vector<BSONObj>& lastErrors ) | |
| : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) { | | : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) { | |
| verify( _addrs.size() == _lastErrors.size() ); | | verify( _addrs.size() == _lastErrors.size() ); | |
| } | | } | |
| | | | |
| virtual ~UpdateNotTheSame() throw() { | | virtual ~UpdateNotTheSame() throw() { | |
| } | | } | |
| | | | |
| | | | |
End of changes. 5 change blocks. |
| 2 lines changed or deleted | | 49 lines changed or added | |
|