path: root/src/3rdparty/v8/src/mips/builtins-mips.cc
author     Peter Varga <pvarga@inf.u-szeged.hu>  2012-11-23 15:47:13 +0100
committer  The Qt Project <gerrit-noreply@qt-project.org>  2013-01-09 13:05:41 +0100
commit     29614f3a7f84955e75dbdacdaa0abb18fc9e5231 (patch)
tree       a9a7d039ce37ae3af3b20699e732b8e7de496805 /src/3rdparty/v8/src/mips/builtins-mips.cc
parent     f3e57accf67099f668b069b1e248b4d4e424385b (diff)
Updated V8 from git://github.com/v8/v8.git to 2215c442af2f0a4fce076e35ea061486d62778a1
Update V8 source to version 3.15.2

* Performance and stability improvements on all platforms.
* Avoid overdeep recursion in regexp where a guarded expression with a minimum repetition count is inside another quantifier.
* Exposed last seen heap object id via v8 public api.
* Implemented heap profiler memory usage reporting.
* Preserved error message during finally block in try..finally.
* Improved heuristics to keep objects in fast mode with inherited constructors.
* Fixed lazy sweeping heuristics to prevent old-space expansion.
* Fixed sharing of literal boilerplates for optimized code.
* Expose more detailed memory statistics.
* Cleaned up hardfp ABI detection for ARM (V8 issue 2140).
* Report "hidden properties" in heap profiler for properties case.
* Added optimizing compiler support for JavaScript getters.
* Fixed computation of call targets on prototypes in Crankshaft.
* Fixed ICs for slow objects with native accessor.
* Fixed transcendental cache on ARM in optimized code (issue 2234).
* New heap inspection tools: counters for object sizes and counts, histograms for external fragmentation.
* Added more support for heap analysis.
* Interpret negative hexadecimal literals as NaN. (issue 2240)
* Improved API calls that return empty handles.
* Added checks for interceptors to negative lookup code in Crankshaft.
* Introduced global contexts to represent lexical global scope(s).
* Allocate block-scoped global bindings to global context.
* Add empty-handle checks to API functions (#ifdef ENABLE_EXTRA_CHECKS).
* Check the return value of API calls on ia32 and x64.
* Activated fixed ES5 readonly semantics by default.
* Added validity checking to API functions and calls.
* Switched on code compaction on incremental GCs.
* ARM: allowed VFP3 instructions when hardfloat is enabled. (Chromium issue 152506)
* Killed off the SCons based build. (relevant for testing)
* Added a faster API for creating v8::Integer objects.
* Speeded up function deoptimization by avoiding quadratic pass over optimized function list. (Chromium issue 155270)
* ES6: Added support for size to Set and Map (issue 2395)
* ES6: Added support for Set and Map clear method (issue 2400)
* Made formatting error message side-effect-free. (issue 2398)
* Fixed V8 issues: 915, 1374, 1465, 1569, 1591, 1609, 1645, 1804, 1914, 1981, 1991, 2016, 2017, 2034, 2077, 2087, 2094, 2119, 2120, 2133, 2134, 2139, 2143, 2151, 2156, 2166, 2170, 2172, 2177, 2179, 2185, 2193, 2194, 2201, 2210, 2212, 2219, 2220, 2225, 2239, 2245, 2252, 2260, 2261, 2294, 2297, 2308, 2312, 2313, 2317, 2318, 2322, 2326, 2364, 2372, 2380

Change-Id: Ifa82ec37b6150fdf1c4364c93987a32bbbf6bfd1
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
Diffstat (limited to 'src/3rdparty/v8/src/mips/builtins-mips.cc')
-rw-r--r--  src/3rdparty/v8/src/mips/builtins-mips.cc  |  72
1 file changed, 56 insertions(+), 16 deletions(-)
diff --git a/src/3rdparty/v8/src/mips/builtins-mips.cc b/src/3rdparty/v8/src/mips/builtins-mips.cc
index eeb84c3..0342e65 100644
--- a/src/3rdparty/v8/src/mips/builtins-mips.cc
+++ b/src/3rdparty/v8/src/mips/builtins-mips.cc
@@ -79,12 +79,13 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
 // Load the built-in InternalArray function from the current context.
 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
                                               Register result) {
-  // Load the global context.
-  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the native context.
   __ lw(result,
-        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the InternalArray function from the global context.
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ lw(result,
+        FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the InternalArray function from the native context.
   __ lw(result,
         MemOperand(result,
                    Context::SlotOffset(
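
Both context-loader hunks (this one and the Array variant below) replace the old single GLOBAL_INDEX load with a two-step walk: the context's GLOBAL_OBJECT_INDEX slot holds the global object, and that object's kNativeContextOffset field holds the native context that owns the builtin function slots. A minimal C++ model of the pointer chain the three lw loads traverse; the struct layout here is an illustrative stand-in, not V8's real object layout:

    struct JSFunction;
    struct NativeContext { JSFunction* internal_array_function; };  // builtin slot
    struct GlobalObject  { NativeContext* native_context; };        // kNativeContextOffset
    struct Context       { GlobalObject* global_object; };          // GLOBAL_OBJECT_INDEX slot

    // Mirrors the three loads in GenerateLoadInternalArrayFunction.
    JSFunction* LoadInternalArrayFunction(const Context* cp) {
      GlobalObject* global = cp->global_object;        // 1st lw
      NativeContext* native = global->native_context;  // 2nd lw
      return native->internal_array_function;          // 3rd lw
    }
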
@@ -94,12 +95,13 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
-  // Load the global context.
-  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the native context.
   __ lw(result,
-        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the Array function from the global context.
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ lw(result,
+        FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
   __ lw(result,
         MemOperand(result,
                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
@@ -118,7 +120,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
                                  Label* gc_required) {
   const int initial_capacity = JSArray::kPreallocatedArrayElements;
   STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+  __ LoadInitialArrayMap(array_function, scratch2, scratch1, false);
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -214,7 +216,8 @@ static void AllocateJSArray(MacroAssembler* masm,
                             bool fill_with_hole,
                             Label* gc_required) {
   // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
+  __ LoadInitialArrayMap(array_function, scratch2,
+                         elements_array_storage, fill_with_hole);
 
   if (FLAG_debug_code) {  // Assert that array size is not zero.
     __ Assert(
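
The two hunks above thread a new fourth argument through LoadInitialArrayMap: AllocateEmptyJSArray passes false, while AllocateJSArray forwards its fill_with_hole flag. The plausible reading, consistent with the FAST_HOLEY_* elements kinds this V8 series introduces, is that the flag lets the helper pick a holey initial map when the new array will be pre-filled with hole sentinels. A hedged sketch of that selection (enum abridged; not V8's actual implementation):

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS };

    // A holey array needs a map whose elements kind admits the-hole values,
    // so that element loads know to check for them.
    ElementsKind InitialElementsKind(bool fill_with_hole) {
      return fill_with_hole ? FAST_HOLEY_SMI_ELEMENTS : FAST_SMI_ELEMENTS;
    }
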
@@ -449,10 +452,10 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ Branch(call_generic_code);
   __ bind(&not_double);
 
-  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
+  // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS.
   // a3: JSArray
   __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                          FAST_ELEMENTS,
                                          a2,
                                          t5,
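
The rename here tracks the upstream split of FAST_SMI_ONLY_ELEMENTS into packed (FAST_SMI_ELEMENTS) and holey variants; the generalization logic itself is unchanged. A compact model of the decision ArrayNativeCode appears to make while copying arguments, offered as an illustration rather than a transcription of the generated code:

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    // Smis fit the current kind; a heap number upgrades a smi-only array to
    // doubles; any other object forces the generic tagged kind.
    ElementsKind KindForValue(ElementsKind current, bool is_smi, bool is_number) {
      if (is_smi) return current;
      if (is_number && current == FAST_SMI_ELEMENTS) return FAST_DOUBLE_ELEMENTS;
      return FAST_ELEMENTS;
    }
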
@@ -712,6 +715,43 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
 }
 
 
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+
+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+  GenerateTailCallToSharedCode(masm);
+}
+
+
+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Push a copy of the function onto the stack.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
+
+    __ push(a1);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kParallelRecompile, 1);
+
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore receiver.
+    __ pop(a1);
+
+    // Tear down internal frame.
+  }
+
+  GenerateTailCallToSharedCode(masm);
+}
+
+
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
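
Two notes on the added builtins. GenerateTailCallToSharedCode re-enters a function through its unoptimized code: it loads the SharedFunctionInfo from the function in a1, loads that object's Code, and jumps past the Code object's header; Code::kHeaderSize - kHeapObjectTag turns a tagged Code pointer into the address of its first instruction in a single Addu. Generate_ParallelRecompile brackets the Runtime::kParallelRecompile call with mirrored pushes and pops because the runtime call may trigger a GC that moves the function object; keeping the copies on the stack lets the collector update them. A self-contained model of the entry-point arithmetic, with placeholder constants (V8 defines the real values):

    #include <cstdint>

    const uintptr_t kHeapObjectTag = 1;    // low tag bit marking heap pointers
    const uintptr_t kCodeHeaderSize = 32;  // illustrative header size, not V8's

    // Mirrors `Addu(at, a2, Code::kHeaderSize - kHeapObjectTag)`: strip the
    // tag and skip the header in one addition to reach the first instruction.
    uintptr_t EntryPoint(uintptr_t tagged_code_pointer) {
      return tagged_code_pointer + kCodeHeaderSize - kHeapObjectTag;
    }
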
@@ -1392,9 +1432,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // receiver.
   __ bind(&use_global_receiver);
   const int kGlobalIndex =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
   __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
-  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
+  __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
   __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
   __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
@@ -1585,9 +1625,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   // Use the current global receiver object as the receiver.
   __ bind(&use_global_receiver);
   const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
   __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
   __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
   __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
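
The last two hunks patch the same use_global_receiver fallback in Generate_FunctionCall and Generate_FunctionApply. Note the double use of the same slot offset: the code loads the global object from the current context, follows kNativeContextOffset to the native context, loads that context's global object, and only then reads kGlobalReceiverOffset. Presumably this is needed because, with the global contexts this update introduces for lexical global scope, the current context's global object need not belong to the native context. A minimal model of the four loads, using the same illustrative layout as the sketch after the first hunk plus the receiver field:

    struct GlobalObject;
    struct JSObject;
    struct NativeContext { GlobalObject* global_object; };
    struct GlobalObject {
      NativeContext* native_context;   // kNativeContextOffset
      JSObject*      global_receiver;  // kGlobalReceiverOffset
    };
    struct Context { GlobalObject* global_object; };  // GLOBAL_OBJECT_INDEX slot

    // Mirrors the four loads in the use_global_receiver path.
    JSObject* GlobalReceiver(const Context* cp) {
      GlobalObject* global = cp->global_object;        // lw #1
      NativeContext* native = global->native_context;  // lw #2
      global = native->global_object;                  // lw #3
      return global->global_receiver;                  // lw #4
    }
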