author    Ryan <ry@tinyclouds.org>  2009-09-02 11:13:46 +0200
committer Ryan <ry@tinyclouds.org>  2009-09-02 11:13:46 +0200
commit    97ce138621b375f24db98280972a56e063be0b1d (patch)
tree      3bc3aa651233955ade15b1f5a00678e042be1076 /deps
parent    78bb53b009e04c94f142aa3241b06c640395b170 (diff)
Upgrade V8 to 1.3.9
Diffstat (limited to 'deps')
-rw-r--r--  deps/v8/ChangeLog | 27
-rw-r--r--  deps/v8/SConstruct | 64
-rw-r--r--  deps/v8/include/v8.h | 352
-rwxr-xr-x  deps/v8/src/SConscript | 42
-rw-r--r--  deps/v8/src/api.cc | 185
-rw-r--r--  deps/v8/src/api.h | 24
-rw-r--r--  deps/v8/src/apiutils.h | 2
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 2
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 100
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 17
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 82
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 64
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 43
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 16
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 1192
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h | 226
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 173
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 27
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 14
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc | 26
-rw-r--r--  deps/v8/src/assembler.cc | 42
-rw-r--r--  deps/v8/src/assembler.h | 13
-rw-r--r--  deps/v8/src/builtins.cc | 81
-rw-r--r--  deps/v8/src/checks.h | 32
-rw-r--r--  deps/v8/src/code-stubs.h | 1
-rw-r--r--  deps/v8/src/compiler.cc | 7
-rw-r--r--  deps/v8/src/d8.js | 2
-rw-r--r--  deps/v8/src/debug-delay.js | 9
-rw-r--r--  deps/v8/src/debug.cc | 6
-rw-r--r--  deps/v8/src/execution.cc | 16
-rw-r--r--  deps/v8/src/execution.h | 22
-rw-r--r--  deps/v8/src/frames-inl.h | 5
-rw-r--r--  deps/v8/src/frames.h | 9
-rw-r--r--  deps/v8/src/globals.h | 23
-rw-r--r--  deps/v8/src/handles.cc | 12
-rw-r--r--  deps/v8/src/handles.h | 6
-rw-r--r--  deps/v8/src/heap.cc | 99
-rw-r--r--  deps/v8/src/heap.h | 164
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 37
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 42
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 2
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 140
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 49
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 41
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 23
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.h | 5
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 129
-rw-r--r--  deps/v8/src/jsregexp.cc | 50
-rw-r--r--  deps/v8/src/mark-compact.cc | 2
-rw-r--r--  deps/v8/src/messages.js | 2
-rw-r--r--  deps/v8/src/objects-debug.cc | 2
-rw-r--r--  deps/v8/src/objects-inl.h | 12
-rw-r--r--  deps/v8/src/objects.cc | 53
-rw-r--r--  deps/v8/src/objects.h | 18
-rw-r--r--  deps/v8/src/parser.cc | 9
-rw-r--r--  deps/v8/src/platform-win32.cc | 7
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp-inl.h | 2
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.cc | 2
-rw-r--r--  deps/v8/src/regexp-macro-assembler-irregexp.h | 3
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.h | 2
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 57
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 27
-rw-r--r--  deps/v8/src/runtime.cc | 63
-rw-r--r--  deps/v8/src/runtime.h | 3
-rw-r--r--  deps/v8/src/serialize.cc | 26
-rw-r--r--  deps/v8/src/stub-cache.cc | 12
-rw-r--r--  deps/v8/src/stub-cache.h | 11
-rw-r--r--  deps/v8/src/top.cc | 13
-rw-r--r--  deps/v8/src/top.h | 4
-rw-r--r--  deps/v8/src/v8-counters.h | 1
-rw-r--r--  deps/v8/src/v8.cc | 4
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 18
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 7
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 72
-rw-r--r--  deps/v8/src/x64/cfg-x64.cc | 3
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 481
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 8
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 3
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 82
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 200
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 53
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 78
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 27
-rw-r--r--  deps/v8/src/x64/register-allocator-x64-inl.h | 20
-rw-r--r--  deps/v8/src/x64/register-allocator-x64.h | 4
-rw-r--r--  deps/v8/src/x64/simulator-x64.h | 5
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 142
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc | 8
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 1
-rw-r--r--  deps/v8/test/cctest/cctest.status | 4
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 61
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-log-stack-tracer.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 39
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc | 60
-rw-r--r--  deps/v8/test/cctest/test-utils.cc | 2
-rw-r--r--  deps/v8/test/mjsunit/debug-stepin-constructor.js | 4
-rwxr-xr-x  deps/v8/test/mjsunit/simple-constructor.js | 66
-rw-r--r--  deps/v8/test/mjsunit/transcendentals.js | 49
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 17
-rwxr-xr-x  deps/v8/tools/js2c.py | 45
-rw-r--r--  deps/v8/tools/v8.xcodeproj/project.pbxproj | 2
-rw-r--r--  deps/v8/tools/visual_studio/debug.vsprops | 2
106 files changed, 4662 insertions(+), 997 deletions(-)
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index d9d2b02ba..b07e7cc15 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,30 @@
+2009-09-02: Version 1.3.9
+
+ Optimized stack guard checks on ARM.
+
+ Optimized API operations by inlining more in the API.
+
+ Optimized creation of objects from simple constructor functions.
+
+ Enabled a number of missing optimizations in the 64-bit port.
+
+ Implemented native-code support for regular expressions on ARM.
+
+ Stopped using the 'sahf' instruction on 64-bit machines that do
+ not support it.
+
+ Fixed a bug in the support for forceful termination of JavaScript
+ execution.
+
+
+2009-08-26: Version 1.3.8
+
+ Changed the handling of idle notifications to allow idle
+ notifications when V8 has not yet been initialized.
+
+ Fixed ARM simulator compilation problem on Windows.
+
+
2009-08-25: Version 1.3.7
Reduced the size of generated code on ARM platforms by reducing
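
The "Optimized API operations by inlining more in the API" item above corresponds to the pattern visible in the v8.h hunks below: hot API calls gain an inline fast path that is compiled directly into the embedder, while a checked out-of-line path stays in the library and is selected at compile time by V8_ENABLE_CHECKS. A condensed sketch of that pattern, using names taken from this diff (not the complete implementation):

    // Fast path inlined into the embedder; checked path stays in api.cc.
    class Value {
     public:
      inline bool IsString() const {
    #ifdef V8_ENABLE_CHECKS
        return FullIsString();    // out-of-line; also ASSERTs the two agree
    #else
        return QuickIsString();   // reads the object's map/instance type inline
    #endif
      }
     private:
      inline bool QuickIsString() const;
      bool FullIsString() const;
    };
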
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 5e9747c93..71673c0f0 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -99,12 +99,10 @@ LIBRARY_FLAGS = {
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
'CPPPATH': [join(root_dir, 'src')],
'regexp:native': {
- 'arch:ia32' : {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
- },
- 'arch:x64' : {
- 'CPPDEFINES': ['V8_NATIVE_REGEXP']
- }
+ },
+ 'mode:debug': {
+ 'CPPDEFINES': ['V8_ENABLE_CHECKS']
}
},
'gcc': {
@@ -178,17 +176,25 @@ LIBRARY_FLAGS = {
},
'msvc': {
'all': {
- 'DIALECTFLAGS': ['/nologo'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
- 'CPPDEFINES': ['WIN32', '_USE_32BIT_TIME_T'],
- 'LINKFLAGS': ['/NOLOGO', '/MACHINE:X86', '/INCREMENTAL:NO',
- '/NXCOMPAT', '/IGNORE:4221'],
- 'ARFLAGS': ['/NOLOGO'],
+ 'CPPDEFINES': ['WIN32'],
+ 'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
'CCPDBFLAGS': ['/Zi']
},
+ 'verbose:off': {
+ 'DIALECTFLAGS': ['/nologo'],
+ 'ARFLAGS': ['/NOLOGO']
+ },
'arch:ia32': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
+ 'CPPDEFINES': ['V8_TARGET_ARCH_IA32', '_USE_32BIT_TIME_T'],
+ 'LINKFLAGS': ['/MACHINE:X86'],
+ 'ARFLAGS': ['/MACHINE:X86']
+ },
+ 'arch:x64': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+ 'LINKFLAGS': ['/MACHINE:X64'],
+ 'ARFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od', '/Gm'],
@@ -250,11 +256,13 @@ V8_EXTRA_FLAGS = {
},
'msvc': {
'all': {
- 'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
+ 'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800']
},
- 'library:shared': {
- 'CPPDEFINES': ['BUILDING_V8_SHARED'],
- 'LIBS': ['winmm', 'ws2_32']
+ 'arch:ia32': {
+ 'WARNINGFLAGS': ['/W3']
+ },
+ 'arch:x64': {
+ 'WARNINGFLAGS': ['/W2']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
@@ -352,7 +360,10 @@ CCTEST_EXTRA_FLAGS = {
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
- }
+ },
+ 'arch:x64': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64']
+ },
}
}
@@ -417,10 +428,15 @@ SAMPLE_FLAGS = {
},
'msvc': {
'all': {
- 'CCFLAGS': ['/nologo'],
- 'LINKFLAGS': ['/nologo'],
'LIBS': ['winmm', 'ws2_32']
},
+ 'verbose:off': {
+ 'CCFLAGS': ['/nologo'],
+ 'LINKFLAGS': ['/NOLOGO']
+ },
+ 'verbose:on': {
+ 'LINKFLAGS': ['/VERBOSE']
+ },
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
@@ -442,7 +458,12 @@ SAMPLE_FLAGS = {
}
},
'arch:ia32': {
- 'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
+ 'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
+ 'LINKFLAGS': ['/MACHINE:X86']
+ },
+ 'arch:x64': {
+ 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
+ 'LINKFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od'],
@@ -585,6 +606,11 @@ SIMPLE_OPTIONS = {
'values': ['dumb', 'readline'],
'default': 'dumb',
'help': 'the console to use for the d8 shell'
+ },
+ 'verbose': {
+ 'values': ['on', 'off'],
+ 'default': 'off',
+ 'help': 'more output from compiler and linker'
}
}
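
The SConstruct changes above fold the /nologo and /NOLOGO flags behind a new 'verbose' build option (values 'on'/'off', default 'off'), so compiler and linker banners are suppressed only in quiet builds, and on MSVC a verbose build additionally passes /VERBOSE to the linker. Presumably the option is given on the scons command line like the other SIMPLE_OPTIONS, e.g. scons verbose=on.
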
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index a40c068ed..346050d5d 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -127,6 +127,12 @@ class FunctionTemplate;
class ObjectTemplate;
class Data;
+namespace internal {
+
+class Object;
+
+}
+
// --- W e a k H a n d l e s
@@ -227,8 +233,8 @@ template <class T> class V8EXPORT_INLINE Handle {
* The handles' references are not checked.
*/
template <class S> bool operator==(Handle<S> that) const {
- void** a = reinterpret_cast<void**>(**this);
- void** b = reinterpret_cast<void**>(*that);
+ internal::Object** a = reinterpret_cast<internal::Object**>(**this);
+ internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
if (b == 0) return false;
return *a == *b;
@@ -245,7 +251,11 @@ template <class T> class V8EXPORT_INLINE Handle {
}
template <class S> static inline Handle<T> Cast(Handle<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
if (that.IsEmpty()) return Handle<T>();
+#endif
return Handle<T>(T::Cast(*that));
}
@@ -275,7 +285,11 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
}
template <class S> inline Local(S* that) : Handle<T>(that) { }
template <class S> static inline Local<T> Cast(Local<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
if (that.IsEmpty()) return Local<T>();
+#endif
return Local<T>(T::Cast(*that));
}
@@ -344,7 +358,11 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
: Handle<T>(*that) { }
template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
if (that.IsEmpty()) return Persistent<T>();
+#endif
return Persistent<T>(T::Cast(*that));
}
@@ -423,7 +441,7 @@ class V8EXPORT HandleScope {
/**
* Creates a new handle with the given value.
*/
- static void** CreateHandle(void* value);
+ static internal::Object** CreateHandle(internal::Object* value);
private:
// Make it impossible to create heap-allocated or illegal handle
@@ -438,8 +456,8 @@ class V8EXPORT HandleScope {
class V8EXPORT Data {
public:
int extensions;
- void** next;
- void** limit;
+ internal::Object** next;
+ internal::Object** limit;
inline void Initialize() {
extensions = -1;
next = limit = NULL;
@@ -451,7 +469,7 @@ class V8EXPORT HandleScope {
// Allow for the active closing of HandleScopes which allows to pass a handle
// from the HandleScope being closed to the next top most HandleScope.
bool is_closed_;
- void** RawClose(void** value);
+ internal::Object** RawClose(internal::Object** value);
friend class ImplementationUtilities;
};
@@ -671,7 +689,7 @@ class V8EXPORT Value : public Data {
* Returns true if this value is an instance of the String type.
* See ECMA-262 8.4.
*/
- bool IsString() const;
+ inline bool IsString() const;
/**
* Returns true if this value is a function.
@@ -737,6 +755,10 @@ class V8EXPORT Value : public Data {
/** JS == */
bool Equals(Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
+
+ private:
+ inline bool QuickIsString() const;
+ bool FullIsString() const;
};
@@ -868,7 +890,7 @@ class V8EXPORT String : public Primitive {
* Get the ExternalStringResource for an external string. Returns
* NULL if IsExternal() doesn't return true.
*/
- ExternalStringResource* GetExternalStringResource() const;
+ inline ExternalStringResource* GetExternalStringResource() const;
/**
* Get the ExternalAsciiStringResource for an external ascii string.
@@ -876,7 +898,7 @@ class V8EXPORT String : public Primitive {
*/
ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
- static String* Cast(v8::Value* obj);
+ static inline String* Cast(v8::Value* obj);
/**
* Allocates a new string from either utf-8 encoded or ascii data.
@@ -1010,6 +1032,10 @@ class V8EXPORT String : public Primitive {
Value(const Value&);
void operator=(const Value&);
};
+
+ private:
+ void VerifyExternalStringResource(ExternalStringResource* val) const;
+ static void CheckCast(v8::Value* obj);
};
@@ -1020,9 +1046,10 @@ class V8EXPORT Number : public Primitive {
public:
double Value() const;
static Local<Number> New(double value);
- static Number* Cast(v8::Value* obj);
+ static inline Number* Cast(v8::Value* obj);
private:
Number();
+ static void CheckCast(v8::Value* obj);
};
@@ -1033,9 +1060,10 @@ class V8EXPORT Integer : public Number {
public:
static Local<Integer> New(int32_t value);
int64_t Value() const;
- static Integer* Cast(v8::Value* obj);
+ static inline Integer* Cast(v8::Value* obj);
private:
Integer();
+ static void CheckCast(v8::Value* obj);
};
@@ -1074,7 +1102,9 @@ class V8EXPORT Date : public Value {
*/
double NumberValue() const;
- static Date* Cast(v8::Value* obj);
+ static inline Date* Cast(v8::Value* obj);
+ private:
+ static void CheckCast(v8::Value* obj);
};
@@ -1153,14 +1183,13 @@ class V8EXPORT Object : public Value {
/** Gets the number of internal fields for this Object. */
int InternalFieldCount();
/** Gets the value in an internal field. */
- Local<Value> GetInternalField(int index);
+ inline Local<Value> GetInternalField(int index);
/** Sets the value in an internal field. */
void SetInternalField(int index, Handle<Value> value);
- // The two functions below do not perform index bounds checks and
- // they do not check that the VM is still running. Use with caution.
/** Gets a native pointer from an internal field. */
- void* GetPointerFromInternalField(int index);
+ inline void* GetPointerFromInternalField(int index);
+
/** Sets a native pointer in an internal field. */
void SetPointerInInternalField(int index, void* value);
@@ -1223,9 +1252,17 @@ class V8EXPORT Object : public Value {
void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
static Local<Object> New();
- static Object* Cast(Value* obj);
+ static inline Object* Cast(Value* obj);
private:
Object();
+ static void CheckCast(Value* obj);
+ Local<Value> CheckedGetInternalField(int index);
+
+ /**
+ * If quick access to the internal field is possible this method
+ * returns the value. Otherwise an empty handle is returned.
+ */
+ inline Local<Value> UncheckedGetInternalField(int index);
};
@@ -1243,9 +1280,10 @@ class V8EXPORT Array : public Object {
Local<Object> CloneElementAt(uint32_t index);
static Local<Array> New(int length = 0);
- static Array* Cast(Value* obj);
+ static inline Array* Cast(Value* obj);
private:
Array();
+ static void CheckCast(Value* obj);
};
@@ -1259,9 +1297,10 @@ class V8EXPORT Function : public Object {
Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
void SetName(Handle<String> name);
Handle<Value> GetName() const;
- static Function* Cast(Value* obj);
+ static inline Function* Cast(Value* obj);
private:
Function();
+ static void CheckCast(Value* obj);
};
@@ -1279,13 +1318,16 @@ class V8EXPORT Function : public Object {
class V8EXPORT External : public Value {
public:
static Local<Value> Wrap(void* data);
- static void* Unwrap(Handle<Value> obj);
+ static inline void* Unwrap(Handle<Value> obj);
static Local<External> New(void* value);
- static External* Cast(Value* obj);
+ static inline External* Cast(Value* obj);
void* Value() const;
private:
External();
+ static void CheckCast(v8::Value* obj);
+ static inline void* QuickUnwrap(Handle<v8::Value> obj);
+ static void* FullUnwrap(Handle<v8::Value> obj);
};
@@ -2297,12 +2339,14 @@ class V8EXPORT V8 {
private:
V8();
- static void** GlobalizeReference(void** handle);
- static void DisposeGlobal(void** global_handle);
- static void MakeWeak(void** global_handle, void* data, WeakReferenceCallback);
- static void ClearWeak(void** global_handle);
- static bool IsGlobalNearDeath(void** global_handle);
- static bool IsGlobalWeak(void** global_handle);
+ static internal::Object** GlobalizeReference(internal::Object** handle);
+ static void DisposeGlobal(internal::Object** global_handle);
+ static void MakeWeak(internal::Object** global_handle,
+ void* data,
+ WeakReferenceCallback);
+ static void ClearWeak(internal::Object** global_handle);
+ static bool IsGlobalNearDeath(internal::Object** global_handle);
+ static bool IsGlobalWeak(internal::Object** global_handle);
template <class T> friend class Handle;
template <class T> friend class Local;
@@ -2641,6 +2685,76 @@ class V8EXPORT Locker {
// --- I m p l e m e n t a t i o n ---
+
+namespace internal {
+
+
+// Tag information for HeapObject.
+const int kHeapObjectTag = 1;
+const int kHeapObjectTagSize = 2;
+const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+
+
+// Tag information for Smi.
+const int kSmiTag = 0;
+const int kSmiTagSize = 1;
+const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
+
+
+/**
+ * This class exports constants and functionality from within v8 that
+ * is necessary to implement inline functions in the v8 api. Don't
+ * depend on functions and constants defined here.
+ */
+class Internals {
+ public:
+
+ // These values match non-compiler-dependent values defined within
+ // the implementation of v8.
+ static const int kHeapObjectMapOffset = 0;
+ static const int kMapInstanceTypeOffset = sizeof(void*) + sizeof(int);
+ static const int kStringResourceOffset = 2 * sizeof(void*);
+ static const int kProxyProxyOffset = sizeof(void*);
+ static const int kJSObjectHeaderSize = 3 * sizeof(void*);
+ static const int kFullStringRepresentationMask = 0x07;
+ static const int kExternalTwoByteRepresentationTag = 0x03;
+ static const int kAlignedPointerShift = 2;
+
+ // These constants are compiler dependent so their values must be
+ // defined within the implementation.
+ static int kJSObjectType;
+ static int kFirstNonstringType;
+ static int kProxyType;
+
+ static inline bool HasHeapObjectTag(internal::Object* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
+ kHeapObjectTag);
+ }
+
+ static inline bool HasSmiTag(internal::Object* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
+ }
+
+ static inline int SmiValue(internal::Object* value) {
+ return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
+ }
+
+ static inline bool IsExternalTwoByteString(int instance_type) {
+ int representation = (instance_type & kFullStringRepresentationMask);
+ return representation == kExternalTwoByteRepresentationTag;
+ }
+
+ template <typename T>
+ static inline T ReadField(Object* ptr, int offset) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
+ return *reinterpret_cast<T*>(addr);
+ }
+
+};
+
+}
+
+
template <class T>
Handle<T>::Handle() : val_(0) { }
@@ -2652,7 +2766,7 @@ Local<T>::Local() : Handle<T>() { }
template <class T>
Local<T> Local<T>::New(Handle<T> that) {
if (that.IsEmpty()) return Local<T>();
- void** p = reinterpret_cast<void**>(*that);
+ internal::Object** p = reinterpret_cast<internal::Object**>(*that);
return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(*p)));
}
@@ -2660,7 +2774,7 @@ Local<T> Local<T>::New(Handle<T> that) {
template <class T>
Persistent<T> Persistent<T>::New(Handle<T> that) {
if (that.IsEmpty()) return Persistent<T>();
- void** p = reinterpret_cast<void**>(*that);
+ internal::Object** p = reinterpret_cast<internal::Object**>(*that);
return Persistent<T>(reinterpret_cast<T*>(V8::GlobalizeReference(p)));
}
@@ -2668,21 +2782,21 @@ Persistent<T> Persistent<T>::New(Handle<T> that) {
template <class T>
bool Persistent<T>::IsNearDeath() const {
if (this->IsEmpty()) return false;
- return V8::IsGlobalNearDeath(reinterpret_cast<void**>(**this));
+ return V8::IsGlobalNearDeath(reinterpret_cast<internal::Object**>(**this));
}
template <class T>
bool Persistent<T>::IsWeak() const {
if (this->IsEmpty()) return false;
- return V8::IsGlobalWeak(reinterpret_cast<void**>(**this));
+ return V8::IsGlobalWeak(reinterpret_cast<internal::Object**>(**this));
}
template <class T>
void Persistent<T>::Dispose() {
if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<void**>(**this));
+ V8::DisposeGlobal(reinterpret_cast<internal::Object**>(**this));
}
@@ -2691,12 +2805,14 @@ Persistent<T>::Persistent() : Handle<T>() { }
template <class T>
void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
- V8::MakeWeak(reinterpret_cast<void**>(**this), parameters, callback);
+ V8::MakeWeak(reinterpret_cast<internal::Object**>(**this),
+ parameters,
+ callback);
}
template <class T>
void Persistent<T>::ClearWeak() {
- V8::ClearWeak(reinterpret_cast<void**>(**this));
+ V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
}
Local<Value> Arguments::operator[](int i) const {
@@ -2752,7 +2868,8 @@ Local<Object> AccessorInfo::Holder() const {
template <class T>
Local<T> HandleScope::Close(Handle<T> value) {
- void** after = RawClose(reinterpret_cast<void**>(*value));
+ internal::Object** before = reinterpret_cast<internal::Object**>(*value);
+ internal::Object** after = RawClose(before);
return Local<T>(reinterpret_cast<T*>(after));
}
@@ -2781,6 +2898,171 @@ void Template::Set(const char* name, v8::Handle<Data> value) {
}
+Local<Value> Object::GetInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
+ Local<Value> quick_result = UncheckedGetInternalField(index);
+ if (!quick_result.IsEmpty()) return quick_result;
+#endif
+ return CheckedGetInternalField(index);
+}
+
+
+Local<Value> Object::UncheckedGetInternalField(int index) {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(this);
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ if (instance_type == I::kJSObjectType) {
+ // If the object is a plain JSObject, which is the common case,
+ // we know where to find the internal fields and can return the
+ // value directly.
+ int offset = I::kJSObjectHeaderSize + (sizeof(void*) * index);
+ O* value = I::ReadField<O*>(obj, offset);
+ O** result = HandleScope::CreateHandle(value);
+ return Local<Value>(reinterpret_cast<Value*>(result));
+ } else {
+ return Local<Value>();
+ }
+}
+
+
+void* External::Unwrap(Handle<v8::Value> obj) {
+#ifdef V8_ENABLE_CHECKS
+ return FullUnwrap(obj);
+#else
+ return QuickUnwrap(obj);
+#endif
+}
+
+
+void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
+ if (I::HasSmiTag(obj)) {
+ int value = I::SmiValue(obj) << I::kAlignedPointerShift;
+ return reinterpret_cast<void*>(value);
+ } else {
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ if (instance_type == I::kProxyType) {
+ return I::ReadField<void*>(obj, I::kProxyProxyOffset);
+ } else {
+ return NULL;
+ }
+ }
+}
+
+
+void* Object::GetPointerFromInternalField(int index) {
+ return External::Unwrap(GetInternalField(index));
+}
+
+
+String* String::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<String*>(value);
+}
+
+
+String::ExternalStringResource* String::GetExternalStringResource() const {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ String::ExternalStringResource* result;
+ if (I::IsExternalTwoByteString(instance_type)) {
+ void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
+ result = reinterpret_cast<String::ExternalStringResource*>(value);
+ } else {
+ result = NULL;
+ }
+#ifdef V8_ENABLE_CHECKS
+ VerifyExternalStringResource(result);
+#endif
+ return result;
+}
+
+
+bool Value::IsString() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsString();
+#else
+ return QuickIsString();
+#endif
+}
+
+bool Value::QuickIsString() const {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+ if (!I::HasHeapObjectTag(obj)) return false;
+ O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
+ int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
+ return (instance_type < I::kFirstNonstringType);
+}
+
+
+Number* Number::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Number*>(value);
+}
+
+
+Integer* Integer::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Integer*>(value);
+}
+
+
+Date* Date::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Date*>(value);
+}
+
+
+Object* Object::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Object*>(value);
+}
+
+
+Array* Array::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Array*>(value);
+}
+
+
+Function* Function::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Function*>(value);
+}
+
+
+External* External::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<External*>(value);
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
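
The new internal::Internals section above hard-codes V8's pointer-tagging scheme so these inline paths can decode values without crossing into the library: with kSmiTag == 0 and kSmiTagSize == 1, a small integer n travels as the tagged word n << 1, and External::Wrap additionally drops the two zero low bits of an aligned pointer before smi-encoding it. A minimal standalone sketch of that arithmetic (illustration only; helper names are mine, constants are from the header):

    #include <assert.h>
    #include <stdint.h>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // low bit; kSmiTag == 0
    const int kAlignedPointerShift = 2;

    intptr_t EncodeSmi(int n) { return static_cast<intptr_t>(n) << kSmiTagSize; }
    bool HasSmiTag(intptr_t v) { return (v & kSmiTagMask) == 0; }
    int SmiValue(intptr_t v) { return static_cast<int>(v >> kSmiTagSize); }

    int main() {
      assert(HasSmiTag(EncodeSmi(21)) && SmiValue(EncodeSmi(21)) == 21);
      // Aligned-pointer round trip in the style of External::Wrap / QuickUnwrap:
      uintptr_t ptr = 0x1000;  // low two bits zero, i.e. 4-byte aligned
      intptr_t smi = EncodeSmi(static_cast<int>(ptr >> kAlignedPointerShift));
      assert((static_cast<uintptr_t>(SmiValue(smi)) << kAlignedPointerShift) == ptr);
      return 0;
    }
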
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript
index 6a38c1a79..fee3fab43 100755
--- a/deps/v8/src/SConscript
+++ b/deps/v8/src/SConscript
@@ -63,32 +63,22 @@ SOURCES = {
'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
'arm/virtual-frame-arm.cc'
],
- 'arch:ia32': {
- 'all': [
- 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
- 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
- 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
- 'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
- 'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
- 'ia32/virtual-frame-ia32.cc'
- ],
- 'regexp:native': [
- 'ia32/regexp-macro-assembler-ia32.cc',
- ]
- },
- 'arch:x64': {
- 'all': [
- 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
- 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
- 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
- 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
- 'x64/register-allocator-x64.cc',
- 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
- ],
- 'regexp:native': [
- 'x64/regexp-macro-assembler-x64.cc'
- ]
- },
+ 'arch:ia32': [
+ 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc', 'ia32/cfg-ia32.cc',
+ 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
+ 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
+ 'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
+ 'ia32/regexp-macro-assembler-ia32.cc', 'ia32/register-allocator-ia32.cc',
+ 'ia32/stub-cache-ia32.cc', 'ia32/virtual-frame-ia32.cc'
+ ],
+ 'arch:x64': [
+ 'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/cfg-x64.cc',
+ 'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
+ 'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
+ 'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
+ 'x64/regexp-macro-assembler-x64.cc', 'x64/register-allocator-x64.cc',
+ 'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
+ ],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index bb38356ec..679e038d3 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -75,7 +75,7 @@ namespace v8 {
i::V8::FatalProcessOutOfMemory(NULL); \
} \
bool call_depth_is_zero = thread_local.CallDepthIsZero(); \
- i::Top::OptionalRescheduleException(call_depth_is_zero, false); \
+ i::Top::OptionalRescheduleException(call_depth_is_zero); \
return value; \
} \
} while (false)
@@ -100,7 +100,9 @@ static i::HandleScopeImplementer thread_local;
static FatalErrorCallback exception_behavior = NULL;
-
+int i::Internals::kJSObjectType = JS_OBJECT_TYPE;
+int i::Internals::kFirstNonstringType = FIRST_NONSTRING_TYPE;
+int i::Internals::kProxyType = PROXY_TYPE;
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
@@ -223,7 +225,8 @@ ImplementationUtilities::HandleScopeData*
#ifdef DEBUG
-void ImplementationUtilities::ZapHandleRange(void** begin, void** end) {
+void ImplementationUtilities::ZapHandleRange(i::Object** begin,
+ i::Object** end) {
i::HandleScope::ZapRange(begin, end);
}
#endif
@@ -349,49 +352,47 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
}
-void** V8::GlobalizeReference(void** obj) {
+i::Object** V8::GlobalizeReference(i::Object** obj) {
if (IsDeadCheck("V8::Persistent::New")) return NULL;
LOG_API("Persistent::New");
i::Handle<i::Object> result =
- i::GlobalHandles::Create(*reinterpret_cast<i::Object**>(obj));
- return reinterpret_cast<void**>(result.location());
+ i::GlobalHandles::Create(*obj);
+ return result.location();
}
-void V8::MakeWeak(void** object, void* parameters,
+void V8::MakeWeak(i::Object** object, void* parameters,
WeakReferenceCallback callback) {
LOG_API("MakeWeak");
- i::GlobalHandles::MakeWeak(reinterpret_cast<i::Object**>(object), parameters,
- callback);
+ i::GlobalHandles::MakeWeak(object, parameters, callback);
}
-void V8::ClearWeak(void** obj) {
+void V8::ClearWeak(i::Object** obj) {
LOG_API("ClearWeak");
- i::GlobalHandles::ClearWeakness(reinterpret_cast<i::Object**>(obj));
+ i::GlobalHandles::ClearWeakness(obj);
}
-bool V8::IsGlobalNearDeath(void** obj) {
+bool V8::IsGlobalNearDeath(i::Object** obj) {
LOG_API("IsGlobalNearDeath");
if (!i::V8::IsRunning()) return false;
- return i::GlobalHandles::IsNearDeath(reinterpret_cast<i::Object**>(obj));
+ return i::GlobalHandles::IsNearDeath(obj);
}
-bool V8::IsGlobalWeak(void** obj) {
+bool V8::IsGlobalWeak(i::Object** obj) {
LOG_API("IsGlobalWeak");
if (!i::V8::IsRunning()) return false;
- return i::GlobalHandles::IsWeak(reinterpret_cast<i::Object**>(obj));
+ return i::GlobalHandles::IsWeak(obj);
}
-void V8::DisposeGlobal(void** obj) {
+void V8::DisposeGlobal(i::Object** obj) {
LOG_API("DisposeGlobal");
if (!i::V8::IsRunning()) return;
- i::Object** ptr = reinterpret_cast<i::Object**>(obj);
- if ((*ptr)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
- i::GlobalHandles::Destroy(ptr);
+ if ((*obj)->IsGlobalContext()) i::Heap::NotifyContextDisposed();
+ i::GlobalHandles::Destroy(obj);
}
// --- H a n d l e s ---
@@ -415,9 +416,8 @@ int HandleScope::NumberOfHandles() {
}
-void** v8::HandleScope::CreateHandle(void* value) {
- return reinterpret_cast<void**>(
- i::HandleScope::CreateHandle(reinterpret_cast<i::Object*>(value)));
+i::Object** v8::HandleScope::CreateHandle(i::Object* value) {
+ return i::HandleScope::CreateHandle(value);
}
@@ -481,7 +481,7 @@ v8::Local<v8::Value> Context::GetData() {
}
-void** v8::HandleScope::RawClose(void** value) {
+i::Object** v8::HandleScope::RawClose(i::Object** value) {
if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()",
"Local scope has already been closed")) {
@@ -490,13 +490,13 @@ void** v8::HandleScope::RawClose(void** value) {
LOG_API("CloseHandleScope");
// Read the result before popping the handle block.
- i::Object* result = reinterpret_cast<i::Object*>(*value);
+ i::Object* result = *value;
is_closed_ = true;
i::HandleScope::Leave(&previous_);
// Allocate a new handle on the previous handle block.
i::Handle<i::Object> handle(result);
- return reinterpret_cast<void**>(handle.location());
+ return handle.location();
}
@@ -1459,9 +1459,11 @@ bool Value::IsFunction() const {
}
-bool Value::IsString() const {
+bool Value::FullIsString() const {
if (IsDeadCheck("v8::Value::IsString()")) return false;
- return Utils::OpenHandle(this)->IsString();
+ bool result = Utils::OpenHandle(this)->IsString();
+ ASSERT_EQ(result, QuickIsString());
+ return result;
}
@@ -1613,83 +1615,75 @@ Local<Integer> Value::ToInteger() const {
}
-External* External::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::External::Cast()")) return 0;
+void External::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::External::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsProxy(),
"v8::External::Cast()",
"Could not convert to external");
- return static_cast<External*>(that);
}
-v8::Object* v8::Object::Cast(Value* that) {
- if (IsDeadCheck("v8::Object::Cast()")) return 0;
+void v8::Object::CheckCast(Value* that) {
+ if (IsDeadCheck("v8::Object::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSObject(),
"v8::Object::Cast()",
"Could not convert to object");
- return static_cast<v8::Object*>(that);
}
-v8::Function* v8::Function::Cast(Value* that) {
- if (IsDeadCheck("v8::Function::Cast()")) return 0;
+void v8::Function::CheckCast(Value* that) {
+ if (IsDeadCheck("v8::Function::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSFunction(),
"v8::Function::Cast()",
"Could not convert to function");
- return static_cast<v8::Function*>(that);
}
-v8::String* v8::String::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::String::Cast()")) return 0;
+void v8::String::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::String::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsString(),
"v8::String::Cast()",
"Could not convert to string");
- return static_cast<v8::String*>(that);
}
-v8::Number* v8::Number::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::Number::Cast()")) return 0;
+void v8::Number::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Number::Cast()",
"Could not convert to number");
- return static_cast<v8::Number*>(that);
}
-v8::Integer* v8::Integer::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::Integer::Cast()")) return 0;
+void v8::Integer::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::Integer::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsNumber(),
"v8::Integer::Cast()",
"Could not convert to number");
- return static_cast<v8::Integer*>(that);
}
-v8::Array* v8::Array::Cast(Value* that) {
- if (IsDeadCheck("v8::Array::Cast()")) return 0;
+void v8::Array::CheckCast(Value* that) {
+ if (IsDeadCheck("v8::Array::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsJSArray(),
"v8::Array::Cast()",
"Could not convert to array");
- return static_cast<v8::Array*>(that);
}
-v8::Date* v8::Date::Cast(v8::Value* that) {
- if (IsDeadCheck("v8::Date::Cast()")) return 0;
+void v8::Date::CheckCast(v8::Value* that) {
+ if (IsDeadCheck("v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(i::Heap::Date_symbol()),
"v8::Date::Cast()",
"Could not convert to date");
- return static_cast<v8::Date*>(that);
}
@@ -2450,16 +2444,17 @@ bool v8::String::IsExternalAscii() const {
}
-v8::String::ExternalStringResource*
-v8::String::GetExternalStringResource() const {
- EnsureInitialized("v8::String::GetExternalStringResource()");
+void v8::String::VerifyExternalStringResource(
+ v8::String::ExternalStringResource* value) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
+ v8::String::ExternalStringResource* expected;
if (i::StringShape(*str).IsExternalTwoByte()) {
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
- return reinterpret_cast<ExternalStringResource*>(resource);
+ expected = reinterpret_cast<ExternalStringResource*>(resource);
} else {
- return NULL;
+ expected = NULL;
}
+ CHECK_EQ(expected, value);
}
@@ -2519,7 +2514,7 @@ int v8::Object::InternalFieldCount() {
}
-Local<Value> v8::Object::GetInternalField(int index) {
+Local<Value> v8::Object::CheckedGetInternalField(int index) {
if (IsDeadCheck("v8::Object::GetInternalField()")) return Local<Value>();
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
if (!ApiCheck(index < obj->GetInternalFieldCount(),
@@ -2528,7 +2523,12 @@ Local<Value> v8::Object::GetInternalField(int index) {
return Local<Value>();
}
i::Handle<i::Object> value(obj->GetInternalField(index));
- return Utils::ToLocal(value);
+ Local<Value> result = Utils::ToLocal(value);
+#ifdef DEBUG
+ Local<Value> unchecked = UncheckedGetInternalField(index);
+ ASSERT(unchecked.IsEmpty() || (unchecked == result));
+#endif
+ return result;
}
@@ -2546,41 +2546,8 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
}
-void* v8::Object::GetPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* pointer = obj->GetInternalField(index);
- if (pointer->IsSmi()) {
- // Fast case, aligned native pointer.
- return pointer;
- }
-
- // Read from uninitialized field.
- if (!pointer->IsProxy()) {
- // Play safe even if it's something unexpected.
- ASSERT(pointer->IsUndefined());
- return NULL;
- }
-
- // Unaligned native pointer.
- return reinterpret_cast<void*>(i::Proxy::cast(pointer)->proxy());
-}
-
-
void v8::Object::SetPointerInInternalField(int index, void* value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* as_object = reinterpret_cast<i::Object*>(value);
- if (as_object->IsSmi()) {
- // Aligned pointer, store as is.
- obj->SetInternalField(index, as_object);
- } else {
- // Currently internal fields are used by DOM wrappers which only
- // get garbage collected by the mark-sweep collector, so we
- // pretenure the proxy.
- HandleScope scope;
- i::Handle<i::Proxy> proxy =
- i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
- if (!proxy.is_null()) obj->SetInternalField(index, *proxy);
- }
+ SetInternalField(index, External::Wrap(value));
}
@@ -2605,12 +2572,14 @@ bool v8::V8::Dispose() {
bool v8::V8::IdleNotification(bool is_high_priority) {
+ if (!i::V8::IsRunning()) return false;
return i::V8::IdleNotification(is_high_priority);
}
void v8::V8::LowMemoryNotification() {
#if defined(ANDROID)
+ if (!i::V8::IsRunning()) return;
i::Heap::CollectAllGarbage(true);
#endif
}
@@ -2836,8 +2805,6 @@ static void* ExternalValueImpl(i::Handle<i::Object> obj) {
static const intptr_t kAlignedPointerMask = 3;
-static const int kAlignedPointerShift = 2;
-
Local<Value> v8::External::Wrap(void* data) {
STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
@@ -2847,7 +2814,7 @@ Local<Value> v8::External::Wrap(void* data) {
if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
intptr_t data_value =
- static_cast<intptr_t>(data_ptr >> kAlignedPointerShift);
+ static_cast<intptr_t>(data_ptr >> i::Internals::kAlignedPointerShift);
STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
if (i::Smi::IsIntptrValid(data_value)) {
i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
@@ -2858,16 +2825,22 @@ Local<Value> v8::External::Wrap(void* data) {
}
-void* v8::External::Unwrap(v8::Handle<v8::Value> value) {
+void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
if (IsDeadCheck("v8::External::Unwrap()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(*value);
+ i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
+ void* result;
if (obj->IsSmi()) {
// The external value was an aligned pointer.
- uintptr_t result = static_cast<uintptr_t>(
- i::Smi::cast(*obj)->value()) << kAlignedPointerShift;
- return reinterpret_cast<void*>(result);
+ uintptr_t value = static_cast<uintptr_t>(
+ i::Smi::cast(*obj)->value()) << i::Internals::kAlignedPointerShift;
+ result = reinterpret_cast<void*>(value);
+ } else if (obj->IsProxy()) {
+ result = ExternalValueImpl(obj);
+ } else {
+ result = NULL;
}
- return ExternalValueImpl(obj);
+ ASSERT_EQ(result, QuickUnwrap(wrapper));
+ return result;
}
@@ -3729,19 +3702,17 @@ char* HandleScopeImplementer::RestoreThreadHelper(char* storage) {
void HandleScopeImplementer::Iterate(
ObjectVisitor* v,
- List<void**>* blocks,
+ List<i::Object**>* blocks,
v8::ImplementationUtilities::HandleScopeData* handle_data) {
// Iterate over all handles in the blocks except for the last.
for (int i = blocks->length() - 2; i >= 0; --i) {
- Object** block =
- reinterpret_cast<Object**>(blocks->at(i));
+ Object** block = blocks->at(i);
v->VisitPointers(block, &block[kHandleBlockSize]);
}
// Iterate over live handles in the last block (if any).
if (!blocks->is_empty()) {
- v->VisitPointers(reinterpret_cast<Object**>(blocks->last()),
- reinterpret_cast<Object**>(handle_data->next));
+ v->VisitPointers(blocks->last(), handle_data->next);
}
}
@@ -3756,7 +3727,7 @@ void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
HandleScopeImplementer* thread_local =
reinterpret_cast<HandleScopeImplementer*>(storage);
- List<void**>* blocks_of_archived_thread = thread_local->Blocks();
+ List<internal::Object**>* blocks_of_archived_thread = thread_local->Blocks();
v8::ImplementationUtilities::HandleScopeData* handle_data_of_archived_thread =
&thread_local->handle_scope_data_;
Iterate(v, blocks_of_archived_thread, handle_data_of_archived_thread);
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index f1057a812..ca8f523c9 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -338,7 +338,7 @@ class HandleScopeImplementer {
static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
- inline void** GetSpareOrNewBlock();
+ inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(int extensions);
inline void IncrementCallDepth() {call_depth++;}
@@ -356,13 +356,13 @@ class HandleScopeImplementer {
inline Handle<Object> RestoreContext();
inline bool HasSavedContexts();
- inline List<void**>* Blocks() { return &blocks; }
+ inline List<internal::Object**>* Blocks() { return &blocks; }
inline bool IgnoreOutOfMemory() { return ignore_out_of_memory; }
inline void SetIgnoreOutOfMemory(bool value) { ignore_out_of_memory = value; }
private:
- List<void**> blocks;
+ List<internal::Object**> blocks;
Object** spare;
int call_depth;
// Used as a stack to keep track of entered contexts.
@@ -374,7 +374,7 @@ class HandleScopeImplementer {
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
static void Iterate(ObjectVisitor* v,
- List<void**>* blocks,
+ List<internal::Object**>* blocks,
v8::ImplementationUtilities::HandleScopeData* handle_data);
char* RestoreThreadHelper(char* from);
char* ArchiveThreadHelper(char* to);
@@ -420,10 +420,10 @@ Handle<Object> HandleScopeImplementer::LastEnteredContext() {
// If there's a spare block, use it for growing the current scope.
-void** HandleScopeImplementer::GetSpareOrNewBlock() {
- void** block = (spare != NULL) ?
- reinterpret_cast<void**>(spare) :
- NewArray<void*>(kHandleBlockSize);
+internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
+ internal::Object** block = (spare != NULL) ?
+ spare :
+ NewArray<internal::Object*>(kHandleBlockSize);
spare = NULL;
return block;
}
@@ -435,18 +435,18 @@ void HandleScopeImplementer::DeleteExtensions(int extensions) {
spare = NULL;
}
for (int i = extensions; i > 1; --i) {
- void** block = blocks.RemoveLast();
+ internal::Object** block = blocks.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(block,
&block[kHandleBlockSize]);
#endif
DeleteArray(block);
}
- spare = reinterpret_cast<Object**>(blocks.RemoveLast());
+ spare = blocks.RemoveLast();
#ifdef DEBUG
v8::ImplementationUtilities::ZapHandleRange(
- reinterpret_cast<void**>(spare),
- reinterpret_cast<void**>(&spare[kHandleBlockSize]));
+ spare,
+ &spare[kHandleBlockSize]);
#endif
}
diff --git a/deps/v8/src/apiutils.h b/deps/v8/src/apiutils.h
index 57453438e..8c791ebdd 100644
--- a/deps/v8/src/apiutils.h
+++ b/deps/v8/src/apiutils.h
@@ -60,7 +60,7 @@ class ImplementationUtilities {
static HandleScopeData* CurrentHandleScope();
#ifdef DEBUG
- static void ZapHandleRange(void** begin, void** end);
+ static void ZapHandleRange(internal::Object** begin, internal::Object** end);
#endif
};
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 4dda7ec5b..cb5faa234 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -204,7 +204,7 @@ void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
- if (pc_offset() > next_buffer_check_) {
+ if (pc_offset() >= next_buffer_check_) {
CheckConstPool(false, true);
}
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 3ed99f9e1..8bd06dbfe 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -329,19 +329,30 @@ const int kEndOfChain = -4;
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
+ if ((instr & ~Imm24Mask) == 0) {
+ // Emitted label constant, not part of a branch.
+ return instr - (Code::kHeaderSize - kHeapObjectTag);
+ }
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
int imm26 = ((instr & Imm24Mask) << 8) >> 6;
if ((instr & CondMask) == nv && (instr & B24) != 0)
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
- return pos + 8 + imm26;
+ return pos + kPcLoadDelta + imm26;
}
void Assembler::target_at_put(int pos, int target_pos) {
- int imm26 = target_pos - pos - 8;
Instr instr = instr_at(pos);
+ if ((instr & ~Imm24Mask) == 0) {
+ ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code* of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+ int imm26 = target_pos - (pos + kPcLoadDelta);
ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
if ((instr & CondMask) == nv) {
// blx uses bit 24 to encode bit 2 of imm26
@@ -368,41 +379,45 @@ void Assembler::print(Label* L) {
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
Instr instr = instr_at(l.pos());
- ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
- int cond = instr & CondMask;
- const char* b;
- const char* c;
- if (cond == nv) {
- b = "blx";
- c = "";
+ if ((instr & ~Imm24Mask) == 0) {
+ PrintF("value\n");
} else {
- if ((instr & B24) != 0)
- b = "bl";
- else
- b = "b";
-
- switch (cond) {
- case eq: c = "eq"; break;
- case ne: c = "ne"; break;
- case hs: c = "hs"; break;
- case lo: c = "lo"; break;
- case mi: c = "mi"; break;
- case pl: c = "pl"; break;
- case vs: c = "vs"; break;
- case vc: c = "vc"; break;
- case hi: c = "hi"; break;
- case ls: c = "ls"; break;
- case ge: c = "ge"; break;
- case lt: c = "lt"; break;
- case gt: c = "gt"; break;
- case le: c = "le"; break;
- case al: c = ""; break;
- default:
- c = "";
- UNREACHABLE();
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
+ int cond = instr & CondMask;
+ const char* b;
+ const char* c;
+ if (cond == nv) {
+ b = "blx";
+ c = "";
+ } else {
+ if ((instr & B24) != 0)
+ b = "bl";
+ else
+ b = "b";
+
+ switch (cond) {
+ case eq: c = "eq"; break;
+ case ne: c = "ne"; break;
+ case hs: c = "hs"; break;
+ case lo: c = "lo"; break;
+ case mi: c = "mi"; break;
+ case pl: c = "pl"; break;
+ case vs: c = "vs"; break;
+ case vc: c = "vc"; break;
+ case hi: c = "hi"; break;
+ case ls: c = "ls"; break;
+ case ge: c = "ge"; break;
+ case lt: c = "lt"; break;
+ case gt: c = "gt"; break;
+ case le: c = "le"; break;
+ case al: c = ""; break;
+ default:
+ c = "";
+ UNREACHABLE();
+ }
}
+ PrintF("%s%s\n", b, c);
}
- PrintF("%s%s\n", b, c);
next(&l);
}
} else {
@@ -670,8 +685,23 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label
BlockConstPoolBefore(pc_offset() + kInstrSize);
+ return target_pos - (pc_offset() + kPcLoadDelta);
+}
- return target_pos - pc_offset() - 8;
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ target_pos = kEndOfChain;
+ }
+ L->link_to(at_offset);
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ }
}
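
label_at_put, added above, lets the assembler emit a label's eventual position as a data word rather than a branch: any instruction word whose bits outside Imm24Mask are zero is interpreted by target_at / target_at_put as such a constant, and the stored value is biased by (Code::kHeaderSize - kHeapObjectTag) so that the finished word is directly usable as an offset from the tagged Code* of the generated code object. The encode/decode pair, as a sketch lifted from the logic above (the constants stand for Code::kHeaderSize and the heap-object tag, 1):

    // Bias a bound label position so the emitted word is an offset from the
    // tagged Code pointer; target_at() applies the inverse.
    int EncodeLabelConstant(int label_pos, int header_size, int heap_object_tag) {
      return label_pos + (header_size - heap_object_tag);
    }
    int DecodeLabelConstant(int instr_word, int header_size, int heap_object_tag) {
      return instr_word - (header_size - heap_object_tag);
    }
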
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index b3ebb8be8..63f04473e 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -39,7 +39,7 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
-
+#include <stdio.h>
#include "assembler.h"
namespace v8 {
@@ -165,9 +165,10 @@ enum Coprocessor {
enum Condition {
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
- cs = 2 << 28, // C set unsigned higher or same.
+ nz = 1 << 28, // Z clear not zero.
+ cs = 2 << 28, // C set carry set.
hs = 2 << 28, // C set unsigned higher or same.
- cc = 3 << 28, // C clear unsigned lower.
+ cc = 3 << 28, // C clear carry clear.
lo = 3 << 28, // C clear unsigned lower.
mi = 4 << 28, // N set negative.
pl = 5 << 28, // N clear positive or zero.
@@ -420,6 +421,10 @@ class Assembler : public Malloced {
// Manages the jump elimination optimization if the second parameter is true.
int branch_offset(Label* L, bool jump_elimination_allowed);
+ // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc.
INLINE(static Address target_address_address_at(Address pc));
@@ -435,6 +440,10 @@ class Assembler : public Malloced {
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 1;
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static const int kPcLoadDelta = 8;
+
// ---------------------------------------------------------------------------
// Code generation
@@ -784,6 +793,8 @@ class Assembler : public Malloced {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ friend class RegExpMacroAssemblerARM;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 5745a06e8..daf2378eb 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -573,7 +573,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
- __ mov(r4, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
__ add(fp, sp, Operand(3 * kPointerSize));
}
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 12d828d7e..b94aa10bd 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -176,7 +176,8 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
}
#endif
- // Allocate space for locals and initialize them.
+ // Allocate space for locals and initialize them. This also checks
+ // for stack overflow.
frame_->AllocateStackSlots();
// Initialize the function return target after the locals are set
// up, because it needs the expected frame height from the frame.
@@ -278,7 +279,6 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->CallRuntime(Runtime::kTraceEnter, 0);
// Ignore the return value.
}
- CheckStack();
// Compile the body of the function in a vanilla state. Don't
// bother compiling all the code if the scope has an illegal
@@ -1110,8 +1110,19 @@ void CodeGenerator::CheckStack() {
VirtualFrame::SpilledScope spilled_scope;
if (FLAG_check_stack) {
Comment cmnt(masm_, "[ check stack");
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is
+ // added to the implicit 8 byte offset that always applies to operations
+ // with pc and gives a return address 12 bytes down.
+ masm_->add(lr, pc, Operand(sizeof(Instr)));
+ masm_->cmp(sp, Operand(ip));
StackCheckStub stub;
- frame_->CallStub(&stub, 0);
+ // Call the stub if lower.
+ masm_->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
}
@@ -3322,7 +3333,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &check_frame_marker);
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
@@ -4934,36 +4945,21 @@ void CompareStub::Generate(MacroAssembler* masm) {
static void AllocateHeapNumber(
MacroAssembler* masm,
Label* need_gc, // Jump here if young space is full.
- Register result_reg, // The tagged address of the new heap number.
- Register allocation_top_addr_reg, // A scratch register.
+ Register result, // The tagged address of the new heap number.
+ Register scratch1, // A scratch register.
Register scratch2) { // Another scratch register.
- ExternalReference allocation_top =
- ExternalReference::new_space_allocation_top_address();
- ExternalReference allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
-
- // allocat := the address of the allocation top variable.
- __ mov(allocation_top_addr_reg, Operand(allocation_top));
- // result_reg := the old allocation top.
- __ ldr(result_reg, MemOperand(allocation_top_addr_reg));
- // scratch2 := the address of the allocation limit.
- __ mov(scratch2, Operand(allocation_limit));
- // scratch2 := the allocation limit.
- __ ldr(scratch2, MemOperand(scratch2));
- // result_reg := the new allocation top.
- __ add(result_reg, result_reg, Operand(HeapNumber::kSize));
- // Compare new new allocation top and limit.
- __ cmp(result_reg, Operand(scratch2));
- // Branch if out of space in young generation.
- __ b(hi, need_gc);
- // Store new allocation top.
- __ str(result_reg, MemOperand(allocation_top_addr_reg)); // store new top
- // Tag and adjust back to start of new object.
- __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag));
- // Get heap number map into scratch2.
- __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
- // Store heap number map in new object.
- __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset));
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ __ AllocateObjectInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ true);
+
+ // Get heap number map and store it in the allocated object.
+ __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
+ __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -5623,17 +5619,11 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
void StackCheckStub::Generate(MacroAssembler* masm) {
- Label within_limit;
- __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit()));
- __ ldr(ip, MemOperand(ip));
- __ cmp(sp, Operand(ip));
- __ b(hs, &within_limit);
// Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
- __ bind(&within_limit);
__ StubReturn(1);
}
@@ -5677,9 +5667,9 @@ void UnarySubStub::Generate(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
AllocateHeapNumber(masm, &slow, r1, r2, r3);
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(r2, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
__ mov(r0, Operand(r1));
@@ -5984,9 +5974,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// r2: receiver
// r3: argc
// r4: argv
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
__ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- __ mov(r7, Operand(~ArgumentsAdaptorFrame::SENTINEL));
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
__ ldr(r5, MemOperand(r5));
@@ -6143,7 +6133,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
Label adaptor;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor);
// Nothing to do: The formal number of parameters has already been
@@ -6172,7 +6162,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor);
// Check index against formal parameters count limit passed in
@@ -6214,7 +6204,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Label runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &runtime);
// Patch the arguments.length and the parameters pointer.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 0abe35b9e..2638409e8 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -119,6 +119,7 @@ class Decoder {
void DecodeType5(Instr* instr);
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
+ void DecodeUnconditional(Instr* instr);
const disasm::NameConverter& converter_;
v8::internal::Vector<char> out_buffer_;
@@ -774,6 +775,67 @@ void Decoder::DecodeType7(Instr* instr) {
}
+void Decoder::DecodeUnconditional(Instr* instr) {
+ if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) {
+ Format(instr, "'memop'h'pu 'rd, ");
+ bool immediate = instr->HasB();
+ switch (instr->PUField()) {
+ case 0: {
+ // Post index, negative.
+ if (instr->HasW()) {
+ Unknown(instr);
+ break;
+ }
+ if (immediate) {
+ Format(instr, "['rn], #-'imm12");
+ } else {
+ Format(instr, "['rn], -'rm");
+ }
+ break;
+ }
+ case 1: {
+ // Post index, positive.
+ if (instr->HasW()) {
+ Unknown(instr);
+ break;
+ }
+ if (immediate) {
+ Format(instr, "['rn], #+'imm12");
+ } else {
+ Format(instr, "['rn], +'rm");
+ }
+ break;
+ }
+ case 2: {
+ // Pre index or offset, negative.
+ if (immediate) {
+ Format(instr, "['rn, #-'imm12]'w");
+ } else {
+ Format(instr, "['rn, -'rm]'w");
+ }
+ break;
+ }
+ case 3: {
+ // Pre index or offset, positive.
+ if (immediate) {
+ Format(instr, "['rn, #+'imm12]'w");
+ } else {
+ Format(instr, "['rn, +'rm]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+ return;
+ }
+ Format(instr, "break 'msg");
+}
+
+
// Disassemble the instruction at *instr_ptr into the output buffer.
int Decoder::InstructionDecode(byte* instr_ptr) {
Instr* instr = Instr::At(instr_ptr);
@@ -782,7 +844,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == special_condition) {
- Format(instr, "break 'msg");
+ DecodeUnconditional(instr);
return Instr::kInstrSize;
}
switch (instr->TypeField()) {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 4e337c4bb..2ca489868 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -768,6 +768,44 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+void MacroAssembler::AllocateObjectInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ bool tag_allocated_object) {
+ ASSERT(!result.is(scratch1));
+ ASSERT(!scratch1.is(scratch2));
+
+ // Load address of new object into result and allocation top address into
+ // scratch1.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ mov(scratch1, Operand(new_space_allocation_top));
+ ldr(result, MemOperand(scratch1));
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ mov(scratch2, Operand(new_space_allocation_limit));
+ ldr(scratch2, MemOperand(scratch2));
+ add(result, result, Operand(object_size));
+ cmp(result, Operand(scratch2));
+ b(hi, gc_required);
+
+ // Update allocation top. result temporarily holds the new top.
+ str(result, MemOperand(scratch1));
+
+ // Tag and adjust back to start of new object.
+ if (tag_allocated_object) {
+ sub(result, result, Operand(object_size - kHeapObjectTag));
+ } else {
+ sub(result, result, Operand(object_size));
+ }
+}
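
The new macro instruction is a standard bump-pointer allocation. A rough C++ sketch of the same fast path (top and limit stand in for the new-space allocation cells; this is illustrative, not V8's heap interface):

    #include <cstdint>

    // Bump-pointer fast path encoded by AllocateObjectInNewSpace. Returns the
    // (optionally tagged) address of the new object, or 0 if a GC is required.
    const intptr_t kHeapObjectTag = 1;

    intptr_t AllocateInNewSpace(intptr_t* top, intptr_t limit, int object_size,
                                bool tag_allocated_object) {
      intptr_t new_top = *top + object_size;   // candidate new allocation top
      if (new_top > limit) return 0;           // new space exhausted: caller GCs
      intptr_t result = *top;                  // object starts at the old top
      *top = new_top;                          // commit the new top
      return tag_allocated_object ? result + kHeapObjectTag : result;
    }
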
+
+
void MacroAssembler::CompareObjectType(Register function,
Register map,
Register type_reg,
@@ -825,9 +863,9 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
}
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
}
@@ -1022,4 +1060,5 @@ void MacroAssembler::Abort(const char* msg) {
// will not return here
}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index a35c98a20..4bbd9802f 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -188,6 +188,20 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space. If the new space is exhausted control
+ // continues at the gc_required label. The allocated object is returned in
+ // result. If the flag tag_allocated_object is true, the result is tagged
+ // as a heap object.
+ void AllocateObjectInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ bool tag_allocated_object);
+
+ // ---------------------------------------------------------------------------
// Support functions.
// Try to get function prototype of a function and puts the value in
@@ -231,7 +245,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub);
+ void CallStub(CodeStub* stub, Condition cond = al);
void CallJSExitStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 78ebc7e80..252d7839f 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,19 +26,1201 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
+#include "unicode.h"
+#include "log.h"
#include "ast.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
+#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
namespace v8 {
namespace internal {
-RegExpMacroAssemblerARM::RegExpMacroAssemblerARM() {
- UNIMPLEMENTED();
+#ifdef V8_NATIVE_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - r5 : Pointer to current code object (Code*) including heap object tag.
+ * - r6 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - r7 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - r8 : points to tip of backtrack stack
+ * - r9 : Unused, might be used by C code and expected unchanged.
+ * - r10 : End of input (points to byte after last character in input).
+ * - r11 : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - r12 : IP register, used by assembler. Very volatile.
+ * - r13/sp : points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ * - stack_area_base (High end of the memory area to use as
+ * backtracking stack)
+ * - at_start (if 1, start at start of string, if 0, don't)
+ * --- sp when called ---
+ * - link address
+ * - backup of registers r4..r11
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
+ * --- frame pointer ----
+ * - void* input_string (location of a handle containing the string)
+ * - Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a non-position.
+ * - register 0 (Only positions must be stored in the first
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * bool at_start,
+ * byte* stack_area_base)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc).
+ */
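
For readers following the layout above, a hypothetical sketch of the call NativeRegExpMacroAssembler::Execute performs: the typedef mirrors the documented signature, but the type names here are placeholders, not V8 declarations.

    // Placeholder types; in V8 the first argument is the String* handle location.
    typedef int (*RegExpCodeEntry)(void* input_string,
                                   const unsigned char* start,
                                   const unsigned char* end,
                                   int* capture_output_array,
                                   bool at_start,
                                   unsigned char* stack_area_base);

    // The generated code returns one of SUCCESS, FAILURE or EXCEPTION.
    int RunGeneratedRegExp(RegExpCodeEntry entry, void* input_string,
                           const unsigned char* start, const unsigned char* end,
                           int* captures, bool at_start,
                           unsigned char* stack_base) {
      return entry(input_string, start, end, captures, at_start, stack_base);
    }
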
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
+ Mode mode,
+ int registers_to_save)
+ : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ EmitBacktrackConstantPool();
+ __ bind(&start_label_); // And then continue from here.
}
-RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {}
+RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerARM::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerARM::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ add(current_input_offset(),
+ current_input_offset(), Operand(by * char_size()));
+ }
+}
+
+
+void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
+ ASSERT(reg >= 0);
+ ASSERT(reg < num_registers_);
+ if (by != 0) {
+ __ ldr(r0, register_location(reg));
+ __ add(r0, r0, Operand(by));
+ __ str(r0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerARM::Backtrack() {
+ CheckPreemption();
+ // Pop Code* offset from backtrack stack, add Code* and jump to location.
+ Pop(r0);
+ __ add(pc, r0, Operand(r5));
+}
+
+
+void RegExpMacroAssemblerARM::Bind(Label* label) {
+ __ bind(label);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacter(uint32_t c, Label* on_equal) {
+ __ cmp(current_character(), Operand(c));
+ BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ __ cmp(current_character(), Operand(limit));
+ BranchOrBacktrack(gt, on_greater);
+}
+
+
+void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
+ Label not_at_start;
+ // Did we start the match at the start of the string at all?
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+ __ cmp(r0, Operand(0));
+ BranchOrBacktrack(eq, &not_at_start);
+
+ // If we did, are we still at the start of the input?
+ __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
+ __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+ __ cmp(r0, r1);
+ BranchOrBacktrack(eq, on_at_start);
+ __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
+ // Did we start the match at the start of the string at all?
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+ __ cmp(r0, Operand(0));
+ BranchOrBacktrack(eq, on_not_at_start);
+ // If we did, are we still at the start of the input?
+ __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
+ __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+ __ cmp(r0, r1);
+ BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterLT(uc16 limit, Label* on_less) {
+ __ cmp(current_character(), Operand(limit));
+ BranchOrBacktrack(lt, on_less);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ int byte_length = str.length() * char_size();
+ int byte_offset = cp_offset * char_size();
+ if (check_end_of_string) {
+ // Check that there are at least str.length() characters left in the input.
+ __ cmp(current_input_offset(), Operand(-(byte_offset + byte_length)));
+ BranchOrBacktrack(gt, on_failure);
+ }
+
+ if (on_failure == NULL) {
+ // Instead of inlining a backtrack, (re)use the global backtrack target.
+ on_failure = &backtrack_label_;
+ }
+
+ __ add(r0, end_of_input_address(), Operand(current_input_offset()));
+ int stored_high_byte = 0;
+ for (int i = 0; i < str.length(); i++) {
+ if (mode_ == ASCII) {
+ __ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
+ // str[i] is known to be an ASCII character.
+ __ cmp(r1, Operand(str[i]));
+ } else {
+ __ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
+ uc16 match_char = str[i];
+ int match_high_byte = (match_char >> 8);
+ if (match_high_byte == 0) {
+ __ cmp(r1, Operand(str[i]));
+ } else {
+ if (match_high_byte != stored_high_byte) {
+ __ mov(r2, Operand(match_high_byte));
+ stored_high_byte = match_high_byte;
+ }
+ __ add(r3, r2, Operand(match_char & 0xff));
+ __ cmp(r1, r3);
+ }
+ }
+ BranchOrBacktrack(ne, on_failure);
+ }
+}
+
+
+void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
+ __ ldr(r0, MemOperand(backtrack_stackpointer(), 0));
+ __ cmp(current_input_offset(), r0);
+ __ add(backtrack_stackpointer(),
+ backtrack_stackpointer(), Operand(kPointerSize), LeaveCC, eq);
+ BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ __ ldr(r0, register_location(start_reg)); // Index of start of capture
+ __ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
+ __ sub(r1, r1, r0, SetCC); // Length of capture.
+
+ // If length is zero, either the capture is empty or it is not participating.
+ // In either case succeed immediately.
+ __ b(eq, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ cmn(r1, Operand(current_input_offset()));
+ BranchOrBacktrack(gt, on_no_match);
+
+ if (mode_ == ASCII) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // r0 - offset of start of capture
+ // r1 - length of capture
+ __ add(r0, r0, Operand(end_of_input_address()));
+ __ add(r2, end_of_input_address(), Operand(current_input_offset()));
+ __ add(r1, r0, Operand(r1));
+
+ // r0 - Address of start of capture.
+ // r1 - Address of end of capture
+ // r2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
+ __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
+ __ cmp(r4, r3);
+ __ b(eq, &loop_check);
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ orr(r3, r3, Operand(0x20)); // Convert capture character to lower-case.
+ __ orr(r4, r4, Operand(0x20)); // Also convert input character.
+ __ cmp(r4, r3);
+ __ b(ne, &fail);
+ __ sub(r3, r3, Operand('a'));
+ __ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
+ __ b(hi, &fail);
+
+ __ bind(&loop_check);
+ __ cmp(r0, r1);
+ __ b(lt, &loop);
+ __ jmp(&success);
+
+ __ bind(&fail);
+ BranchOrBacktrack(al, on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ sub(current_input_offset(), r2, end_of_input_address());
+ } else {
+ ASSERT(mode_ == UC16);
+ int argument_count = 3;
+ FrameAlign(argument_count, r2);
+
+ // r0 - offset of start of capture
+ // r1 - length of capture
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // r0: Address byte_offset1 - Address captured substring's start.
+ // r1: Address byte_offset2 - Address of current character position.
+ // r2: size_t byte_length - length of capture in bytes(!)
+
+ // Address of start of capture.
+ __ add(r0, r0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mov(r2, Operand(r1));
+ // Save length in callee-save register for use on return.
+ __ mov(r4, Operand(r1));
+ // Address of current input position.
+ __ add(r1, current_input_offset(), Operand(end_of_input_address()));
+
+ ExternalReference function =
+ ExternalReference::re_case_insensitive_compare_uc16();
+ CallCFunction(function, argument_count);
+
+ // Check if function returned non-zero for success or zero for failure.
+ __ cmp(r0, Operand(0));
+ BranchOrBacktrack(eq, on_no_match);
+ // On success, increment position by length of capture.
+ __ add(current_input_offset(), current_input_offset(), Operand(r4));
+ }
+
+ __ bind(&fallthrough);
+}
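
The ASCII branch above relies on upper- and lower-case ASCII letters differing only in bit 0x20; a standalone sketch of the per-character test it emits:

    // Case-insensitive ASCII comparison as emitted above: OR both characters
    // with 0x20 (folds letters to lower case), then accept only if they agree
    // and the folded value really is a letter.
    bool AsciiEqualIgnoreCase(unsigned char a, unsigned char b) {
      if (a == b) return true;
      unsigned char fa = a | 0x20;
      unsigned char fb = b | 0x20;
      if (fa != fb) return false;
      return fa >= 'a' && fa <= 'z';  // only letters may differ in the 0x20 bit
    }
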
+
+
+void RegExpMacroAssemblerARM::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ Label fallthrough;
+ Label success;
+
+ // Find length of back-referenced capture.
+ __ ldr(r0, register_location(start_reg));
+ __ ldr(r1, register_location(start_reg + 1));
+ __ sub(r1, r1, r0, SetCC); // Length to check.
+ // Succeed on empty capture (including no capture).
+ __ b(eq, &fallthrough);
+
+ // Check that there are enough characters left in the input.
+ __ cmn(r1, Operand(current_input_offset()));
+ BranchOrBacktrack(gt, on_no_match);
+
+ // Compute pointers to match string and capture string
+ __ add(r0, r0, Operand(end_of_input_address()));
+ __ add(r2, end_of_input_address(), Operand(current_input_offset()));
+ __ add(r1, r1, Operand(r0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == ASCII) {
+ __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
+ __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
+ __ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
+ }
+ __ cmp(r3, r4);
+ BranchOrBacktrack(ne, on_no_match);
+ __ cmp(r0, r1);
+ __ b(lt, &loop);
+
+ // Move current character position to position after match.
+ __ sub(current_input_offset(), r2, end_of_input_address());
+ __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotRegistersEqual(int reg1,
+ int reg2,
+ Label* on_not_equal) {
+ __ ldr(r0, register_location(reg1));
+ __ ldr(r1, register_location(reg2));
+ __ cmp(r0, r1);
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ __ cmp(current_character(), Operand(c));
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ and_(r0, current_character(), Operand(mask));
+ __ cmp(r0, Operand(c));
+ BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ __ and_(r0, current_character(), Operand(mask));
+ __ cmp(r0, Operand(c));
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ ASSERT(minus < String::kMaxUC16CharCode);
+ __ sub(r0, current_character(), Operand(minus));
+ __ and_(r0, r0, Operand(mask));
+ __ cmp(r0, Operand(c));
+ BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
+ int cp_offset,
+ bool check_offset,
+ Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
+ switch (type) {
+ case 's':
+ // Match space-characters
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ Label success;
+ __ cmp(current_character(), Operand(' '));
+ __ b(eq, &success);
+ // Check range 0x09..0x0d
+ __ sub(r0, current_character(), Operand('\t'));
+ __ cmp(r0, Operand('\r' - '\t'));
+ BranchOrBacktrack(hi, on_no_match);
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // Match non-space characters.
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ if (mode_ == ASCII) {
+ // ASCII space characters are '\t'..'\r' and ' '.
+ __ cmp(current_character(), Operand(' '));
+ BranchOrBacktrack(eq, on_no_match);
+ __ sub(r0, current_character(), Operand('\t'));
+ __ cmp(r0, Operand('\r' - '\t'));
+ BranchOrBacktrack(ls, on_no_match);
+ return true;
+ }
+ return false;
+ case 'd':
+ // Match ASCII digits ('0'..'9')
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ __ sub(r0, current_character(), Operand('0'));
+ __ cmp(r0, Operand('9' - '0'));
+ BranchOrBacktrack(hi, on_no_match);
+ return true;
+ case 'D':
+ // Match non ASCII-digits
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ __ sub(r0, current_character(), Operand('0'));
+ __ cmp(r0, Operand('9' - '0'));
+ BranchOrBacktrack(ls, on_no_match);
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ if (check_offset) {
+ LoadCurrentCharacter(cp_offset, on_no_match, 1);
+ } else {
+ LoadCurrentCharacterUnchecked(cp_offset, 1);
+ }
+ __ eor(r0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(r0, r0, Operand(0x0b));
+ __ cmp(r0, Operand(0x0c - 0x0b));
+ BranchOrBacktrack(ls, on_no_match);
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ __ cmp(r0, Operand(1));
+ BranchOrBacktrack(ls, on_no_match);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ if (check_offset) {
+ CheckPosition(cp_offset, on_no_match);
+ }
+ return true;
+ // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
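
All of the class tests above lean on the unsigned range-check idiom named in the comment at the top of the function; spelled out in C++:

    #include <cstdint>

    // c in [min, max] iff (unsigned)(c - min) <= (max - min): one subtraction
    // and one unsigned compare replace a pair of signed comparisons.
    bool InRange(uint32_t c, uint32_t min, uint32_t max) {
      return (c - min) <= (max - min);
    }

    // Example: the whitespace test emitted for the 's' class in ASCII mode.
    bool IsAsciiRegExpSpace(uint32_t c) {
      return c == ' ' || InRange(c, '\t', '\r');
    }
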
+
+
+void RegExpMacroAssemblerARM::Fail() {
+ __ mov(r0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+
+Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+ // Push Link register.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Order here should correspond to order of offset constants in header file.
+ RegList registers_to_retain = r4.bit() | r5.bit() | r6.bit() |
+ r7.bit() | r8.bit() | r9.bit() | r10.bit() | fp.bit();
+ RegList argument_registers = r0.bit() | r1.bit() | r2.bit() | r3.bit();
+ __ stm(db_w, sp, argument_registers | registers_to_retain | lr.bit());
+ // Set frame pointer just above the arguments.
+ __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
+ __ push(r0); // Make room for "position - 1" constant (value is irrelevant).
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ mov(r0, Operand(stack_guard_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ sub(r0, sp, r0, SetCC);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ b(ls, &stack_limit_hit);
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ cmp(r0, Operand(num_registers_ * kPointerSize));
+ __ b(hs, &stack_ok);
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ mov(r0, Operand(EXCEPTION));
+ __ jmp(&exit_label_);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(r0);
+ __ cmp(r0, Operand(0));
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ b(ne, &exit_label_);
+
+ __ bind(&stack_ok);
+
+ // Allocate space on stack for registers.
+ __ sub(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ ldr(r0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ sub(current_input_offset(), r0, end_of_input_address());
+ // Set r0 to address of char before start of input
+ // (effectively string position -1).
+ __ sub(r0, current_input_offset(), Operand(char_size()));
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1
+
+ // Address of register 0.
+ __ add(r1, frame_pointer(), Operand(kRegisterZero));
+ __ mov(r2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ str(r0, MemOperand(r1, kPointerSize, NegPostIndex));
+ __ sub(r2, r2, Operand(1), SetCC);
+ __ b(ne, &init_loop);
+ }
+
+ // Initialize backtrack stack pointer.
+ __ ldr(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ // Initialize code pointer register
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+ // Load previous char as initial value of current character register.
+ Label at_start;
+ __ ldr(r0, MemOperand(frame_pointer(), kAtStart));
+ __ cmp(r0, Operand(0));
+ __ b(ne, &at_start);
+ LoadCurrentCharacterUnchecked(-1, 1); // Load previous char.
+ __ jmp(&start_label_);
+ __ bind(&at_start);
+ __ mov(current_character(), Operand('\n'));
+ __ jmp(&start_label_);
+
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // copy captures to output
+ __ ldr(r1, MemOperand(frame_pointer(), kInputStart));
+ __ ldr(r0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ sub(r1, end_of_input_address(), r1);
+ // r1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ mov(r1, Operand(r1, LSR, 1));
+ }
+ // r1 is length of input in characters.
+
+ ASSERT_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ ldr(r2, register_location(i));
+ __ ldr(r3, register_location(i + 1));
+ if (mode_ == UC16) {
+ __ add(r2, r1, Operand(r2, ASR, 1));
+ __ add(r3, r1, Operand(r3, ASR, 1));
+ } else {
+ __ add(r2, r1, Operand(r2));
+ __ add(r3, r1, Operand(r3));
+ }
+ __ str(r2, MemOperand(r0, kPointerSize, PostIndex));
+ __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
+ }
+ }
+ __ mov(r0, Operand(SUCCESS));
+ }
+ // Exit and return r0
+ __ bind(&exit_label_);
+ // Skip sp past regexp registers and local variables.
+ __ mov(sp, frame_pointer());
+ // Restore registers r4..r11 and return (restoring lr to pc).
+ __ ldm(ia_w, sp, registers_to_retain | pc.bit());
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+
+ CallCheckStackGuardState(r0);
+ __ cmp(r0, Operand(0));
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ b(ne, &exit_label_);
+
+ // String might have moved: Reload end of string from frame.
+ __ ldr(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+
+ Label grow_failed;
+
+ // Call GrowStack(backtrack_stackpointer())
+ int num_arguments = 2;
+ FrameAlign(num_arguments, r0);
+ __ mov(r0, backtrack_stackpointer());
+ __ add(r1, frame_pointer(), Operand(kStackHighEnd));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack();
+ CallCFunction(grow_stack, num_arguments);
+ // If it returned NULL, we have failed to grow the stack, and
+ // must exit with a stack-overflow exception.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &exit_with_exception);
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), r0);
+ // Restore saved registers and continue.
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ mov(r0, Operand(EXCEPTION));
+ __ jmp(&exit_label_);
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(&code_desc);
+ Handle<Code> code = Factory::NewCode(code_desc,
+ NULL,
+ Code::ComputeFlags(Code::REGEXP),
+ masm_->CodeObject());
+ LOG(RegExpCodeCreateEvent(*code, *source));
+ return Handle<Object>::cast(code);
+}
+
+
+void RegExpMacroAssemblerARM::GoTo(Label* to) {
+ BranchOrBacktrack(al, to);
+}
+
+
+void RegExpMacroAssemblerARM::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ ldr(r0, register_location(reg));
+ __ cmp(r0, Operand(comparand));
+ BranchOrBacktrack(ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerARM::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ __ ldr(r0, register_location(reg));
+ __ cmp(r0, Operand(comparand));
+ BranchOrBacktrack(lt, if_lt);
+}
-}} // namespace v8::internal
+void RegExpMacroAssemblerARM::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ __ ldr(r0, register_location(reg));
+ __ cmp(r0, Operand(current_input_offset()));
+ BranchOrBacktrack(eq, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerARM::Implementation() {
+ return kARMImplementation;
+}
+
+
+void RegExpMacroAssemblerARM::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
+ ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
+ LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerARM::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM::PopRegister(int register_index) {
+ Pop(r0);
+ __ str(r0, register_location(register_index));
+}
+
+
+static bool is_valid_memory_offset(int value) {
+ if (value < 0) value = -value;
+ return value < (1<<12);
+}
+
+
+void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ mov(r0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ int constant_offset = GetBacktrackConstantPoolEntry();
+ masm_->label_at_put(label, constant_offset);
+ // Reading pc-relative is based on the address 8 bytes ahead of
+ // the current opcode.
+ unsigned int offset_of_pc_register_read =
+ masm_->pc_offset() + Assembler::kPcLoadDelta;
+ int pc_offset_of_constant =
+ constant_offset - offset_of_pc_register_read;
+ ASSERT(pc_offset_of_constant < 0);
+ if (is_valid_memory_offset(pc_offset_of_constant)) {
+ masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
+ __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
+ } else {
+ // Not a 12-bit offset, so it needs to be loaded from the constant
+ // pool.
+ masm_->BlockConstPoolBefore(
+ masm_->pc_offset() + 2 * Assembler::kInstrSize);
+ __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
+ __ ldr(r0, MemOperand(pc, r0));
+ }
+ }
+ Push(r0);
+ CheckStackLimit();
+}
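
The pc-relative arithmetic above has to account for the ARM pipeline: a load that reads pc observes the address of the current instruction plus 8 (Assembler::kPcLoadDelta). In outline:

    // Offset computation for the pc-relative constant-pool load in PushBacktrack.
    // Reading pc yields the current instruction's address plus 8 on ARM.
    const int kPcLoadDelta = 8;

    int PcRelativeOffset(int constant_pool_offset, int current_pc_offset) {
      int pc_as_read = current_pc_offset + kPcLoadDelta;
      // Negative, since the backtrack constant pool was emitted earlier.
      return constant_pool_offset - pc_as_read;
    }
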
+
+
+void RegExpMacroAssemblerARM::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerARM::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ __ ldr(r0, register_location(register_index));
+ Push(r0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerARM::ReadCurrentPositionFromRegister(int reg) {
+ __ ldr(current_input_offset(), register_location(reg));
+}
+
+
+void RegExpMacroAssemblerARM::ReadStackPointerFromRegister(int reg) {
+ __ ldr(backtrack_stackpointer(), register_location(reg));
+ __ ldr(r0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ add(backtrack_stackpointer(), backtrack_stackpointer(), Operand(r0));
+}
+
+
+void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
+ ASSERT(register_index >= num_saved_registers_); // Reserved for positions!
+ __ mov(r0, Operand(to));
+ __ str(r0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerARM::Succeed() {
+ __ jmp(&success_label_);
+}
+
+
+void RegExpMacroAssemblerARM::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ if (cp_offset == 0) {
+ __ str(current_input_offset(), register_location(reg));
+ } else {
+ __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ str(r0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
+ ASSERT(reg_from <= reg_to);
+ __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ str(r0, register_location(reg));
+ }
+}
+
+
+void RegExpMacroAssemblerARM::WriteStackPointerToRegister(int reg) {
+ __ ldr(r1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ sub(r0, backtrack_stackpointer(), r1);
+ __ str(r0, register_location(reg));
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
+ int num_arguments = 3;
+ FrameAlign(num_arguments, scratch);
+ // RegExp code frame pointer.
+ __ mov(r2, frame_pointer());
+ // Code* of self.
+ __ mov(r1, Operand(masm_->CodeObject()));
+ // r0 becomes return address pointer.
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state();
+ CallCFunctionUsingStub(stack_guard_check, num_arguments);
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ if (StackGuard::IsStackOverflow()) {
+ Top::StackOverflow();
+ return EXCEPTION;
+ }
+
+ // If it is not a real stack overflow, the stack guard was used to
+ // interrupt execution for another purpose.
+
+ // Prepare for possible GC.
+ HandleScope handles;
+ Handle<Code> code_handle(re_code);
+
+ Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+ // Current string.
+ bool is_ascii = subject->IsAsciiRepresentation();
+
+ ASSERT(re_code->instruction_start() <= *return_address);
+ ASSERT(*return_address <=
+ re_code->instruction_start() + re_code->instruction_size());
+
+ Object* result = Execution::HandleStackGuardInterrupt();
+
+ if (*code_handle != re_code) { // Return address no longer valid
+ int delta = *code_handle - re_code;
+ // Overwrite the return address on the stack.
+ *return_address += delta;
+ }
+
+ if (result->IsException()) {
+ return EXCEPTION;
+ }
+
+ // String might have changed.
+ if (subject->IsAsciiRepresentation() != is_ascii) {
+ // If we changed between an ASCII and a UC16 string, the specialized
+ // code cannot be used, and we need to restart regexp matching from
+ // scratch (including, potentially, compiling a new version of the code).
+ return RETRY;
+ }
+
+ // Otherwise, the content of the string might have moved. It must still
+ // be a sequential or external string with the same content.
+ // Update the start and end pointers in the stack frame to the current
+ // location (whether it has actually moved or not).
+ ASSERT(StringShape(*subject).IsSequential() ||
+ StringShape(*subject).IsExternal());
+
+ // The original start address of the characters to match.
+ const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+ // Find the current start address of the same character at the current string
+ // position.
+ int start_index = frame_entry<int>(re_frame, kStartIndex);
+ const byte* new_address = StringCharacterPosition(*subject, start_index);
+
+ if (start_address != new_address) {
+ // If there is a difference, update the object pointer and start and end
+ // addresses in the RegExp stack frame to match the new value.
+ const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd);
+ int byte_length = end_address - start_address;
+ frame_entry<const String*>(re_frame, kInputString) = *subject;
+ frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+ frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+ }
+
+ return 0;
+}
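
The fix-up in the middle of the function is worth spelling out: if the GC moved the code object, every return address into it must be shifted by the same distance. A minimal sketch:

    #include <cstdint>

    // Relocate a return address after the code object has moved: the offset
    // into the instruction stream is unchanged, only the base address shifted.
    void FixReturnAddress(uintptr_t* return_address,
                          uintptr_t old_code_start, uintptr_t new_code_start) {
      if (new_code_start != old_code_start) {
        *return_address += new_code_start - old_code_start;
      }
    }
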
+
+
+MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
+ ASSERT(register_index < (1<<30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerARM::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ __ cmp(current_input_offset(), Operand(-cp_offset * char_size()));
+ BranchOrBacktrack(ge, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerARM::BranchOrBacktrack(Condition condition,
+ Label* to) {
+ if (condition == al) { // Unconditional.
+ if (to == NULL) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == NULL) {
+ __ b(condition, &backtrack_label_);
+ return;
+ }
+ __ b(condition, to);
+}
+
+
+void RegExpMacroAssemblerARM::SafeCall(Label* to, Condition cond) {
+ __ bl(to, cond);
+}
+
+
+void RegExpMacroAssemblerARM::SafeReturn() {
+ __ pop(lr);
+ __ add(pc, lr, Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ sub(lr, lr, Operand(masm_->CodeObject()));
+ __ push(lr);
+}
+
+
+void RegExpMacroAssemblerARM::Push(Register source) {
+ ASSERT(!source.is(backtrack_stackpointer()));
+ __ str(source,
+ MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
+}
+
+
+void RegExpMacroAssemblerARM::Pop(Register target) {
+ ASSERT(!target.is(backtrack_stackpointer()));
+ __ ldr(target,
+ MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
+}
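
Push and Pop above implement a stack that grows downward: the pointer is pre-decremented on a push (NegPreIndex) and post-incremented on a pop (PostIndex). The same discipline in C++:

    #include <cstdint>

    // Downward-growing backtrack stack, mirroring Push/Pop above.
    void BacktrackPush(int32_t** sp, int32_t value) {
      *sp -= 1;    // pre-decrement, like str with NegPreIndex
      **sp = value;
    }

    int32_t BacktrackPop(int32_t** sp) {
      int32_t value = **sp;  // read the current top of stack
      *sp += 1;              // post-increment, like ldr with PostIndex
      return value;
    }
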
+
+
+void RegExpMacroAssemblerARM::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_guard_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ mov(r0, Operand(stack_guard_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ cmp(sp, r0);
+ SafeCall(&check_preempt_label_, ls);
+}
+
+
+void RegExpMacroAssemblerARM::CheckStackLimit() {
+ if (FLAG_check_stack) {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit();
+ __ mov(r0, Operand(stack_limit));
+ __ ldr(r0, MemOperand(r0));
+ __ cmp(backtrack_stackpointer(), Operand(r0));
+ SafeCall(&stack_overflow_label_, ls);
+ }
+}
+
+
+void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
+ __ CheckConstPool(false, false);
+ __ BlockConstPoolBefore(
+ masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
+ backtrack_constant_pool_offset_ = masm_->pc_offset();
+ for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
+ __ emit(0);
+ }
+
+ backtrack_constant_pool_capacity_ = kBacktrackConstantPoolSize;
+}
+
+
+int RegExpMacroAssemblerARM::GetBacktrackConstantPoolEntry() {
+ while (backtrack_constant_pool_capacity_ > 0) {
+ int offset = backtrack_constant_pool_offset_;
+ backtrack_constant_pool_offset_ += kPointerSize;
+ backtrack_constant_pool_capacity_--;
+ if (masm_->pc_offset() - offset < 2 * KB) {
+ return offset;
+ }
+ }
+ Label new_pool_skip;
+ __ jmp(&new_pool_skip);
+ EmitBacktrackConstantPool();
+ __ bind(&new_pool_skip);
+ int offset = backtrack_constant_pool_offset_;
+ backtrack_constant_pool_offset_ += kPointerSize;
+ backtrack_constant_pool_capacity_--;
+ return offset;
+}
+
+
+void RegExpMacroAssemblerARM::FrameAlign(int num_arguments, Register scratch) {
+ int frameAlignment = OS::ActivationFrameAlignment();
+ // Up to four simple arguments are passed in registers r0..r3.
+ int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ if (frameAlignment != 0) {
+ // Make stack end at alignment and make room for num_arguments - 4 words
+ // and the original value of sp.
+ __ mov(scratch, sp);
+ __ sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frameAlignment));
+ __ and_(sp, sp, Operand(-frameAlignment));
+ __ str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ __ sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
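
The AND with the negated alignment works because the activation frame alignment is a power of two (the ASSERT above); rounding an address down to such a boundary just clears its low bits:

    #include <cstdint>

    // Round sp down to a power-of-two boundary, as FrameAlign does with
    // 'and sp, sp, #-frameAlignment'.
    uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
      // Precondition: alignment is a power of two (cf. ASSERT(IsPowerOf2(...))).
      return sp & ~(alignment - 1);  // equals sp & -alignment in two's complement
    }
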
+
+
+void RegExpMacroAssemblerARM::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ __ mov(r5, Operand(function));
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+ __ Call(r5);
+ int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
+ if (OS::ActivationFrameAlignment() > kIntSize) {
+ __ ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ __ add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
+ }
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM::CallCFunctionUsingStub(
+ ExternalReference function,
+ int num_arguments) {
+ // Must pass all arguments in registers. The stub pushes on the stack.
+ ASSERT(num_arguments <= 4);
+ __ mov(r5, Operand(function));
+ RegExpCEntryStub stub;
+ __ CallStub(&stub);
+ if (OS::ActivationFrameAlignment() != 0) {
+ __ ldr(sp, MemOperand(sp, 0));
+ }
+ __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ Register offset = current_input_offset();
+ if (cp_offset != 0) {
+ __ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = r0;
+ }
+ // We assume that we cannot do unaligned loads on ARM, so this function
+ // must only be used to load a single character at a time.
+ ASSERT(characters == 1);
+ if (mode_ == ASCII) {
+ __ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
+ } else {
+ ASSERT(mode_ == UC16);
+ __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
+ }
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+ int stack_alignment = OS::ActivationFrameAlignment();
+ if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
+ // Stack is already aligned for call, so decrement by alignment
+ // to make room for storing the link register.
+ __ str(lr, MemOperand(sp, stack_alignment, NegPreIndex));
+ __ mov(r0, sp);
+ __ Call(r5);
+ __ ldr(pc, MemOperand(sp, stack_alignment, PostIndex));
+}
+
+#undef __
+
+#endif // V8_NATIVE_REGEXP
+
+}} // namespace v8::internal
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index de5518379..0711ac19f 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -31,12 +31,238 @@
namespace v8 {
namespace internal {
+
+#ifndef V8_NATIVE_REGEXP
class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
public:
RegExpMacroAssemblerARM();
virtual ~RegExpMacroAssemblerARM();
};
+#else
+class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerARM(Mode mode, int registers_to_save);
+ virtual ~RegExpMacroAssemblerARM();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ int cp_offset,
+ bool check_offset,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<Object> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetRegister(int register_index, int to);
+ virtual void Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Register 4..11.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+ static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
+ // Stack parameters placed by caller.
+ static const int kRegisterOutput = kReturnAddress + kPointerSize;
+ static const int kAtStart = kRegisterOutput + kPointerSize;
+ static const int kStackHighEnd = kAtStart + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kInputEnd = kFramePointer - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kInputStartMinusOne = kInputString - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ static const int kBacktrackConstantPoolSize = 4;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ void EmitBacktrackConstantPool();
+ int GetBacktrackConstantPoolEntry();
+
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The fp-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return r6; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return r7; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return r10; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return r8; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return r5; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Condition condition, Label* to);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to, Condition cond = al);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // Before calling a C-function from generated code, align arguments on stack.
+ // After aligning the frame, non-register arguments must be stored in
+ // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+ // are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ inline void FrameAlign(int num_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by FrameAlign. The called function is not allowed to trigger a garbage
+ // collection.
+ inline void CallCFunction(ExternalReference function,
+ int num_arguments);
+
+ // Calls a C function and cleans up the frame alignment done by
+ // FrameAlign. The called function *is* allowed to trigger a garbage
+ // collection, but may not take more than four arguments (no arguments
+ // passed on the stack), and the first argument will be a pointer to the
+ // return address.
+ inline void CallCFunctionUsingStub(ExternalReference function,
+ int num_arguments);
+
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Manage a small pre-allocated pool for writing label targets
+ // to for pushing backtrack addresses.
+ int backtrack_constant_pool_offset_;
+ int backtrack_constant_pool_capacity_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+
+#endif // V8_NATIVE_REGEXP
+
+
}} // namespace v8::internal
#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index e5500aad5..e258e5a68 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
-
+#include <cstdarg>
#include "v8.h"
#include "disasm.h"
@@ -598,7 +598,7 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned read at %x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword read at %x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
return 0;
}
@@ -609,7 +609,7 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned read at %x\n", addr);
+ PrintF("Unaligned signed halfword read at %x\n", addr);
UNIMPLEMENTED();
return 0;
}
@@ -621,7 +621,7 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned write at %x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword write at %x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
}
@@ -632,7 +632,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
*ptr = value;
return;
}
- PrintF("Unaligned write at %x, pc=%p\n", addr, instr);
+ PrintF("Unaligned halfword write at %x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
}
@@ -1051,7 +1051,6 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
}
set_register(r0, lo_res);
set_register(r1, hi_res);
- set_register(r0, result);
}
set_register(lr, saved_lr);
set_pc(get_register(lr));
@@ -1417,8 +1416,12 @@ void Simulator::DecodeType01(Instr* instr) {
case CMN: {
if (instr->HasS()) {
- Format(instr, "cmn'cond 'rn, 'shift_rm");
- Format(instr, "cmn'cond 'rn, 'imm");
+ // Format(instr, "cmn'cond 'rn, 'shift_rm");
+ // Format(instr, "cmn'cond 'rn, 'imm");
+ alu_out = rn_val + shifter_operand;
+ SetNZFlags(alu_out);
+ SetCFlag(!CarryFrom(rn_val, shifter_operand));
+ SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
} else {
ASSERT(type == 0);
int rm = instr->RmField();
@@ -1567,6 +1570,7 @@ void Simulator::DecodeType2(Instr* instr) {
void Simulator::DecodeType3(Instr* instr) {
+ ASSERT(instr->Bit(4) == 0);
int rd = instr->RdField();
int rn = instr->RnField();
int32_t rn_val = get_register(rn);
@@ -1606,7 +1610,12 @@ void Simulator::DecodeType3(Instr* instr) {
}
}
if (instr->HasB()) {
- UNIMPLEMENTED();
+ if (instr->HasL()) {
+ uint8_t byte = ReadB(addr);
+ set_register(rd, byte);
+ } else {
+ UNIMPLEMENTED();
+ }
} else {
if (instr->HasL()) {
set_register(rd, ReadW(addr, instr));
@@ -1631,12 +1640,13 @@ void Simulator::DecodeType4(Instr* instr) {
void Simulator::DecodeType5(Instr* instr) {
// Format(instr, "b'l'cond 'target");
- int off = (instr->SImmed24Field() << 2) + 8;
- intptr_t pc = get_pc();
+ int off = (instr->SImmed24Field() << 2);
+ intptr_t pc_address = get_pc();
if (instr->HasLink()) {
- set_register(lr, pc + Instr::kInstrSize);
+ set_register(lr, pc_address + Instr::kInstrSize);
}
- set_pc(pc+off);
+ int pc_reg = get_register(pc);
+ set_pc(pc_reg + off);
}
@@ -1655,14 +1665,75 @@ void Simulator::DecodeType7(Instr* instr) {
}
+void Simulator::DecodeUnconditional(Instr* instr) {
+ if (instr->Bits(7, 4) == 0x0B && instr->Bits(27, 25) == 0 && instr->HasL()) {
+ // Load halfword instruction, either register or immediate offset.
+ int rd = instr->RdField();
+ int rn = instr->RnField();
+ int32_t rn_val = get_register(rn);
+ int32_t addr = 0;
+ int32_t offset;
+ if (instr->Bit(22) == 0) {
+ // Register offset.
+ int rm = instr->RmField();
+ offset = get_register(rm);
+ } else {
+      // Immediate offset.
+ offset = instr->Bits(3, 0) + (instr->Bits(11, 8) << 4);
+ }
+ switch (instr->PUField()) {
+ case 0: {
+ // Post index, negative.
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val -= offset;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 1: {
+ // Post index, positive.
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val += offset;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 2: {
+ // Pre index or offset, negative.
+ rn_val -= offset;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ case 3: {
+ // Pre index or offset, positive.
+ rn_val += offset;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+ // Not sign extending, so load as unsigned.
+    uint16_t halfword = ReadHU(addr, instr);
+ set_register(rd, halfword);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instr* instr) {
pc_modified_ = false;
- if (instr->ConditionField() == special_condition) {
- Debugger dbg(this);
- dbg.Stop(instr);
- return;
- }
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -1672,7 +1743,9 @@ void Simulator::InstructionDecode(Instr* instr) {
reinterpret_cast<byte*>(instr));
PrintF(" 0x%x %s\n", instr, buffer.start());
}
- if (ConditionallyExecute(instr)) {
+ if (instr->ConditionField() == special_condition) {
+ DecodeUnconditional(instr);
+ } else if (ConditionallyExecute(instr)) {
switch (instr->TypeField()) {
case 0:
case 1: {
@@ -1748,19 +1821,35 @@ void Simulator::Execute() {
}
-Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
- int32_t p3, int32_t p4) {
- // Setup parameters
- set_register(r0, p0);
- set_register(r1, p1);
- set_register(r2, p2);
- set_register(r3, p3);
- intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
- *(--stack_pointer) = p4;
- set_register(sp, reinterpret_cast<int32_t>(stack_pointer));
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+  // Set up arguments.
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(r0, va_arg(parameters, int32_t));
+ set_register(r1, va_arg(parameters, int32_t));
+ set_register(r2, va_arg(parameters, int32_t));
+ set_register(r3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
// Prepare to execute the code at entry
- set_register(pc, entry);
+ set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
// the LR the simulation stops when returning to this call point.
@@ -1794,14 +1883,14 @@ Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
Execute();
// Check that the callee-saved registers have been preserved.
- CHECK_EQ(get_register(r4), callee_saved_value);
- CHECK_EQ(get_register(r5), callee_saved_value);
- CHECK_EQ(get_register(r6), callee_saved_value);
- CHECK_EQ(get_register(r7), callee_saved_value);
- CHECK_EQ(get_register(r8), callee_saved_value);
- CHECK_EQ(get_register(r9), callee_saved_value);
- CHECK_EQ(get_register(r10), callee_saved_value);
- CHECK_EQ(get_register(r11), callee_saved_value);
+ CHECK_EQ(callee_saved_value, get_register(r4));
+ CHECK_EQ(callee_saved_value, get_register(r5));
+ CHECK_EQ(callee_saved_value, get_register(r6));
+ CHECK_EQ(callee_saved_value, get_register(r7));
+ CHECK_EQ(callee_saved_value, get_register(r8));
+ CHECK_EQ(callee_saved_value, get_register(r9));
+ CHECK_EQ(callee_saved_value, get_register(r10));
+ CHECK_EQ(callee_saved_value, get_register(r11));
// Restore callee-saved registers with the original value.
set_register(r4, r4_val);
@@ -1813,8 +1902,12 @@ Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
set_register(r10, r10_val);
set_register(r11, r11_val);
- int result = get_register(r0);
- return reinterpret_cast<Object*>(result);
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+ int32_t result = get_register(r0);
+ return result;
}
} } // namespace assembler::arm
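For illustration (entry and p0..p6 are placeholders, mirroring the macros in simulator-arm.h below), the new varargs entry point lets the five-argument JS entry and the seven-argument regexp entry share one code path; the first four values land in r0..r3 and the rest are written to the simulator stack:

  Simulator* sim = assembler::arm::Simulator::current();
  // Five arguments: four in registers, one (p4) stored on the stack.
  int32_t js_result = sim->Call(FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4);
  // Seven arguments: p4..p6 end up at sp[0], sp[4], sp[8] on entry.
  int32_t re_result = sim->Call(FUNCTION_ADDR(entry), 7,
                                p0, p1, p2, p3, p4, p5, p6);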
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 15b92a5f8..3917d6a5a 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -40,7 +40,7 @@
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(entry(p0, p1, p2, p3, p4))
+ (entry(p0, p1, p2, p3, p4))
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
@@ -49,13 +49,20 @@
#define GENERATED_CODE_STACK_LIMIT(limit) \
(reinterpret_cast<uintptr_t>(this) - limit)
+
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
+
#else // defined(__arm__)
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- assembler::arm::Simulator::current()->Call((int32_t)entry, (int32_t)p0, \
- (int32_t)p1, (int32_t)p2, (int32_t)p3, (int32_t)p4)
+ reinterpret_cast<Object*>( \
+ assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
+ p0, p1, p2, p3, p4))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code.
@@ -63,6 +70,10 @@
(assembler::arm::Simulator::current()->StackLimit())
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ assembler::arm::Simulator::current()->Call( \
+ FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+
#include "constants-arm.h"
@@ -109,11 +120,10 @@ class Simulator {
// Call on program start.
static void Initialize();
- // V8 generally calls into generated code with 5 parameters. This is a
- // convenience function, which sets up the simulator state and grabs the
- // result on return.
- v8::internal::Object* Call(int32_t entry, int32_t p0, int32_t p1,
- int32_t p2, int32_t p3, int32_t p4);
+ // V8 generally calls into generated JS code with 5 parameters and into
+ // generated RegExp code with 7 parameters. This is a convenience function,
+ // which sets up the simulator state and grabs the result on return.
+ int32_t Call(byte* entry, int argument_count, ...);
private:
enum special_values {
@@ -174,6 +184,7 @@ class Simulator {
void DecodeType5(Instr* instr);
void DecodeType6(Instr* instr);
void DecodeType7(Instr* instr);
+ void DecodeUnconditional(Instr* instr);
// Executes one instruction.
void InstructionDecode(Instr* instr);
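A brief sketch of the intent behind the paired macro definitions (parameter names are placeholders): the call site is identical in native and simulated builds, and only the macro expansion differs.

  // Native ARM build: a direct call; the result is already an int.
  // Simulator build: the same line routes through Simulator::current()->Call.
  int result = CALL_GENERATED_REGEXP_CODE(code_entry,
                                          p0, p1, p2, p3, p4, p5, p6);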
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 03e077940..745b541e5 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -791,7 +791,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
+ __ IncrementCounter(&Counters::call_global_inline, 1, r2, r3);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1342,6 +1342,18 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
}
+Object* ConstructStubCompiler::CompileConstructStub(
+ SharedFunctionInfo* shared) {
+ // Not implemented yet - just jump to generic stub.
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 91952f375..5b5c870ae 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -102,7 +102,8 @@ void VirtualFrame::Enter() {
#ifdef DEBUG
// Verify that r1 contains a JS function. The following code relies
// on r2 being available for use.
- { Label map_check, done;
+ if (FLAG_debug_code) {
+ Label map_check, done;
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &map_check);
__ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
@@ -140,9 +141,26 @@ void VirtualFrame::AllocateStackSlots() {
Adjust(count);
// Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(ip);
- }
+ }
+ if (FLAG_check_stack) {
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ }
+ for (int i = 0; i < count; i++) {
+ __ push(ip);
+ }
+ if (FLAG_check_stack) {
+ // Put the lr setup instruction in the delay slot. The 'sizeof(Instr)' is
+ // added to the implicit 8 byte offset that always applies to operations
+ // with pc and gives a return address 12 bytes down.
+ masm()->add(lr, pc, Operand(sizeof(Instr)));
+ masm()->cmp(sp, Operand(r2));
+ StackCheckStub stub;
+ // Call the stub if lower.
+ masm()->mov(pc,
+ Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+ RelocInfo::CODE_TARGET),
+ LeaveCC,
+ lo);
}
}
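As a worked example of the comment in the stack-check sequence above (addresses illustrative; Instr is 4 bytes): with the add emitted at address A, reading pc yields A+8, so lr becomes A+12, the instruction just past the conditional jump, which is where the stub returns.

  //   A+0:  add lr, pc, #4     ; pc reads as A+8, so lr = A+12
  //   A+4:  cmp sp, r2         ; compare against the stack limit
  //   A+8:  mov pc, #stub, lo  ; taken only when sp is below the limit
  //   A+12: (next instruction) ; the stub returns here via lr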
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 546490ee3..3563ebd2e 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -42,6 +42,20 @@
#include "serialize.h"
#include "stub-cache.h"
#include "regexp-stack.h"
+#include "ast.h"
+#include "regexp-macro-assembler.h"
+// Include native regexp-macro-assembler.
+#ifdef V8_NATIVE_REGEXP
+#if V8_TARGET_ARCH_IA32
+#include "ia32/regexp-macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/regexp-macro-assembler-arm.h"
+#else // Unknown architecture.
+#error "Unknown architecture."
+#endif // Target architecture.
+#endif // V8_NATIVE_REGEXP
namespace v8 {
namespace internal {
@@ -597,6 +611,34 @@ ExternalReference ExternalReference::new_space_allocation_limit_address() {
return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
}
+#ifdef V8_NATIVE_REGEXP
+
+ExternalReference ExternalReference::re_check_stack_guard_state() {
+ Address function;
+#ifdef V8_TARGET_ARCH_X64
+ function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
+#elif V8_TARGET_ARCH_IA32
+ function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
+#elif V8_TARGET_ARCH_ARM
+ function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#else
+ UNREACHABLE("Unexpected architecture");
+#endif
+ return ExternalReference(Redirect(function));
+}
+
+ExternalReference ExternalReference::re_grow_stack() {
+ return ExternalReference(
+ Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
+}
+
+ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
+ return ExternalReference(Redirect(
+ FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
+}
+
+#endif
+
static double add_two_doubles(double x, double y) {
return x + y;
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index e217918ce..827389a1b 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -431,6 +431,19 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference debug_step_in_fp_address();
#endif
+#ifdef V8_NATIVE_REGEXP
+ // C functions called from RegExp generated code.
+
+ // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
+ static ExternalReference re_case_insensitive_compare_uc16();
+
+ // Function RegExpMacroAssembler*::CheckStackGuardState()
+ static ExternalReference re_check_stack_guard_state();
+
+ // Function NativeRegExpMacroAssembler::GrowStack()
+ static ExternalReference re_grow_stack();
+#endif
+
// This lets you register a function that rewrites all external references.
// Used by the ARM simulator to catch calls to external references.
static void set_redirector(ExternalReferenceRedirector* redirector) {
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index dbd18f82a..4262dd2a8 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "api.h"
+#include "arguments.h"
#include "bootstrapper.h"
#include "builtins.h"
#include "ic-inl.h"
@@ -47,17 +48,13 @@ namespace internal {
// BUILTIN_END
//
// In the body of the builtin function, the variable 'receiver' is visible.
-// The arguments can be accessed through:
+// The arguments can be accessed through the Arguments object args.
//
-// BUILTIN_ARG(0): Receiver (also available as 'receiver')
-// BUILTIN_ARG(1): First argument
+// args[0]: Receiver (also available as 'receiver')
+// args[1]: First argument
// ...
-// BUILTIN_ARG(n): Last argument
-//
-// and they evaluate to undefined values if too few arguments were
-// passed to the builtin function invocation.
-//
-// __argc__ is the number of arguments including the receiver.
+// args[n]: Last argument
+// args.length(): Number of arguments including the receiver.
// ----------------------------------------------------------------------------
@@ -65,21 +62,8 @@ namespace internal {
// builtin was invoked as a constructor as part of the
// arguments. Maybe we also want to pass the called function?
#define BUILTIN(name) \
- static Object* Builtin_##name(int __argc__, Object** __argv__) { \
- Handle<Object> receiver(&__argv__[0]);
-
-
-// Use an inline function to avoid evaluating the index (n) more than
-// once in the BUILTIN_ARG macro.
-static inline Object* __builtin_arg__(int n, int argc, Object** argv) {
- ASSERT(n >= 0);
- return (argc > n) ? argv[-n] : Heap::undefined_value();
-}
-
-
-// NOTE: Argument 0 is the receiver. The first 'real' argument is
-// argument 1 - BUILTIN_ARG(1).
-#define BUILTIN_ARG(n) (__builtin_arg__(n, __argc__, __argv__))
+ static Object* Builtin_##name(Arguments args) { \
+ Handle<Object> receiver = args.at<Object>(0);
#define BUILTIN_END \
@@ -168,8 +152,8 @@ BUILTIN(ArrayCode) {
// Optimize the case where there is one argument and the argument is a
// small smi.
- if (__argc__ == 2) {
- Object* obj = BUILTIN_ARG(1);
+ if (args.length() == 2) {
+ Object* obj = args[1];
if (obj->IsSmi()) {
int len = Smi::cast(obj)->value();
if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
@@ -182,14 +166,14 @@ BUILTIN(ArrayCode) {
// Take the argument as the length.
obj = array->Initialize(0);
if (obj->IsFailure()) return obj;
- if (__argc__ == 2) return array->SetElementsLength(BUILTIN_ARG(1));
+ if (args.length() == 2) return array->SetElementsLength(args[1]);
}
// Optimize the case where there are no parameters passed.
- if (__argc__ == 1) return array->Initialize(4);
+ if (args.length() == 1) return array->Initialize(4);
// Take the arguments as elements.
- int number_of_elements = __argc__ - 1;
+ int number_of_elements = args.length() - 1;
Smi* len = Smi::FromInt(number_of_elements);
Object* obj = Heap::AllocateFixedArrayWithHoles(len->value());
if (obj->IsFailure()) return obj;
@@ -197,7 +181,7 @@ BUILTIN(ArrayCode) {
WriteBarrierMode mode = elms->GetWriteBarrierMode();
// Fill in the content
for (int index = 0; index < number_of_elements; index++) {
- elms->set(index, BUILTIN_ARG(index+1), mode);
+ elms->set(index, args[index+1], mode);
}
// Set length and elements on the array.
@@ -217,13 +201,13 @@ BUILTIN(ArrayPush) {
int len = Smi::cast(array->length())->value();
// Set new length.
- int new_length = len + __argc__ - 1;
+ int new_length = len + args.length() - 1;
FixedArray* elms = FixedArray::cast(array->elements());
if (new_length <= elms->length()) {
// Backing storage has extra space for the provided values.
- for (int index = 0; index < __argc__ - 1; index++) {
- elms->set(index + len, BUILTIN_ARG(index+1));
+ for (int index = 0; index < args.length() - 1; index++) {
+ elms->set(index + len, args[index+1]);
}
} else {
// New backing storage is needed.
@@ -235,8 +219,8 @@ BUILTIN(ArrayPush) {
// Fill out the new array with old elements.
for (int i = 0; i < len; i++) new_elms->set(i, elms->get(i), mode);
// Add the provided values.
- for (int index = 0; index < __argc__ - 1; index++) {
- new_elms->set(index + len, BUILTIN_ARG(index+1), mode);
+ for (int index = 0; index < args.length() - 1; index++) {
+ new_elms->set(index + len, args[index+1], mode);
}
// Set the new backing storage.
array->set_elements(new_elms);
@@ -353,7 +337,7 @@ BUILTIN(HandleApiCall) {
FunctionTemplateInfo* fun_data =
FunctionTemplateInfo::cast(function->shared()->function_data());
- Object* raw_holder = TypeCheck(__argc__, __argv__, fun_data);
+ Object* raw_holder = TypeCheck(args.length(), &args[0], fun_data);
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
@@ -380,19 +364,19 @@ BUILTIN(HandleApiCall) {
Handle<JSObject> holder_handle(JSObject::cast(raw_holder));
v8::Local<v8::Object> holder = v8::Utils::ToLocal(holder_handle);
LOG(ApiObjectAccess("call", JSObject::cast(*receiver)));
- v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+ v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
holder,
callee,
is_construct,
- reinterpret_cast<void**>(__argv__ - 1),
- __argc__ - 1);
+ reinterpret_cast<void**>(&args[0] - 1),
+ args.length() - 1);
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
- value = callback(args);
+ value = callback(new_args);
}
if (value.IsEmpty()) {
result = Heap::undefined_value();
@@ -413,13 +397,12 @@ BUILTIN_END
// API. The object can be called as either a constructor (using new) or just as
// a function (without new).
static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
- int __argc__,
- Object** __argv__) {
+ Arguments args) {
// Non-functions are never called as constructors. Even if this is an object
// called as a constructor the delegate call is not a construct call.
ASSERT(!CalledAsConstructor());
- Handle<Object> receiver(&__argv__[0]);
+ Handle<Object> receiver = args.at<Object>(0);
// Get the object called.
JSObject* obj = JSObject::cast(*receiver);
@@ -448,18 +431,18 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
Handle<JSFunction> callee_handle(constructor);
v8::Local<v8::Function> callee = v8::Utils::ToLocal(callee_handle);
LOG(ApiObjectAccess("call non-function", JSObject::cast(*receiver)));
- v8::Arguments args = v8::ImplementationUtilities::NewArguments(
+ v8::Arguments new_args = v8::ImplementationUtilities::NewArguments(
data,
self,
callee,
is_construct_call,
- reinterpret_cast<void**>(__argv__ - 1),
- __argc__ - 1);
+ reinterpret_cast<void**>(&args[0] - 1),
+ args.length() - 1);
v8::Handle<v8::Value> value;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
- value = callback(args);
+ value = callback(new_args);
}
if (value.IsEmpty()) {
result = Heap::undefined_value();
@@ -476,7 +459,7 @@ static Object* HandleApiCallAsFunctionOrConstructor(bool is_construct_call,
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a normal function call.
BUILTIN(HandleApiCallAsFunction) {
- return HandleApiCallAsFunctionOrConstructor(false, __argc__, __argv__);
+ return HandleApiCallAsFunctionOrConstructor(false, args);
}
BUILTIN_END
@@ -484,7 +467,7 @@ BUILTIN_END
// Handle calls to non-function objects created through the API. This delegate
// function is used when the call is a construct call.
BUILTIN(HandleApiCallAsConstructor) {
- return HandleApiCallAsFunctionOrConstructor(true, __argc__, __argv__);
+ return HandleApiCallAsFunctionOrConstructor(true, args);
}
BUILTIN_END
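A minimal sketch (hypothetical builtin, not part of this patch) of the Arguments-based style that the rewritten macro enables:

  BUILTIN(IllustrativeSum) {
    int sum = 0;
    for (int i = 1; i < args.length(); i++) {  // args[0] is the receiver.
      Object* arg = args[i];
      if (arg->IsSmi()) sum += Smi::cast(arg)->value();
    }
    return Smi::FromInt(sum);
  }
  BUILTIN_END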
diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h
index b302e5bee..4ecbaf4cf 100644
--- a/deps/v8/src/checks.h
+++ b/deps/v8/src/checks.h
@@ -95,6 +95,38 @@ static inline void CheckNonEqualsHelper(const char* file,
}
}
+#ifdef V8_TARGET_ARCH_X64
+// Helper function used by the CHECK_EQ function when given intptr_t
+// arguments. Should not be called directly.
+static inline void CheckEqualsHelper(const char* file,
+ int line,
+ const char* expected_source,
+ intptr_t expected,
+ const char* value_source,
+ intptr_t value) {
+ if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n# Expected: %li\n# Found: %li",
+             expected_source, value_source,
+             static_cast<long>(expected), static_cast<long>(value));
+ }
+}
+
+
+// Helper function used by the CHECK_NE function when given intptr_t
+// arguments. Should not be called directly.
+static inline void CheckNonEqualsHelper(const char* file,
+ int line,
+ const char* unexpected_source,
+ intptr_t unexpected,
+ const char* value_source,
+ intptr_t value) {
+ if (unexpected == value) {
+ V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n# Value: %i",
+ unexpected_source, value_source, value);
+ }
+}
+#endif // V8_TARGET_ARCH_X64
+
// Helper function used by the CHECK function when given string
// arguments. Should not be called directly.
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 76ec7870f..ae86c20ef 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -57,6 +57,7 @@ class CodeStub BASE_EMBEDDED {
SetProperty, // ARM only
InvokeBuiltin, // ARM only
JSExit, // ARM only
+ RegExpCEntry, // ARM only
NUMBER_OF_IDS
};
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index feff49240..15f64794d 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -425,6 +425,13 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// Set the expected number of properties for instances.
SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
+  // Set the optimization hints after performing lazy compilation, as these
+  // are not set when the function is set up as a lazily compiled function.
+ shared->SetThisPropertyAssignmentsInfo(
+ lit->has_only_this_property_assignments(),
+ lit->has_only_simple_this_property_assignments(),
+ *lit->this_property_assignments());
+
// Check the function has compiled code.
ASSERT(shared->is_compiled());
return true;
diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js
index 3a0eedbf1..da5be1f3d 100644
--- a/deps/v8/src/d8.js
+++ b/deps/v8/src/d8.js
@@ -1143,7 +1143,7 @@ function DebugResponseDetails(response) {
* @constructor
*/
function ProtocolPackage(json) {
- this.packet_ = eval('(' + json + ')');
+ this.packet_ = JSON.parse(json);
this.refs_ = [];
if (this.packet_.refs) {
for (var i = 0; i < this.packet_.refs.length; i++) {
diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js
index 4f6085128..ce70c75b4 100644
--- a/deps/v8/src/debug-delay.js
+++ b/deps/v8/src/debug-delay.js
@@ -466,9 +466,14 @@ Debug.source = function(f) {
return %FunctionGetSourceCode(f);
};
-Debug.assembler = function(f) {
+Debug.disassemble = function(f) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %FunctionGetAssemblerCode(f);
+ return %DebugDisassembleFunction(f);
+};
+
+Debug.disassembleConstructor = function(f) {
+ if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
+ return %DebugDisassembleConstructor(f);
};
Debug.sourcePosition = function(f) {
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index faeb29ba4..e341022aa 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -661,7 +661,7 @@ bool Debug::CompileDebuggerScript(int index) {
// Check for caught exceptions.
if (caught_exception) {
Handle<Object> message = MessageHandler::MakeMessageObject(
- "error_loading_debugger", NULL, HandleVector<Object>(&result, 1),
+ "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
Handle<String>());
MessageHandler::ReportMessage(NULL, message);
return false;
@@ -2001,9 +2001,7 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
event_listener_data_.location() };
Handle<Object> result = Execution::TryCall(fun, Top::global(),
argc, argv, &caught_exception);
- if (caught_exception) {
- // Silently ignore exceptions from debug event listeners.
- }
+ // Silently ignore exceptions from debug event listeners.
}
}
}
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 7c42e5e74..04ec9059f 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -156,9 +156,12 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
ASSERT(catcher.HasCaught());
ASSERT(Top::has_pending_exception());
ASSERT(Top::external_caught_exception());
- bool is_bottom_call = HandleScopeImplementer::instance()->CallDepthIsZero();
- Top::OptionalRescheduleException(is_bottom_call, true);
- result = v8::Utils::OpenHandle(*catcher.Exception());
+ if (Top::pending_exception() == Heap::termination_exception()) {
+ result = Factory::termination_exception();
+ } else {
+ result = v8::Utils::OpenHandle(*catcher.Exception());
+ }
+ Top::OptionalRescheduleException(true);
}
ASSERT(!Top::has_pending_exception());
@@ -234,8 +237,9 @@ StackGuard::StackGuard() {
(thread_local_.climit_ == kInterruptLimit &&
thread_local_.interrupt_flags_ != 0));
- thread_local_.initial_jslimit_ = thread_local_.jslimit_ =
- GENERATED_CODE_STACK_LIMIT(kLimitSize);
+ uintptr_t limit = GENERATED_CODE_STACK_LIMIT(kLimitSize);
+ thread_local_.initial_jslimit_ = thread_local_.jslimit_ = limit;
+ Heap::SetStackLimit(limit);
// NOTE: The check for overflow is not safe as there is no guarantee that
// the running thread has its stack in all memory up to address 0x00000000.
thread_local_.initial_climit_ = thread_local_.climit_ =
@@ -283,6 +287,7 @@ void StackGuard::SetStackLimit(uintptr_t limit) {
// leave them alone.
if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) {
thread_local_.jslimit_ = limit;
+ Heap::SetStackLimit(limit);
}
if (thread_local_.climit_ == thread_local_.initial_climit_) {
thread_local_.climit_ = limit;
@@ -397,6 +402,7 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access;
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ Heap::SetStackLimit(thread_local_.jslimit_);
return from + sizeof(ThreadLocal);
}
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 456cbe74d..4cdfd2be6 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -175,6 +175,10 @@ class StackGuard BASE_EMBEDDED {
#endif
static void Continue(InterruptFlag after_what);
+ static uintptr_t jslimit() {
+ return thread_local_.jslimit_;
+ }
+
private:
// You should hold the ExecutionAccess lock when calling this method.
static bool IsSet(const ExecutionAccess& lock);
@@ -188,6 +192,7 @@ class StackGuard BASE_EMBEDDED {
// You should hold the ExecutionAccess lock when calling this method.
static void set_limits(uintptr_t value, const ExecutionAccess& lock) {
+ Heap::SetStackLimit(value);
thread_local_.jslimit_ = value;
thread_local_.climit_ = value;
}
@@ -200,6 +205,7 @@ class StackGuard BASE_EMBEDDED {
set_limits(kIllegalLimit, lock);
} else {
thread_local_.jslimit_ = thread_local_.initial_jslimit_;
+ Heap::SetStackLimit(thread_local_.jslimit_);
thread_local_.climit_ = thread_local_.initial_climit_;
}
}
@@ -220,13 +226,15 @@ class StackGuard BASE_EMBEDDED {
class ThreadLocal {
public:
ThreadLocal()
- : initial_jslimit_(kIllegalLimit),
- jslimit_(kIllegalLimit),
- initial_climit_(kIllegalLimit),
- climit_(kIllegalLimit),
- nesting_(0),
- postpone_interrupts_nesting_(0),
- interrupt_flags_(0) {}
+ : initial_jslimit_(kIllegalLimit),
+ jslimit_(kIllegalLimit),
+ initial_climit_(kIllegalLimit),
+ climit_(kIllegalLimit),
+ nesting_(0),
+ postpone_interrupts_nesting_(0),
+ interrupt_flags_(0) {
+ Heap::SetStackLimit(kIllegalLimit);
+ }
uintptr_t initial_jslimit_;
uintptr_t jslimit_;
uintptr_t initial_climit_;
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index b04cf506d..c5f2f1a33 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -128,8 +128,9 @@ inline Address StandardFrame::ComputePCAddress(Address fp) {
inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
- int context = Memory::int_at(fp + StandardFrameConstants::kContextOffset);
- return context == ArgumentsAdaptorFrame::SENTINEL;
+ Object* marker =
+ Memory::Object_at(fp + StandardFrameConstants::kContextOffset);
+ return marker == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
}
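To connect the two halves of this change (a sketch only): the marker is written as a smi when the adaptor frame is built, as the ia32 changes further down show, and tested here when walking the stack.

  // When constructing the frame:
  //   __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  // When inspecting a frame:
  //   if (StandardFrame::IsArgumentsAdaptorFrame(fp)) { /* adaptor frame */ }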
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index f002e1216..768196d3c 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -434,15 +434,6 @@ class JavaScriptFrame: public StandardFrame {
// match the formal number of parameters.
class ArgumentsAdaptorFrame: public JavaScriptFrame {
public:
- // This sentinel value is temporarily used to distinguish arguments
- // adaptor frames from ordinary JavaScript frames. If a frame has
- // the sentinel as its context, it is an arguments adaptor frame. It
- // must be tagged as a small integer to avoid GC issues. Crud.
- enum {
- SENTINEL = (1 << kSmiTagSize) | kSmiTag,
- NON_SENTINEL = ~SENTINEL
- };
-
virtual Type type() const { return ARGUMENTS_ADAPTOR; }
// Determine the code for the frame.
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 195a2e2fc..efe0127e0 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -47,7 +47,14 @@ namespace internal {
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
#else
-#error Your architecture was not detected as supported by v8
+#error Your host architecture was not detected as supported by v8
+#endif
+
+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
+#define V8_TARGET_CAN_READ_UNALIGNED 1
+#elif V8_TARGET_ARCH_ARM
+#else
+#error Your target architecture is not supported by v8
#endif
// Support for alternative bool type. This is only enabled if the code is
@@ -134,17 +141,6 @@ const intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
const intptr_t kPointerAlignment = (1 << kPointerSizeLog2);
const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
-// Tag information for HeapObject.
-const int kHeapObjectTag = 1;
-const int kHeapObjectTagSize = 2;
-const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
-
-
-// Tag information for Smi.
-const int kSmiTag = 0;
-const int kSmiTagSize = 1;
-const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
-
// Tag information for Failure.
const int kFailureTag = 3;
@@ -429,9 +425,6 @@ enum StateTag {
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-#define HAS_HEAP_OBJECT_TAG(value) \
- ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) == kHeapObjectTag)
-
// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 6345d4181..fae006a4d 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -53,8 +53,8 @@ int HandleScope::NumberOfHandles() {
}
-void** HandleScope::Extend() {
- void** result = current_.next;
+Object** HandleScope::Extend() {
+ Object** result = current_.next;
ASSERT(result == current_.limit);
// Make sure there's at least one scope on the stack and that the
@@ -68,7 +68,7 @@ void** HandleScope::Extend() {
// If there's more room in the last block, we use that. This is used
// for fast creation of scopes after scope barriers.
if (!impl->Blocks()->is_empty()) {
- void** limit = &impl->Blocks()->last()[kHandleBlockSize];
+ Object** limit = &impl->Blocks()->last()[kHandleBlockSize];
if (current_.limit != limit) {
current_.limit = limit;
}
@@ -96,10 +96,10 @@ void HandleScope::DeleteExtensions() {
}
-void HandleScope::ZapRange(void** start, void** end) {
+void HandleScope::ZapRange(Object** start, Object** end) {
if (start == NULL) return;
- for (void** p = start; p < end; p++) {
- *p = reinterpret_cast<void*>(v8::internal::kHandleZapValue);
+ for (Object** p = start; p < end; p++) {
+ *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
}
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 8c9cbebfd..847aebb3b 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -121,7 +121,7 @@ class HandleScope {
// Creates a new handle with the given value.
template <typename T>
static inline T** CreateHandle(T* value) {
- void** cur = current_.next;
+ internal::Object** cur = current_.next;
if (cur == current_.limit) cur = Extend();
// Update the current next field, set the value in the created
// handle, and return the result.
@@ -164,13 +164,13 @@ class HandleScope {
}
// Extend the handle scope making room for more handles.
- static void** Extend();
+ static internal::Object** Extend();
// Deallocates any extensions used by the current scope.
static void DeleteExtensions();
// Zaps the handles in the half-open interval [start, end).
- static void ZapRange(void** start, void** end);
+ static void ZapRange(internal::Object** start, internal::Object** end);
friend class v8::HandleScope;
friend class v8::ImplementationUtilities;
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index ef4c2d4b0..c29815e55 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -39,6 +39,9 @@
#include "scanner.h"
#include "scopeinfo.h"
#include "v8threads.h"
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#include "regexp-macro-assembler.h"
+#endif
namespace v8 {
namespace internal {
@@ -254,6 +257,7 @@ void Heap::ReportStatisticsAfterGC() {
void Heap::GarbageCollectionPrologue() {
+ TranscendentalCache::Clear();
gc_count_++;
#ifdef DEBUG
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
@@ -465,9 +469,9 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
old_gen_allocation_limit_ =
old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
old_gen_exhausted_ = false;
- } else {
- Scavenge();
}
+ Scavenge();
+
Counters::objs_since_last_young.Set(0);
PostGarbageCollectionProcessing();
@@ -521,12 +525,6 @@ void Heap::MarkCompact(GCTracer* tracer) {
Counters::objs_since_last_full.Set(0);
context_disposed_pending_ = false;
-
- Scavenge();
-
- // Shrink new space as much as possible after compacting full
- // garbage collections.
- if (is_compacting) new_space_.Shrink();
}
@@ -1326,6 +1324,14 @@ void Heap::CreateCEntryStub() {
}
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+void Heap::CreateRegExpCEntryStub() {
+ RegExpCEntryStub stub;
+ set_re_c_entry_code(*stub.GetCode());
+}
+#endif
+
+
void Heap::CreateCEntryDebugBreakStub() {
CEntryDebugBreakStub stub;
set_c_entry_debug_break_code(*stub.GetCode());
@@ -1362,6 +1368,9 @@ void Heap::CreateFixedStubs() {
Heap::CreateCEntryDebugBreakStub();
Heap::CreateJSEntryStub();
Heap::CreateJSConstructEntryStub();
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+ Heap::CreateRegExpCEntryStub();
+#endif
}
@@ -2785,6 +2794,41 @@ STRUCT_LIST(MAKE_CASE)
}
+bool Heap::IdleNotification() {
+ static const int kIdlesBeforeCollection = 7;
+ static int number_idle_notifications = 0;
+ static int last_gc_count = gc_count_;
+
+ bool finished = false;
+
+ if (last_gc_count == gc_count_) {
+ number_idle_notifications++;
+ } else {
+ number_idle_notifications = 0;
+ last_gc_count = gc_count_;
+ }
+
+ if (number_idle_notifications >= kIdlesBeforeCollection) {
+ // The first time through we collect without forcing compaction.
+ // The second time through we force compaction and quit.
+ bool force_compaction =
+ number_idle_notifications > kIdlesBeforeCollection;
+ CollectAllGarbage(force_compaction);
+ last_gc_count = gc_count_;
+ if (force_compaction) {
+ // Shrink new space.
+ new_space_.Shrink();
+ number_idle_notifications = 0;
+ finished = true;
+ }
+ }
+
+ // Uncommit unused memory in new space.
+ Heap::UncommitFromSpace();
+ return finished;
+}
+
+
#ifdef DEBUG
void Heap::Print() {
@@ -2950,7 +2994,7 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
#ifdef DEBUG
void Heap::ZapFromSpace() {
- ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
+ ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
for (Address a = new_space_.FromSpaceLow();
a < new_space_.FromSpaceHigh();
a += kPointerSize) {
@@ -3223,6 +3267,17 @@ bool Heap::Setup(bool create_heap_objects) {
}
+void Heap::SetStackLimit(intptr_t limit) {
+  // On 64 bit machines, pointers are generally out of range of Smis, so we
+  // write something that looks like an out-of-range Smi to the GC.
+
+ // Set up the special root array entry containing the stack guard.
+ // This is actually an address, but the tag makes the GC ignore it.
+ roots_[kStackLimitRootIndex] =
+ reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag);
+}
+
+
void Heap::TearDown() {
GlobalHandles::TearDown();
@@ -3932,4 +3987,30 @@ bool Heap::GarbageCollectionGreedyCheck() {
}
#endif
+
+TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
+ : type_(t) {
+ uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
+ uint32_t in1 = 0xffffffffu; // generated by the FPU.
+ for (int i = 0; i < kCacheSize; i++) {
+ elements_[i].in[0] = in0;
+ elements_[i].in[1] = in1;
+ elements_[i].output = NULL;
+ }
+}
+
+
+TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
+
+
+void TranscendentalCache::Clear() {
+ for (int i = 0; i < kNumberOfCaches; i++) {
+ if (caches_[i] != NULL) {
+ delete caches_[i];
+ caches_[i] = NULL;
+ }
+ }
+}
+
+
} } // namespace v8::internal
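A worked example of the tagging done in Heap::SetStackLimit above (the address is made up; kSmiTagSize is 1, so kSmiTagMask is 1 and kSmiTag is 0):

  intptr_t limit  = 0x7f5e1000;                        // hypothetical jslimit_
  intptr_t stored = (limit & ~kSmiTagMask) | kSmiTag;  // clears the low bit
  // stored == 0x7f5e1000: the low bit is 0, so the GC reads the root as a
  // smi and never tries to follow it as a heap pointer.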
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 9fd3fecb7..028dd1118 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -28,15 +28,31 @@
#ifndef V8_HEAP_H_
#define V8_HEAP_H_
+#include <math.h>
+
#include "zone-inl.h"
+
namespace v8 {
namespace internal {
// Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V) \
- V(Map, meta_map, MetaMap) \
+#define UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+ /* Cluster the most popular ones in a few cache lines here at the top. */ \
+ V(Smi, stack_limit, StackLimit) \
+ V(Object, undefined_value, UndefinedValue) \
+ V(Object, the_hole_value, TheHoleValue) \
+ V(Object, null_value, NullValue) \
+ V(Object, true_value, TrueValue) \
+ V(Object, false_value, FalseValue) \
V(Map, heap_number_map, HeapNumberMap) \
+ V(Map, global_context_map, GlobalContextMap) \
+ V(Map, fixed_array_map, FixedArrayMap) \
+ V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
+ V(Map, meta_map, MetaMap) \
+ V(Object, termination_exception, TerminationException) \
+ V(Map, hash_table_map, HashTableMap) \
+ V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(Map, short_string_map, ShortStringMap) \
V(Map, medium_string_map, MediumStringMap) \
V(Map, long_string_map, LongStringMap) \
@@ -95,11 +111,8 @@ namespace internal {
V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
V(Map, byte_array_map, ByteArrayMap) \
V(Map, pixel_array_map, PixelArrayMap) \
- V(Map, fixed_array_map, FixedArrayMap) \
- V(Map, hash_table_map, HashTableMap) \
V(Map, context_map, ContextMap) \
V(Map, catch_context_map, CatchContextMap) \
- V(Map, global_context_map, GlobalContextMap) \
V(Map, code_map, CodeMap) \
V(Map, oddball_map, OddballMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
@@ -109,17 +122,9 @@ namespace internal {
V(Map, one_pointer_filler_map, OnePointerFillerMap) \
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
- V(Object, undefined_value, UndefinedValue) \
- V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
- V(Object, termination_exception, TerminationException) \
V(Object, minus_zero_value, MinusZeroValue) \
- V(Object, null_value, NullValue) \
- V(Object, true_value, TrueValue) \
- V(Object, false_value, FalseValue) \
V(String, empty_string, EmptyString) \
- V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
- V(Object, the_hole_value, TheHoleValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Proxy, prototype_accessors, PrototypeAccessors) \
@@ -132,8 +137,15 @@ namespace internal {
V(FixedArray, number_string_cache, NumberStringCache) \
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, natives_source_cache, NativesSourceCache) \
- V(Object, last_script_id, LastScriptId)
+  V(Object, last_script_id, LastScriptId) \
+
+#if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
+#define STRONG_ROOT_LIST(V) \
+ UNCONDITIONAL_STRONG_ROOT_LIST(V) \
+ V(Code, re_c_entry_code, RegExpCEntryCode)
+#else
+#define STRONG_ROOT_LIST(V) UNCONDITIONAL_STRONG_ROOT_LIST(V)
+#endif
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -227,6 +239,11 @@ class Heap : public AllStatic {
// Destroys all memory allocated by the heap.
static void TearDown();
+  // Sets the stack limit in the roots_ array. Some architectures generate
+  // code that reads the limit from here, because that is faster than loading
+  // it from the static jslimit_ variable.
+ static void SetStackLimit(intptr_t limit);
+
// Returns whether Setup has been called.
static bool HasBeenSetup();
@@ -843,37 +860,7 @@ class Heap : public AllStatic {
}
// Can be called when the embedding application is idle.
- static bool IdleNotification() {
- static const int kIdlesBeforeCollection = 7;
- static int number_idle_notifications = 0;
- static int last_gc_count = gc_count_;
-
- bool finished = false;
-
- if (last_gc_count == gc_count_) {
- number_idle_notifications++;
- } else {
- number_idle_notifications = 0;
- last_gc_count = gc_count_;
- }
-
- if (number_idle_notifications >= kIdlesBeforeCollection) {
- // The first time through we collect without forcing compaction.
- // The second time through we force compaction and quit.
- bool force_compaction =
- number_idle_notifications > kIdlesBeforeCollection;
- CollectAllGarbage(force_compaction);
- last_gc_count = gc_count_;
- if (force_compaction) {
- number_idle_notifications = 0;
- finished = true;
- }
- }
-
- // Uncommit unused memory in new space.
- Heap::UncommitFromSpace();
- return finished;
- }
+ static bool IdleNotification();
// Declare all the root indices.
enum RootListIndex {
@@ -1048,6 +1035,8 @@ class Heap : public AllStatic {
static void CreateCEntryDebugBreakStub();
static void CreateJSEntryStub();
static void CreateJSConstructEntryStub();
+ static void CreateRegExpCEntryStub();
+
static void CreateFixedStubs();
static Object* CreateOddball(Map* map,
@@ -1533,6 +1522,91 @@ class GCTracer BASE_EMBEDDED {
int previous_marked_count_;
};
+
+class TranscendentalCache {
+ public:
+ enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
+
+ explicit TranscendentalCache(Type t);
+
+ // Returns a heap number with f(input), where f is a math function specified
+ // by the 'type' argument.
+ static inline Object* Get(Type type, double input) {
+ TranscendentalCache* cache = caches_[type];
+ if (cache == NULL) {
+ caches_[type] = cache = new TranscendentalCache(type);
+ }
+ return cache->Get(input);
+ }
+
+ // The cache contains raw Object pointers. This method disposes of
+ // them before a garbage collection.
+ static void Clear();
+
+ private:
+ inline Object* Get(double input) {
+ Converter c;
+ c.dbl = input;
+ int hash = Hash(c);
+ Element e = elements_[hash];
+ if (e.in[0] == c.integers[0] &&
+ e.in[1] == c.integers[1]) {
+ ASSERT(e.output != NULL);
+ return e.output;
+ }
+ double answer = Calculate(input);
+ Object* heap_number = Heap::AllocateHeapNumber(answer);
+ if (!heap_number->IsFailure()) {
+ elements_[hash].in[0] = c.integers[0];
+ elements_[hash].in[1] = c.integers[1];
+ elements_[hash].output = heap_number;
+ }
+ return heap_number;
+ }
+
+ inline double Calculate(double input) {
+ switch (type_) {
+ case ACOS:
+ return acos(input);
+ case ASIN:
+ return asin(input);
+ case ATAN:
+ return atan(input);
+ case COS:
+ return cos(input);
+ case EXP:
+ return exp(input);
+ case LOG:
+ return log(input);
+ case SIN:
+ return sin(input);
+ case TAN:
+ return tan(input);
+ default:
+ return 0.0; // Never happens.
+ }
+ }
+ static const int kCacheSize = 512;
+ struct Element {
+ uint32_t in[2];
+ Object* output;
+ };
+ union Converter {
+ double dbl;
+ uint32_t integers[2];
+ };
+ inline static int Hash(const Converter& c) {
+ uint32_t hash = (c.integers[0] ^ c.integers[1]);
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+ return (hash & (kCacheSize - 1));
+ }
+ static TranscendentalCache* caches_[kNumberOfCaches];
+ Element elements_[kCacheSize];
+ Type type_;
+};
+
+
} } // namespace v8::internal
#endif // V8_HEAP_H_
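For illustration, a call site might use the cache like this (the input value is arbitrary):

  Object* result = TranscendentalCache::Get(TranscendentalCache::SIN, 0.5);
  if (result->IsFailure()) return result;  // allocation failed; propagate
  // A repeated call with the same input bit pattern hits the 512-entry
  // direct-mapped cache and returns the same heap number without
  // recomputing sin().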
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 6de9de6ee..55dc92dd9 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -132,15 +132,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Make sure that the maximum heap object size will never cause us
// problem here, because it is always greater than the maximum
// instance size that can be represented in a byte.
- ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
- __ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
- __ add(edi, Operand(ebx)); // Calculate new top
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ cmp(edi, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above_equal, &rt_call);
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
+ __ AllocateObjectInNewSpace(edi, ebx, edi, no_reg, &rt_call, false);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -165,15 +158,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(less, &loop);
}
- // Mostly done with the JSObject. Add the heap tag and store the new top, so
- // that we can continue and jump into the continuation code at any time from
- // now on. Any failures need to undo the setting of the new top, so that the
- // heap is in a consistent state and verifiable.
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
// eax: initial map
// ebx: JSObject
// edi: start of next object
__ or_(Operand(ebx), Immediate(kHeapObjectTag));
- __ mov(Operand::StaticVariable(new_space_allocation_top), edi);
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -198,10 +190,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// edx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
- __ lea(ecx, Operand(edi, edx, times_pointer_size, FixedArray::kHeaderSize));
- __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(above_equal, &undo_allocation);
- __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+ __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ edx,
+ edi,
+ ecx,
+ no_reg,
+ &undo_allocation,
+ true);
// Initialize the FixedArray.
// ebx: JSObject
@@ -245,8 +241,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// allocated objects unused properties.
// ebx: JSObject (previous new top)
__ bind(&undo_allocation);
- __ xor_(Operand(ebx), Immediate(kHeapObjectTag)); // clear the heap tag
- __ mov(Operand::StaticVariable(new_space_allocation_top), ebx);
+ __ UndoAllocationInNewSpace(ebx);
}
// Allocate the new receiver object using the runtime call.
@@ -669,7 +664,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ mov(ebp, Operand(esp));
// Store the arguments adaptor context sentinel.
- __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Push the function on the stack.
__ push(edi);
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index bf1f81b13..a9face1d7 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -2139,7 +2139,8 @@ void CodeGenerator::CallApplyLazy(Property* apply,
Label invoke, adapted;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
@@ -4912,7 +4913,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &check_frame_marker);
__ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
@@ -6947,21 +6948,18 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register result) {
- ExternalReference allocation_top =
- ExternalReference::new_space_allocation_top_address();
- ExternalReference allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ mov(Operand(scratch1), Immediate(allocation_top));
- __ mov(result, Operand(scratch1, 0));
- __ lea(scratch2, Operand(result, HeapNumber::kSize)); // scratch2: new top
- __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
- __ j(above, need_gc, not_taken);
-
- __ mov(Operand(scratch1, 0), scratch2); // store new top
+ // Allocate heap number in new space.
+ __ AllocateObjectInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ false);
+
+ // Set the map and tag the result.
__ mov(Operand(result, HeapObject::kMapOffset),
Immediate(Factory::heap_number_map()));
- // Tag old top and use as result.
- __ add(Operand(result), Immediate(kHeapObjectTag));
+ __ or_(Operand(result), Immediate(kHeapObjectTag));
}
@@ -7109,7 +7107,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
Label adaptor;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Nothing to do: The formal number of parameters has already been
@@ -7141,7 +7139,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Check index against formal parameters count limit passed in
@@ -7192,7 +7190,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
@@ -7724,11 +7722,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(ebp);
__ mov(ebp, Operand(esp));
- // Save callee-saved registers (C calling conventions).
+ // Push marker in two places.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Push something that is not an arguments adaptor.
- __ push(Immediate(~ArgumentsAdaptorFrame::SENTINEL));
- __ push(Immediate(Smi::FromInt(marker))); // @ function offset
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (C calling conventions).
__ push(edi);
__ push(esi);
__ push(ebx);
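The allocation pattern above recurs throughout this patch; a condensed sketch (registers and the bailout label are illustrative) of allocating and tagging a heap number with the new helper:

  __ AllocateObjectInNewSpace(HeapNumber::kSize, eax, ebx, ecx, &runtime, false);
  __ mov(Operand(eax, HeapObject::kMapOffset),
         Immediate(Factory::heap_number_map()));
  __ or_(Operand(eax), Immediate(kHeapObjectTag));  // tag the untagged result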
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index fa9b8a20f..e39808b2e 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -604,7 +604,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
__ cmp(eax, FIRST_JS_OBJECT_TYPE);
- __ j(less, &miss, not_taken);
+ __ j(below, &miss, not_taken);
// If this assert fails, we have to check upper bound too.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index e362cd3c5..754b74abe 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -620,6 +620,146 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+void MacroAssembler::LoadAllocationTopHelper(
+ Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Just return if allocation top is already known.
+ if (result_contains_top_on_entry) {
+ // No use of scratch if allocation top is provided.
+ ASSERT(scratch.is(no_reg));
+ return;
+ }
+
+ // Move address of new object to result. Use scratch register if available.
+ if (scratch.is(no_reg)) {
+ mov(result, Operand::StaticVariable(new_space_allocation_top));
+ } else {
+ ASSERT(!scratch.is(result_end));
+ mov(Operand(scratch), Immediate(new_space_allocation_top));
+ mov(result, Operand(scratch, 0));
+ }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Update new top. Use scratch if available.
+ if (scratch.is(no_reg)) {
+ mov(Operand::StaticVariable(new_space_allocation_top), result_end);
+ } else {
+ mov(Operand(scratch, 0), result_end);
+ }
+}
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, object_size));
+ cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ j(above, gc_required, not_taken);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, element_count, element_size, header_size));
+ cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ if (!object_size.is(result_end)) {
+ mov(result_end, object_size);
+ }
+ add(result_end, Operand(result));
+ cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
+ j(above, gc_required, not_taken);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Make sure the object has no tag before resetting top.
+ and_(Operand(object), Immediate(~kHeapObjectTagMask));
+#ifdef DEBUG
+ cmp(object, Operand::StaticVariable(new_space_allocation_top));
+ Check(below, "Undo allocation of non allocated memory");
+#endif
+ mov(Operand::StaticVariable(new_space_allocation_top), object);
+}
+
+
void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
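
The add-to-or_ switch for tagging (see the heap-number hunk at the top of this section) is safe because new-space allocations are aligned, so the tag bits of an untagged address are always zero; UndoAllocationInNewSpace relies on the converse to recover the raw pointer. A self-contained check, with the tag constants assumed from the ia32 globals of this version:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kHeapObjectTag = 1;      // assumed value
  const uintptr_t kHeapObjectTagMask = 3;  // assumed value
  uintptr_t top = 0x08f00010;              // some pointer-aligned address
  uintptr_t tagged = top | kHeapObjectTag;
  assert(tagged == top + kHeapObjectTag);         // or_ and add agree
  assert((tagged & ~kHeapObjectTagMask) == top);  // the undo-path untag
  return 0;
}
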
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 42620dd8c..f10ec16aa 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -184,6 +184,48 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
+ // Allocation support
+
+  // Allocate an object in new space. If the new space is exhausted, control
+  // continues at the gc_required label. The allocated object is returned in
+  // result and the end of the new object is returned in result_end. The
+  // register scratch can be passed as no_reg, in which case an additional
+  // object reference will be added to the reloc info. The returned pointers
+  // in result and result_end have not yet been tagged as heap objects. If
+  // result_contains_top_on_entry is true, the content of result is known to
+  // be the allocation top on entry (it could be result_end from a previous
+  // call to AllocateObjectInNewSpace), and scratch should be no_reg, as it
+  // is never used.
+ void AllocateObjectInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+  // Undo allocation in new space. The object passed and objects allocated
+  // after it will no longer be considered allocated. Make sure that no
+  // pointers are left to the object(s) no longer allocated, as they would
+  // be invalid when the allocation is undone.
+ void UndoAllocationInNewSpace(Register object);
+
+ // ---------------------------------------------------------------------------
// Support functions.
// Check if result is zero and op is negative.
@@ -303,6 +345,13 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ // Allocation support helpers.
+ void LoadAllocationTopHelper(Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry);
+ void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
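
A sketch of how the fixed-size overload is meant to be used, mirroring the heap-number allocation rewritten at the top of this patch (register choices and the label name are illustrative): allocate untagged, install the map, then tag.

  Label call_runtime;
  __ AllocateObjectInNewSpace(HeapNumber::kSize,  // bytes to allocate
                              eax,                // result (untagged)
                              ebx,                // result_end
                              no_reg,             // scratch not needed
                              &call_runtime,      // jump here if full
                              false);             // top not preloaded
  __ mov(Operand(eax, HeapObject::kMapOffset),
         Immediate(Factory::heap_number_map()));
  __ or_(Operand(eax), Immediate(kHeapObjectTag));
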
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index bc8107633..7af4e89e0 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -102,6 +102,7 @@ RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
success_label_(),
backtrack_label_(),
exit_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code later.
__ bind(&start_label_); // And then continue from here.
}
@@ -337,8 +338,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ add(edx, Operand(esi));
__ mov(Operand(esp, 0 * kPointerSize), edx);
- Address function_address = FUNCTION_ADDR(&CaseInsensitiveCompareUC16);
- CallCFunction(function_address, argument_count);
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16();
+ CallCFunction(compare, argument_count);
// Pop original values before reacting on result value.
__ pop(ebx);
__ pop(backtrack_stackpointer());
@@ -745,7 +747,8 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ lea(eax, Operand(ebp, kStackHighEnd));
__ mov(Operand(esp, 1 * kPointerSize), eax);
__ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer());
- CallCFunction(FUNCTION_ADDR(&GrowStack), num_arguments);
+ ExternalReference grow_stack = ExternalReference::re_grow_stack();
+ CallCFunction(grow_stack, num_arguments);
  // If it returns NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ or_(eax, Operand(eax));
@@ -817,7 +820,9 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacter(int cp_offset,
int characters) {
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -913,7 +918,9 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) {
// Next address on the stack (will be address of return address).
__ lea(eax, Operand(esp, -kPointerSize));
__ mov(Operand(esp, 0 * kPointerSize), eax);
- CallCFunction(FUNCTION_ADDR(&CheckStackGuardState), num_arguments);
+ ExternalReference check_stack_guard =
+ ExternalReference::re_check_stack_guard_state();
+ CallCFunction(check_stack_guard, num_arguments);
}
@@ -996,22 +1003,6 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
}
-Address RegExpMacroAssemblerIA32::GrowStack(Address stack_pointer,
- Address* stack_base) {
- size_t size = RegExpStack::stack_capacity();
- Address old_stack_base = RegExpStack::stack_base();
- ASSERT(old_stack_base == *stack_base);
- ASSERT(stack_pointer <= old_stack_base);
- ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
- if (new_stack_base == NULL) {
- return NULL;
- }
- *stack_base = new_stack_base;
- return new_stack_base - (old_stack_base - stack_pointer);
-}
-
-
Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
ASSERT(register_index < (1<<30));
if (num_registers_ <= register_index) {
@@ -1135,9 +1126,9 @@ void RegExpMacroAssemblerIA32::FrameAlign(int num_arguments, Register scratch) {
}
-void RegExpMacroAssemblerIA32::CallCFunction(Address function_address,
+void RegExpMacroAssemblerIA32::CallCFunction(ExternalReference function,
int num_arguments) {
- __ mov(Operand(eax), Immediate(reinterpret_cast<int32_t>(function_address)));
+ __ mov(Operand(eax), Immediate(function));
__ call(Operand(eax));
if (OS::ActivationFrameAlignment() != 0) {
__ mov(esp, Operand(esp, num_arguments * kPointerSize));
@@ -1172,6 +1163,10 @@ void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
}
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+ __ int3(); // Unused on ia32.
+}
+
#undef __
#endif // V8_NATIVE_REGEXP
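
Why these call sites take an ExternalReference rather than a raw Address: registered references can be encoded symbolically when a snapshot is written (see the ExternalReferenceTable additions in the serialize.cc hunks further down), so generated code never bakes a process-specific pointer into the snapshot. A rough model of the indirection, with illustrative names only:

#include <cstdint>

struct ExternalRef {
  void*    address;  // resolved per process at runtime
  uint32_t id;       // stable id from the reference table
};

uint32_t EncodeForSnapshot(const ExternalRef& ref) {
  return ref.id;     // serialized instead of the raw pointer
}
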
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index d11439299..5ffd46275 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -107,6 +107,13 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void ClearRegisters(int reg_from, int reg_to);
virtual void WriteStackPointerToRegister(int reg);
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+
private:
// Offsets from ebp of function parameters and stored registers.
static const int kFramePointer = 0;
@@ -144,23 +151,9 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState(Register scratch);
- // Called from RegExp if the backtrack stack limit is hit.
- // Tries to expand the stack. Returns the new stack-pointer if
- // successful, and updates the stack_top address, or returns 0 if unable
- // to grow the stack.
- // This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top);
-
// The ebp-relative location of a regexp register.
Operand register_location(int register_index);
@@ -209,7 +202,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// by FrameAlign. The called function is not allowed to trigger a garbage
// collection, since that might move the code and invalidate the return
// address (unless this is somehow accounted for).
- inline void CallCFunction(Address function_address, int num_arguments);
+ inline void CallCFunction(ExternalReference function, int num_arguments);
MacroAssembler* masm_;
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index 4d02c03ab..3bed2681f 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -44,4 +44,9 @@
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer-sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
+
#endif // V8_IA32_SIMULATOR_IA32_H_
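
On ia32 the new macro collapses to a direct call; an ARM build routes the same seven arguments through the simulator instead. The single call site (quoted from the regexp-macro-assembler.cc hunk later in this patch) then works unchanged on every target:

  int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
                                          input,
                                          start_offset,
                                          input_start,
                                          input_end,
                                          output,
                                          at_start_val,
                                          stack_base);
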
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index a626377e4..049c57e4f 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -1740,6 +1740,135 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
}
+// Specialized stub for constructing objects from functions which have only
+// simple assignments of the form this.x = ...; in their body.
+Object* ConstructStubCompiler::CompileConstructStub(
+ SharedFunctionInfo* shared) {
+ // ----------- S t a t e -------------
+ // -- eax : argc
+ // -- edi : constructor
+ // -- esp[0] : return address
+ // -- esp[4] : last argument
+ // -----------------------------------
+ Label generic_stub_call;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Check to see whether there are any break points in the function code. If
+  // there are, jump to the generic constructor stub, which calls the actual
+  // code for the function, thereby hitting the break points.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
+ __ cmp(ebx, Factory::undefined_value());
+ __ j(not_equal, &generic_stub_call, not_taken);
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+  // The Smi test below catches both a NULL and a Smi.
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &generic_stub_call);
+ __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ j(not_equal, &generic_stub_call);
+
+#ifdef DEBUG
+ // Cannot construct functions this way.
+ // edi: constructor
+ // ebx: initial map
+ __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
+ __ Assert(not_equal, "Function constructed by construct stub.");
+#endif
+
+ // Now allocate the JSObject on the heap by moving the new space allocation
+ // top forward.
+ // edi: constructor
+ // ebx: initial map
+ __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
+ __ shl(ecx, kPointerSizeLog2);
+ // Make sure that the maximum heap object size will never cause us
+ // problems here.
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
+ __ AllocateObjectInNewSpace(ecx, edx, ecx, no_reg, &generic_stub_call, false);
+
+ // Allocated the JSObject, now initialize the fields and add the heap tag.
+ // ebx: initial map
+ // edx: JSObject (untagged)
+ __ mov(Operand(edx, JSObject::kMapOffset), ebx);
+ __ mov(ebx, Factory::empty_fixed_array());
+ __ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
+ __ mov(Operand(edx, JSObject::kElementsOffset), ebx);
+
+ // Push the allocated object to the stack. This is the object that will be
+ // returned (after it is tagged).
+ __ push(edx);
+
+ // eax: argc
+ // edx: JSObject (untagged)
+ // Load the address of the first in-object property into edx.
+ __ lea(edx, Operand(edx, JSObject::kHeaderSize));
+ // Calculate the location of the first argument. The stack contains the
+ // allocated object and the return address on top of the argc arguments.
+ __ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
+
+ // Use edi for holding undefined which is used in several places below.
+ __ mov(edi, Factory::undefined_value());
+
+ // eax: argc
+ // ecx: first argument
+ // edx: first in-object property of the JSObject
+ // edi: undefined
+ // Fill the initialized properties with a constant value or a passed argument
+ // depending on the this.x = ...; assignment in the function.
+ for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+ if (shared->IsThisPropertyAssignmentArgument(i)) {
+ Label not_passed;
+ // Set the property to undefined.
+ __ mov(Operand(edx, i * kPointerSize), edi);
+ // Check if the argument assigned to the property is actually passed.
+ int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ cmp(eax, arg_number);
+ __ j(below_equal, &not_passed);
+ // Argument passed - find it on the stack.
+ __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+ __ mov(Operand(edx, i * kPointerSize), ebx);
+ __ bind(&not_passed);
+ } else {
+ // Set the property to the constant value.
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ __ mov(Operand(edx, i * kPointerSize), Immediate(constant));
+ }
+ }
+
+ // Fill the unused in-object property fields with undefined.
+ for (int i = shared->this_property_assignments_count();
+ i < shared->CalculateInObjectProperties();
+ i++) {
+ __ mov(Operand(edx, i * kPointerSize), edi);
+ }
+
+  // Move argc to ebx, then retrieve and tag the JSObject to return.
+ __ mov(ebx, eax);
+ __ pop(eax);
+ __ or_(Operand(eax), Immediate(kHeapObjectTag));
+
+ // Remove caller arguments and receiver from the stack and return.
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+ __ push(ecx);
+ __ IncrementCounter(&Counters::constructed_objects, 1);
+ __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ __ ret(0);
+
+ // Jump to the generic stub in case the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_stub_call);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
#undef __
} } // namespace v8::internal
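
A reading aid for the argument addressing in CompileConstructStub above (ia32, kPointerSize == 4; the stack picture is reconstructed from the lea and is not text from the tree):

// With argc in eax, right after the push(edx):
//
//   esp[0]            allocated JSObject (untagged, just pushed)
//   esp[4]            return address
//   esp[8]            argument argc-1 (pushed last)
//   ...
//   esp[4 + 4*argc]   argument 0   <- ecx after the lea
//
// Argument i therefore lives at ecx - i*kPointerSize, which matches the
// Operand(ecx, arg_number * -kPointerSize) load taken when the argument
// was actually passed (argc > arg_number).
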
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 06208aa50..e518662d0 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -51,6 +51,7 @@
#include "x64/macro-assembler-x64.h"
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
+#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#else
#error Unsupported target architecture.
@@ -419,9 +420,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
#ifdef V8_NATIVE_REGEXP
-#ifdef V8_TARGET_ARCH_ARM
- UNIMPLEMENTED();
-#else // Native regexp supported.
+
OffsetsVector captures(number_of_capture_registers);
int* captures_vector = captures.vector();
NativeRegExpMacroAssembler::Result res;
@@ -455,9 +454,9 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
SetCapture(*array, i, captures_vector[i]);
SetCapture(*array, i + 1, captures_vector[i + 1]);
}
-#endif // Native regexp supported.
#else // ! V8_NATIVE_REGEXP
+
bool is_ascii = subject->IsAsciiRepresentation();
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null();
@@ -487,6 +486,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
SetCapture(*array, i, register_vector[i]);
SetCapture(*array, i + 1, register_vector[i + 1]);
}
+
#endif // V8_NATIVE_REGEXP
SetLastCaptureCount(*array, number_of_capture_registers);
@@ -1723,6 +1723,8 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
GetQuickCheckDetails(details, compiler, 0, trace->at_start() == Trace::FALSE);
if (details->cannot_match()) return false;
if (!details->Rationalize(compiler->ascii())) return false;
+ ASSERT(details->characters() == 1 ||
+ compiler->macro_assembler()->CanReadUnaligned());
uint32_t mask = details->mask();
uint32_t value = details->value();
@@ -2522,20 +2524,20 @@ void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler) {
int preload_characters = EatsAtLeast(4, 0);
-#ifdef V8_HOST_CAN_READ_UNALIGNED
- bool ascii = compiler->ascii();
- if (ascii) {
- if (preload_characters > 4) preload_characters = 4;
- // We can't preload 3 characters because there is no machine instruction
- // to do that. We can't just load 4 because we could be reading
- // beyond the end of the string, which could cause a memory fault.
- if (preload_characters == 3) preload_characters = 2;
+ if (compiler->macro_assembler()->CanReadUnaligned()) {
+ bool ascii = compiler->ascii();
+ if (ascii) {
+ if (preload_characters > 4) preload_characters = 4;
+ // We can't preload 3 characters because there is no machine instruction
+ // to do that. We can't just load 4 because we could be reading
+ // beyond the end of the string, which could cause a memory fault.
+ if (preload_characters == 3) preload_characters = 2;
+ } else {
+ if (preload_characters > 2) preload_characters = 2;
+ }
} else {
- if (preload_characters > 2) preload_characters = 2;
+ if (preload_characters > 1) preload_characters = 1;
}
-#else
- if (preload_characters > 1) preload_characters = 1;
-#endif
return preload_characters;
}
@@ -4470,16 +4472,12 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
is_ascii ? NativeRegExpMacroAssembler::ASCII
: NativeRegExpMacroAssembler::UC16;
-#ifdef V8_TARGET_ARCH_IA32
- RegExpMacroAssemblerIA32 macro_assembler(mode,
- (data->capture_count + 1) * 2);
-#endif
-#ifdef V8_TARGET_ARCH_X64
- RegExpMacroAssemblerX64 macro_assembler(mode,
- (data->capture_count + 1) * 2);
-#endif
-#ifdef V8_TARGET_ARCH_ARM
- UNIMPLEMENTED();
+#if V8_TARGET_ARCH_IA32
+ RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_X64
+ RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_ARM
+ RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
#endif
#else // ! V8_NATIVE_REGEXP
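
The rewritten clamp in CalculatePreloadCharacters, restated as a plain function for readability (logic transcribed from the hunk above):

int ClampPreloadCharacters(int preload, bool ascii, bool can_read_unaligned) {
  if (!can_read_unaligned) {
    return preload > 1 ? 1 : preload;  // one character at a time
  }
  if (ascii) {
    if (preload > 4) preload = 4;      // at most one 32-bit load
    if (preload == 3) preload = 2;     // no 3-byte load instruction
    return preload;
  }
  return preload > 2 ? 2 : preload;    // two UC16 characters fill 32 bits
}
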
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 10e81ac6a..d139093a1 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -76,7 +76,7 @@ void MarkCompactCollector::CollectGarbage() {
SweepLargeObjectSpace();
- if (compacting_collection_) {
+ if (IsCompacting()) {
EncodeForwardingAddresses();
UpdatePointers();
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 8328fe5c4..255e54409 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -163,7 +163,7 @@ function FormatMessage(message) {
illegal_break: "Illegal break statement",
illegal_continue: "Illegal continue statement",
illegal_return: "Illegal return statement",
- error_loading_debugger: "Error loading debugger %0",
+ error_loading_debugger: "Error loading debugger",
no_input_to_regexp: "No input to %0",
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index f71317117..ef4aae531 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -463,6 +463,8 @@ void Map::MapPrint() {
PrintF(" - type: %s\n", TypeToString(instance_type()));
PrintF(" - instance size: %d\n", instance_size());
PrintF(" - inobject properties: %d\n", inobject_properties());
+ PrintF(" - pre-allocated property fields: %d\n",
+ pre_allocated_property_fields());
PrintF(" - unused property fields: %d\n", unused_property_fields());
if (is_hidden_prototype()) {
PrintF(" - hidden_prototype\n");
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 7f3628d8d..cabc8a218 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -131,7 +131,7 @@ bool Object::IsSmi() {
bool Object::IsHeapObject() {
- return HAS_HEAP_OBJECT_TAG(this);
+ return Internals::HasHeapObjectTag(this);
}
@@ -300,6 +300,10 @@ uint32_t StringShape::full_representation_tag() {
}
+STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
+ Internals::kFullStringRepresentationMask);
+
+
uint32_t StringShape::size_tag() {
return (type_ & kStringSizeMask);
}
@@ -325,6 +329,10 @@ bool StringShape::IsExternalTwoByte() {
}
+STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
+ Internals::kExternalTwoByteRepresentationTag);
+
+
uc32 FlatStringReader::Get(int index) {
ASSERT(0 <= index && index <= length_);
if (is_ascii_) {
@@ -730,7 +738,7 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
int Smi::value() {
- return static_cast<int>(reinterpret_cast<intptr_t>(this)) >> kSmiTagSize;
+ return Internals::SmiValue(this);
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index e4a3a67a5..9ea131fa7 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2923,6 +2923,20 @@ Object* Map::CopyDropDescriptors() {
// Please note instance_type and instance_size are set when allocated.
Map::cast(result)->set_inobject_properties(inobject_properties());
Map::cast(result)->set_unused_property_fields(unused_property_fields());
+
+  // If the map has pre-allocated properties, always start out with a
+  // descriptor array describing these properties.
+ if (pre_allocated_property_fields() > 0) {
+ ASSERT(constructor()->IsJSFunction());
+ JSFunction* ctor = JSFunction::cast(constructor());
+ Object* descriptors =
+ ctor->initial_map()->instance_descriptors()->RemoveTransitions();
+ if (descriptors->IsFailure()) return descriptors;
+ Map::cast(result)->set_instance_descriptors(
+ DescriptorArray::cast(descriptors));
+ Map::cast(result)->set_pre_allocated_property_fields(
+ pre_allocated_property_fields());
+ }
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
Map::cast(result)->ClearCodeCache();
@@ -4800,7 +4814,6 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
bool only_this_property_assignments,
bool only_simple_this_property_assignments,
FixedArray* assignments) {
- ASSERT(this_property_assignments()->IsUndefined());
set_compiler_hints(BooleanBit::set(compiler_hints(),
kHasOnlyThisPropertyAssignments,
only_this_property_assignments));
@@ -4812,6 +4825,18 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
}
+void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() {
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kHasOnlyThisPropertyAssignments,
+ false));
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kHasOnlySimpleThisPropertyAssignments,
+ false));
+ set_this_property_assignments(Heap::undefined_value());
+ set_this_property_assignments_count(0);
+}
+
+
String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
Object* obj = this_property_assignments();
ASSERT(obj->IsFixedArray());
@@ -4822,6 +4847,32 @@ String* SharedFunctionInfo::GetThisPropertyAssignmentName(int index) {
}
+bool SharedFunctionInfo::IsThisPropertyAssignmentArgument(int index) {
+ Object* obj = this_property_assignments();
+ ASSERT(obj->IsFixedArray());
+ ASSERT(index < this_property_assignments_count());
+ obj = FixedArray::cast(obj)->get(index * 3 + 1);
+ return Smi::cast(obj)->value() != -1;
+}
+
+
+int SharedFunctionInfo::GetThisPropertyAssignmentArgument(int index) {
+ ASSERT(IsThisPropertyAssignmentArgument(index));
+ Object* obj =
+ FixedArray::cast(this_property_assignments())->get(index * 3 + 1);
+ return Smi::cast(obj)->value();
+}
+
+
+Object* SharedFunctionInfo::GetThisPropertyAssignmentConstant(int index) {
+ ASSERT(!IsThisPropertyAssignmentArgument(index));
+ Object* obj =
+ FixedArray::cast(this_property_assignments())->get(index * 3 + 2);
+ return obj;
+}
+
+
+
// Support function for printing the source code to a StringStream
// without any allocation in the heap.
void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 763752b27..3f6f5fff5 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1234,6 +1234,8 @@ class HeapObject: public Object {
static const int kMapOffset = Object::kHeaderSize;
static const int kHeaderSize = kMapOffset + kPointerSize;
+ STATIC_CHECK(kMapOffset == Internals::kHeapObjectMapOffset);
+
protected:
// helpers for calling an ObjectVisitor to iterate over pointers in the
// half-open range [start, end) specified as integer offsets
@@ -1664,6 +1666,8 @@ class JSObject: public HeapObject {
static const int kElementsOffset = kPropertiesOffset + kPointerSize;
static const int kHeaderSize = kElementsOffset + kPointerSize;
+ STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
+
Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
private:
@@ -2631,7 +2635,7 @@ class Code: public HeapObject {
int ExecutableSize() {
// Check that the assumptions about the layout of the code object holds.
ASSERT_EQ(instruction_start() - address(),
- Code::kHeaderSize);
+ static_cast<intptr_t>(Code::kHeaderSize));
return instruction_size() + Code::kHeaderSize;
}
@@ -2897,6 +2901,8 @@ class Map: public HeapObject {
static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
static const int kBitField2Offset = kInstanceAttributesOffset + 3;
+ STATIC_CHECK(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
+
// Bit positions for bit field.
static const int kUnused = 0; // To be used for marking recently used maps.
static const int kHasNonInstancePrototype = 1;
@@ -3108,6 +3114,9 @@ class SharedFunctionInfo: public HeapObject {
bool has_only_simple_this_property_assignments,
FixedArray* this_property_assignments);
+ // Clear information on assignments of the form this.x = ...;
+ void ClearThisPropertyAssignmentsInfo();
+
// Indicate that this function only consists of assignments of the form
// this.x = ...;.
inline bool has_only_this_property_assignments();
@@ -3122,6 +3131,9 @@ class SharedFunctionInfo: public HeapObject {
inline int this_property_assignments_count();
inline void set_this_property_assignments_count(int value);
String* GetThisPropertyAssignmentName(int index);
+ bool IsThisPropertyAssignmentArgument(int index);
+ int GetThisPropertyAssignmentArgument(int index);
+ Object* GetThisPropertyAssignmentConstant(int index);
// [source code]: Source code for the function.
bool HasSourceCode();
@@ -4128,6 +4140,8 @@ class ExternalString: public String {
static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
static const int kSize = kResourceOffset + kPointerSize;
+ STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalString);
};
@@ -4341,6 +4355,8 @@ class Proxy: public HeapObject {
static const int kProxyOffset = HeapObject::kHeaderSize;
static const int kSize = kProxyOffset + kPointerSize;
+ STATIC_CHECK(kProxyOffset == Internals::kProxyProxyOffset);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Proxy);
};
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 5cc6341f9..0abb9ed77 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -2397,12 +2397,6 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
// WithStatement ::
// 'with' '(' Expression ')' Statement
- // We do not allow the use of 'with' statements in the internal JS
- // code. If 'with' statements were allowed, the simplified setup of
- // the runtime context chain would allow access to properties in the
- // global object from within a 'with' statement.
- ASSERT(extension_ != NULL || !Bootstrapper::IsActive());
-
Expect(Token::WITH, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
Expression* expr = ParseExpression(true, CHECK_OK);
@@ -3088,9 +3082,6 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
Handle<String> name = callee->name();
Variable* var = top_scope_->Lookup(name);
if (var == NULL) {
- // We do not allow direct calls to 'eval' in our internal
- // JS files. Use builtin functions instead.
- ASSERT(extension_ != NULL || !Bootstrapper::IsActive());
top_scope_->RecordEvalCall();
is_potentially_direct_eval = true;
}
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index de6cd125b..d4a183d94 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -1317,8 +1317,11 @@ double OS::nan_value() {
int OS::ActivationFrameAlignment() {
- // Floating point code runs faster if the stack is 8-byte aligned.
- return 8;
+#ifdef _WIN64
+ return 16; // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#else
+ return 8; // Floating-point math runs faster with 8-byte alignment.
+#endif
}
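
What the return value feeds: FrameAlign in the regexp macro-assembler rounds esp down to a multiple of this alignment before outgoing arguments are stored, which for a power of two is the usual mask trick (a sketch, not the V8 code):

#include <cstdint>

uintptr_t AlignStackDown(uintptr_t sp, uintptr_t alignment) {
  // alignment must be a power of two (8 or 16 above).
  return sp & ~(alignment - 1);
}
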
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp-macro-assembler-irregexp-inl.h
index 5074f210a..b487468ef 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp-inl.h
@@ -38,6 +38,7 @@
namespace v8 {
namespace internal {
+#ifndef V8_NATIVE_REGEXP
void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
uint32_t twenty_four_bits) {
@@ -70,6 +71,7 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
pc_ += 4;
}
+#endif // ! V8_NATIVE_REGEXP
} } // namespace v8::internal
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 21b622ef0..f9c7eeee0 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -36,6 +36,7 @@
namespace v8 {
namespace internal {
+#ifndef V8_NATIVE_REGEXP
RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
: buffer_(buffer),
@@ -458,5 +459,6 @@ void RegExpMacroAssemblerIrregexp::Expand() {
}
}
+#endif // !V8_NATIVE_REGEXP
} } // namespace v8::internal
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp-macro-assembler-irregexp.h
index dd64e7ae8..642a28390 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.h
@@ -31,6 +31,7 @@
namespace v8 {
namespace internal {
+#ifndef V8_NATIVE_REGEXP
class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
public:
@@ -133,6 +134,8 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
};
+#endif // !V8_NATIVE_REGEXP
+
} } // namespace v8::internal
#endif // V8_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index 28434d7ca..28ca5f340 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -37,7 +37,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
explicit RegExpMacroAssemblerTracer(RegExpMacroAssembler* assembler);
virtual ~RegExpMacroAssemblerTracer();
virtual int stack_limit_slack() { return assembler_->stack_limit_slack(); }
-
+ virtual bool CanReadUnaligned() { return assembler_->CanReadUnaligned(); }
virtual void AdvanceCurrentPosition(int by); // Signed cp change.
virtual void AdvanceRegister(int reg, int by); // r[reg] += by.
virtual void Backtrack();
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 7f830fe48..0d00ceec3 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -30,6 +30,13 @@
#include "assembler.h"
#include "regexp-stack.h"
#include "regexp-macro-assembler.h"
+#if V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#elif V8_TARGET_ARCH_IA32
+#include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#endif
namespace v8 {
namespace internal {
@@ -42,6 +49,15 @@ RegExpMacroAssembler::~RegExpMacroAssembler() {
}
+bool RegExpMacroAssembler::CanReadUnaligned() {
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+ return true;
+#else
+ return false;
+#endif
+}
+
+
#ifdef V8_NATIVE_REGEXP // Avoid unused code, e.g., on ARM.
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
@@ -51,6 +67,15 @@ NativeRegExpMacroAssembler::NativeRegExpMacroAssembler() {
NativeRegExpMacroAssembler::~NativeRegExpMacroAssembler() {
}
+
+bool NativeRegExpMacroAssembler::CanReadUnaligned() {
+#ifdef V8_TARGET_CAN_READ_UNALIGNED
+ return true;
+#else
+ return false;
+#endif
+}
+
const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
String* subject,
int start_index) {
@@ -162,13 +187,14 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
- int result = matcher_func(input,
- start_offset,
- input_start,
- input_end,
- output,
- at_start_val,
- stack_base);
+ int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
+ input,
+ start_offset,
+ input_start,
+ input_end,
+ output,
+ at_start_val,
+ stack_base);
ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);
@@ -213,5 +239,22 @@ int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
return 1;
}
+
+Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
+ Address* stack_base) {
+ size_t size = RegExpStack::stack_capacity();
+ Address old_stack_base = RegExpStack::stack_base();
+ ASSERT(old_stack_base == *stack_base);
+ ASSERT(stack_pointer <= old_stack_base);
+ ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
+ Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
+ if (new_stack_base == NULL) {
+ return NULL;
+ }
+ *stack_base = new_stack_base;
+ intptr_t stack_content_size = old_stack_base - stack_pointer;
+ return new_stack_base - stack_content_size;
+}
+
#endif // V8_NATIVE_REGEXP
} } // namespace v8::internal
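
The pointer arithmetic at the end of GrowStack, isolated: the live data sits just below the old base (the backtrack stack grows downward), so the relocated stack pointer keeps the same distance from the new base that it had from the old one:

typedef unsigned char* Address;  // matches V8's byte-pointer typedef

Address RelocatedStackPointer(Address new_base,
                              Address old_base,
                              Address old_sp) {
  // Preserve the number of live bytes between the pointer and the base.
  return new_base - (old_base - old_sp);
}
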
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index e59082768..26aab2c72 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -61,6 +61,7 @@ class RegExpMacroAssembler {
// kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
// at least once for every stack_limit() pushes that are executed.
virtual int stack_limit_slack() = 0;
+ virtual bool CanReadUnaligned();
virtual void AdvanceCurrentPosition(int by) = 0; // Signed cp change.
virtual void AdvanceRegister(int reg, int by) = 0; // r[reg] += by.
// Continues execution from the position pushed on the top of the backtrack
@@ -182,6 +183,7 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
NativeRegExpMacroAssembler();
virtual ~NativeRegExpMacroAssembler();
+ virtual bool CanReadUnaligned();
static Result Match(Handle<Code> regexp,
Handle<String> subject,
@@ -195,6 +197,13 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
Address byte_offset2,
size_t byte_length);
+ // Called from RegExp if the backtrack stack limit is hit.
+ // Tries to expand the stack. Returns the new stack-pointer if
+ // successful, and updates the stack_top address, or returns 0 if unable
+ // to grow the stack.
+ // This function must not trigger a garbage collection.
+ static Address GrowStack(Address stack_pointer, Address* stack_top);
+
static const byte* StringCharacterPosition(String* subject, int start_index);
static Result Execute(Code* code,
@@ -205,7 +214,25 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int* output,
bool at_start);
};
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
#endif // V8_NATIVE_REGEXP
+
} } // namespace v8::internal
#endif // V8_REGEXP_MACRO_ASSEMBLER_H_
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 845ac6308..f772d32d9 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -45,6 +45,7 @@
#include "v8threads.h"
#include "smart-pointer.h"
#include "parser.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
@@ -1235,6 +1236,9 @@ static Object* Runtime_SetCode(Arguments args) {
// Array, and Object, and some web code
// doesn't like seeing source code for constructors.
target->shared()->set_script(Heap::undefined_value());
+ // Clear the optimization hints related to the compiled code as these are no
+ // longer valid when the code is overwritten.
+ target->shared()->ClearThisPropertyAssignmentsInfo();
context = Handle<Context>(fun->context());
// Make sure we get a fresh copy of the literal vector to avoid
@@ -4054,7 +4058,7 @@ static Object* Runtime_Math_acos(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(acos(x));
+ return TranscendentalCache::Get(TranscendentalCache::ACOS, x);
}
@@ -4063,7 +4067,7 @@ static Object* Runtime_Math_asin(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(asin(x));
+ return TranscendentalCache::Get(TranscendentalCache::ASIN, x);
}
@@ -4072,7 +4076,7 @@ static Object* Runtime_Math_atan(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(atan(x));
+ return TranscendentalCache::Get(TranscendentalCache::ATAN, x);
}
@@ -4113,7 +4117,7 @@ static Object* Runtime_Math_cos(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(cos(x));
+ return TranscendentalCache::Get(TranscendentalCache::COS, x);
}
@@ -4122,7 +4126,7 @@ static Object* Runtime_Math_exp(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(exp(x));
+ return TranscendentalCache::Get(TranscendentalCache::EXP, x);
}
@@ -4140,7 +4144,7 @@ static Object* Runtime_Math_log(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(log(x));
+ return TranscendentalCache::Get(TranscendentalCache::LOG, x);
}
@@ -4228,7 +4232,7 @@ static Object* Runtime_Math_sin(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(sin(x));
+ return TranscendentalCache::Get(TranscendentalCache::SIN, x);
}
@@ -4246,7 +4250,7 @@ static Object* Runtime_Math_tan(Arguments args) {
ASSERT(args.length() == 1);
CONVERT_DOUBLE_CHECKED(x, args[0]);
- return Heap::AllocateHeapNumber(tan(x));
+ return TranscendentalCache::Get(TranscendentalCache::TAN, x);
}
@@ -4326,11 +4330,21 @@ static Object* Runtime_NewClosure(Arguments args) {
}
-static Handle<Code> ComputeConstructStub(Handle<Map> map) {
+static Code* ComputeConstructStub(Handle<SharedFunctionInfo> shared) {
// TODO(385): Change this to create a construct stub specialized for
// the given map to make allocation of simple objects - and maybe
// arrays - much faster.
- return Handle<Code>(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ if (FLAG_inline_new
+ && shared->has_only_simple_this_property_assignments()) {
+ ConstructStubCompiler compiler;
+ Object* code = compiler.CompileConstructStub(*shared);
+ if (code->IsFailure()) {
+ return Builtins::builtin(Builtins::JSConstructStubGeneric);
+ }
+ return Code::cast(code);
+ }
+
+ return Builtins::builtin(Builtins::JSConstructStubGeneric);
}
@@ -4373,15 +4387,25 @@ static Object* Runtime_NewObject(Arguments args) {
}
}
+ // The function should be compiled for the optimization hints to be available.
+ if (!function->shared()->is_compiled()) {
+ CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
+ CLEAR_EXCEPTION,
+ 0);
+ }
+
bool first_allocation = !function->has_initial_map();
Handle<JSObject> result = Factory::NewJSObject(function);
if (first_allocation) {
Handle<Map> map = Handle<Map>(function->initial_map());
- Handle<Code> stub = ComputeConstructStub(map);
+ Handle<Code> stub = Handle<Code>(
+ ComputeConstructStub(Handle<SharedFunctionInfo>(function->shared())));
function->shared()->set_construct_stub(*stub);
}
+
Counters::constructed_objects.Increment();
Counters::constructed_objects_runtime.Increment();
+
return *result;
}
@@ -7386,7 +7410,7 @@ static Object* Runtime_SystemBreak(Arguments args) {
}
-static Object* Runtime_FunctionGetAssemblerCode(Arguments args) {
+static Object* Runtime_DebugDisassembleFunction(Arguments args) {
#ifdef DEBUG
HandleScope scope;
ASSERT(args.length() == 1);
@@ -7401,6 +7425,21 @@ static Object* Runtime_FunctionGetAssemblerCode(Arguments args) {
}
+static Object* Runtime_DebugDisassembleConstructor(Arguments args) {
+#ifdef DEBUG
+ HandleScope scope;
+ ASSERT(args.length() == 1);
+ // Get the function and make sure it is compiled.
+ CONVERT_ARG_CHECKED(JSFunction, func, 0);
+ if (!func->is_compiled() && !CompileLazy(func, KEEP_EXCEPTION)) {
+ return Failure::Exception();
+ }
+ func->shared()->construct_stub()->PrintLn();
+#endif // DEBUG
+ return Heap::undefined_value();
+}
+
+
static Object* Runtime_FunctionGetInferredName(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
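
Shape of the cache the Math_* hunks switch to -- a guess at the idea (a small direct-mapped table keyed on the argument's bit pattern), not V8's actual TranscendentalCache layout:

#include <cmath>
#include <cstdint>
#include <cstring>

struct CacheLine { uint64_t key; double value; bool valid; };

double CachedSin(double x) {
  static CacheLine cache[512];
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);    // key on the raw bit pattern
  CacheLine& line = cache[bits % 512];
  if (!line.valid || line.key != bits) {  // miss: compute once, remember
    line.valid = true;
    line.key = bits;
    line.value = std::sin(x);
  }
  // A hit skips both the libm call and the heap-number allocation that
  // Heap::AllocateHeapNumber used to perform on every call.
  return line.value;
}
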
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index d47ca18e7..1be677a11 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -303,7 +303,8 @@ namespace internal {
F(DebugConstructedBy, 2) \
F(DebugGetPrototype, 1) \
F(SystemBreak, 0) \
- F(FunctionGetAssemblerCode, 1) \
+ F(DebugDisassembleFunction, 1) \
+ F(DebugDisassembleConstructor, 1) \
F(FunctionGetInferredName, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index d2fd1e4fc..a16032ad3 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -734,6 +734,20 @@ void ExternalReferenceTable::PopulateTable() {
UNCLASSIFIED,
17,
"compare_doubles");
+#ifdef V8_NATIVE_REGEXP
+ Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
+ UNCLASSIFIED,
+ 18,
+ "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
+ Add(ExternalReference::re_check_stack_guard_state().address(),
+ UNCLASSIFIED,
+ 19,
+ "RegExpMacroAssembler*::CheckStackGuardState()");
+ Add(ExternalReference::re_grow_stack().address(),
+ UNCLASSIFIED,
+ 20,
+ "NativeRegExpMacroAssembler::GrowStack()");
+#endif
}
@@ -1119,6 +1133,11 @@ void Serializer::PutHeader() {
#else
writer_->PutC('0');
#endif
+#ifdef V8_NATIVE_REGEXP
+ writer_->PutC('N');
+#else // Interpreted regexp
+ writer_->PutC('I');
+#endif
// Write sizes of paged memory spaces. Allocate extra space for the old
// and code spaces, because objects in new space will be promoted to them.
writer_->PutC('S');
@@ -1238,7 +1257,7 @@ Address Serializer::PutObject(HeapObject* obj) {
// Write out the object prologue: type, size, and simulated address of obj.
writer_->PutC('[');
- CHECK_EQ(0, size & kObjectAlignmentMask);
+ CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask));
writer_->PutInt(type);
writer_->PutInt(size >> kObjectAlignmentBits);
PutEncodedAddress(addr); // encodes AllocationSpace
@@ -1475,6 +1494,11 @@ void Deserializer::GetHeader() {
// synchronization tags.
if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
#endif
+#ifdef V8_NATIVE_REGEXP
+ reader_.ExpectC('N');
+#else // Interpreted regexp.
+ reader_.ExpectC('I');
+#endif
// Ensure sufficient capacity in paged memory spaces to avoid growth
// during deserialization.
reader_.ExpectC('S');
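
Effect of the new header byte: a snapshot produced by a native-regexp build refuses to load in an interpreted-regexp build and vice versa, since the two configurations serialize different code and roots. A sketch of the check (the helper name is made up; the real reader's ExpectC aborts the same way on mismatch):

void ExpectRegExpFlavor(char got) {
#ifdef V8_NATIVE_REGEXP
  const char expected = 'N';
#else
  const char expected = 'I';
#endif
  if (got != expected) {
    FATAL("Snapshot was built with a different regexp implementation.");
  }
}
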
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index b25f5b4bb..2906c2292 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -1097,4 +1097,16 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) {
}
+Object* ConstructStubCompiler::GetCode() {
+ Code::Flags flags = Code::ComputeFlags(Code::STUB);
+ Object* result = GetCodeWithFlags(flags, "ConstructStub");
+ if (!result->IsFailure()) {
+ Code* code = Code::cast(result);
+ USE(code);
+ LOG(CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+ }
+ return result;
+}
+
+
} } // namespace v8::internal
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 3b3caad56..e26892026 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -561,6 +561,17 @@ class CallStubCompiler: public StubCompiler {
};
+class ConstructStubCompiler: public StubCompiler {
+ public:
+ explicit ConstructStubCompiler() {}
+
+ Object* CompileConstructStub(SharedFunctionInfo* shared);
+
+ private:
+ Object* GetCode();
+};
+
+
} } // namespace v8::internal
#endif // V8_STUB_CACHE_H_
diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc
index 550703aa1..5c22bcf3c 100644
--- a/deps/v8/src/top.cc
+++ b/deps/v8/src/top.cc
@@ -855,23 +855,18 @@ void Top::TraceException(bool flag) {
}
-bool Top::OptionalRescheduleException(bool is_bottom_call,
- bool force_clear_catchable) {
+bool Top::OptionalRescheduleException(bool is_bottom_call) {
  // Always reschedule out-of-memory exceptions.
if (!is_out_of_memory()) {
bool is_termination_exception =
pending_exception() == Heap::termination_exception();
- // Do not reschedule the exception if this is the bottom call or
- // if we are asked to clear catchable exceptions. Termination
- // exceptions are not catchable and are only cleared if this is
- // the bottom call.
- bool clear_exception = is_bottom_call ||
- (force_clear_catchable && !is_termination_exception);
+ // Do not reschedule the exception if this is the bottom call.
+ bool clear_exception = is_bottom_call;
if (is_termination_exception) {
- thread_local_.external_caught_exception_ = false;
if (is_bottom_call) {
+ thread_local_.external_caught_exception_ = false;
clear_pending_exception();
return false;
}
diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h
index d4d73c21f..5b3d6a075 100644
--- a/deps/v8/src/top.h
+++ b/deps/v8/src/top.h
@@ -157,8 +157,8 @@ class Top {
// exceptions. If an exception was thrown and not handled by an external
// handler the exception is scheduled to be rethrown when we return to running
// JavaScript code. If an exception is scheduled true is returned.
- static bool OptionalRescheduleException(bool is_bottom_call,
- bool force_clear_catchable);
+ static bool OptionalRescheduleException(bool is_bottom_call);
+
static bool* external_caught_exception_address() {
return &thread_local_.external_caught_exception_;
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 43cd5e3d6..0b941f672 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -141,6 +141,7 @@ namespace internal {
SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
+ SC(constructed_objects_stub, V8.ConstructedObjectsStub) \
SC(for_in, V8.ForIn) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index bbcc9d1f1..a20415875 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -98,6 +98,10 @@ bool V8::Initialize(Deserializer *des) {
StubCache::Clear();
}
+ // Deserializing may put strange things in the root array's copy of the
+ // stack guard.
+ Heap::SetStackLimit(StackGuard::jslimit());
+
// Setup the CPU support. Must be done after heap setup and after
// any deserialization because we have to have the initial heap
// objects in place for creating the code object used for probing.
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 65fd116bf..39d18fd4d 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 1
#define MINOR_VERSION 3
-#define BUILD_NUMBER 7
+#define BUILD_NUMBER 9
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index a02557e96..6304324da 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -120,13 +120,23 @@ void CpuFeatures::Probe() {
supported_ = kDefaultCpuFeatures | (1 << CPUID);
{ Scope fscope(CPUID);
__ cpuid();
+ // Move the result from ecx:edx to rdi.
+ __ movl(rdi, rdx); // Zero-extended to 64 bits.
+ __ shl(rcx, Immediate(32));
+ __ or_(rdi, rcx);
+
+    // Get the SAHF supported flag from CPUID(0x80000001).
+ __ movq(rax, 0x80000001, RelocInfo::NONE);
+ __ cpuid();
}
supported_ = kDefaultCpuFeatures;
- // Move the result from ecx:edx to rax and make sure to mark the
- // CPUID feature as supported.
- __ movl(rax, rdx); // Zero-extended to 64 bits.
- __ shl(rcx, Immediate(32));
+ // Put the CPU flags in rax.
+ // rax = (rcx & 1) | (rdi & ~1) | (1 << CPUID).
+ __ movl(rax, Immediate(1));
+ __ and_(rcx, rax); // Bit 0 is set if SAHF instruction supported.
+ __ not_(rax);
+ __ and_(rax, rdi);
__ or_(rax, rcx);
__ or_(rax, Immediate(1 << CPUID));
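
The register shuffle above, restated as plain arithmetic (rdi holds the leaf-1 ecx:edx feature bits saved earlier, and bit 0 of the extended leaf's ecx reports LAHF/SAHF support in 64-bit mode):

#include <cstdint>

uint64_t ComposeFeatureBits(uint64_t leaf1_features, uint64_t ext_leaf_ecx) {
  const uint64_t kCpuidBit = UINT64_C(1) << 10;  // CPUID = 10 in the enum
  return (ext_leaf_ecx & 1)                      // SAHF goes in bit 0
       | (leaf1_features & ~UINT64_C(1))         // leaf-1 bits, bit 0 vacated
       | kCpuidBit;                              // CPUID itself supported
}
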
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 9d602b9ef..4d341c672 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -361,7 +361,12 @@ class CpuFeatures : public AllStatic {
// Feature flags bit positions. They are mostly based on the CPUID spec.
// (We assign CPUID itself to one of the currently reserved bits --
// feel free to change this if needed.)
- enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
+ enum Feature { SSE3 = 32,
+ SSE2 = 26,
+ CMOV = 15,
+ RDTSC = 4,
+ CPUID = 10,
+ SAHF = 0};
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe();
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 6988d7208..44d8b46c0 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -53,7 +53,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ movq(rbp, rsp);
// Store the arguments adaptor context sentinel.
- __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Push the function on the stack.
__ push(rdi);
@@ -139,9 +139,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Fill remaining expected arguments with undefined values.
Label fill;
- __ movq(kScratchRegister,
- Factory::undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
__ incq(rcx);
__ push(kScratchRegister);
@@ -218,9 +216,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
- __ Cmp(rbx, Factory::null_value());
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
- __ Cmp(rbx, Factory::undefined_value());
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &use_global_receiver);
__ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
@@ -386,9 +384,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movq(rbx, Operand(rbp, kReceiverOffset));
__ testl(rbx, Immediate(kSmiTagMask));
__ j(zero, &call_to_object);
- __ Cmp(rbx, Factory::null_value());
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
- __ Cmp(rbx, Factory::undefined_value());
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &use_global_receiver);
// If given receiver is already a JavaScript object then there's no
@@ -542,22 +540,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// problem here, because it is always greater than the maximum
// instance size that can be represented in a byte.
ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
- ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address();
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(rbx, Operand(kScratchRegister, 0));
- __ addq(rdi, rbx); // Calculate new top
- ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ movq(kScratchRegister, new_space_allocation_limit);
- __ cmpq(rdi, Operand(kScratchRegister, 0));
- __ j(above_equal, &rt_call);
+ __ AllocateObjectInNewSpace(rdi, rbx, rdi, no_reg, &rt_call, false);
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
// rdi: start of next object
__ movq(Operand(rbx, JSObject::kMapOffset), rax);
- __ Move(rcx, Factory::empty_fixed_array());
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
__ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
// Set extra fields in the newly allocated object.
@@ -565,7 +554,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rbx: JSObject
// rdi: start of next object
{ Label loop, entry;
- __ Move(rdx, Factory::undefined_value());
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
@@ -576,16 +565,14 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ j(less, &loop);
}
- // Mostly done with the JSObject. Add the heap tag and store the new top, so
- // that we can continue and jump into the continuation code at any time from
- // now on. Any failures need to undo the setting of the new top, so that the
- // heap is in a consistent state and verifiable.
+ // Add the object tag to make the JSObject real, so that we can continue and
+ // jump into the continuation code at any time from now on. Any failures
+ // need to undo the allocation, so that the heap is in a consistent state
+ // and verifiable.
// rax: initial map
// rbx: JSObject
// rdi: start of next object
__ or_(rbx, Immediate(kHeapObjectTag));
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(Operand(kScratchRegister, 0), rdi);
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -610,18 +597,21 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rdx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
- __ lea(rax, Operand(rdi, rdx, times_pointer_size, FixedArray::kHeaderSize));
- __ movq(kScratchRegister, new_space_allocation_limit);
- __ cmpq(rax, Operand(kScratchRegister, 0));
- __ j(above_equal, &undo_allocation);
- __ store_rax(new_space_allocation_top);
+ __ AllocateObjectInNewSpace(FixedArray::kHeaderSize,
+ times_pointer_size,
+ rdx,
+ rdi,
+ rax,
+ no_reg,
+ &undo_allocation,
+ true);
// Initialize the FixedArray.
// rbx: JSObject
// rdi: FixedArray
// rdx: number of elements
// rax: start of next object
- __ Move(rcx, Factory::fixed_array_map());
+ __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
__ movq(Operand(rdi, JSObject::kMapOffset), rcx); // set up the map
__ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
@@ -631,7 +621,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// rax: start of next object
// rdx: number of elements
{ Label loop, entry;
- __ Move(rdx, Factory::undefined_value());
+ __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
__ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
@@ -659,9 +649,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// allocated object's unused properties.
// rbx: JSObject (previous new top)
__ bind(&undo_allocation);
- __ xor_(rbx, Immediate(kHeapObjectTag)); // clear the heap tag
- __ movq(kScratchRegister, new_space_allocation_top);
- __ movq(Operand(kScratchRegister, 0), rbx);
+ __ UndoAllocationInNewSpace(rbx);
}
// Allocate the new receiver object using the runtime call.
@@ -756,7 +744,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// an internal frame and the pushed function and receiver, and
// register rax and rbx holds the argument count and argument array,
// while rdi holds the function pointer and rsi the context.
-#ifdef __MSVC__
+#ifdef _WIN64
// MSVC parameters in:
// rcx : entry (ignored)
// rdx : function
@@ -766,7 +754,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Clear the context before we push it when entering the JS frame.
__ xor_(rsi, rsi);
- // Enter an internal frame.
__ EnterInternalFrame();
// Load the function context into rsi.
@@ -783,7 +770,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
__ movq(rdi, rdx);
-#else // !defined(__MSVC__)
+#else // !defined(_WIN64)
// GCC parameters in:
// rdi : entry (ignored)
// rsi : function
@@ -807,7 +794,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Load the number of arguments and setup pointer to the arguments.
__ movq(rax, rcx);
__ movq(rbx, r8);
-#endif // __MSVC__
+#endif // _WIN64
+
+ // Set up the roots register.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(r13, roots_address);
+
// Current stack contents:
// [rsp + 2 * kPointerSize ... ]: Internal frame
// [rsp + kPointerSize] : function
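The __MSVC__ to _WIN64 change matters because the register assignment is a property of the ABI, not of the compiler; MinGW on Windows uses the same Win64 convention as MSVC. For reference, the integer-argument registers assumed by the two branches:

    arg   Win64 ABI   System V AMD64 ABI
     1    rcx         rdi
     2    rdx         rsi
     3    r8          rdx
     4    r9          rcx
     5    stack       r8
     6    stack       r9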
diff --git a/deps/v8/src/x64/cfg-x64.cc b/deps/v8/src/x64/cfg-x64.cc
index 34ddbbf02..0b71d8ec7 100644
--- a/deps/v8/src/x64/cfg-x64.cc
+++ b/deps/v8/src/x64/cfg-x64.cc
@@ -71,8 +71,7 @@ void EntryNode::Compile(MacroAssembler* masm) {
__ push(rdi);
int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
if (count > 0) {
- __ movq(kScratchRegister, Factory::undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
for (int i = 0; i < count; i++) {
__ push(kScratchRegister);
}
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index d61b37b2c..8d313c951 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -537,7 +537,6 @@ bool CodeGenerator::HasValidEntryRegisters() {
&& (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
&& (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
&& (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
- && (allocator()->count(r13) == (frame()->is_used(r13) ? 1 : 0))
&& (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
}
#endif
@@ -649,6 +648,196 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#ifdef DEBUG
+ void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
+#endif
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+void CodeGenerator::CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position) {
+ ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
+ ASSERT(arguments->IsArguments());
+
+ JumpTarget slow, done;
+
+ // Load the apply function onto the stack. This will usually
+ // give us a megamorphic load site. Not super, but it works.
+ Reference ref(this, apply);
+ ref.GetValue(NOT_INSIDE_TYPEOF);
+ ASSERT(ref.type() == Reference::NAMED);
+
+ // Load the receiver and the existing arguments object onto the
+ // expression stack. Avoid allocating the arguments object here.
+ Load(receiver);
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+
+ // Emit the source position information after having loaded the
+ // receiver and the arguments.
+ CodeForSourcePosition(position);
+
+ // Check if the arguments object has been lazily allocated
+ // already. If so, just use that instead of copying the arguments
+ // from the stack. This also deals with cases where a local variable
+ // named 'arguments' has been introduced.
+ frame_->Dup();
+ Result probe = frame_->Pop();
+ bool try_lazy = true;
+ if (probe.is_constant()) {
+ try_lazy = probe.handle()->IsTheHole();
+ } else {
+ __ Cmp(probe.reg(), Factory::the_hole_value());
+ probe.Unuse();
+ slow.Branch(not_equal);
+ }
+
+ if (try_lazy) {
+ JumpTarget build_args;
+
+ // Get rid of the arguments object probe.
+ frame_->Drop();
+
+ // Before messing with the execution stack, we sync all
+ // elements. This is bound to happen anyway because we're
+ // about to call a function.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
+ // Check that the receiver really is a JavaScript object.
+ { frame_->PushElementAt(0);
+ Result receiver = frame_->Pop();
+ receiver.ToRegister();
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ // We allow all JSObjects including JSFunctions. As long as
+ // JS_FUNCTION_TYPE is the last instance type and it is right
+ // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
+ // bound.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ build_args.Branch(below);
+ }
+
+ // Verify that we're invoking Function.prototype.apply.
+ { frame_->PushElementAt(1);
+ Result apply = frame_->Pop();
+ apply.ToRegister();
+ __ testl(apply.reg(), Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ Result tmp = allocator_->Allocate();
+ __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
+ build_args.Branch(not_equal);
+ __ movq(tmp.reg(),
+ FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+ Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
+ __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+ apply_code);
+ build_args.Branch(not_equal);
+ }
+
+ // Get the function receiver from the stack. Check that it
+ // really is a function.
+ __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+ __ testl(rdi, Immediate(kSmiTagMask));
+ build_args.Branch(zero);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ build_args.Branch(not_equal);
+
+ // Copy the arguments to this function possibly from the
+ // adaptor frame below it.
+ Label invoke, adapted;
+ __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adapted);
+
+ // No arguments adaptor frame. Copy fixed number of arguments.
+ __ movq(rax, Immediate(scope_->num_parameters()));
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ __ push(frame_->ParameterAt(i));
+ }
+ __ jmp(&invoke);
+
+ // Arguments adaptor frame present. Copy arguments from there, but
+ // avoid copying too many arguments to avoid stack overflows.
+ __ bind(&adapted);
+ static const uint32_t kArgumentsLimit = 1 * KB;
+ __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ shrl(rax, Immediate(kSmiTagSize));
+ __ movq(rcx, rax);
+ __ cmpq(rax, Immediate(kArgumentsLimit));
+ build_args.Branch(above);
+
+ // Loop through the arguments pushing them onto the execution
+ // stack. We don't inform the virtual frame of the push, so we don't
+ // have to worry about getting rid of the elements from the virtual
+ // frame.
+ Label loop;
+ __ bind(&loop);
+ __ testl(rcx, rcx);
+ __ j(zero, &invoke);
+ __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+ __ decl(rcx);
+ __ jmp(&loop);
+
+ // Invoke the function. The virtual frame knows about the receiver
+ // so make sure to forget that explicitly.
+ __ bind(&invoke);
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+ frame_->Forget(1);
+ Result result = allocator()->Allocate(rax);
+ frame_->SetElementAt(0, &result);
+ done.Jump();
+
+ // Slow-case: Allocate the arguments object since we know it isn't
+ // there, and fall-through to the slow-case where we call
+ // Function.prototype.apply.
+ build_args.Bind();
+ Result arguments_object = StoreArgumentsObject(false);
+ frame_->Push(&arguments_object);
+ slow.Bind();
+ }
+
+ // Flip the apply function and the function to call on the stack, so
+ // the function looks like the receiver of the apply call. This way,
+ // the generic Function.prototype.apply implementation can deal with
+ // the call like it usually does.
+ Result a2 = frame_->Pop();
+ Result a1 = frame_->Pop();
+ Result ap = frame_->Pop();
+ Result fn = frame_->Pop();
+ frame_->Push(&ap);
+ frame_->Push(&fn);
+ frame_->Push(&a1);
+ frame_->Push(&a2);
+ CallFunctionStub call_function(2, NOT_IN_LOOP);
+ Result res = frame_->CallStub(&call_function, 3);
+ frame_->Push(&res);
+
+ // All done. Restore context register after call.
+ if (try_lazy) done.Bind();
+ frame_->RestoreContextRegister();
+}
+
+
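CallApplyLazy is the x64 port of the fast path referenced by the TODO removed in VisitCall below: when a call site has the exact shape fn.apply(receiver, arguments) and the arguments object has not been materialized, the arguments are copied straight out of the (possibly adapted) caller frame. Condensed, the fast path is attempted only when:

1. the function was compiled with lazy arguments allocation and the local 'arguments' slot still holds the hole (nothing has materialized it);
2. the property being called really is the Function.prototype.apply builtin, applied to a callee that is a JSFunction and a receiver that is a JS object;
3. fewer than 1 KB of arguments would be copied from the adaptor frame.

In every other case the code falls through to building a real arguments object and calling apply generically via CallFunctionStub.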
class DeferredStackCheck: public DeferredCode {
public:
DeferredStackCheck() {
@@ -668,37 +857,13 @@ void DeferredStackCheck::Generate() {
void CodeGenerator::CheckStack() {
if (FLAG_check_stack) {
DeferredStackCheck* deferred = new DeferredStackCheck;
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ movq(kScratchRegister, stack_guard_limit);
- __ cmpq(rsp, Operand(kScratchRegister, 0));
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
deferred->Branch(below);
deferred->BindExit();
}
}
-class CallFunctionStub: public CodeStub {
- public:
- CallFunctionStub(int argc, InLoopFlag in_loop)
- : argc_(argc), in_loop_(in_loop) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- int argc_;
- InLoopFlag in_loop_;
-
-#ifdef DEBUG
- void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
-#endif
-
- Major MajorKey() { return CallFunction; }
- int MinorKey() { return argc_; }
- InLoopFlag InLoop() { return in_loop_; }
-};
-
-
void CodeGenerator::VisitAndSpill(Statement* statement) {
// TODO(X64): No architecture specific code. Move to shared location.
ASSERT(in_spilled_code());
@@ -772,9 +937,7 @@ void CodeGenerator::VisitDeclaration(Declaration* node) {
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- __ movq(kScratchRegister, Factory::the_hole_value(),
- RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(Heap::kTheHoleValueRootIndex);
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
@@ -1480,9 +1643,9 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPop(rax);
// rax: value to be iterated over
- __ Cmp(rax, Factory::undefined_value());
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
exit.Branch(equal);
- __ Cmp(rax, Factory::null_value());
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
exit.Branch(equal);
// Stack layout in body:
@@ -1518,7 +1681,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Runtime::kGetPropertyNamesFast)
__ movq(rdx, rax);
__ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ Cmp(rcx, Factory::meta_map());
+ __ CompareRoot(rcx, Heap::kMetaMapRootIndex);
fixed_array.Branch(not_equal);
// Get enum cache
@@ -1587,7 +1750,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
__ movq(rbx, rax);
// If the property has been removed while iterating, we just skip it.
- __ Cmp(rbx, Factory::null_value());
+ __ CompareRoot(rbx, Heap::kNullValueRootIndex);
node->continue_target()->Branch(equal);
end_del_check.Bind();
@@ -1862,10 +2025,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
// Fake a top of stack value (unneeded when FALLING) and set the
// state in rcx, then jump around the unlink blocks if any.
- __ movq(kScratchRegister,
- Factory::undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(Heap::kUndefinedValueRootIndex);
__ movq(rcx, Immediate(Smi::FromInt(FALLING)));
if (nof_unlinks > 0) {
finally_block.Jump();
@@ -1910,10 +2070,7 @@ void CodeGenerator::VisitTryFinally(TryFinally* node) {
frame_->EmitPush(rax);
} else {
// Fake TOS for targets that shadowed breaks and continues.
- __ movq(kScratchRegister,
- Factory::undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
+ frame_->EmitPush(Heap::kUndefinedValueRootIndex);
}
__ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
if (--nof_unlinks > 0) {
@@ -2155,7 +2312,7 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
// jump to the deferred code passing the literals array.
DeferredRegExpLiteral* deferred =
new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node);
- __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
deferred->Branch(equal);
deferred->BindExit();
literals.Unuse();
@@ -2226,7 +2383,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// If so, jump to the deferred code passing the literals array.
DeferredObjectLiteral* deferred =
new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
- __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
deferred->Branch(equal);
deferred->BindExit();
literals.Unuse();
@@ -2359,7 +2516,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// If so, jump to the deferred code passing the literals array.
DeferredArrayLiteral* deferred =
new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
- __ Cmp(boilerplate.reg(), Factory::undefined_value());
+ __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
deferred->Branch(equal);
deferred->BindExit();
literals.Unuse();
@@ -2612,27 +2769,40 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
// ------------------------------------------------------------------
- // TODO(X64): Consider optimizing Function.prototype.apply calls
- // with arguments object. Requires lazy arguments allocation;
- // see http://codereview.chromium.org/147075.
+ Handle<String> name = Handle<String>::cast(literal->handle());
- // Push the name of the function and the receiver onto the stack.
- frame_->Push(literal->handle());
- Load(property->obj());
+ if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
+ name->IsEqualTo(CStrVector("apply")) &&
+ args->length() == 2 &&
+ args->at(1)->AsVariableProxy() != NULL &&
+ args->at(1)->AsVariableProxy()->IsArguments()) {
+ // Use the optimized Function.prototype.apply that avoids
+ // allocating lazily allocated arguments objects.
+ CallApplyLazy(property,
+ args->at(0),
+ args->at(1)->AsVariableProxy(),
+ node->position());
- // Load the arguments.
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- Load(args->at(i));
- }
+ } else {
+ // Push the name of the function and the receiver onto the stack.
+ frame_->Push(name);
+ Load(property->obj());
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ Load(args->at(i));
+ }
- // Call the IC initialization code.
- CodeForSourcePosition(node->position());
- Result result =
- frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting());
- frame_->RestoreContextRegister();
- // Replace the function on the stack with the result.
- frame_->SetElementAt(0, &result);
+ // Call the IC initialization code.
+ CodeForSourcePosition(node->position());
+ Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET,
+ arg_count,
+ loop_nesting());
+ frame_->RestoreContextRegister();
+ // Replace the function on the stack with the result.
+ frame_->SetElementAt(0, &result);
+ }
} else {
// -------------------------------------------
@@ -3304,7 +3474,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->true_target()->Branch(zero);
frame_->Spill(answer.reg());
__ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ Cmp(answer.reg(), Factory::heap_number_map());
+ __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
answer.Unuse();
destination()->Split(equal);
@@ -3323,14 +3493,14 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->Split(below); // Unsigned byte comparison needed.
} else if (check->Equals(Heap::boolean_symbol())) {
- __ Cmp(answer.reg(), Factory::true_value());
+ __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
destination()->true_target()->Branch(equal);
- __ Cmp(answer.reg(), Factory::false_value());
+ __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
answer.Unuse();
destination()->Split(equal);
} else if (check->Equals(Heap::undefined_symbol())) {
- __ Cmp(answer.reg(), Factory::undefined_value());
+ __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
destination()->true_target()->Branch(equal);
__ testl(answer.reg(), Immediate(kSmiTagMask));
@@ -3355,7 +3525,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
} else if (check->Equals(Heap::object_symbol())) {
__ testl(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
- __ Cmp(answer.reg(), Factory::null_value());
+ __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
destination()->true_target()->Branch(equal);
// It can be an undetectable object.
@@ -3473,7 +3643,7 @@ void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
- Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &check_frame_marker);
__ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
@@ -3564,7 +3734,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
// If the index is negative or non-smi trigger the slow case.
ASSERT(kSmiTag == 0);
__ testl(index.reg(),
- Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000U)));
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
__ j(not_zero, &slow_case);
// Untag the index.
__ sarl(index.reg(), Immediate(kSmiTagSize));
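The cast change is cosmetic (the constant already has the top bit set); the interesting part is that one testl checks two things at once. In plain C++, assuming the 32-bit tagged-value layout this code relies on:

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // Assumed: tag in bit 0, smi tag == 0.

    // True when the tag bit is clear (the value is a smi) and bit 31 is
    // clear (the tagged value, and hence the smi, is non-negative).
    inline bool IsNonNegativeSmi(uint32_t tagged_value) {
      return (tagged_value & (kSmiTagMask | 0x80000000u)) == 0;
    }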
@@ -3649,7 +3819,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ bind(&slow_case);
// Move the undefined value into the result register, which will
// trigger the slow case.
- __ Move(temp.reg(), Factory::undefined_value());
+ __ LoadRoot(temp.reg(), Heap::kUndefinedValueRootIndex);
__ bind(&end);
frame_->Push(&temp);
@@ -4092,15 +4262,15 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
// Fast case checks.
// 'false' => false.
- __ Cmp(value.reg(), Factory::false_value());
+ __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
dest->false_target()->Branch(equal);
// 'true' => true.
- __ Cmp(value.reg(), Factory::true_value());
+ __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
dest->true_target()->Branch(equal);
// 'undefined' => false.
- __ Cmp(value.reg(), Factory::undefined_value());
+ __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
dest->false_target()->Branch(equal);
// Smi => false iff zero.
@@ -4319,10 +4489,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
value,
&slow));
if (potential_slot->var()->mode() == Variable::CONST) {
- __ Cmp(value.reg(), Factory::the_hole_value());
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
done.Branch(not_equal, &value);
- __ movq(value.reg(), Factory::undefined_value(),
- RelocInfo::EMBEDDED_OBJECT);
+ __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
}
// There is always control flow to slow from
// ContextSlotOperandCheckExtensions so we have to jump around
@@ -4360,9 +4529,9 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
Comment cmnt(masm_, "[ Load const");
JumpTarget exit;
__ movq(rcx, SlotOperand(slot, rcx));
- __ Cmp(rcx, Factory::the_hole_value());
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
exit.Branch(not_equal);
- __ movq(rcx, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+ __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
exit.Bind();
frame_->EmitPush(rcx);
@@ -4416,7 +4585,7 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
- __ Cmp(value.reg(), Factory::the_hole_value());
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
frame_->Push(&value);
exit.Branch(not_equal);
Result arguments = StoreArgumentsObject(false);
@@ -4477,7 +4646,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ Init const");
__ movq(rcx, SlotOperand(slot, rcx));
- __ Cmp(rcx, Factory::the_hole_value());
+ __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
exit.Branch(not_equal);
}
@@ -4561,7 +4730,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
__ movq(tmp.reg(), context);
}
// Load map for comparison into register, outside loop.
- __ Move(kScratchRegister, Factory::global_context_map());
+ __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
__ bind(&next);
// Terminate at global context.
__ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
@@ -4665,7 +4834,7 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
// been assigned a proper value.
skip_arguments = !arguments.handle()->IsTheHole();
} else {
- __ Cmp(arguments.reg(), Factory::the_hole_value());
+ __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
arguments.Unuse();
done.Branch(not_equal);
}
@@ -4803,7 +4972,7 @@ void CodeGenerator::Comparison(Condition cc,
right_side.Unuse();
left_side.Unuse();
operand.ToRegister();
- __ Cmp(operand.reg(), Factory::null_value());
+ __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
if (strict) {
operand.Unuse();
dest->Split(equal);
@@ -4811,7 +4980,7 @@ void CodeGenerator::Comparison(Condition cc,
// The 'null' value is only equal to 'undefined' if using non-strict
// comparisons.
dest->true_target()->Branch(equal);
- __ Cmp(operand.reg(), Factory::undefined_value());
+ __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
dest->true_target()->Branch(equal);
__ testl(operand.reg(), Immediate(kSmiTagMask));
dest->false_target()->Branch(equal);
@@ -5354,7 +5523,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
overwrite_mode);
// Check for negative or non-Smi left hand side.
__ testl(operand->reg(),
- Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000)));
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000)));
deferred->Branch(not_zero);
if (int_value < 0) int_value = -int_value;
if (int_value == 1) {
@@ -5894,7 +6063,7 @@ void Reference::GetValue(TypeofState typeof_state) {
// Check that the key is a non-negative smi.
__ testl(key.reg(),
- Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000u)));
deferred->Branch(not_zero);
// Get the elements array from the receiver and check that it
@@ -5931,7 +6100,7 @@ void Reference::GetValue(TypeofState typeof_state) {
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
- __ Cmp(value.reg(), Factory::the_hole_value());
+ __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
@@ -6140,7 +6309,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ movq(rax, Operand(rsp, 1 * kPointerSize));
// 'null' => false.
- __ Cmp(rax, Factory::null_value());
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, &false_result);
// Get the map and type of the heap object.
@@ -6171,7 +6340,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ bind(&not_string);
// HeapNumber => false iff +0, -0, or NaN.
// These three cases set C3 when compared to zero in the FPU.
- __ Cmp(rdx, Factory::heap_number_map());
+ __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &true_result);
// TODO(x64): Don't use fp stack, use MMX registers?
__ fldz(); // Load zero onto fp stack
@@ -6217,7 +6386,7 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
// If the product is zero and the non-zero factor is negative,
// the spec requires us to return floating point negative zero.
- if (answer != 0 || (left >= 0 && right >= 0)) {
+ if (answer != 0 || (left + right) >= 0) {
answer_object = Smi::FromInt(static_cast<int>(answer));
}
}
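The rewritten condition is equivalent to the old one, just cheaper: a zero product means one factor is zero, so left + right is simply the other factor, and a single sign test decides whether the result must be negative zero. A checkable statement of that equivalence:

    #include <cassert>
    #include <cstdint>

    void CheckSignTestEquivalence(int left, int right) {
      if (static_cast<int64_t>(left) * right == 0) {
        // One factor is zero, so the sum cannot overflow and equals the
        // other factor; its sign decides whether the result may be -0.
        assert(((left + right) >= 0) == (left >= 0 && right >= 0));
      }
    }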
@@ -6285,24 +6454,54 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
void UnarySubStub::Generate(MacroAssembler* masm) {
Label slow;
Label done;
-
+ Label try_float;
+ Label special;
// Check whether the value is a smi.
__ testl(rax, Immediate(kSmiTagMask));
- // TODO(X64): Add inline code that handles floats, as on ia32 platform.
- __ j(not_zero, &slow);
+ __ j(not_zero, &try_float);
+
// Enter runtime system if the value of the smi is zero
// to make sure that we switch between 0 and -0.
// Also enter it if the value of the smi is Smi::kMinValue
__ testl(rax, Immediate(0x7FFFFFFE));
- __ j(zero, &slow);
+ __ j(zero, &special);
__ neg(rax);
__ jmp(&done);
+
+ __ bind(&special);
+ // Either zero or -0x40000000, neither of which becomes a smi when negated.
+ __ testl(rax, rax);
+ __ j(not_zero, &slow);
+ __ Move(rax, Factory::minus_zero_value());
+ __ jmp(&done);
+
// Enter runtime system.
__ bind(&slow);
__ pop(rcx); // pop return address
__ push(rax);
__ push(rcx); // push return address
__ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ __ jmp(&done);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ Cmp(rdx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
+ // Operand is a float, negate its value by flipping sign bit.
+ __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ movq(kScratchRegister, Immediate(0x01));
+ __ shl(kScratchRegister, Immediate(63));
+ __ xor_(rdx, kScratchRegister); // Flip sign.
+ // rdx is value to store.
+ if (overwrite_) {
+ __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+ } else {
+ FloatingPointHelper::AllocateHeapNumber(masm, &slow, rbx, rcx);
+ // rcx: allocated 'empty' number
+ __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+ __ movq(rax, rcx);
+ }
__ bind(&done);
__ StubReturn(1);
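The new inline float path relies on IEEE-754 negation being a pure sign-bit flip. The same operation in stand-alone C++:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double NegateByBitFlip(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      bits ^= uint64_t{1} << 63;  // Flip the sign bit, exactly as the stub does.
      std::memcpy(&value, &bits, sizeof value);
      return value;
    }

    int main() {
      std::printf("%g %g\n", NegateByBitFlip(2.5), NegateByBitFlip(-0.0));
      return 0;  // Prints: -2.5 0
    }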
@@ -6377,7 +6576,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// One operand is a smi.
// Check whether the non-smi is a heap number.
- ASSERT_EQ(1, kSmiTagMask);
+ ASSERT_EQ(static_cast<intptr_t>(1), kSmiTagMask);
// rcx still holds rax & kSmiTag, which is either zero or one.
__ decq(rcx); // If rax is a smi, all 1s, else all 0s.
__ movq(rbx, rdx);
@@ -6584,7 +6783,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// Loop through the prototype chain looking for the function prototype.
Label loop, is_instance, is_not_instance;
- __ Move(kScratchRegister, Factory::null_value());
+ __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
__ cmpq(rcx, rbx);
__ j(equal, &is_instance);
@@ -6618,7 +6817,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
Label runtime;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime);
// Value in rcx is Smi encoded.
@@ -6651,7 +6850,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
Label adaptor;
__ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Check index against formal parameters count limit passed in
@@ -6701,7 +6900,7 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
Label adaptor;
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ cmpq(rcx, Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor);
// Nothing to do: The formal number of parameters has already been
@@ -6763,10 +6962,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Pass failure code returned from last attempt as first argument to GC.
-#ifdef __MSVC__
- __ movq(rcx, rax); // argc.
-#else // ! defined(__MSVC__)
- __ movq(rdi, rax); // argv.
+#ifdef _WIN64
+ __ movq(rcx, rax);
+#else // ! defined(_WIN64)
+ __ movq(rdi, rax);
#endif
__ movq(kScratchRegister,
FUNCTION_ADDR(Runtime::PerformGC),
@@ -6782,11 +6981,14 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
}
// Call C function.
-#ifdef __MSVC__
- // MSVC passes arguments in rcx, rdx, r8, r9
- __ movq(rcx, r14); // argc.
- __ movq(rdx, r15); // argv.
-#else // ! defined(__MSVC__)
+#ifdef _WIN64
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
+ // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
+ __ movq(Operand(rsp, 4 * kPointerSize), r14); // argc.
+ __ movq(Operand(rsp, 5 * kPointerSize), r15); // argv.
+ // Pass a pointer to the Arguments object as the first argument.
+ __ lea(rcx, Operand(rsp, 4 * kPointerSize));
+#else // ! defined(_WIN64)
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
__ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
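On the Win64 path the C function no longer receives argc/argv in two registers; a two-word record is stored in the stack slots past the four mandatory parameter home slots, and a pointer to it travels in rcx. Roughly, with illustrative types:

    #include <cstdint>

    // Illustrative shape of the record built at rsp + 4 * kPointerSize.
    struct Arguments {
      int64_t argc;   // Stored from r14.
      int64_t* argv;  // Stored from r15.
    };

    // The callee then sees a single pointer parameter:
    //   int64_t Builtin(const Arguments* args);  // args arrives in rcx.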
@@ -6835,7 +7037,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of termination exceptions which are uncatchable
// by javascript code.
- __ Cmp(rax, Factory::termination_exception());
+ __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
__ j(equal, throw_termination_exception);
// Handle normal exception.
@@ -7012,11 +7214,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(rbp);
__ movq(rbp, rsp);
- // Save callee-saved registers (X64 calling conventions).
+ // Push the stack frame type marker twice.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- // Push something that is not an arguments adaptor.
- __ push(Immediate(ArgumentsAdaptorFrame::NON_SENTINEL));
- __ push(Immediate(Smi::FromInt(marker))); // @ function offset
+ __ push(Immediate(Smi::FromInt(marker))); // context slot
+ __ push(Immediate(Smi::FromInt(marker))); // function slot
+ // Save callee-saved registers (X64 calling conventions).
__ push(r12);
__ push(r13);
__ push(r14);
@@ -7139,24 +7341,18 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
Label* need_gc,
Register scratch,
Register result) {
- ExternalReference allocation_top =
- ExternalReference::new_space_allocation_top_address();
- ExternalReference allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ movq(scratch, allocation_top); // scratch: address of allocation top.
- __ movq(result, Operand(scratch, 0));
- __ addq(result, Immediate(HeapNumber::kSize)); // New top.
- __ movq(kScratchRegister, allocation_limit);
- __ cmpq(result, Operand(kScratchRegister, 0));
- __ j(above, need_gc);
-
- __ movq(Operand(scratch, 0), result); // store new top
- __ addq(result, Immediate(kHeapObjectTag - HeapNumber::kSize));
- __ movq(kScratchRegister,
- Factory::heap_number_map(),
- RelocInfo::EMBEDDED_OBJECT);
+ // Allocate heap number in new space.
+ __ AllocateObjectInNewSpace(HeapNumber::kSize,
+ result,
+ scratch,
+ no_reg,
+ need_gc,
+ false);
+
+ // Set the map and tag the result.
+ __ addq(result, Immediate(kHeapObjectTag));
+ __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
__ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- // Tag old top and use as result.
}
@@ -7556,18 +7752,29 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ fild_s(Operand(rsp, 0 * kPointerSize));
__ fucompp();
__ fnstsw_ax();
- __ sahf(); // TODO(X64): Not available.
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
-
+ if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+ __ sahf();
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+ } else {
+ __ and_(rax, Immediate(0x4400));
+ __ cmpl(rax, Immediate(0x4000));
+ __ j(not_zero, &operand_conversion_failure);
+ }
// Check if left operand is int32.
__ fist_s(Operand(rsp, 1 * kPointerSize));
__ fild_s(Operand(rsp, 1 * kPointerSize));
__ fucompp();
__ fnstsw_ax();
- __ sahf(); // TODO(X64): Not available. Test bits in ax directly
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
+ if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+ __ sahf();
+ __ j(not_zero, &operand_conversion_failure);
+ __ j(parity_even, &operand_conversion_failure);
+ } else {
+ __ and_(rax, Immediate(0x4400));
+ __ cmpl(rax, Immediate(0x4000));
+ __ j(not_zero, &operand_conversion_failure);
+ }
}
// Get int32 operands and perform bitop.
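The 0x4400/0x4000 fallback decodes the FPU status word directly instead of routing it through the CPU flags with sahf. After fnstsw_ax the condition codes sit at C0 = bit 8, C2 = bit 10 and C3 = bit 14; fucompp on equal, ordered operands sets C3 and clears C2, while an unordered (NaN) compare sets C2 as well. Hence:

    #include <cstdint>

    // True when the fucompp result was "equal and ordered", i.e. the value
    // survived the int32 round trip: C3 (0x4000) set, C2 (0x0400) clear.
    inline bool RoundTripSucceeded(uint16_t fpu_status_word) {
      return (fpu_status_word & 0x4400) == 0x4000;
    }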
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index bfdff5674..2ae8145e8 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -481,6 +481,14 @@ class CodeGenerator: public AstVisitor {
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+ // Use an optimized version of Function.prototype.apply that avoids
+ // allocating the arguments object and just copies the arguments
+ // from the stack.
+ void CallApplyLazy(Property* apply,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position);
+
void CheckStack();
struct InlineRuntimeLUT {
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 24c78da99..5442be960 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -60,6 +60,7 @@ class StackHandlerConstants : public AllStatic {
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -10 * kPointerSize;
+ static const int kArgvOffset = 6 * kPointerSize;
};
@@ -90,10 +91,12 @@ class StandardFrameConstants : public AllStatic {
class JavaScriptFrameConstants : public AllStatic {
public:
+ // FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+ // Caller SP-relative.
static const int kParam0Offset = -2 * kPointerSize;
static const int kReceiverOffset = -1 * kPointerSize;
};
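The two new comments split the constants by their base register: the first group is addressed off the frame pointer, the last two off the caller's stack pointer. As plain pointer arithmetic, using the constants from this header (kPointerSize is 8 on x64):

    #include <cstdint>

    const int kPointerSize = 8;
    const int kParam0Offset = -2 * kPointerSize;
    const int kReceiverOffset = -1 * kPointerSize;

    inline uintptr_t ReceiverAddress(uintptr_t caller_sp) {
      return caller_sp + kReceiverOffset;  // One slot below the caller's SP.
    }

    inline uintptr_t FirstParamAddress(uintptr_t caller_sp) {
      return caller_sp + kParam0Offset;    // Two slots below the caller's SP.
    }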
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 1c74a44d0..e2f7c30d2 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -339,7 +339,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&fast);
__ movq(rax, Operand(rcx, rax, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- __ Cmp(rax, Factory::the_hole_value());
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow);
@@ -613,9 +613,9 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
// Check for boolean.
__ bind(&non_string);
- __ Cmp(rdx, Factory::true_value());
+ __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
__ j(equal, &boolean);
- __ Cmp(rdx, Factory::false_value());
+ __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
__ j(not_equal, &miss);
__ bind(&boolean);
StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -785,7 +785,19 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -805,7 +817,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
// Cache miss: Jump to runtime.
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
@@ -819,7 +831,52 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
+
void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss, probe, global;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the receiver is a valid JS object.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+ __ j(below, &miss);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object (unlikely).
+ __ CmpInstanceType(rbx, JS_GLOBAL_PROXY_TYPE);
+ __ j(equal, &global);
+
+ // Check for non-global object that requires access check.
+ __ testl(FieldOperand(rbx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &miss);
+
+ // Search the dictionary, placing the result in rax.
+ __ bind(&probe);
+ GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx);
+ GenerateCheckNonObjectOrLoaded(masm, &miss, rax);
+ __ ret(0);
+
+ // Global object access: Check access rights.
+ __ bind(&global);
+ __ CheckAccessGlobalProxy(rax, rdx, &miss);
+ __ jmp(&probe);
+
+ // Cache miss: Restore receiver from stack and jump to runtime.
+ __ bind(&miss);
+ __ movq(rax, Operand(rsp, 1 * kPointerSize));
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
@@ -906,6 +963,21 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ __ movq(rdx, Operand(rsp, kPointerSize));
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+
+ // Cache miss: Jump to runtime.
Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 8f8398dc9..ae45eaba5 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -46,6 +46,22 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
+void MacroAssembler::LoadRoot(Register destination,
+ Heap::RootListIndex index) {
+ movq(destination, Operand(r13, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+ push(Operand(r13, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::CompareRoot(Register with,
+ Heap::RootListIndex index) {
+ cmpq(with, Operand(r13, index << kPointerSizeLog2));
+}
+
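With r13 pinned to the root-list base (it is loaded with ExternalReference::roots_address() in the JS entry trampoline earlier in this diff), every LoadRoot/PushRoot/CompareRoot becomes one indexed memory operand instead of a 64-bit object pointer embedded in the instruction stream. The equivalent in ordinary C++:

    #include <cstdint>

    using HeapObjectPtr = uintptr_t;  // Stand-in for a tagged V8 object pointer.

    // 'roots' plays the role of r13; 'index' is a Heap::RootListIndex.
    inline HeapObjectPtr LoadRoot(const HeapObjectPtr* roots, int index) {
      return roots[index];  // movq(dst, Operand(r13, index << kPointerSizeLog2))
    }

    inline bool CompareRoot(HeapObjectPtr value,
                            const HeapObjectPtr* roots, int index) {
      return value == roots[index];
    }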
static void RecordWriteHelper(MacroAssembler* masm,
Register object,
@@ -276,7 +292,7 @@ void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
addq(rsp, Immediate(num_arguments * kPointerSize));
}
- movq(rax, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
@@ -584,8 +600,14 @@ void MacroAssembler::FCmp() {
fcompp();
push(rax);
fnstsw_ax();
- // TODO(X64): Check that sahf is safe to use, using CPUProbe.
- sahf();
+ if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
+ sahf();
+ } else {
+ shrl(rax, Immediate(8));
+ and_(rax, Immediate(0xFF));
+ push(rax);
+ popfq();
+ }
pop(rax);
}
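FCmp's fallback reproduces sahf by hand: sahf copies ah into the low byte of the flags register, mapping C0 to CF, C2 to PF and C3 to ZF, so shifting the status word right by eight, masking to a byte and popping it into RFLAGS has the same effect on those three flags (the remaining flag bits are zeroed, which the subsequent conditional jumps do not consume). The mapping, spelled out:

    #include <cstdint>

    struct ComparisonFlags { bool carry, parity, zero; };

    // After fnstsw_ax, ah = status word >> 8; sahf would install these bits.
    inline ComparisonFlags FlagsFromFpuStatus(uint16_t status_word) {
      uint8_t ah = static_cast<uint8_t>(status_word >> 8);
      return ComparisonFlags{ (ah & 0x01) != 0,    // C0 -> CF
                              (ah & 0x04) != 0,    // C2 -> PF
                              (ah & 0x40) != 0 };  // C3 -> ZF
    }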
@@ -628,7 +650,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
- Cmp(result, Factory::the_hole_value());
+ CompareRoot(result, Heap::kTheHoleValueRootIndex);
j(equal, miss);
// If the function does not have an initial map, we're done.
@@ -994,9 +1016,6 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
}
#endif
- // Reserve space for two arguments: argc and argv
- subq(rsp, Immediate(2 * kPointerSize));
-
// Get the required frame alignment for the OS.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
@@ -1005,6 +1024,17 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
and_(rsp, kScratchRegister);
}
+#ifdef _WIN64
+ // Reserve space for the Arguments object. The Windows 64-bit ABI
+ // requires us to pass this structure as a pointer to its location on
+ // the stack. The structure contains 2 pointers.
+ // The structure on the stack must be 16-byte aligned.
+ // We also need backing space for 4 parameters, even though
+ // we only pass one parameter, and it is in a register.
+ subq(rsp, Immediate(6 * kPointerSize));
+ ASSERT(kFrameAlignment == 2 * kPointerSize); // Change the padding if needed.
+#endif
+
// Patch the saved entry sp.
movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
@@ -1179,12 +1209,12 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Preserve original value of holder_reg.
push(holder_reg);
movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
- Cmp(holder_reg, Factory::null_value());
+ CompareRoot(holder_reg, Heap::kNullValueRootIndex);
Check(not_equal, "JSGlobalProxy::context() should not be null.");
// Read the first word and compare to global_context_map(),
movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- Cmp(holder_reg, Factory::global_context_map());
+ CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
Check(equal, "JSGlobalObject::global_context should be a global context.");
pop(holder_reg);
}
@@ -1201,4 +1231,156 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
+void MacroAssembler::LoadAllocationTopHelper(
+ Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Just return if allocation top is already known.
+ if (result_contains_top_on_entry) {
+ // No use of scratch if allocation top is provided.
+ ASSERT(scratch.is(no_reg));
+ return;
+ }
+
+ // Move address of new object to result. Use scratch register if available.
+ if (scratch.is(no_reg)) {
+ movq(kScratchRegister, new_space_allocation_top);
+ movq(result, Operand(kScratchRegister, 0));
+ } else {
+ ASSERT(!scratch.is(result_end));
+ movq(scratch, new_space_allocation_top);
+ movq(result, Operand(scratch, 0));
+ }
+}
+
+
+void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Update new top.
+ if (result_end.is(rax)) {
+ // rax can be stored directly to a memory location.
+ store_rax(new_space_allocation_top);
+ } else {
+ // Register required - use scratch provided if available.
+ if (scratch.is(no_reg)) {
+ movq(kScratchRegister, new_space_allocation_top);
+ movq(Operand(kScratchRegister, 0), result_end);
+ } else {
+ movq(Operand(scratch, 0), result_end);
+ }
+ }
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, object_size));
+ movq(kScratchRegister, new_space_allocation_limit);
+ cmpq(result_end, Operand(kScratchRegister, 0));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+ ASSERT(!result.is(result_end));
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ lea(result_end, Operand(result, element_count, element_size, header_size));
+ movq(kScratchRegister, new_space_allocation_limit);
+ cmpq(result_end, Operand(kScratchRegister, 0));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::AllocateObjectInNewSpace(
+ Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry) {
+
+ // Load address of new object into result.
+ LoadAllocationTopHelper(result,
+ result_end,
+ scratch,
+ result_contains_top_on_entry);
+
+
+ // Calculate new top and bail out if new space is exhausted.
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ if (!object_size.is(result_end)) {
+ movq(result_end, object_size);
+ }
+ addq(result_end, result);
+ movq(kScratchRegister, new_space_allocation_limit);
+ cmpq(result_end, Operand(kScratchRegister, 0));
+ j(above, gc_required);
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+
+ // Make sure the object has no tag before resetting top.
+ and_(object, Immediate(~kHeapObjectTagMask));
+ movq(kScratchRegister, new_space_allocation_top);
+#ifdef DEBUG
+ cmpq(object, Operand(kScratchRegister, 0));
+ Check(below, "Undo allocation of non allocated memory");
+#endif
+ movq(Operand(kScratchRegister, 0), object);
+}
+
+
} } // namespace v8::internal
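Taken together, these helpers implement classic bump-pointer allocation against a top/limit pair, plus an undo that simply rewinds the top. A stand-alone sketch of the same logic (names illustrative):

    #include <cstddef>
    #include <cstdint>

    struct NewSpace {
      uintptr_t top;    // new_space_allocation_top
      uintptr_t limit;  // new_space_allocation_limit
    };

    // Returns the untagged start of the new object, or 0 when a GC is required.
    uintptr_t AllocateRaw(NewSpace* space, size_t object_size) {
      uintptr_t result = space->top;             // LoadAllocationTopHelper
      uintptr_t result_end = result + object_size;
      if (result_end > space->limit) return 0;   // j(above, gc_required)
      space->top = result_end;                   // UpdateAllocationTopHelper
      return result;
    }

    // UndoAllocationInNewSpace: the object and everything after it is freed.
    void UndoAllocation(NewSpace* space, uintptr_t object_start) {
      space->top = object_start;
    }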
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index cba55eb27..6f5c32c2b 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -66,6 +66,10 @@ class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
+ void LoadRoot(Register destination, Heap::RootListIndex index);
+ void CompareRoot(Register with, Heap::RootListIndex index);
+ void PushRoot(Heap::RootListIndex index);
+
// ---------------------------------------------------------------------------
// GC Support
@@ -223,6 +227,48 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space. If the new space is exhausted control
+ // continues at the gc_required label. The allocated object is returned in
+ // result and end of the new object is returned in result_end. The register
+ // scratch can be passed as no_reg in which case an additional object
+ // reference will be added to the reloc info. The returned pointers in result
+ // and result_end have not yet been tagged as heap objects. If
+ // result_contains_top_on_entry is true the content of result is known to be
+ // the allocation top on entry (could be result_end from a previous call to
+ // AllocateObjectInNewSpace). If result_contains_top_on_entry is true scratch
+ // should be no_reg as it is never used.
+ void AllocateObjectInNewSpace(int object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ void AllocateObjectInNewSpace(Register object_size,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ bool result_contains_top_on_entry);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. Make sure that no pointers are left to the
+ // object(s) no longer allocated as they would be invalid when allocation is
+ // un-done.
+ void UndoAllocationInNewSpace(Register object);
+
+ // ---------------------------------------------------------------------------
// Support functions.
// Check if result is zero and op is negative.
@@ -341,6 +387,13 @@ class MacroAssembler: public Assembler {
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ // Allocation support helpers.
+ void LoadAllocationTopHelper(Register result,
+ Register result_end,
+ Register scratch,
+ bool result_contains_top_on_entry);
+ void UpdateAllocationTopHelper(Register result_end, Register scratch);
};
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 1e38d6da9..373f35949 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -39,6 +39,8 @@
namespace v8 {
namespace internal {
+#ifdef V8_NATIVE_REGEXP
+
/*
* This assembler uses the following register assignment convention
* - rdx : currently loaded character(s) as ASCII or UC16. Must be loaded using
@@ -110,6 +112,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
success_label_(),
backtrack_label_(),
exit_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
__ jmp(&entry_label_); // We'll write the entry code when we know more.
__ bind(&start_label_); // And then continue from here.
}
@@ -319,7 +322,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
} else {
ASSERT(mode_ == UC16);
// Save important/volatile registers before calling C function.
-#ifndef __MSVC__
+#ifndef _WIN64
// Callee save on Win64
__ push(rsi);
__ push(rdi);
@@ -333,7 +336,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
-#ifdef __MSVC__
+#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
__ lea(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
@@ -350,13 +353,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Set byte_length.
__ movq(rdx, rbx);
#endif
- Address function_address = FUNCTION_ADDR(&CaseInsensitiveCompareUC16);
- CallCFunction(function_address, num_arguments);
+ ExternalReference compare =
+ ExternalReference::re_case_insensitive_compare_uc16();
+ CallCFunction(compare, num_arguments);
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_->CodeObject());
__ pop(backtrack_stackpointer());
-#ifndef __MSVC__
+#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
#endif
@@ -604,7 +608,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
-#ifdef __MSVC__
+#ifdef _WIN64
// MSVC passes arguments in rcx, rdx, r8, r9, with backing stack slots.
// Store register parameters in pre-allocated stack slots,
__ movq(Operand(rbp, kInputString), rcx);
@@ -740,7 +744,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Exit and return rax
__ bind(&exit_label_);
-#ifdef __MSVC__
+#ifdef _WIN64
// Restore callee save registers.
__ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
__ pop(rbx);
@@ -794,7 +798,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
Label grow_failed;
// Save registers before calling C function
-#ifndef __MSVC__
+#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
__ push(rsi);
__ push(rdi);
@@ -803,16 +807,17 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Call GrowStack(backtrack_stackpointer())
int num_arguments = 2;
FrameAlign(num_arguments);
-#ifdef __MSVC__
+#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx.
// First argument, backtrack stackpointer, is already in rcx.
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
#else
- // AMD64 ABI passes paremeters in rdi, rsi.
+ // AMD64 ABI passes parameters in rdi, rsi.
__ movq(rdi, backtrack_stackpointer()); // First argument.
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
#endif
- CallCFunction(FUNCTION_ADDR(&GrowStack), num_arguments);
+ ExternalReference grow_stack = ExternalReference::re_grow_stack();
+ CallCFunction(grow_stack, num_arguments);
// If the call returns NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
__ testq(rax, rax);
@@ -821,7 +826,7 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_->CodeObject());
-#ifndef __MSVC__
+#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
#endif
@@ -889,7 +894,9 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacter(int cp_offset,
int characters) {
ASSERT(cp_offset >= -1); // ^ and \b can look behind one character.
ASSERT(cp_offset < (1<<30)); // Be sane! (And ensure negation works)
- CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ if (check_bounds) {
+ CheckPosition(cp_offset + characters - 1, on_end_of_input);
+ }
LoadCurrentCharacterUnchecked(cp_offset, characters);
}
@@ -980,7 +987,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// store anything volatile in a C call or overwritten by this function.
int num_arguments = 3;
FrameAlign(num_arguments);
-#ifdef __MSVC__
+#ifdef _WIN64
// Second argument: Code* of self. (Do this before overwriting r8).
__ movq(rdx, code_object_pointer());
// Third argument: RegExp code frame pointer.
@@ -997,7 +1004,9 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
// return address).
__ lea(rdi, Operand(rsp, -kPointerSize));
#endif
- CallCFunction(FUNCTION_ADDR(&CheckStackGuardState), num_arguments);
+ ExternalReference stack_check =
+ ExternalReference::re_check_stack_guard_state();
+ CallCFunction(stack_check, num_arguments);
}
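CheckStackGuardState can trigger a GC that moves the regexp code object, after which the return address saved on the stack points into freed memory. A conceptual sketch of the fix-up described in the header comment (not the V8 implementation):

#include <cstdint>

// The offset of the return site within the code object is stable even when
// the object moves; only the base address changes.
void FixReturnAddress(std::uintptr_t* return_address,
                      std::uintptr_t old_code_start,
                      std::uintptr_t new_code_start) {
  std::uintptr_t offset = *return_address - old_code_start;
  *return_address = new_code_start + offset;
}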
@@ -1080,23 +1089,6 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
}
-Address RegExpMacroAssemblerX64::GrowStack(Address stack_pointer,
- Address* stack_base) {
- size_t size = RegExpStack::stack_capacity();
- Address old_stack_base = RegExpStack::stack_base();
- ASSERT(old_stack_base == *stack_base);
- ASSERT(stack_pointer <= old_stack_base);
- ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
- Address new_stack_base = RegExpStack::EnsureCapacity(size * 2);
- if (new_stack_base == NULL) {
- return NULL;
- }
- *stack_base = new_stack_base;
- intptr_t stack_content_size = old_stack_base - stack_pointer;
- return new_stack_base - stack_content_size;
-}
-
-
Operand RegExpMacroAssemblerX64::register_location(int register_index) {
ASSERT(register_index < (1<<30));
if (num_registers_ <= register_index) {
@@ -1242,10 +1234,10 @@ void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
// (on Win64 only) and the original value of rsp.
__ movq(kScratchRegister, rsp);
ASSERT(IsPowerOf2(frameAlignment));
-#ifdef __MSVC__
+#ifdef _WIN64
// Allocate space for parameters and old rsp.
__ subq(rsp, Immediate((num_arguments + 1) * kPointerSize));
- __ and_(rsp, -frameAlignment);
+ __ and_(rsp, Immediate(-frameAlignment));
__ movq(Operand(rsp, num_arguments * kPointerSize), kScratchRegister);
#else
// Allocate space for old rsp.
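FrameAlign rounds rsp down to the activation frame alignment and stashes the old value so CallCFunction can restore it. A small sketch of the arithmetic, assuming a power-of-two alignment as the ASSERT requires:

#include <cassert>
#include <cstdint>

std::uintptr_t AlignStackDown(std::uintptr_t rsp, std::uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // power of two
  // Same effect as and_(rsp, Immediate(-frameAlignment)) above: in two's
  // complement, -alignment == ~(alignment - 1).
  return rsp & ~(alignment - 1);
}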
@@ -1256,17 +1248,16 @@ void RegExpMacroAssemblerX64::FrameAlign(int num_arguments) {
}
-void RegExpMacroAssemblerX64::CallCFunction(Address function_address,
+void RegExpMacroAssemblerX64::CallCFunction(ExternalReference function,
int num_arguments) {
- // Don't compile regexps with serialization enabled. The addresses of the C++
- // function being called isn't relocatable.
- ASSERT(!Serializer::enabled());
- __ movq(rax, reinterpret_cast<intptr_t>(function_address), RelocInfo::NONE);
+ __ movq(rax, function);
__ call(rax);
ASSERT(OS::ActivationFrameAlignment() != 0);
-#ifdef __MSVC__
+#ifdef _WIN64
__ movq(rsp, Operand(rsp, num_arguments * kPointerSize));
#else
+ // All arguments passed in registers.
+ ASSERT(num_arguments <= 6);
__ pop(rsp);
#endif
}
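Replacing FUNCTION_ADDR with ExternalReference here and at the other call sites is what lets the old ASSERT(!Serializer::enabled()) go away: a raw C++ address baked into generated code cannot survive snapshot serialization, but a registered external reference can be re-resolved on deserialization. A rough sketch of that indirection (illustrative types, not V8's actual classes):

#include <cstdint>
#include <vector>

// Serializer-friendly handle: the snapshot records the index, and each
// process rebuilds the table with its own addresses.
struct ExternalReferenceTable {
  std::vector<void*> entries;
  std::uint32_t Register(void* address) {
    entries.push_back(address);
    return static_cast<std::uint32_t>(entries.size() - 1);
  }
  void* Resolve(std::uint32_t index) const { return entries[index]; }
};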
@@ -1297,5 +1288,12 @@ void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
}
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+ __ int3(); // Unused on x64.
+}
+
#undef __
+
+#endif // V8_NATIVE_REGEXP
+
}} // namespace v8::internal
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index a270bc186..ab9647704 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -31,6 +31,8 @@
namespace v8 {
namespace internal {
+#ifdef V8_NATIVE_REGEXP
+
class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
public:
RegExpMacroAssemblerX64(Mode mode, int registers_to_save);
@@ -113,6 +115,13 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
int* output,
bool at_start);
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+
private:
// Offsets from rbp of function parameters and stored registers.
static const int kFramePointer = 0;
@@ -181,23 +190,9 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// Check whether we are exceeding the stack limit on the backtrack stack.
void CheckStackLimit();
- // Called from RegExp if the stack-guard is triggered.
- // If the code object is relocated, the return address is fixed before
- // returning.
- static int CheckStackGuardState(Address* return_address,
- Code* re_code,
- Address re_frame);
-
// Generate a call to CheckStackGuardState.
void CallCheckStackGuardState();
- // Called from RegExp if the backtrack stack limit is hit.
- // Tries to expand the stack. Returns the new stack-pointer if
- // successful, and updates the stack_top address, or returns 0 if unable
- // to grow the stack.
- // This function must not trigger a garbage collection.
- static Address GrowStack(Address stack_pointer, Address* stack_top);
-
// The rbp-relative location of a regexp register.
Operand register_location(int register_index);
@@ -264,7 +259,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// by FrameAlign. The called function is not allowed to trigger a garbage
// collection, since that might move the code and invalidate the return
// address (unless this is somehow accounted for by the called function).
- inline void CallCFunction(Address function_address, int num_arguments);
+ inline void CallCFunction(ExternalReference function, int num_arguments);
MacroAssembler* masm_;
@@ -290,6 +285,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
Label stack_overflow_label_;
};
+#endif // V8_NATIVE_REGEXP
+
}} // namespace v8::internal
#endif // V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
diff --git a/deps/v8/src/x64/register-allocator-x64-inl.h b/deps/v8/src/x64/register-allocator-x64-inl.h
index 54729d6d9..d630b335a 100644
--- a/deps/v8/src/x64/register-allocator-x64-inl.h
+++ b/deps/v8/src/x64/register-allocator-x64-inl.h
@@ -51,18 +51,18 @@ int RegisterAllocator::ToNumber(Register reg) {
2, // rcx
3, // rdx
1, // rbx
- -1, // rsp
- -1, // rbp
- -1, // rsi
+ -1, // rsp Stack pointer.
+ -1, // rbp Frame pointer.
+ -1, // rsi Context.
4, // rdi
5, // r8
6, // r9
- -1, // r10
- 7, // r11
- 11, // r12
- 10, // r13
- 8, // r14
- 9 // r15
+ -1, // r10 Scratch register.
+ 9, // r11
+ 10, // r12
+ -1, // r13 Roots array. This is callee saved.
+ 7, // r14
+ 8 // r15
};
return kNumbers[reg.code()];
}
@@ -71,7 +71,7 @@ int RegisterAllocator::ToNumber(Register reg) {
Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] =
- { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 };
+ { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
return kRegisters[num];
}
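The renumbering above keeps ToNumber and ToRegister exact inverses while retiring r13 as the roots-array register. A standalone consistency check over the two tables (hardware register codes per the x64 encoding; not V8 code):

#include <cassert>

int main() {
  // kNumbers is indexed by register code, kCodes by allocator number,
  // mirroring the tables in the diff.
  const int kNumbers[16] = { 0, 2, 3, 1, -1, -1, -1, 4,
                             5, 6, -1, 9, 10, -1, 7, 8 };
  const int kCodes[11] = { 0, 3, 1, 2, 7, 8, 9, 14, 15, 11, 12 };
  for (int num = 0; num < 11; num++) {
    assert(kNumbers[kCodes[num]] == num);  // ToNumber(ToRegister(num)) == num
  }
  return 0;
}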
diff --git a/deps/v8/src/x64/register-allocator-x64.h b/deps/v8/src/x64/register-allocator-x64.h
index 86727962e..8d666d223 100644
--- a/deps/v8/src/x64/register-allocator-x64.h
+++ b/deps/v8/src/x64/register-allocator-x64.h
@@ -33,9 +33,7 @@ namespace internal {
class RegisterAllocatorConstants : public AllStatic {
public:
- // Register allocation is not yet implemented on x64, but C++
- // forbids 0-length arrays so we use 1 as the number of registers.
- static const int kNumRegisters = 12;
+ static const int kNumRegisters = 11;
static const int kInvalidRegister = -1;
};
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index 6b4d718f6..184c166bc 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -45,4 +45,9 @@
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
+// Call the generated regexp code directly. The entry function pointer should
+// expect seven int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+ entry(p0, p1, p2, p3, p4, p5, p6)
+
#endif // V8_X64_SIMULATOR_X64_H_
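Since x64 code runs natively (there is no simulator), the macro expands to a direct call. A hedged usage sketch; the typedef and parameter names are hypothetical, with the real layout defined by RegExpMacroAssemblerX64::GetCode:

typedef int (*RegExpEntry)(const void* input, int start_offset,
                           const char* input_start, const char* input_end,
                           int* output, int at_start, void* stack_base);

// With 'entry' of type RegExpEntry,
//   CALL_GENERATED_REGEXP_CODE(entry, s, 0, begin, end, out, 1, stack)
// expands to the plain indirect call
//   entry(s, 0, begin, end, out, 1, stack)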
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 091c826cc..fcddfc4e7 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -434,7 +434,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
holder_obj);
Label interceptor_failed;
- __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
@@ -612,7 +612,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
__ pop(receiver); // restore holder
__ LeaveInternalFrame();
- __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
Label invoke;
__ j(not_equal, &invoke);
@@ -755,9 +755,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case BOOLEAN_CHECK: {
Label fast;
// Check that the object is a boolean.
- __ Cmp(rdx, Factory::true_value());
+ __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
__ j(equal, &fast);
- __ Cmp(rdx, Factory::false_value());
+ __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
__ j(not_equal, &miss);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
@@ -1125,10 +1125,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ Cmp(rax, Factory::the_hole_value());
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
} else if (FLAG_debug_code) {
- __ Cmp(rax, Factory::the_hole_value());
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
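The Cmp-against-Factory-handle to CompareRoot rewrites in this file serve the same goal as the ExternalReference change: no heap addresses embedded in generated code. Given a dedicated register holding the root array base (r13, per the register-allocator change above), comparing against a root is a single indexed load, sketched here as plain C++ (not the macro-assembler implementation):

// CompareRoot(value, index) boils down to one indexed comparison against
// the root array, with no handle embedded in the instruction stream.
static inline bool RootEquals(const void* const* root_array, int index,
                              const void* value) {
  return value == root_array[index];
}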
@@ -1738,6 +1738,136 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
}
+// Specialized stub for constructing objects from functions which have only
+// simple assignments of the form this.x = ...; in their body.
+Object* ConstructStubCompiler::CompileConstructStub(
+ SharedFunctionInfo* shared) {
+ // ----------- S t a t e -------------
+ // -- rax : argc
+ // -- rdi : constructor
+ // -- rsp[0] : return address
+ // -- rsp[8] : last argument
+ // -----------------------------------
+ Label generic_stub_call;
+
+ // Use r8 for holding undefined which is used in several places below.
+ __ Move(r8, Factory::undefined_value());
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Check to see whether there are any break points in the function code. If
+ // there are, jump to the generic constructor stub, which calls the actual
+ // code for the function and thereby hits the break points.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kDebugInfoOffset));
+ __ cmpq(rbx, r8);
+ __ j(not_equal, &generic_stub_call);
+#endif
+
+ // Load the initial map and verify that it is in fact a map.
+ __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // This check catches both a NULL pointer and a Smi.
+ __ testq(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &generic_stub_call);
+ __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ j(not_equal, &generic_stub_call);
+
+#ifdef DEBUG
+ // Cannot construct functions this way.
+ // rdi: constructor
+ // rbx: initial map
+ __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
+ __ Assert(not_equal, "Function constructed by construct stub.");
+#endif
+
+ // Now allocate the JSObject in new space.
+ // rdi: constructor
+ // rbx: initial map
+ __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ shl(rcx, Immediate(kPointerSizeLog2));
+ // Make sure that the maximum heap object size will never cause us
+ // problems here.
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= JSObject::kMaxInstanceSize);
+ __ AllocateObjectInNewSpace(rcx, rdx, rcx, no_reg, &generic_stub_call, false);
+
+ // Allocated the JSObject, now initialize the fields and add the heap tag.
+ // rbx: initial map
+ // rdx: JSObject (untagged)
+ __ movq(Operand(rdx, JSObject::kMapOffset), rbx);
+ __ Move(rbx, Factory::empty_fixed_array());
+ __ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
+ __ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
+
+ // rax: argc
+ // rdx: JSObject (untagged)
+ // Load the address of the first in-object property into r9.
+ __ lea(r9, Operand(rdx, JSObject::kHeaderSize));
+ // Calculate the location of the first argument. The stack contains only the
+ // return address on top of the argc arguments.
+ __ lea(rcx, Operand(rsp, rax, times_pointer_size, 0));
+
+ // rax: argc
+ // rcx: first argument
+ // rdx: JSObject (untagged)
+ // r8: undefined
+ // r9: first in-object property of the JSObject
+ // Fill the initialized properties with a constant value or a passed argument
+ // depending on the this.x = ...; assignment in the function.
+ for (int i = 0; i < shared->this_property_assignments_count(); i++) {
+ if (shared->IsThisPropertyAssignmentArgument(i)) {
+ Label not_passed;
+ // Set the property to undefined.
+ __ movq(Operand(r9, i * kPointerSize), r8);
+ // Check if the argument assigned to the property is actually passed.
+ int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ cmpq(rax, Immediate(arg_number));
+ __ j(below_equal, &not_passed);
+ // Argument passed - find it on the stack.
+ __ movq(rbx, Operand(rcx, arg_number * -kPointerSize));
+ __ movq(Operand(r9, i * kPointerSize), rbx);
+ __ bind(&not_passed);
+ } else {
+ // Set the property to the constant value.
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ __ Move(Operand(r9, i * kPointerSize), constant);
+ }
+ }
+
+ // Fill the unused in-object property fields with undefined.
+ for (int i = shared->this_property_assignments_count();
+ i < shared->CalculateInObjectProperties();
+ i++) {
+ __ movq(Operand(r9, i * kPointerSize), r8);
+ }
+
+ // rax: argc
+ // rdx: JSObject (untagged)
+ // Move argc to rbx and the JSObject to return to rax and tag it.
+ __ movq(rbx, rax);
+ __ movq(rax, rdx);
+ __ or_(rax, Immediate(kHeapObjectTag));
+
+ // rax: JSObject
+ // rbx: argc
+ // Remove caller arguments and receiver from the stack and return.
+ __ pop(rcx);
+ __ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
+ __ push(rcx);
+ __ IncrementCounter(&Counters::constructed_objects, 1);
+ __ IncrementCounter(&Counters::constructed_objects_stub, 1);
+ __ ret(0);
+
+ // Jump to the generic stub in case the specialized code cannot handle the
+ // construction.
+ __ bind(&generic_stub_call);
+ Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+ Handle<Code> generic_construct_stub(code);
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode();
+}
+
+
#undef __
} } // namespace v8::internal
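The property-filling loop in CompileConstructStub addresses arguments relative to rcx, which was set to point at the first argument. The same arithmetic, restated as a sketch under the stub's assumptions (return address on top of the stack, argc arguments above it):

#include <cstdint>

std::uintptr_t ArgumentAddress(std::uintptr_t rsp, int argc, int arg_number) {
  const int kPointerSize = 8;  // x64
  // rcx = rsp + argc * kPointerSize points at the first argument, so
  // argument n sits at rcx - n * kPointerSize; this mirrors
  // movq(rbx, Operand(rcx, arg_number * -kPointerSize)) above.
  std::uintptr_t rcx = rsp + static_cast<std::uintptr_t>(argc) * kPointerSize;
  return rcx - static_cast<std::uintptr_t>(arg_number) * kPointerSize;
}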
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index fb911a187..c2866a714 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -205,6 +205,14 @@ void VirtualFrame::EmitPush(Handle<Object> value) {
}
+void VirtualFrame::EmitPush(Heap::RootListIndex index) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ PushRoot(index);
+}
+
+
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index 577a18bd4..006148d7d 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -375,6 +375,7 @@ class VirtualFrame : public ZoneObject {
// corresponding push instruction.
void EmitPush(Register reg);
void EmitPush(const Operand& operand);
+ void EmitPush(Heap::RootListIndex index);
void EmitPush(Immediate immediate);
// Uses kScratchRegister, emits appropriate relocation info.
void EmitPush(Handle<Object> value);
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 68aabb516..67e9d8a45 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -50,6 +50,10 @@ test-api/RegExpInterruption: SKIP
test-api/OutOfMemory: SKIP
test-api/OutOfMemoryNested: SKIP
+# BUG(432): Fail on ARM hardware.
+test-regexp/MacroAssemblerNativeSimple: PASS || FAIL
+test-regexp/MacroAssemblerNativeSimpleUC16: PASS || FAIL
+
# BUG(355): Test crashes on ARM.
test-log/ProfLazyMode: SKIP
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index c8f855b11..80f91d39b 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -2673,40 +2673,67 @@ THREADED_TEST(SimpleExtensions) {
}
-static const char* kEvalExtensionSource =
- "function UseEval() {"
+static const char* kEvalExtensionSource1 =
+ "function UseEval1() {"
" var x = 42;"
" return eval('x');"
"}";
+static const char* kEvalExtensionSource2 =
+ "(function() {"
+ " var x = 42;"
+ " function e() {"
+ " return eval('x');"
+ " }"
+ " this.UseEval2 = e;"
+ "})()";
+
+
THREADED_TEST(UseEvalFromExtension) {
v8::HandleScope handle_scope;
- v8::RegisterExtension(new Extension("evaltest", kEvalExtensionSource));
- const char* extension_names[] = { "evaltest" };
- v8::ExtensionConfiguration extensions(1, extension_names);
+ v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1));
+ v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
+ const char* extension_names[] = { "evaltest1", "evaltest2" };
+ v8::ExtensionConfiguration extensions(2, extension_names);
v8::Handle<Context> context = Context::New(&extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("UseEval()"))->Run();
+ v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
+ CHECK_EQ(result, v8::Integer::New(42));
+ result = Script::Compile(v8_str("UseEval2()"))->Run();
CHECK_EQ(result, v8::Integer::New(42));
}
-static const char* kWithExtensionSource =
- "function UseWith() {"
+static const char* kWithExtensionSource1 =
+ "function UseWith1() {"
" var x = 42;"
" with({x:87}) { return x; }"
"}";
+
+static const char* kWithExtensionSource2 =
+ "(function() {"
+ " var x = 42;"
+ " function e() {"
+ " with ({x:87}) { return x; }"
+ " }"
+ " this.UseWith2 = e;"
+ "})()";
+
+
THREADED_TEST(UseWithFromExtension) {
v8::HandleScope handle_scope;
- v8::RegisterExtension(new Extension("withtest", kWithExtensionSource));
- const char* extension_names[] = { "withtest" };
- v8::ExtensionConfiguration extensions(1, extension_names);
+ v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1));
+ v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
+ const char* extension_names[] = { "withtest1", "withtest2" };
+ v8::ExtensionConfiguration extensions(2, extension_names);
v8::Handle<Context> context = Context::New(&extensions);
Context::Scope lock(context);
- v8::Handle<Value> result = Script::Compile(v8_str("UseWith()"))->Run();
+ v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
+ CHECK_EQ(result, v8::Integer::New(87));
+ result = Script::Compile(v8_str("UseWith2()"))->Run();
CHECK_EQ(result, v8::Integer::New(87));
}
@@ -7815,6 +7842,7 @@ THREADED_TEST(PixelArray) {
free(pixel_data);
}
+
THREADED_TEST(ScriptContextDependence) {
v8::HandleScope scope;
LocalContext c1;
@@ -7830,6 +7858,7 @@ THREADED_TEST(ScriptContextDependence) {
CHECK_EQ(indep->Run()->Int32Value(), 101);
}
+
THREADED_TEST(StackTrace) {
v8::HandleScope scope;
LocalContext context;
@@ -7842,3 +7871,11 @@ THREADED_TEST(StackTrace) {
v8::String::Utf8Value stack(try_catch.StackTrace());
CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
}
+
+
+// Test that idle notification can be handled when V8 has not yet been
+// set up.
+THREADED_TEST(IdleNotification) {
+ for (int i = 0; i < 100; i++) v8::V8::IdleNotification(true);
+ for (int i = 0; i < 100; i++) v8::V8::IdleNotification(false);
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index fe1621c2a..34f16395e 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -185,7 +185,7 @@ TEST(3) {
Label L, C;
__ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | sp.bit() | lr.bit());
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
__ sub(fp, ip, Operand(4));
__ mov(r4, Operand(r0));
__ ldr(r0, MemOperand(r4, OFFSET_OF(T, i)));
@@ -199,7 +199,7 @@ TEST(3) {
__ add(r0, r2, Operand(r0));
__ mov(r2, Operand(r2, ASR, 3));
__ strh(r2, MemOperand(r4, OFFSET_OF(T, s)));
- __ ldm(ia, sp, r4.bit() | fp.bit() | sp.bit() | pc.bit());
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
CodeDesc desc;
assm.GetCode(&desc);
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 37dbdd7c6..eb32b6539 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -179,7 +179,7 @@ TEST(HeapObjects) {
TEST(Tagging) {
InitializeVM();
int request = 24;
- ASSERT_EQ(request, OBJECT_SIZE_ALIGN(request));
+ CHECK_EQ(request, static_cast<int>(OBJECT_SIZE_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi());
CHECK(Failure::RetryAfterGC(request, NEW_SPACE)->IsFailure());
CHECK_EQ(request, Failure::RetryAfterGC(request, NEW_SPACE)->requested());
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 1ef0a93d9..43df6ba7a 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -336,8 +336,10 @@ static void CFuncDoTrace() {
#elif defined _MSC_VER && defined V8_TARGET_ARCH_IA32
__asm mov [fp], ebp // NOLINT
#elif defined _MSC_VER && defined V8_TARGET_ARCH_X64
- // FIXME: I haven't really tried to compile it.
- __asm movq [fp], rbp // NOLINT
+ // TODO(X64): __asm extension is not supported by the Microsoft Visual C++
+ // 64-bit compiler.
+ fp = 0;
+ UNIMPLEMENTED();
#endif
DoTrace(fp);
}
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 89c7868d8..81c220520 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -40,6 +40,7 @@
#include "regexp-macro-assembler-irregexp.h"
#ifdef V8_NATIVE_REGEXP
#ifdef V8_TARGET_ARCH_ARM
+#include "arm/macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#ifdef V8_TARGET_ARCH_X64
@@ -605,11 +606,12 @@ TEST(DispatchTableConstruction) {
#ifdef V8_NATIVE_REGEXP
-#ifdef V8_TARGET_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
typedef RegExpMacroAssemblerIA32 ArchRegExpMacroAssembler;
-#endif
-#ifdef V8_TARGET_ARCH_X64
+#elif V8_TARGET_ARCH_X64
typedef RegExpMacroAssemblerX64 ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_ARM
+typedef RegExpMacroAssemblerARM ArchRegExpMacroAssembler;
#endif
class ContextInitializer {
@@ -845,7 +847,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
v8::V8::Initialize();
ContextInitializer initializer;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 3);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -870,7 +872,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- int output[3];
+ int output[4];
NativeRegExpMacroAssembler::Result result =
Execute(*code,
*input,
@@ -884,6 +886,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
CHECK_EQ(0, output[0]);
CHECK_EQ(2, output[1]);
CHECK_EQ(6, output[2]);
+ CHECK_EQ(-1, output[3]);
}
@@ -891,7 +894,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
v8::V8::Initialize();
ContextInitializer initializer;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 3);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::UC16, 4);
m.WriteCurrentPositionToRegister(0, 0);
m.AdvanceCurrentPosition(2);
@@ -918,7 +921,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- int output[3];
+ int output[4];
NativeRegExpMacroAssembler::Result result =
Execute(*code,
*input,
@@ -932,6 +935,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
CHECK_EQ(0, output[0]);
CHECK_EQ(2, output[1]);
CHECK_EQ(6, output[2]);
+ CHECK_EQ(-1, output[3]);
}
@@ -1055,12 +1059,12 @@ TEST(MacroAssemblerNativeRegisters) {
v8::V8::Initialize();
ContextInitializer initializer;
- ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 5);
+ ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 6);
uc16 foo_chars[3] = {'f', 'o', 'o'};
Vector<const uc16> foo(foo_chars, 3);
- enum registers { out1, out2, out3, out4, out5, sp, loop_cnt };
+ enum registers { out1, out2, out3, out4, out5, out6, sp, loop_cnt };
Label fail;
Label backtrack;
m.WriteCurrentPositionToRegister(out1, 0); // Output: [0]
@@ -1114,7 +1118,7 @@ TEST(MacroAssemblerNativeRegisters) {
m.GoTo(&loop3);
m.Bind(&exit_loop3);
m.PopCurrentPosition();
- m.WriteCurrentPositionToRegister(out5, 0); // [0,3,6,9,9]
+ m.WriteCurrentPositionToRegister(out5, 0); // [0,3,6,9,9,-1]
m.Succeed();
@@ -1132,15 +1136,15 @@ TEST(MacroAssemblerNativeRegisters) {
Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
- int output[5];
+ int output[6];
NativeRegExpMacroAssembler::Result result =
Execute(*code,
- *input,
- 0,
- start_adr,
- start_adr + input->length(),
- output,
- true);
+ *input,
+ 0,
+ start_adr,
+ start_adr + input->length(),
+ output,
+ true);
CHECK_EQ(NativeRegExpMacroAssembler::SUCCESS, result);
CHECK_EQ(0, output[0]);
@@ -1148,6 +1152,7 @@ TEST(MacroAssemblerNativeRegisters) {
CHECK_EQ(6, output[2]);
CHECK_EQ(9, output[3]);
CHECK_EQ(9, output[4]);
+ CHECK_EQ(-1, output[5]);
}
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 323be1d77..552f49df1 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -193,3 +193,63 @@ TEST(TerminateMultipleV8Threads) {
delete semaphore;
semaphore = NULL;
}
+
+
+int call_count = 0;
+
+
+v8::Handle<v8::Value> TerminateOrReturnObject(const v8::Arguments& args) {
+ if (++call_count == 10) {
+ v8::V8::TerminateExecution();
+ return v8::Undefined();
+ }
+ v8::Local<v8::Object> result = v8::Object::New();
+ result->Set(v8::String::New("x"), v8::Integer::New(42));
+ return result;
+}
+
+
+v8::Handle<v8::Value> LoopGetProperty(const v8::Arguments& args) {
+ v8::TryCatch try_catch;
+ v8::Script::Compile(v8::String::New("function f() {"
+ " try {"
+ " while(true) {"
+ " terminate_or_return_object().x;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " fail();"
+ " }"
+ "}"
+ "f()"))->Run();
+ CHECK(try_catch.HasCaught());
+ CHECK(try_catch.Exception()->IsNull());
+ CHECK(try_catch.Message().IsEmpty());
+ CHECK(!try_catch.CanContinue());
+ return v8::Undefined();
+}
+
+
+// Test that we correctly handle termination exceptions if they are
+// triggered by the creation of error objects in connection with ICs.
+TEST(TerminateLoadICException) {
+ v8::HandleScope scope;
+ v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+ global->Set(v8::String::New("terminate_or_return_object"),
+ v8::FunctionTemplate::New(TerminateOrReturnObject));
+ global->Set(v8::String::New("fail"), v8::FunctionTemplate::New(Fail));
+ global->Set(v8::String::New("loop"),
+ v8::FunctionTemplate::New(LoopGetProperty));
+
+ v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ v8::Context::Scope context_scope(context);
+ // Run a loop that will be infinite if thread termination does not work.
+ v8::Handle<v8::String> source =
+ v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+ call_count = 0;
+ v8::Script::Compile(source)->Run();
+ // Test that we can run the code again after thread termination.
+ call_count = 0;
+ v8::Script::Compile(source)->Run();
+ context.Dispose();
+}
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 23b3254c2..ffcaf8abc 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -158,7 +158,7 @@ TEST(Utils1) {
// int8_t and intptr_t signed integers.
CHECK_EQ(-2, -8 >> 2);
CHECK_EQ(-2, static_cast<int8_t>(-8) >> 2);
- CHECK_EQ(-2, static_cast<intptr_t>(-8) >> 2);
+ CHECK_EQ(-2, static_cast<int>(static_cast<intptr_t>(-8) >> 2));
}
diff --git a/deps/v8/test/mjsunit/debug-stepin-constructor.js b/deps/v8/test/mjsunit/debug-stepin-constructor.js
index 6dbe5d192..6ee334735 100644
--- a/deps/v8/test/mjsunit/debug-stepin-constructor.js
+++ b/deps/v8/test/mjsunit/debug-stepin-constructor.js
@@ -59,6 +59,10 @@ function f() {
break_break_point_hit_count = 0;
f();
assertEquals(5, break_break_point_hit_count);
+f();
+assertEquals(10, break_break_point_hit_count);
+f();
+assertEquals(15, break_break_point_hit_count);
// Test step into constructor with builtin constructor.
function g() {
diff --git a/deps/v8/test/mjsunit/simple-constructor.js b/deps/v8/test/mjsunit/simple-constructor.js
index b26d6519b..e9ae92100 100755
--- a/deps/v8/test/mjsunit/simple-constructor.js
+++ b/deps/v8/test/mjsunit/simple-constructor.js
@@ -53,9 +53,11 @@ function f4(x) {
}
o1_1 = new f1();
+assertEquals(1, o1_1.x, "1");
o1_2 = new f1();
-assertArrayEquals(["x"], props(o1_1));
-assertArrayEquals(["x"], props(o1_2));
+assertEquals(1, o1_1.x, "2");
+assertArrayEquals(["x"], props(o1_1), "3");
+assertArrayEquals(["x"], props(o1_2), "4");
o2_1 = new f2(0);
o2_2 = new f2(0);
@@ -76,3 +78,63 @@ o4_1_1 = new f4(1);
o4_1_2 = new f4(1);
assertArrayEquals(["x", "y"], props(o4_1_1));
assertArrayEquals(["x", "y"], props(o4_1_2));
+
+function f5(x, y) {
+ this.x = x;
+ this.y = y;
+}
+
+function f6(x, y) {
+ this.y = y;
+ this.x = x;
+}
+
+function f7(x, y, z) {
+ this.x = x;
+ this.y = y;
+}
+
+function testArgs(fun) {
+ obj = new fun();
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals(void 0, obj.x);
+ assertEquals(void 0, obj.y);
+
+ obj = new fun("x");
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals("x", obj.x);
+ assertEquals(void 0, obj.y);
+
+ obj = new fun("x", "y");
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals("x", obj.x);
+ assertEquals("y", obj.y);
+
+ obj = new fun("x", "y", "z");
+ assertArrayEquals(["x", "y"], props(obj));
+ assertEquals("x", obj.x);
+ assertEquals("y", obj.y);
+}
+
+for (var i = 0; i < 10; i++) {
+ testArgs(f5);
+ testArgs(f6);
+ testArgs(f7);
+}
+
+function g(){
+ this.x=1
+}
+
+o = new g();
+assertEquals(1, o.x);
+o = new g();
+assertEquals(1, o.x);
+g.prototype = {y:2}
+o = new g();
+assertEquals(1, o.x);
+assertEquals(2, o.y);
+o = new g();
+assertEquals(1, o.x);
+assertEquals(2, o.y);
+
diff --git a/deps/v8/test/mjsunit/transcendentals.js b/deps/v8/test/mjsunit/transcendentals.js
new file mode 100644
index 000000000..78e6c4807
--- /dev/null
+++ b/deps/v8/test/mjsunit/transcendentals.js
@@ -0,0 +1,49 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Two fp numbers that have the same hash value (see TranscendentalCache
+// in heap.h).
+var x = 0x123456789ABCD / 0x2000000000000;
+var y = 0x1134567899BCD / 0x2000000000000;
+
+assertTrue(Math.sin(x) != Math.sin(y));
+
+assertTrue(Math.cos(x) != Math.cos(y));
+
+assertTrue(Math.tan(x) != Math.tan(y));
+
+assertTrue(Math.log(x) != Math.log(y));
+
+assertTrue(Math.asin(x) != Math.asin(y));
+
+assertTrue(Math.acos(x) != Math.acos(y));
+
+assertTrue(Math.atan(x) != Math.atan(y));
+
+assertTrue(Math.exp(x) != Math.exp(y));
+
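The new test above picks x and y to collide in the TranscendentalCache hash, so a cache that compared only hashes would return one input's result for the other. A sketch of the cache shape being tested, assuming a direct-mapped table keyed by a hash of the input's bit pattern (illustrative, not V8's implementation):

#include <cmath>
#include <cstdint>
#include <cstring>

struct Entry { double input; double output; bool valid; };
static Entry cache[512];

double CachedSin(double x) {
  std::uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  std::uint64_t slot = (bits ^ (bits >> 32)) & 511;  // hash of the bits
  Entry& e = cache[slot];
  // Comparing the full input, not just the slot, is what keeps colliding
  // inputs like the test's x and y from sharing a result.
  if (e.valid && e.input == x) return e.output;
  e.input = x;
  e.output = std::sin(x);
  e.valid = true;
  return e.output;
}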
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index c57419879..1222ea9c9 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -66,6 +66,7 @@
'DEBUG',
'_DEBUG',
'ENABLE_DISASSEMBLER',
+ 'V8_ENABLE_CHECKS'
],
'msvs_settings': {
'VCCLCompilerTool': {
@@ -432,18 +433,14 @@
'../../src/ia32/jump-target-ia32.cc',
'../../src/ia32/macro-assembler-ia32.cc',
'../../src/ia32/macro-assembler-ia32.h',
+ '../../src/ia32/regexp-macro-assembler-ia32.cc',
+ '../../src/ia32/regexp-macro-assembler-ia32.h',
'../../src/ia32/register-allocator-ia32.cc',
'../../src/ia32/stub-cache-ia32.cc',
'../../src/ia32/virtual-frame-ia32.cc',
'../../src/ia32/virtual-frame-ia32.h',
],
}],
- ['target_arch=="ia32" and v8_regexp=="native"', {
- 'sources': [
- '../../src/ia32/regexp-macro-assembler-ia32.cc',
- '../../src/ia32/regexp-macro-assembler-ia32.h',
- ],
- }],
['target_arch=="x64"', {
'include_dirs+': [
'../../src/x64',
@@ -465,18 +462,14 @@
'../../src/x64/jump-target-x64.cc',
'../../src/x64/macro-assembler-x64.cc',
'../../src/x64/macro-assembler-x64.h',
+ '../../src/x64/regexp-macro-assembler-x64.cc',
+ '../../src/x64/regexp-macro-assembler-x64.h',
'../../src/x64/register-allocator-x64.cc',
'../../src/x64/stub-cache-x64.cc',
'../../src/x64/virtual-frame-x64.cc',
'../../src/x64/virtual-frame-x64.h',
],
}],
- ['target_arch=="x64" and v8_regexp=="native"', {
- 'sources': [
- '../../src/x64/regexp-macro-assembler-x64.cc',
- '../../src/x64/regexp-macro-assembler-x64.h',
- ],
- }],
['OS=="linux"', {
'link_settings': {
'libraries': [
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 52fe35c9c..cae39e897 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -45,6 +45,13 @@ def ToCArray(lines):
return ", ".join(result)
+def RemoveCommentsAndTrailingWhitespace(lines):
+ lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
+ lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
+ lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
+ return lines
+
+
def CompressScript(lines, do_jsmin):
# If we're not expecting this code to be user visible, we can run it through
# a more aggressive minifier.
@@ -55,9 +62,7 @@ def CompressScript(lines, do_jsmin):
# people print the source code using Function.prototype.toString().
# Note that we could easily compress the scripts mode but don't
# since we want it to remain readable.
- lines = re.sub('//.*\n', '\n', lines) # end-of-line comments
- lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
- lines = re.sub('\s+\n+', '\n', lines) # trailing whitespace
+ lines = RemoveCommentsAndTrailingWhitespace(lines)
return lines
@@ -96,6 +101,22 @@ def ParseValue(string):
return string
+EVAL_PATTERN = re.compile(r'\beval\s*\(')
+WITH_PATTERN = re.compile(r'\bwith\s*\(')
+
+
+def Validate(lines, file):
+ lines = RemoveCommentsAndTrailingWhitespace(lines)
+  # Because of simplified context setup, eval and with are not
+  # allowed in the natives files.
+  eval_match = EVAL_PATTERN.search(lines)
+  if eval_match:
+    raise Exception("Eval disallowed in natives: %s" % file)
+  with_match = WITH_PATTERN.search(lines)
+  if with_match:
+    raise Exception("With statements disallowed in natives: %s" % file)
+
+
def ExpandConstants(lines, constants):
for key, value in constants.items():
lines = lines.replace(key, str(value))
@@ -155,9 +176,9 @@ class PythonMacro:
args.append(mapping[arg])
return str(self.fun(*args))
-CONST_PATTERN = re.compile('^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
-MACRO_PATTERN = re.compile('^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
-PYTHON_MACRO_PATTERN = re.compile('^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
+CONST_PATTERN = re.compile(r'^const\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
+MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
+PYTHON_MACRO_PATTERN = re.compile(r'^python\s+macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
def ReadMacros(lines):
constants = { }
@@ -275,15 +296,17 @@ def JS2C(source, target, env):
# Build source code lines
source_lines = [ ]
source_lines_empty = []
- for s in modules:
- delay = str(s).endswith('-delay.js')
- lines = ReadFile(str(s))
+ for module in modules:
+ filename = str(module)
+ delay = filename.endswith('-delay.js')
+ lines = ReadFile(filename)
do_jsmin = lines.find('// jsminify this file, js2c: jsmin') != -1
lines = ExpandConstants(lines, consts)
lines = ExpandMacros(lines, macros)
+ Validate(lines, filename)
lines = CompressScript(lines, do_jsmin)
data = ToCArray(lines)
- id = (os.path.split(str(s))[1])[:-3]
+ id = (os.path.split(filename)[1])[:-3]
if delay: id = id[:-6]
if delay:
delay_ids.append((id, len(lines)))
@@ -291,7 +314,7 @@ def JS2C(source, target, env):
ids.append((id, len(lines)))
source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data })
source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 })
-
+
# Build delay support functions
get_index_cases = [ ]
get_script_source_cases = [ ]
diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj
index 45e63612e..f9241f99e 100644
--- a/deps/v8/tools/v8.xcodeproj/project.pbxproj
+++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj
@@ -1394,6 +1394,7 @@
GCC_PREPROCESSOR_DEFINITIONS = (
"$(GCC_PREPROCESSOR_DEFINITIONS)",
DEBUG,
+ V8_ENABLE_CHECKS,
);
GCC_SYMBOLS_PRIVATE_EXTERN = YES;
GCC_TREAT_WARNINGS_AS_ERRORS = YES;
@@ -1457,6 +1458,7 @@
V8_TARGET_ARCH_IA32,
V8_NATIVE_REGEXP,
DEBUG,
+ V8_ENABLE_CHECKS,
);
HEADER_SEARCH_PATHS = ../src;
PRODUCT_NAME = v8_shell;
diff --git a/deps/v8/tools/visual_studio/debug.vsprops b/deps/v8/tools/visual_studio/debug.vsprops
index 0abf92447..5e3555a9d 100644
--- a/deps/v8/tools/visual_studio/debug.vsprops
+++ b/deps/v8/tools/visual_studio/debug.vsprops
@@ -7,7 +7,7 @@
<Tool
Name="VCCLCompilerTool"
Optimization="0"
- PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER"
+ PreprocessorDefinitions="DEBUG;_DEBUG;ENABLE_DISASSEMBLER;V8_ENABLE_CHECKS"
RuntimeLibrary="1"
/>
<Tool