author     Ryan <ry@tinyclouds.org>    2009-07-20 13:18:42 +0200
committer  Ryan <ry@tinyclouds.org>    2009-07-20 13:18:42 +0200
commit     88e9a5f122822b96e0ccfbba2083fe6ef43f3fdc (patch)
tree       3cad417de864555fd27d573af866f2a1b3c352ad
parent     f4dfbe37a3f1fef2c91068958dfe1888ba100332 (diff)
Upgrade V8 to 1.2.14
153 files changed, 11061 insertions(+), 4631 deletions(-)
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 13061120d..83ebc0253 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,53 @@ +2009-07-13: Version 1.2.14 + + Added separate paged heap space for global property cells and + avoid updating the write barrier when storing into them. + + Improved peep-hole optimization on ARM platforms by not emitting + unnecessary debug information. + + Re-enabled ICs for loads and calls that skip a global object + during lookup through the prototype chain. + + Allowed access through global proxies to use ICs. + + Fixed issue 401. + + +2009-07-09: Version 1.2.13 + + Fixed issue 397, issue 398, and issue 399. + + Added support for breakpoint groups. + + Fixed bugs introduced with the new global object representation. + + Fixed a few bugs in the ARM code generator. + + +2009-07-06: Version 1.2.12 + + Added stack traces collection to Error objects accessible through + the e.stack property. + + Changed RegExp parser to use a recursive data structure instead of + stack-based recursion. + + Optimized Date object construction and string concatenation. + + Improved performance of div, mod, and mul on ARM platforms. + + +2009-07-02: Version 1.2.11 + + Improved performance on IA-32 and ARM. + + Fixed profiler sampler implementation on Mac OS X. + + Changed the representation of global objects to improve + performance of adding a lot of new properties. + + 2009-06-29: Version 1.2.10 Improved debugger support. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 0baf71b5d..78b050d73 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -95,7 +95,12 @@ ANDROID_LINKFLAGS = ['-nostdlib', LIBRARY_FLAGS = { 'all': { 'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'], - 'CPPPATH': [join(root_dir, 'src')] + 'CPPPATH': [join(root_dir, 'src')], + 'regexp:native': { + 'arch:ia32' : { + 'CPPDEFINES': ['V8_NATIVE_REGEXP'] + } + } }, 'gcc': { 'all': { @@ -167,6 +172,7 @@ LIBRARY_FLAGS = { 'CPPDEFINES': ['V8_TARGET_ARCH_ARM'] }, 'arch:x64': { + 'CCFLAGS': ['-fno-strict-aliasing'], 'CPPDEFINES': ['V8_TARGET_ARCH_X64'] }, 'prof:oprofile': { @@ -546,6 +552,11 @@ SIMPLE_OPTIONS = { 'default': ARCH_GUESS, 'help': 'the architecture to build for (' + ARCH_GUESS + ')' }, + 'regexp': { + 'values': ['native', 'interpreted'], + 'default': 'native', + 'help': 'Whether to use native or interpreted regexp implementation' + }, 'snapshot': { 'values': ['on', 'off', 'nobuild'], 'default': 'off', @@ -677,6 +688,8 @@ def VerifyOptions(env): return False if not IsLegal(env, 'sample', ["shell", "process"]): return False + if not IsLegal(env, 'regexp', ["native", "interpreted"]): + return False if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on': Abort("Profiling on windows only supported for static library.") if env['prof'] == 'oprofile' and env['os'] != 'linux': diff --git a/deps/v8/benchmarks/README.txt b/deps/v8/benchmarks/README.txt index 561e88b39..eb759cc92 100644 --- a/deps/v8/benchmarks/README.txt +++ b/deps/v8/benchmarks/README.txt @@ -57,4 +57,7 @@ of the benchmark. Changes from Version 4 to Version 5 =================================== -Removed duplicate line in random seed code. +Removed duplicate line in random seed code, and changed the name of +the Object.prototype.inherits function in the DeltaBlue benchmark to +inheritsFrom to avoid name clashes when running in Chromium with +extensions enabled. 
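Editorial note: the SConstruct hunk above adds a regexp build option with values native and interpreted (default native); on ia32 the native setting defines V8_NATIVE_REGEXP. Since SCons simple options are passed as name=value pairs on the command line, selecting the interpreted engine would presumably be invoked as "scons regexp=interpreted" — the invocation shape is an assumption, while the option name, values, and default are taken from the diff itself.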
diff --git a/deps/v8/benchmarks/deltablue.js b/deps/v8/benchmarks/deltablue.js index 253046f80..7e25d2e13 100644 --- a/deps/v8/benchmarks/deltablue.js +++ b/deps/v8/benchmarks/deltablue.js @@ -46,7 +46,7 @@ var DeltaBlue = new BenchmarkSuite('DeltaBlue', 71104, [ /* --- O b j e c t M o d e l --- */ -Object.prototype.inherits = function (shuper) { +Object.prototype.inheritsFrom = function (shuper) { function Inheriter() { } Inheriter.prototype = shuper.prototype; this.prototype = new Inheriter(); @@ -216,7 +216,7 @@ function UnaryConstraint(v, strength) { this.addConstraint(); } -UnaryConstraint.inherits(Constraint); +UnaryConstraint.inheritsFrom(Constraint); /** * Adds this constraint to the constraint graph @@ -294,7 +294,7 @@ function StayConstraint(v, str) { StayConstraint.superConstructor.call(this, v, str); } -StayConstraint.inherits(UnaryConstraint); +StayConstraint.inheritsFrom(UnaryConstraint); StayConstraint.prototype.execute = function () { // Stay constraints do nothing @@ -312,7 +312,7 @@ function EditConstraint(v, str) { EditConstraint.superConstructor.call(this, v, str); } -EditConstraint.inherits(UnaryConstraint); +EditConstraint.inheritsFrom(UnaryConstraint); /** * Edits indicate that a variable is to be changed by imperative code. @@ -346,7 +346,7 @@ function BinaryConstraint(var1, var2, strength) { this.addConstraint(); } -BinaryConstraint.inherits(Constraint); +BinaryConstraint.inheritsFrom(Constraint); /** * Decides if this constratint can be satisfied and which way it @@ -459,7 +459,7 @@ function ScaleConstraint(src, scale, offset, dest, strength) { ScaleConstraint.superConstructor.call(this, src, dest, strength); } -ScaleConstraint.inherits(BinaryConstraint); +ScaleConstraint.inheritsFrom(BinaryConstraint); /** * Adds this constraint to the constraint graph. @@ -515,7 +515,7 @@ function EqualityConstraint(var1, var2, strength) { EqualityConstraint.superConstructor.call(this, var1, var2, strength); } -EqualityConstraint.inherits(BinaryConstraint); +EqualityConstraint.inheritsFrom(BinaryConstraint); /** * Enforce this constraint. Assume that it is satisfied. diff --git a/deps/v8/benchmarks/revisions.html b/deps/v8/benchmarks/revisions.html index b86c876fe..99d7be42b 100644 --- a/deps/v8/benchmarks/revisions.html +++ b/deps/v8/benchmarks/revisions.html @@ -22,7 +22,10 @@ the benchmark suite. <div class="subtitle"><h3>Version 5 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v5/run.html">link</a>)</h3></div> -<p>Removed a duplicate line in the base random seed code. +<p>Removed duplicate line in random seed code, and changed the name of +the Object.prototype.inherits function in the DeltaBlue benchmark to +inheritsFrom to avoid name clashes when running in Chromium with +extensions enabled. 
</p> <div class="subtitle"><h3>Version 4 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v4/run.html">link</a>)</h3></div> diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 85b13ec9a..f1057a812 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -244,9 +244,10 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom( // Implementations of ToLocal -#define MAKE_TO_LOCAL(Name, From, To) \ +#define MAKE_TO_LOCAL(Name, From, To) \ Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \ - return Local<To>(reinterpret_cast<To*>(obj.location())); \ + ASSERT(!obj->IsTheHole()); \ + return Local<To>(reinterpret_cast<To*>(obj.location())); \ } MAKE_TO_LOCAL(ToLocal, Context, Context) diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js index 2981eec59..6451e6260 100644 --- a/deps/v8/src/apinatives.js +++ b/deps/v8/src/apinatives.js @@ -51,6 +51,7 @@ function Instantiate(data, name) { var Constructor = %GetTemplateField(data, kApiConstructorOffset); var result = Constructor ? new (Instantiate(Constructor))() : {}; ConfigureTemplateInstance(result, data); + result = %ToFastProperties(result); return result; default: throw 'Unknown API tag <' + tag + '>'; diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 6ec8f460b..3ed99f9e1 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -491,6 +491,20 @@ static bool fits_shifter(uint32_t imm32, } +// We have to use the temporary register for things that can be relocated even +// if they can be encoded in the ARM's 12 bits of immediate-offset instruction +// space. There is no guarantee that the relocated location can be similarly +// encoded. +static bool MustUseIp(RelocInfo::Mode rmode) { + if (rmode == RelocInfo::EXTERNAL_REFERENCE) { + return Serializer::enabled(); + } else if (rmode == RelocInfo::NONE) { + return false; + } + return true; +} + + void Assembler::addrmod1(Instr instr, Register rn, Register rd, @@ -501,8 +515,7 @@ void Assembler::addrmod1(Instr instr, // immediate uint32_t rotate_imm; uint32_t immed_8; - if ((x.rmode_ != RelocInfo::NONE && - x.rmode_ != RelocInfo::EXTERNAL_REFERENCE) || + if (MustUseIp(x.rmode_) || !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { // The immediate operand cannot be encoded as a shifter operand, so load // it first to register ip and change the original instruction to use ip. 
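Editorial note: the MustUseIp helper added above exists because an ARM data-processing immediate must be expressible as an 8-bit value rotated right by an even amount (0, 2, ..., 30); a relocatable value that happens to fit before relocation may not fit afterwards, so such operands are always loaded via the ip register. A minimal standalone sketch of that encodability test — an illustration of the rule fits_shifter applies in the simple case, not V8's code:

    #include <cstdint>
    #include <cstdio>

    // An ARM shifter-operand immediate is encodable iff it equals an 8-bit
    // value rotated right by an even amount. Rotating the candidate LEFT by
    // each even amount undoes the encoding's rotate-right.
    static bool FitsArmImmediate(uint32_t imm) {
      for (int rot = 0; rot < 32; rot += 2) {
        uint32_t v = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
        if (v <= 0xff) return true;  // Fits the 8-bit immediate field.
      }
      return false;
    }

    int main() {
      printf("%d\n", FitsArmImmediate(0xff));     // 1: fits with no rotation
      printf("%d\n", FitsArmImmediate(0x3fc00));  // 1: 0xff rotated into bits 10..17
      printf("%d\n", FitsArmImmediate(0x101));    // 0: set bits 9 positions apart
      return 0;                                   //    exceed any 8-bit window
    }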
@@ -684,6 +697,7 @@ void Assembler::bl(int branch_offset, Condition cond) { void Assembler::blx(int branch_offset) { // v5 and above + WriteRecordedPositions(); ASSERT((branch_offset & 1) == 0); int h = ((branch_offset & 2) >> 1)*B24; int imm24 = branch_offset >> 2; @@ -693,12 +707,14 @@ void Assembler::blx(int branch_offset) { // v5 and above void Assembler::blx(Register target, Condition cond) { // v5 and above + WriteRecordedPositions(); ASSERT(!target.is(pc)); emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code()); } void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t + WriteRecordedPositions(); ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code()); } @@ -797,6 +813,9 @@ void Assembler::orr(Register dst, Register src1, const Operand& src2, void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { + if (dst.is(pc)) { + WriteRecordedPositions(); + } addrmod1(cond | 13*B21 | s, r0, dst, src); } @@ -816,7 +835,6 @@ void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, SBit s, Condition cond) { ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); - ASSERT(!dst.is(src1)); emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -825,7 +843,7 @@ void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, void Assembler::mul(Register dst, Register src1, Register src2, SBit s, Condition cond) { ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dst.is(src1)); + // dst goes in bits 16-19 for this instruction! 
emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -837,7 +855,7 @@ void Assembler::smlal(Register dstL, SBit s, Condition cond) { ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL)); + ASSERT(!dstL.is(dstH)); emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -850,7 +868,7 @@ void Assembler::smull(Register dstL, SBit s, Condition cond) { ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL)); + ASSERT(!dstL.is(dstH)); emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -863,7 +881,7 @@ void Assembler::umlal(Register dstL, SBit s, Condition cond) { ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL)); + ASSERT(!dstL.is(dstH)); emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -876,7 +894,7 @@ void Assembler::umull(Register dstL, SBit s, Condition cond) { ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); - ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL)); + ASSERT(!dstL.is(dstH)); emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 | src2.code()*B8 | B7 | B4 | src1.code()); } @@ -906,8 +924,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, // immediate uint32_t rotate_imm; uint32_t immed_8; - if ((src.rmode_ != RelocInfo::NONE && - src.rmode_ != RelocInfo::EXTERNAL_REFERENCE)|| + if (MustUseIp(src.rmode_) || !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) { // immediate operand cannot be encoded, load it first to register ip RecordRelocInfo(src.rmode_, src.imm32_); @@ -926,6 +943,9 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src, // Load/Store instructions void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { + if (dst.is(pc)) { + WriteRecordedPositions(); + } addrmod2(cond | B26 | L, dst, src); // Eliminate pattern: push(r), pop(r) @@ -1263,7 +1283,6 @@ void Assembler::RecordPosition(int pos) { if (pos == RelocInfo::kNoPosition) return; ASSERT(pos >= 0); current_position_ = pos; - WriteRecordedPositions(); } @@ -1271,7 +1290,6 @@ void Assembler::RecordStatementPosition(int pos) { if (pos == RelocInfo::kNoPosition) return; ASSERT(pos >= 0); current_statement_position_ = pos; - WriteRecordedPositions(); } diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 662661991..3f7ccf54a 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -41,6 +41,23 @@ namespace internal { #define __ ACCESS_MASM(masm_) +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc); +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Label* rhs_not_nan, + Label* slow, + bool strict); +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm); +static void MultiplyByKnownInt(MacroAssembler* masm, + Register source, + Register destination, + int known_int); +static bool IsEasyToMultiplyBy(int x); + + + // ------------------------------------------------------------------------- // Platform-specific DeferredCode functions. 
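Editorial note: the comparison helpers declared above lean on the IEEE-754 bit layout — a double is a NaN exactly when all eleven exponent bits are set and the mantissa is nonzero, which is what EmitNanCheck tests word by word in a later hunk. A portable sketch of the same predicate, for illustration only (not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <cmath>
    #include <limits>

    // NaN test on the raw bits: exponent (bits 62..52) all ones and
    // mantissa (bits 51..0) nonzero. An all-ones exponent with a zero
    // mantissa is an infinity, not a NaN.
    static bool IsNanByBits(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      uint64_t exponent = (bits >> 52) & 0x7ff;
      uint64_t mantissa = bits & ((1ULL << 52) - 1);
      return exponent == 0x7ff && mantissa != 0;
    }

    int main() {
      printf("%d\n", IsNanByBits(0.0));                                        // 0
      printf("%d\n", IsNanByBits(std::numeric_limits<double>::infinity()));    // 0
      printf("%d\n", IsNanByBits(std::nan("")));                               // 1
      return 0;
    }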
@@ -683,33 +700,69 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target, class GenericBinaryOpStub : public CodeStub { public: GenericBinaryOpStub(Token::Value op, - OverwriteMode mode) - : op_(op), mode_(mode) { } + OverwriteMode mode, + int constant_rhs = CodeGenerator::kUnknownIntValue) + : op_(op), + mode_(mode), + constant_rhs_(constant_rhs), + specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { } private: Token::Value op_; OverwriteMode mode_; + int constant_rhs_; + bool specialized_on_rhs_; + + static const int kMaxKnownRhs = 0x40000000; // Minor key encoding in 16 bits. class ModeBits: public BitField<OverwriteMode, 0, 2> {}; - class OpBits: public BitField<Token::Value, 2, 14> {}; + class OpBits: public BitField<Token::Value, 2, 6> {}; + class KnownIntBits: public BitField<int, 8, 8> {}; Major MajorKey() { return GenericBinaryOp; } int MinorKey() { // Encode the parameters in a unique 16 bit value. return OpBits::encode(op_) - | ModeBits::encode(mode_); + | ModeBits::encode(mode_) + | KnownIntBits::encode(MinorKeyForKnownInt()); } void Generate(MacroAssembler* masm); void HandleNonSmiBitwiseOp(MacroAssembler* masm); + static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) { + if (constant_rhs == CodeGenerator::kUnknownIntValue) return false; + if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3; + if (op == Token::MOD) { + if (constant_rhs <= 1) return false; + if (constant_rhs <= 10) return true; + if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true; + return false; + } + return false; + } + + int MinorKeyForKnownInt() { + if (!specialized_on_rhs_) return 0; + if (constant_rhs_ <= 10) return constant_rhs_ + 1; + ASSERT(IsPowerOf2(constant_rhs_)); + int key = 12; + int d = constant_rhs_; + while ((d & 1) == 0) { + key++; + d >>= 1; + } + return key; + } + const char* GetName() { switch (op_) { case Token::ADD: return "GenericBinaryOpStub_ADD"; case Token::SUB: return "GenericBinaryOpStub_SUB"; case Token::MUL: return "GenericBinaryOpStub_MUL"; case Token::DIV: return "GenericBinaryOpStub_DIV"; + case Token::MOD: return "GenericBinaryOpStub_MOD"; case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR"; case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND"; case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR"; @@ -721,13 +774,22 @@ class GenericBinaryOpStub : public CodeStub { } #ifdef DEBUG - void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); } + void Print() { + if (!specialized_on_rhs_) { + PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); + } else { + PrintF("GenericBinaryOpStub (%s by %d)\n", + Token::String(op_), + constant_rhs_); + } + } #endif }; void CodeGenerator::GenericBinaryOperation(Token::Value op, - OverwriteMode overwrite_mode) { + OverwriteMode overwrite_mode, + int constant_rhs) { VirtualFrame::SpilledScope spilled_scope; // sp[0] : y // sp[1] : x @@ -738,6 +800,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, case Token::ADD: // fall through. case Token::SUB: // fall through. 
case Token::MUL: + case Token::DIV: + case Token::MOD: case Token::BIT_OR: case Token::BIT_AND: case Token::BIT_XOR: @@ -746,27 +810,11 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, case Token::SAR: { frame_->EmitPop(r0); // r0 : y frame_->EmitPop(r1); // r1 : x - GenericBinaryOpStub stub(op, overwrite_mode); + GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs); frame_->CallStub(&stub, 0); break; } - case Token::DIV: { - Result arg_count = allocator_->Allocate(r0); - ASSERT(arg_count.is_valid()); - __ mov(arg_count.reg(), Operand(1)); - frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2); - break; - } - - case Token::MOD: { - Result arg_count = allocator_->Allocate(r0); - ASSERT(arg_count.is_valid()); - __ mov(arg_count.reg(), Operand(1)); - frame_->InvokeBuiltin(Builtins::MOD, CALL_JS, &arg_count, 2); - break; - } - case Token::COMMA: frame_->EmitPop(r0); // simply discard left value @@ -830,6 +878,10 @@ void DeferredInlineSmiOperation::Generate() { break; } + // For these operations there is no optimistic operation that needs to be + // reverted. + case Token::MUL: + case Token::MOD: case Token::BIT_OR: case Token::BIT_XOR: case Token::BIT_AND: { @@ -860,11 +912,32 @@ void DeferredInlineSmiOperation::Generate() { break; } - GenericBinaryOpStub stub(op_, overwrite_mode_); + GenericBinaryOpStub stub(op_, overwrite_mode_, value_); __ CallStub(&stub); } +static bool PopCountLessThanEqual2(unsigned int x) { + x &= x - 1; + return (x & (x - 1)) == 0; +} + + +// Returns the index of the lowest bit set. +static int BitPosition(unsigned x) { + int bit_posn = 0; + while ((x & 0xf) == 0) { + bit_posn += 4; + x >>= 4; + } + while ((x & 1) == 0) { + bit_posn++; + x >>= 1; + } + return bit_posn; +} + + void CodeGenerator::SmiOperation(Token::Value op, Handle<Object> value, bool reversed, @@ -884,6 +957,7 @@ void CodeGenerator::SmiOperation(Token::Value op, JumpTarget exit; frame_->EmitPop(r0); + bool something_to_inline = true; switch (op) { case Token::ADD: { DeferredCode* deferred = @@ -913,6 +987,7 @@ void CodeGenerator::SmiOperation(Token::Value op, break; } + case Token::BIT_OR: case Token::BIT_XOR: case Token::BIT_AND: { @@ -934,75 +1009,125 @@ void CodeGenerator::SmiOperation(Token::Value op, case Token::SHR: case Token::SAR: { if (reversed) { - __ mov(ip, Operand(value)); - frame_->EmitPush(ip); - frame_->EmitPush(r0); - GenericBinaryOperation(op, mode); - - } else { - int shift_value = int_value & 0x1f; // least significant 5 bits - DeferredCode* deferred = - new DeferredInlineSmiOperation(op, shift_value, false, mode); - __ tst(r0, Operand(kSmiTagMask)); - deferred->Branch(ne); - __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags - switch (op) { - case Token::SHL: { + something_to_inline = false; + break; + } + int shift_value = int_value & 0x1f; // least significant 5 bits + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, shift_value, false, mode); + __ tst(r0, Operand(kSmiTagMask)); + deferred->Branch(ne); + __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags + switch (op) { + case Token::SHL: { + if (shift_value != 0) { __ mov(r2, Operand(r2, LSL, shift_value)); - // check that the *unsigned* result fits in a smi - __ add(r3, r2, Operand(0x40000000), SetCC); - deferred->Branch(mi); - break; } - case Token::SHR: { - // LSR by immediate 0 means shifting 32 bits. 
- if (shift_value != 0) { - __ mov(r2, Operand(r2, LSR, shift_value)); - } - // check that the *unsigned* result fits in a smi - // neither of the two high-order bits can be set: - // - 0x80000000: high bit would be lost when smi tagging - // - 0x40000000: this number would convert to negative when - // smi tagging these two cases can only happen with shifts - // by 0 or 1 when handed a valid smi - __ and_(r3, r2, Operand(0xc0000000), SetCC); - deferred->Branch(ne); - break; + // check that the *unsigned* result fits in a smi + __ add(r3, r2, Operand(0x40000000), SetCC); + deferred->Branch(mi); + break; + } + case Token::SHR: { + // LSR by immediate 0 means shifting 32 bits. + if (shift_value != 0) { + __ mov(r2, Operand(r2, LSR, shift_value)); } - case Token::SAR: { - if (shift_value != 0) { - // ASR by immediate 0 means shifting 32 bits. - __ mov(r2, Operand(r2, ASR, shift_value)); - } - break; + // check that the *unsigned* result fits in a smi + // neither of the two high-order bits can be set: + // - 0x80000000: high bit would be lost when smi tagging + // - 0x40000000: this number would convert to negative when + // smi tagging these two cases can only happen with shifts + // by 0 or 1 when handed a valid smi + __ and_(r3, r2, Operand(0xc0000000), SetCC); + deferred->Branch(ne); + break; + } + case Token::SAR: { + if (shift_value != 0) { + // ASR by immediate 0 means shifting 32 bits. + __ mov(r2, Operand(r2, ASR, shift_value)); } - default: UNREACHABLE(); + break; } - __ mov(r0, Operand(r2, LSL, kSmiTagSize)); - deferred->BindExit(); + default: UNREACHABLE(); + } + __ mov(r0, Operand(r2, LSL, kSmiTagSize)); + deferred->BindExit(); + break; + } + + case Token::MOD: { + if (reversed || int_value < 2 || !IsPowerOf2(int_value)) { + something_to_inline = false; + break; } + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode); + unsigned mask = (0x80000000u | kSmiTagMask); + __ tst(r0, Operand(mask)); + deferred->Branch(ne); // Go to deferred code on non-Smis and negative. + mask = (int_value << kSmiTagSize) - 1; + __ and_(r0, r0, Operand(mask)); + deferred->BindExit(); break; } - default: - if (!reversed) { - frame_->EmitPush(r0); - __ mov(r0, Operand(value)); - frame_->EmitPush(r0); - } else { - __ mov(ip, Operand(value)); - frame_->EmitPush(ip); - frame_->EmitPush(r0); + case Token::MUL: { + if (!IsEasyToMultiplyBy(int_value)) { + something_to_inline = false; + break; + } + DeferredCode* deferred = + new DeferredInlineSmiOperation(op, int_value, reversed, mode); + unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; + max_smi_that_wont_overflow <<= kSmiTagSize; + unsigned mask = 0x80000000u; + while ((mask & max_smi_that_wont_overflow) == 0) { + mask |= mask >> 1; } - GenericBinaryOperation(op, mode); + mask |= kSmiTagMask; + // This does a single mask that checks for a too high value in a + // conservative way and for a non-Smi. It also filters out negative + // numbers, unfortunately, but since this code is inline we prefer + // brevity to comprehensiveness. 
+ __ tst(r0, Operand(mask)); + deferred->Branch(ne); + MultiplyByKnownInt(masm_, r0, r0, int_value); + deferred->BindExit(); + break; + } + + default: + something_to_inline = false; break; } + if (!something_to_inline) { + if (!reversed) { + frame_->EmitPush(r0); + __ mov(r0, Operand(value)); + frame_->EmitPush(r0); + GenericBinaryOperation(op, mode, int_value); + } else { + __ mov(ip, Operand(value)); + frame_->EmitPush(ip); + frame_->EmitPush(r0); + GenericBinaryOperation(op, mode, kUnknownIntValue); + } + } + exit.Bind(); } -void CodeGenerator::Comparison(Condition cc, bool strict) { +void CodeGenerator::Comparison(Condition cc, + Expression* left, + Expression* right, + bool strict) { + if (left != NULL) LoadAndSpill(left); + if (right != NULL) LoadAndSpill(right); + VirtualFrame::SpilledScope spilled_scope; // sp[0] : y // sp[1] : x @@ -1026,43 +1151,19 @@ void CodeGenerator::Comparison(Condition cc, bool strict) { __ tst(r2, Operand(kSmiTagMask)); smi.Branch(eq); - // Perform non-smi comparison by runtime call. - frame_->EmitPush(r1); - - // Figure out which native to call and setup the arguments. - Builtins::JavaScript native; - int arg_count = 1; - if (cc == eq) { - native = strict ? Builtins::STRICT_EQUALS : Builtins::EQUALS; - } else { - native = Builtins::COMPARE; - int ncr; // NaN compare result - if (cc == lt || cc == le) { - ncr = GREATER; - } else { - ASSERT(cc == gt || cc == ge); // remaining cases - ncr = LESS; - } - frame_->EmitPush(r0); - arg_count++; - __ mov(r0, Operand(Smi::FromInt(ncr))); - } + // Perform non-smi comparison by stub. + // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0. + // We call with 0 args because there are 0 on the stack. + CompareStub stub(cc, strict); + frame_->CallStub(&stub, 0); - // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) - // tagged as a small integer. - frame_->EmitPush(r0); - Result arg_count_register = allocator_->Allocate(r0); - ASSERT(arg_count_register.is_valid()); - __ mov(arg_count_register.reg(), Operand(arg_count)); - Result result = frame_->InvokeBuiltin(native, - CALL_JS, - &arg_count_register, - arg_count + 1); + Result result = allocator_->Allocate(r0); + ASSERT(result.is_valid()); __ cmp(result.reg(), Operand(0)); result.Unuse(); exit.Jump(); - // test smi equality by pointer comparison. + // Do smi comparisons by pointer comparison. smi.Bind(); __ cmp(r1, Operand(r0)); @@ -1505,8 +1606,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { // Duplicate TOS. 
__ ldr(r0, frame_->Top()); frame_->EmitPush(r0); - LoadAndSpill(clause->label()); - Comparison(eq, true); + Comparison(eq, NULL, clause->label(), true); Branch(false, &next_test); // Before entering the body from the test, remove the switch value from @@ -2321,16 +2421,22 @@ void CodeGenerator::VisitConditional(Conditional* node) { Comment cmnt(masm_, "[ Conditional"); JumpTarget then; JumpTarget else_; - JumpTarget exit; LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true); - Branch(false, &else_); - then.Bind(); - LoadAndSpill(node->then_expression(), typeof_state()); - exit.Jump(); - else_.Bind(); - LoadAndSpill(node->else_expression(), typeof_state()); - exit.Bind(); + if (has_valid_frame()) { + Branch(false, &else_); + } + if (has_valid_frame() || then.is_linked()) { + then.Bind(); + LoadAndSpill(node->then_expression(), typeof_state()); + } + if (else_.is_linked()) { + JumpTarget exit; + if (has_valid_frame()) exit.Jump(); + else_.Bind(); + LoadAndSpill(node->else_expression(), typeof_state()); + if (exit.is_linked()) exit.Bind(); + } ASSERT(frame_->height() == original_height + 1); } @@ -3180,6 +3286,66 @@ void CodeGenerator::VisitCallNew(CallNew* node) { } +void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { + VirtualFrame::SpilledScope spilled_scope; + ASSERT(args->length() == 1); + JumpTarget leave, null, function, non_function_constructor; + + // Load the object into r0. + LoadAndSpill(args->at(0)); + frame_->EmitPop(r0); + + // If the object is a smi, we return null. + __ tst(r0, Operand(kSmiTagMask)); + null.Branch(eq); + + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. + __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); + null.Branch(lt); + + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ cmp(r1, Operand(JS_FUNCTION_TYPE)); + function.Branch(eq); + + // Check if the constructor in the map is a function. + __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); + __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); + non_function_constructor.Branch(ne); + + // The r0 register now contains the constructor function. Grab the + // instance class name from there. + __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset)); + frame_->EmitPush(r0); + leave.Jump(); + + // Functions have class 'Function'. + function.Bind(); + __ mov(r0, Operand(Factory::function_class_symbol())); + frame_->EmitPush(r0); + leave.Jump(); + + // Objects with a non-function constructor have class 'Object'. + non_function_constructor.Bind(); + __ mov(r0, Operand(Factory::Object_symbol())); + frame_->EmitPush(r0); + leave.Jump(); + + // Non-JS objects have class null. + null.Bind(); + __ mov(r0, Operand(Factory::null_value())); + frame_->EmitPush(r0); + + // All done. 
+ leave.Bind(); +} + + void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { VirtualFrame::SpilledScope spilled_scope; ASSERT(args->length() == 1); @@ -3255,7 +3421,7 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); LoadAndSpill(args->at(0)); frame_->EmitPop(r0); - __ tst(r0, Operand(kSmiTagMask | 0x80000000)); + __ tst(r0, Operand(kSmiTagMask | 0x80000000u)); cc_reg_ = eq; } @@ -3290,6 +3456,28 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { } +void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { + VirtualFrame::SpilledScope spilled_scope; + ASSERT(args->length() == 0); + + // Get the frame pointer for the calling frame. + __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset)); + __ cmp(r1, Operand(ArgumentsAdaptorFrame::SENTINEL)); + __ b(ne, &check_frame_marker); + __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset)); + __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); + cc_reg_ = eq; +} + + void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { VirtualFrame::SpilledScope spilled_scope; ASSERT(args->length() == 0); @@ -3423,7 +3611,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { false_target(), true_target(), true); - cc_reg_ = NegateCondition(cc_reg_); + // LoadCondition may (and usually does) leave a test and branch to + // be emitted by the caller. In that case, negate the condition. + if (has_cc()) cc_reg_ = NegateCondition(cc_reg_); } else if (op == Token::DELETE) { Property* property = node->expression()->AsProperty(); @@ -3495,8 +3685,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { case Token::SUB: { bool overwrite = - (node->AsBinaryOperation() != NULL && - node->AsBinaryOperation()->ResultOverwriteAllowed()); + (node->expression()->AsBinaryOperation() != NULL && + node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); UnarySubStub stub(overwrite); frame_->CallStub(&stub, 0); break; @@ -3960,34 +4150,34 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { return; } - LoadAndSpill(left); - LoadAndSpill(right); switch (op) { case Token::EQ: - Comparison(eq, false); + Comparison(eq, left, right, false); break; case Token::LT: - Comparison(lt); + Comparison(lt, left, right); break; case Token::GT: - Comparison(gt); + Comparison(gt, left, right); break; case Token::LTE: - Comparison(le); + Comparison(le, left, right); break; case Token::GTE: - Comparison(ge); + Comparison(ge, left, right); break; case Token::EQ_STRICT: - Comparison(eq, true); + Comparison(eq, left, right, true); break; case Token::IN: { + LoadAndSpill(left); + LoadAndSpill(right); Result arg_count = allocator_->Allocate(r0); ASSERT(arg_count.is_valid()); __ mov(arg_count.reg(), Operand(1)); // not counting receiver @@ -4000,6 +4190,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { } case Token::INSTANCEOF: { + LoadAndSpill(left); + LoadAndSpill(right); InstanceofStub stub; Result result = frame_->CallStub(&stub, 2); // At this point if instanceof succeeded then r0 == 0. 
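Editorial note: several hunks above append the u suffix to 0x80000000, and GenerateIsNonNegativeSmi tests kSmiTagMask | 0x80000000u with a single tst: with smis tagged by a zero low bit and the payload in the upper 31 bits, one mask rejects non-smis and negative values at once. A sketch of the trick, assuming V8's 32-bit smi encoding of this era:

    #include <cstdint>
    #include <cstdio>

    const uint32_t kSmiTagSize = 1;
    const uint32_t kSmiTagMask = 1;  // assumption: (1 << kSmiTagSize) - 1

    // Tag an integer as a smi: value shifted up, tag bit 0 left clear.
    static uint32_t ToSmi(int32_t value) {
      return static_cast<uint32_t>(value) << kSmiTagSize;
    }

    // One mask covers both conditions: tag bit clear (it is a smi) and
    // sign bit clear (it is non-negative) -- the tst in the generated code.
    static bool IsNonNegativeSmi(uint32_t word) {
      return (word & (kSmiTagMask | 0x80000000u)) == 0;
    }

    int main() {
      printf("%d\n", IsNonNegativeSmi(ToSmi(42)));  // 1
      printf("%d\n", IsNonNegativeSmi(ToSmi(-1)));  // 0: sign bit set
      printf("%d\n", IsNonNegativeSmi(0x5u));       // 0: tag bit set, not a smi
      return 0;
    }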
@@ -4291,7 +4483,7 @@ static void CountLeadingZeros( __ add(zeros, zeros, Operand(2), LeaveCC, eq); __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); // Top bit. - __ tst(scratch, Operand(0x80000000)); + __ tst(scratch, Operand(0x80000000u)); __ add(zeros, zeros, Operand(1), LeaveCC, eq); #endif } @@ -4443,7 +4635,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) { // We test for the special value that has a different exponent. This test // has the neat side effect of setting the flags according to the sign. ASSERT(HeapNumber::kSignMask == 0x80000000u); - __ cmp(the_int_, Operand(0x80000000)); + __ cmp(the_int_, Operand(0x80000000u)); __ b(eq, &max_negative_int); // Set up the correct exponent in scratch_. All non-Smi int32s have the same. // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). @@ -4482,6 +4674,408 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) { } +// Handle the case where the lhs and rhs are the same object. +// Equality is almost reflexive (everything but NaN), so this is a test +// for "identity and not NaN". +static void EmitIdenticalObjectComparison(MacroAssembler* masm, + Label* slow, + Condition cc) { + Label not_identical; + __ cmp(r0, Operand(r1)); + __ b(ne, ¬_identical); + + Register exp_mask_reg = r5; + __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask)); + + // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), + // so we do the second best thing - test it ourselves. + Label heap_number, return_equal; + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cc == lt || cc == gt) { + __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); + __ b(ge, slow); + } else { + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(eq, &heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cc != eq) { + __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); + __ b(ge, slow); + } + } + __ bind(&return_equal); + if (cc == lt) { + __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. + } else if (cc == gt) { + __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. + } else { + __ mov(r0, Operand(0)); // Things are <=, >=, ==, === themselves. + } + __ mov(pc, Operand(lr)); // Return. + + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cc != lt && cc != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ and_(r3, r2, Operand(exp_mask_reg)); + __ cmp(r3, Operand(exp_mask_reg)); + __ b(ne, &return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ orr(r0, r3, Operand(r2), SetCC); + // For equal we already have the right value in r0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if not + // (it's a NaN). 
For <= and >= we need to load r0 with the failing value + // if it's a NaN. + if (cc != eq) { + // All-zero means Infinity means equal. + __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal + if (cc == le) { + __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. + } + } + __ mov(pc, Operand(lr)); // Return. + } + // No fall through here. + + __ bind(¬_identical); +} + + +// See comment at call site. +static void EmitSmiNonsmiComparison(MacroAssembler* masm, + Label* rhs_not_nan, + Label* slow, + bool strict) { + Label lhs_is_smi; + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &lhs_is_smi); + + // Rhs is a Smi. Check whether the non-smi is a heap number. + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + if (strict) { + // If lhs was not a number and rhs was a Smi then strict equality cannot + // succeed. Return non-equal (r0 is already not zero) + __ mov(pc, Operand(lr), LeaveCC, ne); // Return. + } else { + // Smi compared non-strictly with a non-Smi non-heap-number. Call + // the runtime. + __ b(ne, slow); + } + + // Rhs is a smi, lhs is a number. + __ push(lr); + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub1(r3, r2, r7, r6); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // r3 and r2 are rhs as double. + __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); + __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + // We now have both loaded as doubles but we can skip the lhs nan check + // since it's a Smi. + __ pop(lr); + __ jmp(rhs_not_nan); + + __ bind(&lhs_is_smi); + // Lhs is a Smi. Check whether the non-smi is a heap number. + __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); + if (strict) { + // If lhs was not a number and rhs was a Smi then strict equality cannot + // succeed. Return non-equal. + __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal. + __ mov(pc, Operand(lr), LeaveCC, ne); // Return. + } else { + // Smi compared non-strictly with a non-Smi non-heap-number. Call + // the runtime. + __ b(ne, slow); + } + + // Lhs is a smi, rhs is a number. + // r0 is Smi and r1 is heap number. + __ push(lr); + __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); + __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub2(r1, r0, r7, r6); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + // Fall through to both_loaded_as_doubles. +} + + +void EmitNanCheck(MacroAssembler* masm, Label* rhs_not_nan, Condition cc) { + bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); + Register lhs_exponent = exp_first ? r0 : r1; + Register rhs_exponent = exp_first ? r2 : r3; + Register lhs_mantissa = exp_first ? r1 : r0; + Register rhs_mantissa = exp_first ? 
r3 : r2; + Label one_is_nan, neither_is_nan; + + Register exp_mask_reg = r5; + + __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask)); + __ and_(r4, rhs_exponent, Operand(exp_mask_reg)); + __ cmp(r4, Operand(exp_mask_reg)); + __ b(ne, rhs_not_nan); + __ mov(r4, + Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), + SetCC); + __ b(ne, &one_is_nan); + __ cmp(rhs_mantissa, Operand(0)); + __ b(ne, &one_is_nan); + + __ bind(rhs_not_nan); + __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask)); + __ and_(r4, lhs_exponent, Operand(exp_mask_reg)); + __ cmp(r4, Operand(exp_mask_reg)); + __ b(ne, &neither_is_nan); + __ mov(r4, + Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), + SetCC); + __ b(ne, &one_is_nan); + __ cmp(lhs_mantissa, Operand(0)); + __ b(eq, &neither_is_nan); + + __ bind(&one_is_nan); + // NaN comparisons always fail. + // Load whatever we need in r0 to make the comparison fail. + if (cc == lt || cc == le) { + __ mov(r0, Operand(GREATER)); + } else { + __ mov(r0, Operand(LESS)); + } + __ mov(pc, Operand(lr)); // Return. + + __ bind(&neither_is_nan); +} + + +// See comment at call site. +static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { + bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); + Register lhs_exponent = exp_first ? r0 : r1; + Register rhs_exponent = exp_first ? r2 : r3; + Register lhs_mantissa = exp_first ? r1 : r0; + Register rhs_mantissa = exp_first ? r3 : r2; + + // r0, r1, r2, r3 have the two doubles. Neither is a NaN. + if (cc == eq) { + // Doubles are not equal unless they have the same bit pattern. + // Exception: 0 and -0. + __ cmp(lhs_mantissa, Operand(rhs_mantissa)); + __ orr(r0, lhs_mantissa, Operand(rhs_mantissa), LeaveCC, ne); + // Return non-zero if the numbers are unequal. + __ mov(pc, Operand(lr), LeaveCC, ne); + + __ sub(r0, lhs_exponent, Operand(rhs_exponent), SetCC); + // If exponents are equal then return 0. + __ mov(pc, Operand(lr), LeaveCC, eq); + + // Exponents are unequal. The only way we can return that the numbers + // are equal is if one is -0 and the other is 0. We already dealt + // with the case where both are -0 or both are 0. + // We start by seeing if the mantissas (that are equal) or the bottom + // 31 bits of the rhs exponent are non-zero. If so we return not + // equal. + __ orr(r4, rhs_mantissa, Operand(rhs_exponent, LSL, kSmiTagSize), SetCC); + __ mov(r0, Operand(r4), LeaveCC, ne); + __ mov(pc, Operand(lr), LeaveCC, ne); // Return conditionally. + // Now they are equal if and only if the lhs exponent is zero in its + // low 31 bits. + __ mov(r0, Operand(lhs_exponent, LSL, kSmiTagSize)); + __ mov(pc, Operand(lr)); + } else { + // Call a native function to do a comparison between two non-NaNs. + // Call C routine that may not cause GC or other trouble. + __ mov(r5, Operand(ExternalReference::compare_doubles())); + __ Jump(r5); // Tail call. + } +} + + +// See comment at call site. +static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) { + // If either operand is a JSObject or an oddball value, then they are + // not equal since their pointers are different. + // There is no test for undetectability in strict equality. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + Label first_non_object; + // Get the type of the first operand into r2 and compare it with + // FIRST_JS_OBJECT_TYPE. 
+ __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE); + __ b(lt, &first_non_object); + + // Return non-zero (r0 is not zero) + Label return_not_equal; + __ bind(&return_not_equal); + __ mov(pc, Operand(lr)); // Return. + + __ bind(&first_non_object); + // Check for oddballs: true, false, null, undefined. + __ cmp(r2, Operand(ODDBALL_TYPE)); + __ b(eq, &return_not_equal); + + __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE); + __ b(ge, &return_not_equal); + + // Check for oddballs: true, false, null, undefined. + __ cmp(r3, Operand(ODDBALL_TYPE)); + __ b(eq, &return_not_equal); +} + + +// See comment at call site. +static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, + Label* both_loaded_as_doubles, + Label* not_heap_numbers, + Label* slow) { + __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); + __ b(ne, not_heap_numbers); + __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE); + __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. + + // Both are heap numbers. Load them up then jump to the code we have + // for that. + __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); + __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); + __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); + __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ jmp(both_loaded_as_doubles); +} + + +// Fast negative check for symbol-to-symbol equality. +static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) { + // r2 is object type of r0. + __ tst(r2, Operand(kIsNotStringMask)); + __ b(ne, slow); + __ tst(r2, Operand(kIsSymbolMask)); + __ b(eq, slow); + __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); + __ b(ge, slow); + __ tst(r3, Operand(kIsSymbolMask)); + __ b(eq, slow); + + // Both are symbols. We already checked they weren't the same pointer + // so they are not equal. + __ mov(r0, Operand(1)); // Non-zero indicates not equal. + __ mov(pc, Operand(lr)); // Return. +} + + +// On entry r0 and r1 are the things to be compared. On exit r0 is 0, +// positive or negative to indicate the result of the comparison. +void CompareStub::Generate(MacroAssembler* masm) { + Label slow; // Call builtin. + Label not_smis, both_loaded_as_doubles, rhs_not_nan; + + // NOTICE! This code is only reached after a smi-fast-case check, so + // it is certain that at least one operand isn't a smi. + + // Handle the case where the objects are identical. Either returns the answer + // or goes to slow. Only falls through if the objects were not identical. + EmitIdenticalObjectComparison(masm, &slow, cc_); + + // If either is a Smi (we know that not both are), then they can only + // be strictly equal if the other is a HeapNumber. + ASSERT_EQ(0, kSmiTag); + ASSERT_EQ(0, Smi::FromInt(0)); + __ and_(r2, r0, Operand(r1)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, ¬_smis); + // One operand is a smi. EmitSmiNonsmiComparison generates code that can: + // 1) Return the answer. + // 2) Go to slow. + // 3) Fall through to both_loaded_as_doubles. + // 4) Jump to rhs_not_nan. + // In cases 3 and 4 we have found out we were dealing with a number-number + // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles. + EmitSmiNonsmiComparison(masm, &rhs_not_nan, &slow, strict_); + + __ bind(&both_loaded_as_doubles); + // r0, r1, r2, r3 are the double representations of the left hand side + // and the right hand side. + + // Checks for NaN in the doubles we have loaded. 
Can return the answer or + // fall through if neither is a NaN. Also binds rhs_not_nan. + EmitNanCheck(masm, &rhs_not_nan, cc_); + + // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the + // answer. Never falls through. + EmitTwoNonNanDoubleComparison(masm, cc_); + + __ bind(¬_smis); + // At this point we know we are dealing with two different objects, + // and neither of them is a Smi. The objects are in r0 and r1. + if (strict_) { + // This returns non-equal for some object types, or falls through if it + // was not lucky. + EmitStrictTwoHeapObjectCompare(masm); + } + + Label check_for_symbols; + // Check for heap-number-heap-number comparison. Can jump to slow case, + // or load both doubles into r0, r1, r2, r3 and jump to the code that handles + // that case. If the inputs are not doubles then jumps to check_for_symbols. + // In this case r2 will contain the type of r0. + EmitCheckForTwoHeapNumbers(masm, + &both_loaded_as_doubles, + &check_for_symbols, + &slow); + + __ bind(&check_for_symbols); + if (cc_ == eq) { + // Either jumps to slow or returns the answer. Assumes that r2 is the type + // of r0 on entry. + EmitCheckForSymbols(masm, &slow); + } + + __ bind(&slow); + __ push(lr); + __ push(r1); + __ push(r0); + // Figure out which native to call and setup the arguments. + Builtins::JavaScript native; + int arg_count = 1; // Not counting receiver. + if (cc_ == eq) { + native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + } else { + native = Builtins::COMPARE; + int ncr; // NaN compare result + if (cc_ == lt || cc_ == le) { + ncr = GREATER; + } else { + ASSERT(cc_ == gt || cc_ == ge); // remaining cases + ncr = LESS; + } + arg_count++; + __ mov(r0, Operand(Smi::FromInt(ncr))); + __ push(r0); + } + + // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) + // tagged as a small integer. + __ mov(r0, Operand(arg_count)); + __ InvokeBuiltin(native, CALL_JS); + __ cmp(r0, Operand(0)); + __ pop(pc); +} + + // Allocates a heap number or jumps to the label if the young space is full and // a scavenge is needed. static void AllocateHeapNumber( @@ -4538,7 +5132,8 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // The new heap number is in r5. r6 and r7 are scratch. AllocateHeapNumber(masm, &slow, r5, r6, r7); // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. - ConvertToDoubleStub stub1(r3, r2, r0, r6); + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub1(r3, r2, r7, r6); __ push(lr); __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. @@ -4854,6 +5449,85 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { } +// Can we multiply by x with max two shifts and an add. +// This answers yes to all integers from 2 to 10. +static bool IsEasyToMultiplyBy(int x) { + if (x < 2) return false; // Avoid special cases. + if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. + if (IsPowerOf2(x)) return true; // Simple shift. + if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift. + if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. + return false; +} + + +// Can multiply by anything that IsEasyToMultiplyBy returns true for. +// Source and destination may be the same register. This routine does +// not set carry and overflow the way a mul instruction would. 
+static void MultiplyByKnownInt(MacroAssembler* masm, + Register source, + Register destination, + int known_int) { + if (IsPowerOf2(known_int)) { + __ mov(destination, Operand(source, LSL, BitPosition(known_int))); + } else if (PopCountLessThanEqual2(known_int)) { + int first_bit = BitPosition(known_int); + int second_bit = BitPosition(known_int ^ (1 << first_bit)); + __ add(destination, source, Operand(source, LSL, second_bit - first_bit)); + if (first_bit != 0) { + __ mov(destination, Operand(destination, LSL, first_bit)); + } + } else { + ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. + int the_bit = BitPosition(known_int + 1); + __ rsb(destination, source, Operand(source, LSL, the_bit)); + } +} + + +// This function (as opposed to MultiplyByKnownInt) takes the known int in a +// a register for the cases where it doesn't know a good trick, and may deliver +// a result that needs shifting. +static void MultiplyByKnownInt2( + MacroAssembler* masm, + Register result, + Register source, + Register known_int_register, // Smi tagged. + int known_int, + int* required_shift) { // Including Smi tag shift + switch (known_int) { + case 3: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 1; + break; + case 5: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 1; + break; + case 6: + __ add(result, source, Operand(source, LSL, 1)); + *required_shift = 2; + break; + case 7: + __ rsb(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 9: + __ add(result, source, Operand(source, LSL, 3)); + *required_shift = 1; + break; + case 10: + __ add(result, source, Operand(source, LSL, 2)); + *required_shift = 2; + break; + default: + ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. + __ mul(result, source, known_int_register); + *required_shift = 0; + } +} + + void GenericBinaryOpStub::Generate(MacroAssembler* masm) { // r1 : x // r0 : y @@ -4919,14 +5593,114 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { __ tst(r3, Operand(r3)); __ mov(r0, Operand(r3), LeaveCC, ne); __ Ret(ne); - // Slow case. + // We need -0 if we were multiplying a negative number with 0 to get 0. + // We know one of them was zero. + __ add(r2, r0, Operand(r1), SetCC); + __ mov(r0, Operand(Smi::FromInt(0)), LeaveCC, pl); + __ Ret(pl); // Return Smi 0 if the non-zero one was positive. + // Slow case. We fall through here if we multiplied a negative number + // with 0, because that would mean we should produce -0. __ bind(&slow); HandleBinaryOpSlowCases(masm, ¬_smi, Builtins::MUL, Token::MUL, - mode_); + mode_); + break; + } + + case Token::DIV: + case Token::MOD: { + Label not_smi; + if (specialized_on_rhs_) { + Label smi_is_unsuitable; + __ BranchOnNotSmi(r1, ¬_smi); + if (IsPowerOf2(constant_rhs_)) { + if (op_ == Token::MOD) { + __ and_(r0, + r1, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), + SetCC); + // We now have the answer, but if the input was negative we also + // have the sign bit. Our work is done if the result is + // positive or zero: + __ Ret(pl); + // A mod of a negative left hand side must return a negative number. + // Unfortunately if the answer is 0 then we must return -0. And we + // already optimistically trashed r0 so we may need to restore it. + __ eor(r0, r0, Operand(0x80000000u), SetCC); + // Next two instructions are conditional on the answer being -0. + __ mov(r0, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); + __ b(eq, &smi_is_unsuitable); + // We need to subtract the dividend. 
Eg. -3 % 4 == -3. + __ sub(r0, r0, Operand(Smi::FromInt(constant_rhs_))); + } else { + ASSERT(op_ == Token::DIV); + __ tst(r1, + Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); + __ b(ne, &smi_is_unsuitable); // Go slow on negative or remainder. + int shift = 0; + int d = constant_rhs_; + while ((d & 1) == 0) { + d >>= 1; + shift++; + } + __ mov(r0, Operand(r1, LSR, shift)); + __ bic(r0, r0, Operand(kSmiTagMask)); + } + } else { + // Not a power of 2. + __ tst(r1, Operand(0x80000000u)); + __ b(ne, &smi_is_unsuitable); + // Find a fixed point reciprocal of the divisor so we can divide by + // multiplying. + double divisor = 1.0 / constant_rhs_; + int shift = 32; + double scale = 4294967296.0; // 1 << 32. + uint32_t mul; + // Maximise the precision of the fixed point reciprocal. + while (true) { + mul = static_cast<uint32_t>(scale * divisor); + if (mul >= 0x7fffffff) break; + scale *= 2.0; + shift++; + } + mul++; + __ mov(r2, Operand(mul)); + __ umull(r3, r2, r2, r1); + __ mov(r2, Operand(r2, LSR, shift - 31)); + // r2 is r1 / rhs. r2 is not Smi tagged. + // r0 is still the known rhs. r0 is Smi tagged. + // r1 is still the unkown lhs. r1 is Smi tagged. + int required_r4_shift = 0; // Including the Smi tag shift of 1. + // r4 = r2 * r0. + MultiplyByKnownInt2(masm, + r4, + r2, + r0, + constant_rhs_, + &required_r4_shift); + // r4 << required_r4_shift is now the Smi tagged rhs * (r1 / rhs). + if (op_ == Token::DIV) { + __ sub(r3, r1, Operand(r4, LSL, required_r4_shift), SetCC); + __ b(ne, &smi_is_unsuitable); // There was a remainder. + __ mov(r0, Operand(r2, LSL, kSmiTagSize)); + } else { + ASSERT(op_ == Token::MOD); + __ sub(r0, r1, Operand(r4, LSL, required_r4_shift)); + } + } + __ Ret(); + __ bind(&smi_is_unsuitable); + } else { + __ jmp(¬_smi); + } + HandleBinaryOpSlowCases(masm, + ¬_smi, + op_ == Token::MOD ? Builtins::MOD : Builtins::DIV, + op_, + mode_); break; } @@ -4951,7 +5725,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { __ and_(r2, r2, Operand(0x1f)); __ mov(r0, Operand(r1, ASR, r2)); // Smi tag result. - __ and_(r0, r0, Operand(~kSmiTagMask)); + __ bic(r0, r0, Operand(kSmiTagMask)); break; case Token::SHR: // Remove tags from operands. We can't do this on a 31 bit number @@ -5015,7 +5789,6 @@ void StackCheckStub::Generate(MacroAssembler* masm) { void UnarySubStub::Generate(MacroAssembler* masm) { Label undo; Label slow; - Label done; Label not_smi; // Enter runtime system if the value is not a smi. @@ -5041,9 +5814,6 @@ void UnarySubStub::Generate(MacroAssembler* masm) { __ mov(r0, Operand(0)); // Set number of arguments. __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); - __ bind(&done); - __ StubReturn(1); - __ bind(¬_smi); __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); __ b(ne, &slow); @@ -5203,9 +5973,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // support moving the C entry code stub. This should be fixed, but currently // this is OK because the CEntryStub gets generated so early in the V8 boot // sequence that it is not moving ever. - __ add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4 - __ push(lr); - __ Jump(r5); + masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4 + masm->push(lr); + masm->Jump(r5); if (always_allocate) { // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 @@ -5629,6 +6399,13 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { } +int CompareStub::MinorKey() { + // Encode the two parameters in a unique 16 bit value. 
+ ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); + return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 4fab90016..6391a8eb9 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -186,6 +186,8 @@ class CodeGenerator: public AstVisitor { bool in_spilled_code() const { return in_spilled_code_; } void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; } + static const int kUnknownIntValue = -1; + private: // Construction/Destruction CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval); @@ -291,8 +293,13 @@ class CodeGenerator: public AstVisitor { void ToBoolean(JumpTarget* true_target, JumpTarget* false_target); - void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode); - void Comparison(Condition cc, bool strict = false); + void GenericBinaryOperation(Token::Value op, + OverwriteMode overwrite_mode, + int known_rhs = kUnknownIntValue); + void Comparison(Condition cc, + Expression* left, + Expression* right, + bool strict = false); void SmiOperation(Token::Value op, Handle<Object> value, @@ -333,11 +340,15 @@ class CodeGenerator: public AstVisitor { void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args); void GenerateIsArray(ZoneList<Expression*>* args); + // Support for construct call checks. + void GenerateIsConstructCall(ZoneList<Expression*>* args); + // Support for arguments.length and arguments[?]. void GenerateArgumentsLength(ZoneList<Expression*>* args); void GenerateArgumentsAccess(ZoneList<Expression*>* args); - // Support for accessing the value field of an object (used by Date). + // Support for accessing the class and value fields of an object. + void GenerateClassOf(ZoneList<Expression*>* args); void GenerateValueOf(ZoneList<Expression*>* args); void GenerateSetValueOf(ZoneList<Expression*>* args); diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index d5f967fcb..f0311dfc1 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -36,6 +36,19 @@ # define USE_ARM_EABI 1 #endif +// This means that interwork-compatible jump instructions are generated. We +// want to generate them on the simulator too so it makes snapshots that can +// be used on real hardware. +#if defined(__THUMB_INTERWORK__) || !defined(__arm__) +# define USE_THUMB_INTERWORK 1 +#endif + +// Simulator should support ARM5 instructions. +#if !defined(__arm__) +# define __ARM_ARCH_5__ 1 +# define __ARM_ARCH_5T__ 1 +#endif + namespace assembler { namespace arm { @@ -97,6 +110,24 @@ enum Opcode { }; +// Some special instructions encoded as a TEQ with S=0 (bit 20). +enum Opcode9Bits { + BX = 1, + BXJ = 2, + BLX = 3, + BKPT = 7 +}; + + +// Some special instructions encoded as a CMN with S=0 (bit 20). +enum Opcode11Bits { + CLZ = 1 +}; + + +// S + + // Shifter types for Data-processing operands as defined in section A5.1.2. enum Shift { no_shift = -1, diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 8083ce30a..d193ab9d8 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -438,6 +438,18 @@ int Decoder::FormatOption(Instr* instr, const char* format) { return 6; } case 'u': { // 'u: signed or unsigned multiplies + // The manual gets the meaning of bit 22 backwards in the multiply + // instruction overview on page A3.16.2. 
The instructions that + // exist in u and s variants are the following: + // smull A4.1.87 + // umull A4.1.129 + // umlal A4.1.128 + // smlal A4.1.76 + // For these 0 means u and 1 means s. As can be seen on their individual + // pages. The other 18 mul instructions have the bit set or unset in + // arbitrary ways that are unrelated to the signedness of the instruction. + // None of these 18 instructions exist in both a 'u' and an 's' variant. + if (instr->Bit(22) == 0) { Print("u"); } else { @@ -494,12 +506,25 @@ void Decoder::DecodeType01(Instr* instr) { // multiply instructions if (instr->Bit(23) == 0) { if (instr->Bit(21) == 0) { - Format(instr, "mul'cond's 'rd, 'rm, 'rs"); + // The MUL instruction description (A 4.1.33) refers to Rd as being + // the destination for the operation, but it confusingly uses the + // Rn field to encode it. + Format(instr, "mul'cond's 'rn, 'rm, 'rs"); } else { - Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn"); + // The MLA instruction description (A 4.1.28) refers to the order + // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the + // Rn field to encode the Rd register and the Rd field to encode + // the Rn register. + Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd"); } } else { - Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm"); + // The signed/long multiply instructions use the terms RdHi and RdLo + // when referring to the target registers. They are mapped to the Rn + // and Rd fields as follows: + // RdLo == Rd field + // RdHi == Rn field + // The order of registers is: <RdLo>, <RdHi>, <Rm>, <Rs> + Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs"); } } else { Unknown(instr); // not used by V8 @@ -593,7 +618,17 @@ void Decoder::DecodeType01(Instr* instr) { if (instr->HasS()) { Format(instr, "teq'cond 'rn, 'shift_op"); } else { - Unknown(instr); // not used by V8 + switch (instr->Bits(7, 4)) { + case BX: + Format(instr, "bx'cond 'rm"); + break; + case BLX: + Format(instr, "blx'cond 'rm"); + break; + default: + Unknown(instr); // not used by V8 + break; + } } break; } @@ -609,7 +644,14 @@ void Decoder::DecodeType01(Instr* instr) { if (instr->HasS()) { Format(instr, "cmn'cond 'rn, 'shift_op"); } else { - Unknown(instr); // not used by V8 + switch (instr->Bits(7, 4)) { + case CLZ: + Format(instr, "clz'cond 'rd, 'rm"); + break; + default: + Unknown(instr); // not used by V8 + break; + } } break; } diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 8b4e087e3..2ca74a954 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -67,11 +67,15 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // Load the map into t0. __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset)); // Test the has_named_interceptor bit in the map. - __ ldr(t0, FieldMemOperand(t1, Map::kInstanceAttributesOffset)); - __ tst(t0, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8)))); + __ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset)); + __ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8)))); // Jump to miss if the interceptor bit is set. __ b(ne, miss); + // Bail out if we have a JS global proxy object. + __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset)); + __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE)); + __ b(eq, miss); // Check that the properties array is a dictionary. __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset)); @@ -81,13 +85,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // Compute the capacity mask. 
const int kCapacityOffset = - Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize; + Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize; __ ldr(r3, FieldMemOperand(t0, kCapacityOffset)); __ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int __ sub(r3, r3, Operand(1)); const int kElementsStartOffset = - Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize; + Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; // Generate an unrolled loop that performs a few probes before // giving up. Measurements done on Gmail indicate that 2 probes @@ -98,12 +102,12 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset)); __ mov(t1, Operand(t1, LSR, String::kHashShift)); if (i > 0) { - __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i))); + __ add(t1, t1, Operand(StringDictionary::GetProbeOffset(i))); } __ and_(t1, t1, Operand(r3)); // Scale the index by multiplying by the element size. - ASSERT(Dictionary::kElementSize == 3); + ASSERT(StringDictionary::kEntrySize == 3); __ add(t1, t1, Operand(t1, LSL, 1)); // t1 = t1 * 3 // Check if the key is identical to the name. @@ -188,11 +192,14 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { // -- [sp] : receiver // ----------------------------------- - // NOTE: Right now, this code always misses on ARM which is - // sub-optimal. We should port the fast case code from IA-32. + Label miss; + + // Load receiver. + __ ldr(r0, MemOperand(sp, 0)); - Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss)); - __ Jump(ic, RelocInfo::CODE_TARGET); + StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss); + __ bind(&miss); + StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); } @@ -213,7 +220,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // Probe the stub cache. Code::Flags flags = Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc); - StubCache::GenerateProbe(masm, flags, r1, r2, r3); + StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg); // If the stub cache probing failed, the receiver might be a value. // For value objects, we use the map of the prototype objects for @@ -250,7 +257,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // Probe the stub cache for the value object. __ bind(&probe); - StubCache::GenerateProbe(masm, flags, r1, r2, r3); + StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg); // Cache miss: Jump to runtime. __ bind(&miss); @@ -418,7 +425,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, NOT_IN_LOOP, MONOMORPHIC); - StubCache::GenerateProbe(masm, flags, r0, r2, r3); + StubCache::GenerateProbe(masm, flags, r0, r2, r3, no_reg); // Cache miss: Jump to runtime. Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss))); @@ -757,7 +764,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, NOT_IN_LOOP, MONOMORPHIC); - StubCache::GenerateProbe(masm, flags, r1, r2, r3); + StubCache::GenerateProbe(masm, flags, r1, r2, r3, no_reg); // Cache miss: Jump to runtime. 
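The unrolled probing above is ordinary open addressing over a power-of-two table. Here is a toy version of one probe sequence; the triangular probe-offset formula ((i + i*i)/2) is an assumption for illustration (the exact formula lives in StringDictionary), while the three-word entry scaling comes straight from the kEntrySize == 3 assert above.

    #include <cstdint>
    #include <cstdio>

    static const int kEntrySize = 3;                    // (key, value, details) triples
    static int ProbeOffset(int i) { return (i + i * i) >> 1; }  // assumed formula

    int main() {
      const uint32_t hash = 0x5bd1e995u;                // illustrative string hash
      const int capacity = 64;                          // always a power of two
      const int mask = capacity - 1;
      for (int i = 0; i < 2; i++) {                     // the stub unrolls two probes
        int index = static_cast<int>((hash + ProbeOffset(i)) & mask);
        std::printf("probe %d -> entry %d, array offset %d words\n",
                    i, index, index * kEntrySize);
      }
      return 0;
    }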
Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss))); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 897b5a7c1..47e2749c1 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -46,14 +46,14 @@ MacroAssembler::MacroAssembler(void* buffer, int size) // We always generate arm code, never thumb code, even if V8 is compiled to // thumb, so we require inter-working support -#if defined(__thumb__) && !defined(__THUMB_INTERWORK__) +#if defined(__thumb__) && !defined(USE_THUMB_INTERWORK) #error "flag -mthumb-interwork missing" #endif // We do not support thumb inter-working with an arm architecture not supporting // the blx instruction (below v5t) -#if defined(__THUMB_INTERWORK__) +#if defined(USE_THUMB_INTERWORK) #if !defined(__ARM_ARCH_5T__) && \ !defined(__ARM_ARCH_5TE__) && \ !defined(__ARM_ARCH_7A__) && \ @@ -65,12 +65,12 @@ MacroAssembler::MacroAssembler(void* buffer, int size) // Using blx may yield better code, so use it when required or when available -#if defined(__THUMB_INTERWORK__) || defined(__ARM_ARCH_5__) +#if defined(USE_THUMB_INTERWORK) || defined(__ARM_ARCH_5__) #define USE_BLX 1 #endif // Using bx does not yield better code, so use it only when required -#if defined(__THUMB_INTERWORK__) +#if defined(USE_THUMB_INTERWORK) #define USE_BX 1 #endif @@ -290,11 +290,24 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { // Align the stack at this point. After this point we have 5 pushes, // so in fact we have to unalign here! See also the assert on the // alignment immediately below. - if (OS::ActivationFrameAlignment() != kPointerSize) { +#if defined(V8_HOST_ARCH_ARM) + // Running on the real platform. Use the alignment as mandated by the local + // environment. + // Note: This will break if we ever start generating snapshots on one ARM + // platform for another ARM platform with a different alignment. + int activation_frame_alignment = OS::ActivationFrameAlignment(); +#else // defined(V8_HOST_ARCH_ARM) + // If we are using the simulator then we should always align to the expected + // alignment. As the simulator is used to generate snapshots we do not know + // if the target platform will need alignment, so we will always align at + // this point here. + int activation_frame_alignment = 2 * kPointerSize; +#endif // defined(V8_HOST_ARCH_ARM) + if (activation_frame_alignment != kPointerSize) { // This code needs to be made more general if this assert doesn't hold. - ASSERT(OS::ActivationFrameAlignment() == 2 * kPointerSize); + ASSERT(activation_frame_alignment == 2 * kPointerSize); mov(r7, Operand(Smi::FromInt(0))); - tst(sp, Operand(OS::ActivationFrameAlignment() - 1)); + tst(sp, Operand(activation_frame_alignment - 1)); push(r7, eq); // Conditional push instruction. 
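The alignment dance in EnterExitFrame just shown is easy to model. Five mandatory pushes follow in that function, an odd number, so when the mandated frame alignment is 8 bytes the filler Smi is pushed exactly when sp is currently aligned, which is what the comment means by "we have to unalign here". A sketch with made-up numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kPointerSize = 4;
      const uint32_t alignment = 2 * kPointerSize;   // 8 bytes, as asserted above
      uint32_t sp = 0x8000;                          // illustrative, 8-byte aligned
      if ((sp & (alignment - 1)) == 0) {             // the tst ... push(r7, eq) pair
        sp -= kPointerSize;                          // push one Smi(0) filler word
      }
      sp -= 5 * kPointerSize;                        // the five real pushes
      std::printf("sp = 0x%x, aligned: %d\n", sp, (sp & (alignment - 1)) == 0);
      return 0;
    }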
} diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index af4f28efe..e5500aad5 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -1046,6 +1046,9 @@ void Simulator::SoftwareInterrupt(Instr* instr) { int64_t result = target(arg0, arg1, arg2, arg3); int32_t lo_res = static_cast<int32_t>(result); int32_t hi_res = static_cast<int32_t>(result >> 32); + if (::v8::internal::FLAG_trace_sim) { + PrintF("Returned %08x\n", lo_res); + } set_register(r0, lo_res); set_register(r1, hi_res); set_register(r0, result); @@ -1077,41 +1080,63 @@ void Simulator::DecodeType01(Instr* instr) { // multiply instruction or extra loads and stores if (instr->Bits(7, 4) == 9) { if (instr->Bit(24) == 0) { - // multiply instructions - int rd = instr->RdField(); + // Raw field decoding here. Multiply instructions have their Rd in + // funny places. + int rn = instr->RnField(); int rm = instr->RmField(); int rs = instr->RsField(); int32_t rs_val = get_register(rs); int32_t rm_val = get_register(rm); if (instr->Bit(23) == 0) { if (instr->Bit(21) == 0) { - // Format(instr, "mul'cond's 'rd, 'rm, 'rs"); + // The MUL instruction description (A 4.1.33) refers to Rd as being + // the destination for the operation, but it confusingly uses the + // Rn field to encode it. + // Format(instr, "mul'cond's 'rn, 'rm, 'rs"); + int rd = rn; // Remap the rn field to the Rd register. int32_t alu_out = rm_val * rs_val; set_register(rd, alu_out); if (instr->HasS()) { SetNZFlags(alu_out); } } else { - Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn"); + // The MLA instruction description (A 4.1.28) refers to the order + // of registers as "Rd, Rm, Rs, Rn". But confusingly it uses the + // Rn field to encode the Rd register and the Rd field to encode + // the Rn register. + Format(instr, "mla'cond's 'rn, 'rm, 'rs, 'rd"); } } else { - // Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm"); - int rn = instr->RnField(); + // The signed/long multiply instructions use the terms RdHi and RdLo + // when referring to the target registers. They are mapped to the Rn + // and Rd fields as follows: + // RdLo == Rd + // RdHi == Rn (This is confusingly stored in variable rd here + // because the mul instruction from above uses the + // Rn field to encode the Rd register. Good luck figuring + // this out without reading the ARM instruction manual + // at a very detailed level.) + // Format(instr, "'um'al'cond's 'rd, 'rn, 'rs, 'rm"); + int rd_hi = rn; // Remap the rn field to the RdHi register. 
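The "funny places" remark above is concrete: in the ARM multiply encodings the destination sits in the bits 16..19 slot that the manual calls Rn everywhere else. A hand-rolled field extractor over one illustrative encoding, mul r2, r0, r1, assembled by hand for this sketch and worth double-checking against the manual:

    #include <cstdint>
    #include <cstdio>

    static int Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main() {
      const uint32_t mul_r2_r0_r1 = 0xE0020190u;  // cond=AL, Rn slot=2, Rs=1, Rm=0
      std::printf("destination (Rn slot): r%d\n", Bits(mul_r2_r0_r1, 19, 16));
      std::printf("operands: rm=r%d rs=r%d\n",
                  Bits(mul_r2_r0_r1, 3, 0), Bits(mul_r2_r0_r1, 11, 8));
      return 0;
    }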
+ int rd_lo = instr->RdField(); int32_t hi_res = 0; int32_t lo_res = 0; - if (instr->Bit(22) == 0) { - // signed multiply - UNIMPLEMENTED(); + if (instr->Bit(22) == 1) { + int64_t left_op = static_cast<int32_t>(rm_val); + int64_t right_op = static_cast<int32_t>(rs_val); + uint64_t result = left_op * right_op; + hi_res = static_cast<int32_t>(result >> 32); + lo_res = static_cast<int32_t>(result & 0xffffffff); } else { // unsigned multiply - uint64_t left_op = rm_val; - uint64_t right_op = rs_val; + uint64_t left_op = static_cast<uint32_t>(rm_val); + uint64_t right_op = static_cast<uint32_t>(rs_val); uint64_t result = left_op * right_op; hi_res = static_cast<int32_t>(result >> 32); lo_res = static_cast<int32_t>(result & 0xffffffff); } - set_register(rn, hi_res); - set_register(rd, lo_res); + set_register(rd_lo, lo_res); + set_register(rd_hi, hi_res); if (instr->HasS()) { UNIMPLEMENTED(); } @@ -1357,7 +1382,21 @@ void Simulator::DecodeType01(Instr* instr) { SetNZFlags(alu_out); SetCFlag(shifter_carry_out); } else { - UNIMPLEMENTED(); + ASSERT(type == 0); + int rm = instr->RmField(); + switch (instr->Bits(7, 4)) { + case BX: + set_pc(get_register(rm)); + break; + case BLX: { + uint32_t old_pc = get_pc(); + set_pc(get_register(rm)); + set_register(lr, old_pc + Instr::kInstrSize); + break; + } + default: + UNIMPLEMENTED(); + } } break; } @@ -1381,7 +1420,27 @@ void Simulator::DecodeType01(Instr* instr) { Format(instr, "cmn'cond 'rn, 'shift_rm"); Format(instr, "cmn'cond 'rn, 'imm"); } else { - UNIMPLEMENTED(); + ASSERT(type == 0); + int rm = instr->RmField(); + int rd = instr->RdField(); + switch (instr->Bits(7, 4)) { + case CLZ: { + uint32_t bits = get_register(rm); + int leading_zeros = 0; + if (bits == 0) { + leading_zeros = 32; + } else { + while ((bits & 0x80000000u) == 0) { + bits <<= 1; + leading_zeros++; + } + } + set_register(rd, leading_zeros); + break; + } + default: + UNIMPLEMENTED(); + } } break; } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 782455792..6d9ace84c 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -85,7 +85,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, Register receiver, Register name, - Register scratch) { + Register scratch, + Register extra) { Label miss; // Make sure that code is valid. The shifting code relies on the @@ -170,110 +171,6 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, } -void StubCompiler::GenerateLoadField(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register scratch1, - Register scratch2, - int index, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ tst(receiver, Operand(kSmiTagMask)); - __ b(eq, miss_label); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - GenerateFastPropertyLoad(masm, r0, reg, holder, index); - __ Ret(); -} - - -void StubCompiler::GenerateLoadConstant(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register scratch1, - Register scratch2, - Object* value, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ tst(receiver, Operand(kSmiTagMask)); - __ b(eq, miss_label); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - - // Return the constant value. 
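The simulator's CLZ fallback below is a straightforward shift loop and is easy to sanity-check host-side. The same loop, lifted into a standalone function:

    #include <cstdint>
    #include <cstdio>

    static int CountLeadingZeros(uint32_t bits) {
      if (bits == 0) return 32;           // CLZ of zero is defined as 32
      int leading_zeros = 0;
      while ((bits & 0x80000000u) == 0) { // shift until the top bit is set
        bits <<= 1;
        leading_zeros++;
      }
      return leading_zeros;
    }

    int main() {
      const uint32_t samples[] = { 0u, 1u, 0x80000000u, 0x00f00000u };
      for (uint32_t v : samples) {
        std::printf("clz(0x%08x) = %d\n", v, CountLeadingZeros(v));  // 32, 31, 0, 8
      }
      return 0;
    }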
- __ mov(r0, Operand(Handle<Object>(value))); - __ Ret(); -} - - -void StubCompiler::GenerateLoadCallback(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register name, - Register scratch1, - Register scratch2, - AccessorInfo* callback, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ tst(receiver, Operand(kSmiTagMask)); - __ b(eq, miss_label); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - - // Push the arguments on the JS stack of the caller. - __ push(receiver); // receiver - __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data - __ push(ip); - __ push(name); // name - __ push(reg); // holder - - // Do tail-call to the runtime system. - ExternalReference load_callback_property = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 4); -} - - -void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Smi* lookup_hint, - Register receiver, - Register name, - Register scratch1, - Register scratch2, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ tst(receiver, Operand(kSmiTagMask)); - __ b(eq, miss_label); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - - // Push the arguments on the JS stack of the caller. - __ push(receiver); // receiver - __ push(reg); // holder - __ push(name); // name - __ mov(scratch1, Operand(lookup_hint)); - __ push(scratch1); - - // Do tail-call to the runtime system. - ExternalReference load_ic_property = - ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)); - __ TailCallRuntime(load_ic_property, 4); -} - - void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, Register receiver, Register scratch, @@ -350,6 +247,17 @@ void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm, } +void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, + Register receiver, + Register scratch1, + Register scratch2, + Label* miss_label) { + __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); + __ mov(r0, scratch1); + __ Ret(); +} + + // Generate StoreField code, value is passed in r0 register. // After executing generated code, the receiver_reg and name_reg // may be clobbered. @@ -461,6 +369,147 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { #define __ ACCESS_MASM(masm()) +Register StubCompiler::CheckPrototypes(JSObject* object, + Register object_reg, + JSObject* holder, + Register holder_reg, + Register scratch, + String* name, + Label* miss) { + // Check that the maps haven't changed. + Register result = + masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss); + + // If we've skipped any global objects, it's not enough to verify + // that their maps haven't changed. 
+ while (object != holder) { + if (object->IsGlobalObject()) { + GlobalObject* global = GlobalObject::cast(object); + Object* probe = global->EnsurePropertyCell(name); + if (probe->IsFailure()) { + set_failure(Failure::cast(probe)); + return result; + } + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe); + ASSERT(cell->value()->IsTheHole()); + __ mov(scratch, Operand(Handle<Object>(cell))); + __ ldr(scratch, + FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); + __ cmp(scratch, Operand(Factory::the_hole_value())); + __ b(ne, miss); + } + object = JSObject::cast(object->GetPrototype()); + } + + // Return the register containin the holder. + return result; +} + + +void StubCompiler::GenerateLoadField(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + int index, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + GenerateFastPropertyLoad(masm(), r0, reg, holder, index); + __ Ret(); +} + + +void StubCompiler::GenerateLoadConstant(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + Object* value, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + + // Return the constant value. + __ mov(r0, Operand(Handle<Object>(value))); + __ Ret(); +} + + +void StubCompiler::GenerateLoadCallback(JSObject* object, + JSObject* holder, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + AccessorInfo* callback, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + + // Push the arguments on the JS stack of the caller. + __ push(receiver); // receiver + __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data + __ push(ip); + __ push(name_reg); // name + __ push(reg); // holder + + // Do tail-call to the runtime system. + ExternalReference load_callback_property = + ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); + __ TailCallRuntime(load_callback_property, 4); +} + + +void StubCompiler::GenerateLoadInterceptor(JSObject* object, + JSObject* holder, + Smi* lookup_hint, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ tst(receiver, Operand(kSmiTagMask)); + __ b(eq, miss); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); + + // Push the arguments on the JS stack of the caller. + __ push(receiver); // receiver + __ push(reg); // holder + __ push(name_reg); // name + __ mov(scratch1, Operand(lookup_hint)); + __ push(scratch1); + + // Do tail-call to the runtime system. 
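The hole checks above lean on the new global property cells from this release: a global's value lives behind one extra indirection, so deleting the property just writes "the hole" into the cell, and every stub that cached the cell pointer stays valid. A toy model of that contract, with illustrative types and names that are not V8's:

    #include <cstdio>

    static int the_hole;                        // stand-in for V8's hole sentinel
    struct Cell { const void* value; };         // stand-in for JSGlobalPropertyCell

    static bool LoadGlobal(const Cell& cell, const void** result) {
      if (cell.value == &the_hole) return false;  // property gone: take the miss path
      *result = cell.value;
      return true;
    }

    int main() {
      int forty_two = 42;
      Cell cell = { &forty_two };
      const void* v;
      std::printf("load ok: %d\n", LoadGlobal(cell, &v));
      cell.value = &the_hole;                   // the property was deleted
      std::printf("load ok after delete: %d\n", LoadGlobal(cell, &v));
      return 0;
    }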
+ ExternalReference load_ic_property = + ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)); + __ TailCallRuntime(load_ic_property, 4); +} + + Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { // ----------- S t a t e ------------- // -- r1: function @@ -496,9 +545,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { Object* CallStubCompiler::CompileCallField(Object* object, JSObject* holder, int index, - String* name, - Code::Flags flags) { - ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags)); + String* name) { // ----------- S t a t e ------------- // -- lr: return address // ----------------------------------- @@ -514,7 +561,7 @@ Object* CallStubCompiler::CompileCallField(Object* object, // Do the right check and compute the holder register. Register reg = - masm()->CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss); + CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss); GenerateFastPropertyLoad(masm(), r1, reg, holder, index); // Check that the function really is a function. @@ -540,16 +587,15 @@ Object* CallStubCompiler::CompileCallField(Object* object, __ Jump(ic, RelocInfo::CODE_TARGET); // Return the generated code. - return GetCodeWithFlags(flags, name); + return GetCode(FIELD, name); } Object* CallStubCompiler::CompileCallConstant(Object* object, JSObject* holder, JSFunction* function, - CheckType check, - Code::Flags flags) { - ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags)); + String* name, + CheckType check) { // ----------- S t a t e ------------- // -- lr: return address // ----------------------------------- @@ -572,7 +618,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, switch (check) { case RECEIVER_MAP_CHECK: // Check that the maps haven't changed. - __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss); + CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss); // Patch the receiver on the stack with the global proxy if // necessary. @@ -590,8 +636,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateLoadGlobalFunctionPrototype(masm(), Context::STRING_FUNCTION_INDEX, r2); - __ CheckMaps(JSObject::cast(object->GetPrototype()), - r2, holder, r3, r1, &miss); + CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3, + r1, name, &miss); break; case NUMBER_CHECK: { @@ -606,8 +652,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateLoadGlobalFunctionPrototype(masm(), Context::NUMBER_FUNCTION_INDEX, r2); - __ CheckMaps(JSObject::cast(object->GetPrototype()), - r2, holder, r3, r1, &miss); + CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3, + r1, name, &miss); break; } @@ -623,13 +669,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateLoadGlobalFunctionPrototype(masm(), Context::BOOLEAN_FUNCTION_INDEX, r2); - __ CheckMaps(JSObject::cast(object->GetPrototype()), - r2, holder, r3, r1, &miss); + CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3, + r1, name, &miss); break; } case JSARRAY_HAS_FAST_ELEMENTS_CHECK: - __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss); + CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss); // Make sure object->elements()->map() != Heap::hash_table_map() // Get the elements array of the object. 
__ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset)); @@ -648,6 +694,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). + ASSERT(function->is_compiled()); Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); __ InvokeCode(code, expected, arguments(), @@ -663,7 +710,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, if (function->shared()->name()->IsString()) { function_name = String::cast(function->shared()->name()); } - return GetCodeWithFlags(flags, function_name); + return GetCode(CONSTANT_FUNCTION, function_name); } @@ -687,6 +734,72 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object, } +Object* CallStubCompiler::CompileCallGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name) { + // ----------- S t a t e ------------- + // -- lr: return address + // ----------------------------------- + Label miss; + + __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3); + + // Get the number of arguments. + const int argc = arguments().immediate(); + + // Get the receiver from the stack. + __ ldr(r0, MemOperand(sp, argc * kPointerSize)); + + // If the object is the holder then we know that it's a global + // object which can only happen for contextual calls. In this case, + // the receiver cannot be a smi. + if (object != holder) { + __ tst(r0, Operand(kSmiTagMask)); + __ b(eq, &miss); + } + + // Check that the maps haven't changed. + CheckPrototypes(object, r0, holder, r3, r2, name, &miss); + + // Get the value from the cell. + __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell))); + __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); + + // Check that the cell contains the same function. + __ cmp(r1, Operand(Handle<JSFunction>(function))); + __ b(ne, &miss); + + // Patch the receiver on the stack with the global proxy if + // necessary. + if (object->IsGlobalObject()) { + __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); + __ str(r3, MemOperand(sp, argc * kPointerSize)); + } + + // Setup the context (function already in r1). + __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); + + // Jump to the cached code (tail call). + ASSERT(function->is_compiled()); + Handle<Code> code(function->code()); + ParameterCount expected(function->shared()->formal_parameter_count()); + __ InvokeCode(code, expected, arguments(), + RelocInfo::CODE_TARGET, JUMP_FUNCTION); + + // Handle call cache miss. + __ bind(&miss); + __ DecrementCounter(&Counters::call_global_inline, 1, r1, r3); + __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3); + Handle<Code> ic = ComputeCallMiss(arguments().immediate()); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. 
+ return GetCode(NORMAL, name); +} + + Object* StoreStubCompiler::CompileStoreField(JSObject* object, int index, Map* transition, @@ -827,6 +940,43 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, } +Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, + JSGlobalPropertyCell* cell, + String* name) { + // ----------- S t a t e ------------- + // -- r0 : value + // -- r2 : name + // -- lr : return address + // -- [sp] : receiver + // ----------------------------------- + Label miss; + + __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3); + + // Check that the map of the global has not changed. + __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); + __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ cmp(r3, Operand(Handle<Map>(object->map()))); + __ b(ne, &miss); + + // Store the value in the cell. + __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell))); + __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + + __ Ret(); + + // Handle store cache miss. + __ bind(&miss); + __ DecrementCounter(&Counters::named_store_global_inline, 1, r1, r3); + __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, name); +} + + Object* LoadStubCompiler::CompileLoadField(JSObject* object, JSObject* holder, int index, @@ -840,7 +990,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, __ ldr(r0, MemOperand(sp, 0)); - GenerateLoadField(masm(), object, holder, r0, r3, r1, index, &miss); + GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -861,7 +1011,7 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object, Label miss; __ ldr(r0, MemOperand(sp, 0)); - GenerateLoadCallback(masm(), object, holder, r0, r2, r3, r1, callback, &miss); + GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -883,7 +1033,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, __ ldr(r0, MemOperand(sp, 0)); - GenerateLoadConstant(masm(), object, holder, r0, r3, r1, value, &miss); + GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -904,14 +1054,14 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object, __ ldr(r0, MemOperand(sp, 0)); - GenerateLoadInterceptor(masm(), - object, + GenerateLoadInterceptor(object, holder, holder->InterceptorPropertyLookupHint(name), r0, r2, r3, r1, + name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -921,6 +1071,56 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object, } +Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + String* name, + bool is_dont_delete) { + // ----------- S t a t e ------------- + // -- r2 : name + // -- lr : return address + // -- [sp] : receiver + // ----------------------------------- + Label miss; + + __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3); + + // Get the receiver from the stack. + __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); + + // If the object is the holder then we know that it's a global + // object which can only happen for contextual calls. In this case, + // the receiver cannot be a smi. 
+ if (object != holder) { + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &miss); + } + + // Check that the map of the global has not changed. + CheckPrototypes(object, r1, holder, r3, r0, name, &miss); + + // Get the value from the cell. + __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell))); + __ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); + + // Check for deleted property if property can actually be deleted. + if (!is_dont_delete) { + __ cmp(r0, Operand(Factory::the_hole_value())); + __ b(eq, &miss); + } + + __ Ret(); + + __ bind(&miss); + __ DecrementCounter(&Counters::named_load_global_inline, 1, r1, r3); + __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(NORMAL, name); +} + + // TODO(1224671): IC stubs for keyed loads have not been implemented // for ARM. Object* KeyedLoadStubCompiler::CompileLoadField(String* name, @@ -940,7 +1140,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, __ cmp(r2, Operand(Handle<String>(name))); __ b(ne, &miss); - GenerateLoadField(masm(), receiver, holder, r0, r3, r1, index, &miss); + GenerateLoadField(receiver, holder, r0, r3, r1, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -965,8 +1165,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, __ cmp(r2, Operand(Handle<String>(name))); __ b(ne, &miss); - GenerateLoadCallback(masm(), receiver, holder, r0, r2, r3, - r1, callback, &miss); + GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -992,7 +1191,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ cmp(r2, Operand(Handle<String>(name))); __ b(ne, &miss); - GenerateLoadConstant(masm(), receiver, holder, r0, r3, r1, value, &miss); + GenerateLoadConstant(receiver, holder, r0, r3, r1, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1018,14 +1217,14 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, __ cmp(r2, Operand(Handle<String>(name))); __ b(ne, &miss); - GenerateLoadInterceptor(masm(), - receiver, + GenerateLoadInterceptor(receiver, holder, Smi::FromInt(JSObject::kLookupInHolder), r0, r2, r3, r1, + name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 7b7778c04..5d0310df5 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -608,6 +608,22 @@ static double mul_two_doubles(double x, double y) { } +static double div_two_doubles(double x, double y) { + return x / y; +} + + +static double mod_two_doubles(double x, double y) { + return fmod(x, y); +} + + +static int native_compare_doubles(double x, double y) { + if (x == y) return 0; + return x < y ? 
1 : -1; +} + + ExternalReference ExternalReference::double_fp_operation( Token::Value operation) { typedef double BinaryFPOperation(double x, double y); @@ -622,6 +638,12 @@ ExternalReference ExternalReference::double_fp_operation( case Token::MUL: function = &mul_two_doubles; break; + case Token::DIV: + function = &div_two_doubles; + break; + case Token::MOD: + function = &mod_two_doubles; + break; default: UNREACHABLE(); } @@ -630,6 +652,12 @@ ExternalReference ExternalReference::double_fp_operation( } +ExternalReference ExternalReference::compare_doubles() { + return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles), + false)); +} + + ExternalReferenceRedirector* ExternalReference::redirector_ = NULL; diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 979dd90f3..879ee5410 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -413,6 +413,7 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference new_space_allocation_limit_address(); static ExternalReference double_fp_operation(Token::Value operation); + static ExternalReference compare_doubles(); Address address() const {return reinterpret_cast<Address>(address_);} diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 15d762f05..64d61cca3 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -1575,16 +1575,10 @@ class RegExpQuantifier: public RegExpTree { }; -enum CaptureAvailability { - CAPTURE_AVAILABLE, - CAPTURE_UNREACHABLE, - CAPTURE_PERMANENTLY_UNREACHABLE -}; - class RegExpCapture: public RegExpTree { public: explicit RegExpCapture(RegExpTree* body, int index) - : body_(body), index_(index), available_(CAPTURE_AVAILABLE) { } + : body_(body), index_(index) { } virtual void* Accept(RegExpVisitor* visitor, void* data); virtual RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success); @@ -1600,16 +1594,11 @@ class RegExpCapture: public RegExpTree { virtual int max_match() { return body_->max_match(); } RegExpTree* body() { return body_; } int index() { return index_; } - inline CaptureAvailability available() { return available_; } - inline void set_available(CaptureAvailability availability) { - available_ = availability; - } static int StartRegister(int index) { return index * 2; } static int EndRegister(int index) { return index * 2 + 1; } private: RegExpTree* body_; int index_; - CaptureAvailability available_; }; diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 3810c6a19..ad5396ec6 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -134,7 +134,7 @@ void Bootstrapper::TearDown() { } -// Pending fixups are code positions that have refer to builtin code +// Pending fixups are code positions that refer to builtin code // objects that were not available at the time the code was generated. // The pending list is processed whenever an environment has been // created. 
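mod_two_doubles above delegates to fmod, which is the right primitive: JS % is a truncating remainder that keeps the sign of the dividend, exactly the "-3 % 4 == -3" behaviour the Smi fast path documents earlier in this patch. A quick check:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("fmod(-3, 4)  = %g\n", std::fmod(-3.0, 4.0));   // -3
      std::printf("fmod(3, -4)  = %g\n", std::fmod(3.0, -4.0));   // 3
      std::printf("fmod(5.5, 2) = %g\n", std::fmod(5.5, 2.0));    // 1.5
      return 0;
    }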
@@ -216,7 +216,6 @@ bool PendingFixups::Process(Handle<JSBuiltinsObject> builtins) { *reinterpret_cast<Object**>(pc) = f->code(); } } else { - ASSERT(is_pc_relative); Assembler::set_target_address_at(pc, f->code()->instruction_start()); } @@ -539,7 +538,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template, { // --- G l o b a l --- // Step 1: create a fresh inner JSGlobalObject - Handle<JSGlobalObject> object; + Handle<GlobalObject> object; { Handle<JSFunction> js_global_function; Handle<ObjectTemplateInfo> js_global_template; @@ -579,9 +578,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template, } js_global_function->initial_map()->set_is_hidden_prototype(); - SetExpectedNofProperties(js_global_function, 100); - object = Handle<JSGlobalObject>::cast( - Factory::NewJSObject(js_global_function, TENURED)); + object = Factory::NewGlobalObject(js_global_function); } // Set the global context for the global object. @@ -963,12 +960,10 @@ bool Genesis::InstallNatives() { Handle<String> name = Factory::LookupAsciiSymbol("builtins"); builtins_fun->shared()->set_instance_class_name(*name); - SetExpectedNofProperties(builtins_fun, 100); // Allocate the builtins object. Handle<JSBuiltinsObject> builtins = - Handle<JSBuiltinsObject>::cast(Factory::NewJSObject(builtins_fun, - TENURED)); + Handle<JSBuiltinsObject>::cast(Factory::NewGlobalObject(builtins_fun)); builtins->set_builtins(*builtins); builtins->set_global_context(*global_context()); builtins->set_global_receiver(*builtins); @@ -1113,8 +1108,8 @@ bool Genesis::InstallNatives() { } #ifdef V8_HOST_ARCH_64_BIT - // TODO(X64): Remove this test when code generation works and is stable. - CodeGenerator::TestCodeGenerator(); + // TODO(X64): Remove this when inline caches work. + FLAG_use_ic = false; #endif // V8_HOST_ARCH_64_BIT @@ -1191,10 +1186,6 @@ bool Genesis::InstallNatives() { apply->shared()->set_length(2); } - // Make sure that the builtins object has fast properties. - // If the ASSERT below fails, please increase the expected number of - // properties for the builtins object. - ASSERT(builtins->HasFastProperties()); #ifdef DEBUG builtins->Verify(); #endif @@ -1214,6 +1205,15 @@ bool Genesis::InstallSpecialObjects() { Handle<JSObject>(js_global->builtins()), DONT_ENUM); } + Handle<Object> Error = GetProperty(js_global, "Error"); + if (Error->IsJSObject()) { + Handle<String> name = Factory::LookupAsciiSymbol("stackTraceLimit"); + SetProperty(Handle<JSObject>::cast(Error), + name, + Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)), + NONE); + } + #ifdef ENABLE_DEBUGGER_SUPPORT // Expose the debug global object in global if a name for it is specified. if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) { @@ -1373,43 +1373,35 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, if (from->HasFastProperties()) { Handle<DescriptorArray> descs = Handle<DescriptorArray>(from->map()->instance_descriptors()); - int offset = 0; - while (true) { - // Iterating through the descriptors is not gc safe so we have to - // store the value in a handle and create a new stream for each entry. - DescriptorReader stream(*descs, offset); - if (stream.eos()) break; - // We have to read out the next offset before we do anything that may - // cause a gc, since the DescriptorReader is not gc safe. 
- offset = stream.next_position(); - PropertyDetails details = stream.GetDetails(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + PropertyDetails details = PropertyDetails(descs->GetDetails(i)); switch (details.type()) { case FIELD: { HandleScope inner; - Handle<String> key = Handle<String>(stream.GetKey()); - int index = stream.GetFieldIndex(); + Handle<String> key = Handle<String>(descs->GetKey(i)); + int index = descs->GetFieldIndex(i); Handle<Object> value = Handle<Object>(from->FastPropertyAt(index)); SetProperty(to, key, value, details.attributes()); break; } case CONSTANT_FUNCTION: { HandleScope inner; - Handle<String> key = Handle<String>(stream.GetKey()); + Handle<String> key = Handle<String>(descs->GetKey(i)); Handle<JSFunction> fun = - Handle<JSFunction>(stream.GetConstantFunction()); + Handle<JSFunction>(descs->GetConstantFunction(i)); SetProperty(to, key, fun, details.attributes()); break; } case CALLBACKS: { LookupResult result; - to->LocalLookup(stream.GetKey(), &result); + to->LocalLookup(descs->GetKey(i), &result); // If the property is already there we skip it if (result.IsValid()) continue; HandleScope inner; Handle<DescriptorArray> inst_descs = Handle<DescriptorArray>(to->map()->instance_descriptors()); - Handle<String> key = Handle<String>(stream.GetKey()); - Handle<Object> entry = Handle<Object>(stream.GetCallbacksObject()); + Handle<String> key = Handle<String>(descs->GetKey(i)); + Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i)); inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs, key, entry, @@ -1431,8 +1423,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, } } } else { - Handle<Dictionary> properties = - Handle<Dictionary>(from->property_dictionary()); + Handle<StringDictionary> properties = + Handle<StringDictionary>(from->property_dictionary()); int capacity = properties->Capacity(); for (int i = 0; i < capacity; i++) { Object* raw_key(properties->KeyAt(i)); @@ -1445,6 +1437,9 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, // Set the property. Handle<String> key = Handle<String>(String::cast(raw_key)); Handle<Object> value = Handle<Object>(properties->ValueAt(i)); + if (value->IsJSGlobalPropertyCell()) { + value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value()); + } PropertyDetails details = properties->DetailsAt(i); SetProperty(to, key, value, details.attributes()); } @@ -1552,7 +1547,7 @@ Genesis::Genesis(Handle<Object> global_object, // will always do unlinking. previous_ = current_; current_ = this; - result_ = NULL; + result_ = Handle<Context>::null(); // If V8 isn't running and cannot be initialized, just return. if (!V8::IsRunning() && !V8::Initialize(NULL)) return; diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index f4d8ce8ae..9c24c60b7 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -37,8 +37,8 @@ namespace internal { Handle<Code> CodeStub::GetCode() { uint32_t key = GetKey(); - int index = Heap::code_stubs()->FindNumberEntry(key); - if (index == -1) { + int index = Heap::code_stubs()->FindEntry(key); + if (index == NumberDictionary::kNotFound) { HandleScope scope; // Update the static counter each time a new code stub is generated. @@ -80,14 +80,15 @@ Handle<Code> CodeStub::GetCode() { #endif // Update the dictionary and the root in Heap. 
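The CodeStub::GetCode path shown here, continued in the dictionary update that follows, memoizes generated stubs in a heap dictionary keyed by a 32-bit value derived from MajorKey and MinorKey. A toy of that lookup-or-generate shape, with std::map standing in for NumberDictionary and an assumed key packing, since how the two keys share the word is not shown in this hunk:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>

    static uint32_t GetKey(uint32_t major, uint32_t minor) {
      return (minor << 6) | major;                  // assumed packing, illustration only
    }

    static std::map<uint32_t, std::string> code_stubs;  // stands in for Heap::code_stubs()

    static const std::string& GetCode(uint32_t major, uint32_t minor) {
      uint32_t key = GetKey(major, minor);
      std::map<uint32_t, std::string>::iterator it = code_stubs.find(key);
      if (it == code_stubs.end()) {                 // the kNotFound analogue
        it = code_stubs.insert(
            std::make_pair(key, std::string("generated code"))).first;
      }
      return it->second;                            // served from the cache from now on
    }

    int main() {
      GetCode(5, 3);                                // first call generates
      std::printf("%s\n", GetCode(5, 3).c_str());   // second call hits the cache
      return 0;
    }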
- Handle<Dictionary> dict = - Factory::DictionaryAtNumberPut(Handle<Dictionary>(Heap::code_stubs()), - key, - code); - Heap::set_code_stubs(*dict); - index = Heap::code_stubs()->FindNumberEntry(key); + Handle<NumberDictionary> dict = + Factory::DictionaryAtNumberPut( + Handle<NumberDictionary>(Heap::code_stubs()), + key, + code); + Heap::public_set_code_stubs(*dict); + index = Heap::code_stubs()->FindEntry(key); } - ASSERT(index != -1); + ASSERT(index != NumberDictionary::kNotFound); return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index))); } @@ -133,6 +134,10 @@ const char* CodeStub::MajorName(CodeStub::Major major_key) { return "InvokeBuiltin"; case JSExit: return "JSExit"; + case ConvertToDouble: + return "ConvertToDouble"; + case WriteInt32ToHeapNumber: + return "WriteInt32ToHeapNumber"; default: UNREACHABLE(); return NULL; diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index ad5b1eaf7..b7297d7ce 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -416,8 +416,10 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = { {&CodeGenerator::GenerateIsSmi, "_IsSmi"}, {&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"}, {&CodeGenerator::GenerateIsArray, "_IsArray"}, + {&CodeGenerator::GenerateIsConstructCall, "_IsConstructCall"}, {&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"}, {&CodeGenerator::GenerateArgumentsAccess, "_Arguments"}, + {&CodeGenerator::GenerateClassOf, "_ClassOf"}, {&CodeGenerator::GenerateValueOf, "_ValueOf"}, {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"}, {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"}, diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index fa414d412..243d87ca6 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -77,6 +77,8 @@ enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT }; #include "x64/codegen-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/codegen-arm.h" +#else +#error Unsupported target architecture. #endif #include "register-allocator.h" @@ -249,6 +251,36 @@ class UnarySubStub : public CodeStub { }; +class CompareStub: public CodeStub { + public: + CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } + + void Generate(MacroAssembler* masm); + + private: + Condition cc_; + bool strict_; + + Major MajorKey() { return Compare; } + + int MinorKey(); + + // Branch to the label if the given object isn't a symbol. + void BranchIfNonSymbol(MacroAssembler* masm, + Label* label, + Register object, + Register scratch); + +#ifdef DEBUG + void Print() { + PrintF("CompareStub (cc %d), (strict %s)\n", + static_cast<int>(cc_), + strict_ ? "true" : "false"); + } +#endif +}; + + class CEntryStub : public CodeStub { public: CEntryStub() { } diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index fd706af88..0951af139 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -281,6 +281,7 @@ void CompilationCacheScript::Put(Handle<String> source, HandleScope scope; ASSERT(boilerplate->IsBoilerplate()); Handle<CompilationCacheTable> table = GetTable(0); + // TODO(X64): -fstrict-aliasing causes a problem with table. Fix it. 
CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate)); } diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h index d541fda1b..c7acc2f79 100644 --- a/deps/v8/src/d8-debug.h +++ b/deps/v8/src/d8-debug.h @@ -41,7 +41,7 @@ void HandleDebugEvent(DebugEvent event, Handle<Object> event_data, Handle<Value> data); -// Start the remote debugger connecting to a V8 debugger agent on the specified +// Start the remove debugger connecting to a V8 debugger agent on the specified // port. void RunRemoteDebugger(int port); diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc index 3a091f93c..fe130cef7 100644 --- a/deps/v8/src/d8-posix.cc +++ b/deps/v8/src/d8-posix.cc @@ -370,7 +370,11 @@ static Handle<Value> GetStdout(int child_fd, // whether it exited normally. In the common case this doesn't matter because // we don't get here before the child has closed stdout and most programs don't // do that before they exit. -#if defined(WNOWAIT) && !defined(ANDROID) +// +// We're disabling usage of waitid in Mac OS X because it doens't work for us: +// a parent process hangs on waiting while a child process is already a zombie. +// See http://code.google.com/p/v8/issues/detail?id=401. +#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) #define HAS_WAITID 1 #endif diff --git a/deps/v8/src/date-delay.js b/deps/v8/src/date-delay.js index 0a89783b4..6adde464f 100644 --- a/deps/v8/src/date-delay.js +++ b/deps/v8/src/date-delay.js @@ -28,7 +28,6 @@ // This file relies on the fact that the following declarations have been made // in v8natives.js: -// const $isNaN = GlobalIsNaN; // const $isFinite = GlobalIsFinite; // ------------------------------------------------------------------- @@ -41,9 +40,14 @@ // changes to these properties. const $Date = global.Date; +// Helper function to throw error. +function ThrowDateTypeError() { + throw new $TypeError('this is not a Date object.'); +} + // ECMA 262 - 15.9.1.2 function Day(time) { - return FLOOR(time/msPerDay); + return FLOOR(time / msPerDay); } @@ -232,7 +236,7 @@ function WeekDay(time) { var local_time_offset = %DateLocalTimeOffset(); function LocalTime(time) { - if ($isNaN(time)) return time; + if (NUMBER_IS_NAN(time)) return time; return time + local_time_offset + DaylightSavingsOffset(time); } @@ -242,7 +246,7 @@ function LocalTimeNoCheck(time) { function UTC(time) { - if ($isNaN(time)) return time; + if (NUMBER_IS_NAN(time)) return time; var tmp = time - local_time_offset; return tmp - DaylightSavingsOffset(tmp); } @@ -423,30 +427,61 @@ function TimeClip(time) { } +// The Date cache is used to limit the cost of parsing the same Date +// strings over and over again. +var Date_cache = { + // Cached time value. + time: $NaN, + // Cached year when interpreting the time as a local time. Only + // valid when the time matches cached time. + year: $NaN, + // String input for which the cached time is valid. + string: null +}; + + %SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) { - if (%IsConstructCall()) { - // ECMA 262 - 15.9.3 - var argc = %_ArgumentsLength(); - if (argc == 0) { - %_SetValueOf(this, %DateCurrentTime()); - return; - } - if (argc == 1) { + if (!%_IsConstructCall()) { + // ECMA 262 - 15.9.2 + return (new $Date()).toString(); + } + + // ECMA 262 - 15.9.3 + var argc = %_ArgumentsLength(); + var value; + if (argc == 0) { + value = %DateCurrentTime(); + + } else if (argc == 1) { + if (IS_NUMBER(year)) { + value = TimeClip(year); + + } else if (IS_STRING(year)) { + // Probe the Date cache. 
If we already have a time value for the + // given time, we re-use that instead of parsing the string again. + var cache = Date_cache; + if (cache.string === year) { + value = cache.time; + } else { + value = DateParse(year); + cache.time = value; + cache.year = YearFromTime(LocalTimeNoCheck(value)); + cache.string = year; + } + + } else { // According to ECMA 262, no hint should be given for this - // conversion. However, ToPrimitive defaults to String Hint - // for Date objects which will lose precision when the Date + // conversion. However, ToPrimitive defaults to STRING_HINT for + // Date objects which will lose precision when the Date // constructor is called with another Date object as its - // argument. We therefore use Number Hint for the conversion - // (which is the default for everything else than Date - // objects). This makes us behave like KJS and SpiderMonkey. + // argument. We therefore use NUMBER_HINT for the conversion, + // which is the default for everything else than Date objects. + // This makes us behave like KJS and SpiderMonkey. var time = ToPrimitive(year, NUMBER_HINT); - if (IS_STRING(time)) { - %_SetValueOf(this, DateParse(time)); - } else { - %_SetValueOf(this, TimeClip(ToNumber(time))); - } - return; + value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time)); } + + } else { year = ToNumber(year); month = ToNumber(month); date = argc > 2 ? ToNumber(date) : 1; @@ -454,120 +489,118 @@ function TimeClip(time) { minutes = argc > 4 ? ToNumber(minutes) : 0; seconds = argc > 5 ? ToNumber(seconds) : 0; ms = argc > 6 ? ToNumber(ms) : 0; - year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) + year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year; var day = MakeDay(year, month, date); var time = MakeTime(hours, minutes, seconds, ms); - %_SetValueOf(this, TimeClip(UTC(MakeDate(day, time)))); - } else { - // ECMA 262 - 15.9.2 - return (new $Date()).toString(); + value = TimeClip(UTC(MakeDate(day, time))); } + %_SetValueOf(this, value); }); // Helper functions. 
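The Date cache introduced above is a one-entry memo: constructing new Date(s) twice with the same string skips the second parse. The same shape in a host-side sketch; ParseDate is a stand-in for the real parser, and the JS version compares strings with ===, which strcmp approximates here:

    #include <cstdio>
    #include <cstring>

    static double ParseDate(const char* s) {
      return 946684800000.0;                  // fake epoch-ms value for the demo
    }

    static struct {
      const char* string;                     // input the cached time is valid for
      double time;                            // cached time value
    } date_cache = { 0, 0.0 };

    static double DateValue(const char* s) {
      if (date_cache.string != 0 && std::strcmp(date_cache.string, s) == 0) {
        return date_cache.time;               // cache hit: no reparse
      }
      date_cache.time = ParseDate(s);         // miss: parse and remember
      date_cache.string = s;
      return date_cache.time;
    }

    int main() {
      std::printf("%.0f\n", DateValue("Jan 1 2000"));
      std::printf("%.0f (cached)\n", DateValue("Jan 1 2000"));
      return 0;
    }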
function GetTimeFrom(aDate) { - if (IS_DATE(aDate)) return %_ValueOf(aDate); - throw new $TypeError('this is not a Date object.'); + return DATE_VALUE(aDate); } function GetMillisecondsFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return msFromTime(LocalTimeNoCheck(t)); } function GetUTCMillisecondsFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return msFromTime(t); } function GetSecondsFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return SecFromTime(LocalTimeNoCheck(t)); } function GetUTCSecondsFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return SecFromTime(t); } function GetMinutesFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return MinFromTime(LocalTimeNoCheck(t)); } function GetUTCMinutesFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return MinFromTime(t); } function GetHoursFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return HourFromTime(LocalTimeNoCheck(t)); } function GetUTCHoursFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return HourFromTime(t); } function GetFullYearFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; - // Ignore the DST offset for year computations. - return YearFromTime(t + local_time_offset); + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + var cache = Date_cache; + if (cache.time === t) return cache.year; + return YearFromTime(LocalTimeNoCheck(t)); } function GetUTCFullYearFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return YearFromTime(t); } function GetMonthFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return MonthFromTime(LocalTimeNoCheck(t)); } function GetUTCMonthFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return MonthFromTime(t); } function GetDateFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return DateFromTime(LocalTimeNoCheck(t)); } function GetUTCDateFrom(aDate) { - var t = GetTimeFrom(aDate); - if ($isNaN(t)) return t; + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; return DateFromTime(t); } @@ -629,7 +662,7 @@ function DatePrintString(time) { // ------------------------------------------------------------------- -// Reused output buffer. +// Reused output buffer. Used when parsing date strings. var parse_buffer = $Array(7); // ECMA 262 - 15.9.4.2 @@ -659,7 +692,7 @@ function DateUTC(year, month, date, hours, minutes, seconds, ms) { minutes = argc > 4 ? ToNumber(minutes) : 0; seconds = argc > 5 ? ToNumber(seconds) : 0; ms = argc > 6 ? 
ToNumber(ms) : 0; - year = (!$isNaN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) + year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year; var day = MakeDay(year, month, date); var time = MakeTime(hours, minutes, seconds, ms); @@ -676,24 +709,24 @@ function DateNow() { // ECMA 262 - 15.9.5.2 function DateToString() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return kInvalidDate; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t); } // ECMA 262 - 15.9.5.3 function DateToDateString() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return kInvalidDate; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; return DateString(LocalTimeNoCheck(t)); } // ECMA 262 - 15.9.5.4 function DateToTimeString() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return kInvalidDate; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; var lt = LocalTimeNoCheck(t); return TimeString(lt) + LocalTimezoneString(lt); } @@ -707,16 +740,16 @@ function DateToLocaleString() { // ECMA 262 - 15.9.5.6 function DateToLocaleDateString() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return kInvalidDate; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; return LongDateString(LocalTimeNoCheck(t)); } // ECMA 262 - 15.9.5.7 function DateToLocaleTimeString() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return kInvalidDate; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; var lt = LocalTimeNoCheck(t); return TimeString(lt); } @@ -724,13 +757,13 @@ function DateToLocaleTimeString() { // ECMA 262 - 15.9.5.8 function DateValueOf() { - return GetTimeFrom(this); + return DATE_VALUE(this); } // ECMA 262 - 15.9.5.9 function DateGetTime() { - return GetTimeFrom(this); + return DATE_VALUE(this); } @@ -772,16 +805,16 @@ function DateGetUTCDate() { // ECMA 262 - 15.9.5.16 function DateGetDay() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return t; + var t = %_ValueOf(this); + if (NUMBER_IS_NAN(t)) return t; return WeekDay(LocalTimeNoCheck(t)); } // ECMA 262 - 15.9.5.17 function DateGetUTCDay() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return t; + var t = %_ValueOf(this); + if (NUMBER_IS_NAN(t)) return t; return WeekDay(t); } @@ -836,22 +869,22 @@ function DateGetUTCMilliseconds() { // ECMA 262 - 15.9.5.26 function DateGetTimezoneOffset() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return t; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return t; return (t - LocalTimeNoCheck(t)) / msPerMinute; } // ECMA 262 - 15.9.5.27 function DateSetTime(ms) { - if (!IS_DATE(this)) throw new $TypeError('this is not a Date object.'); + if (!IS_DATE(this)) ThrowDateTypeError(); return %_SetValueOf(this, TimeClip(ToNumber(ms))); } // ECMA 262 - 15.9.5.28 function DateSetMilliseconds(ms) { - var t = LocalTime(GetTimeFrom(this)); + var t = LocalTime(DATE_VALUE(this)); ms = ToNumber(ms); var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms); return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time)))); @@ -860,7 +893,7 @@ function DateSetMilliseconds(ms) { // ECMA 262 - 15.9.5.29 function DateSetUTCMilliseconds(ms) { - var t = GetTimeFrom(this); + var t = DATE_VALUE(this); ms = ToNumber(ms); var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms); return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time))); @@ -869,7 +902,7 @@ function 
DateSetUTCMilliseconds(ms) { // ECMA 262 - 15.9.5.30 function DateSetSeconds(sec, ms) { - var t = LocalTime(GetTimeFrom(this)); + var t = LocalTime(DATE_VALUE(this)); sec = ToNumber(sec); ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms); var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms); @@ -879,7 +912,7 @@ function DateSetSeconds(sec, ms) { // ECMA 262 - 15.9.5.31 function DateSetUTCSeconds(sec, ms) { - var t = GetTimeFrom(this); + var t = DATE_VALUE(this); sec = ToNumber(sec); ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms); var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms); @@ -889,7 +922,7 @@ function DateSetUTCSeconds(sec, ms) { // ECMA 262 - 15.9.5.32 function DateSetMinutes(min, sec, ms) { - var t = LocalTime(GetTimeFrom(this)); + var t = LocalTime(DATE_VALUE(this)); min = ToNumber(min); var argc = %_ArgumentsLength(); sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec); @@ -901,7 +934,7 @@ function DateSetMinutes(min, sec, ms) { // ECMA 262 - 15.9.5.33 function DateSetUTCMinutes(min, sec, ms) { - var t = GetTimeFrom(this); + var t = DATE_VALUE(this); min = ToNumber(min); var argc = %_ArgumentsLength(); sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec); @@ -913,7 +946,7 @@ function DateSetUTCMinutes(min, sec, ms) { // ECMA 262 - 15.9.5.34 function DateSetHours(hour, min, sec, ms) { - var t = LocalTime(GetTimeFrom(this)); + var t = LocalTime(DATE_VALUE(this)); hour = ToNumber(hour); var argc = %_ArgumentsLength(); min = argc < 2 ? GetMinutesFrom(this) : ToNumber(min); @@ -926,7 +959,7 @@ function DateSetHours(hour, min, sec, ms) { // ECMA 262 - 15.9.5.35 function DateSetUTCHours(hour, min, sec, ms) { - var t = GetTimeFrom(this); + var t = DATE_VALUE(this); hour = ToNumber(hour); var argc = %_ArgumentsLength(); min = argc < 2 ? GetUTCMinutesFrom(this) : ToNumber(min); @@ -939,7 +972,7 @@ function DateSetUTCHours(hour, min, sec, ms) { // ECMA 262 - 15.9.5.36 function DateSetDate(date) { - var t = LocalTime(GetTimeFrom(this)); + var t = LocalTime(DATE_VALUE(this)); date = ToNumber(date); var day = MakeDay(YearFromTime(t), MonthFromTime(t), date); return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t))))); @@ -948,7 +981,7 @@ function DateSetDate(date) { // ECMA 262 - 15.9.5.37 function DateSetUTCDate(date) { - var t = GetTimeFrom(this); + var t = DATE_VALUE(this); date = ToNumber(date); var day = MakeDay(YearFromTime(t), MonthFromTime(t), date); return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t)))); @@ -957,7 +990,7 @@ function DateSetUTCDate(date) { // ECMA 262 - 15.9.5.38 function DateSetMonth(month, date) { - var t = LocalTime(GetTimeFrom(this)); + var t = LocalTime(DATE_VALUE(this)); month = ToNumber(month); date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date); var day = MakeDay(YearFromTime(t), month, date); @@ -967,7 +1000,7 @@ function DateSetMonth(month, date) { // ECMA 262 - 15.9.5.39 function DateSetUTCMonth(month, date) { - var t = GetTimeFrom(this); + var t = DATE_VALUE(this); month = ToNumber(month); date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date); var day = MakeDay(YearFromTime(t), month, date); @@ -977,8 +1010,8 @@ function DateSetUTCMonth(month, date) { // ECMA 262 - 15.9.5.40 function DateSetFullYear(year, month, date) { - var t = GetTimeFrom(this); - t = $isNaN(t) ? 0 : LocalTimeNoCheck(t); + var t = DATE_VALUE(this); + t = NUMBER_IS_NAN(t) ?
0 : LocalTimeNoCheck(t); year = ToNumber(year); var argc = %_ArgumentsLength(); month = argc < 2 ? MonthFromTime(t) : ToNumber(month); @@ -990,8 +1023,8 @@ function DateSetFullYear(year, month, date) { // ECMA 262 - 15.9.5.41 function DateSetUTCFullYear(year, month, date) { - var t = GetTimeFrom(this); - if ($isNaN(t)) t = 0; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) t = 0; var argc = %_ArgumentsLength(); year = ToNumber(year); month = argc < 2 ? MonthFromTime(t) : ToNumber(month); @@ -1003,8 +1036,8 @@ function DateSetUTCFullYear(year, month, date) { // ECMA 262 - 15.9.5.42 function DateToUTCString() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return kInvalidDate; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT return WeekDays[WeekDay(t)] + ', ' + TwoDigitString(DateFromTime(t)) + ' ' @@ -1016,18 +1049,18 @@ function DateToUTCString() { // ECMA 262 - B.2.4 function DateGetYear() { - var t = GetTimeFrom(this); - if ($isNaN(t)) return $NaN; + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return $NaN; return YearFromTime(LocalTimeNoCheck(t)) - 1900; } // ECMA 262 - B.2.5 function DateSetYear(year) { - var t = LocalTime(GetTimeFrom(this)); - if ($isNaN(t)) t = 0; + var t = LocalTime(DATE_VALUE(this)); + if (NUMBER_IS_NAN(t)) t = 0; year = ToNumber(year); - if ($isNaN(year)) return %_SetValueOf(this, $NaN); + if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN); year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year; var day = MakeDay(year, MonthFromTime(t), DateFromTime(t)); diff --git a/deps/v8/src/debug-delay.js b/deps/v8/src/debug-delay.js index 423a1185e..4f6085128 100644 --- a/deps/v8/src/debug-delay.js +++ b/deps/v8/src/debug-delay.js @@ -223,7 +223,8 @@ function IsBreakPointTriggered(break_id, break_point) { // Object representing a script break point. The script is referenced by its // script name or script id and the break point is represented as line and // column. -function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column) { +function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column, + opt_groupId) { this.type_ = type; if (type == Debug.ScriptBreakPointType.ScriptId) { this.script_id_ = script_id_or_name; @@ -232,6 +233,7 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column) { } this.line_ = opt_line || 0; this.column_ = opt_column; + this.groupId_ = opt_groupId; this.hit_count_ = 0; this.active_ = true; this.condition_ = null; @@ -244,6 +246,11 @@ ScriptBreakPoint.prototype.number = function() { }; +ScriptBreakPoint.prototype.groupId = function() { + return this.groupId_; +}; + + ScriptBreakPoint.prototype.type = function() { return this.type_; }; @@ -611,10 +618,12 @@ Debug.findScriptBreakPoint = function(break_point_number, remove) { // Sets a breakpoint in a script identified through id or name at the // specified source line and column within that line. Debug.setScriptBreakPoint = function(type, script_id_or_name, - opt_line, opt_column, opt_condition) { + opt_line, opt_column, opt_condition, + opt_groupId) { // Create script break point object. var script_break_point = - new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column); + new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column, + opt_groupId); // Assign number to the new script break point and add it. 
script_break_point.number_ = next_break_point_number++; @@ -636,19 +645,19 @@ Debug.setScriptBreakPoint = function(type, script_id_or_name, Debug.setScriptBreakPointById = function(script_id, opt_line, opt_column, - opt_condition) { + opt_condition, opt_groupId) { return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, script_id, opt_line, opt_column, - opt_condition) + opt_condition, opt_groupId); } Debug.setScriptBreakPointByName = function(script_name, opt_line, opt_column, - opt_condition) { + opt_condition, opt_groupId) { return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName, script_name, opt_line, opt_column, - opt_condition) + opt_condition, opt_groupId); } @@ -991,7 +1000,6 @@ CompileEvent.prototype.toJSONProtocol = function() { } o.body = {}; o.body.script = this.script_; - o.setOption('includeSource', true); return o.toJSONProtocol(); } @@ -1211,6 +1219,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) this.changeBreakPointRequest_(request, response); } else if (request.command == 'clearbreakpoint') { this.clearBreakPointRequest_(request, response); + } else if (request.command == 'clearbreakpointgroup') { + this.clearBreakPointGroupRequest_(request, response); } else if (request.command == 'backtrace') { this.backtraceRequest_(request, response); } else if (request.command == 'frame') { @@ -1326,6 +1336,7 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ = true : request.arguments.enabled; var condition = request.arguments.condition; var ignoreCount = request.arguments.ignoreCount; + var groupId = request.arguments.groupId; // Check for legal arguments. if (!type || IS_UNDEFINED(target)) { @@ -1379,10 +1390,11 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ = } else if (type == 'script') { // set script break point. break_point_number = - Debug.setScriptBreakPointByName(target, line, column, condition); + Debug.setScriptBreakPointByName(target, line, column, condition, + groupId); } else { // type == 'scriptId'. break_point_number = - Debug.setScriptBreakPointById(target, line, column, condition); + Debug.setScriptBreakPointById(target, line, column, condition, groupId); } // Set additional break point properties. @@ -1455,6 +1467,40 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, res } +DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) { + // Check for legal request. + if (!request.arguments) { + response.failed('Missing arguments'); + return; + } + + // Pull out arguments. + var group_id = request.arguments.groupId; + + // Check for legal arguments. + if (!group_id) { + response.failed('Missing argument "groupId"'); + return; + } + + var cleared_break_points = []; + var new_script_break_points = []; + for (var i = 0; i < script_break_points.length; i++) { + var next_break_point = script_break_points[i]; + if (next_break_point.groupId() == group_id) { + cleared_break_points.push(next_break_point.number()); + next_break_point.clear(); + } else { + new_script_break_points.push(next_break_point); + } + } + script_break_points = new_script_break_points; + + // Add the cleared break point numbers to the response. + response.body = { breakpoints: cleared_break_points }; +} + + DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) { // Check for legal request.
if (!request.arguments) { diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index e37bfb77c..52be9301f 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -1260,6 +1260,7 @@ void Debug::SetBreak(StackFrame::Id break_frame_id, int break_id) { // Handle stepping into a function. void Debug::HandleStepIn(Handle<JSFunction> function, + Handle<Object> holder, Address fp, bool is_constructor) { // If the frame pointer is not supplied by the caller find it. @@ -1285,21 +1286,12 @@ void Debug::HandleStepIn(Handle<JSFunction> function, Builtins::builtin(Builtins::FunctionCall)) { // Handle function.apply and function.call separately to flood the // function to be called and not the code for Builtins::FunctionApply or - // Builtins::FunctionCall. At the point of the call IC to call either - // Builtins::FunctionApply or Builtins::FunctionCall the expression - // stack has the following content: - // symbol "apply" or "call" - // function apply or call was called on - // receiver for apply or call (first parameter to apply or call) - // ... further arguments to apply or call. - JavaScriptFrameIterator it; - ASSERT(it.frame()->fp() == fp); - ASSERT(it.frame()->GetExpression(1)->IsJSFunction()); - if (it.frame()->GetExpression(1)->IsJSFunction()) { - Handle<JSFunction> - actual_function(JSFunction::cast(it.frame()->GetExpression(1))); - Handle<SharedFunctionInfo> actual_shared(actual_function->shared()); - Debug::FloodWithOneShot(actual_shared); + // Builtins::FunctionCall. The receiver of call/apply is the target + // function. + if (!holder.is_null() && holder->IsJSFunction()) { + Handle<SharedFunctionInfo> shared_info( + JSFunction::cast(*holder)->shared()); + Debug::FloodWithOneShot(shared_info); } } else { Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared())); diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index a1abceda8..970dbbe5c 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -270,6 +270,7 @@ class Debug { static bool StepInActive() { return thread_local_.step_into_fp_ != 0; } static void HandleStepIn(Handle<JSFunction> function, + Handle<Object> holder, Address fp, bool is_constructor); static Address step_in_fp() { return thread_local_.step_into_fp_; } @@ -363,6 +364,10 @@ class Debug { static const int kIa32CallInstructionLength = 5; static const int kIa32JSReturnSequenceLength = 6; + // The x64 JS return sequence is padded with int3 to make it large + // enough to hold a call instruction when the debugger patches it. + static const int kX64JSReturnSequenceLength = 13; + // Code generator routines. 
static void GenerateLoadICDebugBreak(MacroAssembler* masm); static void GenerateStoreICDebugBreak(MacroAssembler* masm); diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc index 95022d052..e2f908d7b 100644 --- a/deps/v8/src/disassembler.cc +++ b/deps/v8/src/disassembler.cc @@ -239,6 +239,13 @@ static int DecodeIt(FILE* f, InlineCacheState ic_state = code->ic_state(); out.AddFormatted(" %s, %s", Code::Kind2String(kind), Code::ICState2String(ic_state)); + if (ic_state == MONOMORPHIC) { + PropertyType type = code->type(); + out.AddFormatted(", %s", Code::PropertyType2String(type)); + } + if (code->ic_in_loop() == IN_LOOP) { + out.AddFormatted(", in_loop"); + } if (kind == Code::CALL_IC) { out.AddFormatted(", argc = %d", code->arguments_count()); } diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index fa3c2ecb2..adc18725e 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -38,6 +38,8 @@ #include "x64/simulator-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/simulator-arm.h" +#else +#error Unsupported target architecture. #endif #include "debug.h" diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index fe19873ab..1045a4c3c 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -49,9 +49,17 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size) { } -Handle<Dictionary> Factory::NewDictionary(int at_least_space_for) { +Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) { ASSERT(0 <= at_least_space_for); - CALL_HEAP_FUNCTION(Dictionary::Allocate(at_least_space_for), Dictionary); + CALL_HEAP_FUNCTION(StringDictionary::Allocate(at_least_space_for), + StringDictionary); +} + + +Handle<NumberDictionary> Factory::NewNumberDictionary(int at_least_space_for) { + ASSERT(0 <= at_least_space_for); + CALL_HEAP_FUNCTION(NumberDictionary::Allocate(at_least_space_for), + NumberDictionary); } @@ -562,12 +570,10 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors( int descriptor_count = 0; // Copy the descriptors from the array. - DescriptorWriter w(*result); - for (DescriptorReader r(*array); !r.eos(); r.advance()) { - if (!r.IsNullDescriptor()) { - w.WriteFrom(&r); + for (int i = 0; i < array->number_of_descriptors(); i++) { + if (array->GetType(i) != NULL_DESCRIPTOR) { + result->CopyFrom(descriptor_count++, *array, i); } - descriptor_count++; } // Number of duplicates detected. @@ -586,7 +592,7 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors( if (result->LinearSearch(*key, descriptor_count) == DescriptorArray::kNotFound) { CallbacksDescriptor desc(*key, *entry, entry->property_attributes()); - w.Write(&desc); + result->Set(descriptor_count, &desc); descriptor_count++; } else { duplicates++; @@ -596,13 +602,11 @@ Handle<DescriptorArray> Factory::CopyAppendCallbackDescriptors( // If duplicates were detected, allocate a result of the right size // and transfer the elements. 
if (duplicates > 0) { + int number_of_descriptors = result->number_of_descriptors() - duplicates; Handle<DescriptorArray> new_result = - NewDescriptorArray(result->number_of_descriptors() - duplicates); - DescriptorWriter w(*new_result); - DescriptorReader r(*result); - while (!w.eos()) { - w.WriteFrom(&r); - r.advance(); + NewDescriptorArray(number_of_descriptors); + for (int i = 0; i < number_of_descriptors; i++) { + new_result->CopyFrom(i, *result, i); } result = new_result; } @@ -619,6 +623,14 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor, } +Handle<GlobalObject> Factory::NewGlobalObject( + Handle<JSFunction> constructor) { + CALL_HEAP_FUNCTION(Heap::AllocateGlobalObject(*constructor), + GlobalObject); +} + + + Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) { CALL_HEAP_FUNCTION(Heap::AllocateJSObjectFromMap(*map, NOT_TENURED), JSObject); @@ -647,10 +659,11 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) { } -Handle<Dictionary> Factory::DictionaryAtNumberPut(Handle<Dictionary> dictionary, - uint32_t key, - Handle<Object> value) { - CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), Dictionary); +Handle<NumberDictionary> Factory::DictionaryAtNumberPut( + Handle<NumberDictionary> dictionary, + uint32_t key, + Handle<Object> value) { + CALL_HEAP_FUNCTION(dictionary->AtNumberPut(key, *value), NumberDictionary); } diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 95dbee909..0afdd76a4 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -28,6 +28,7 @@ #ifndef V8_FACTORY_H_ #define V8_FACTORY_H_ +#include "globals.h" #include "heap.h" #include "zone-inl.h" @@ -47,7 +48,9 @@ class Factory : public AllStatic { // Allocate a new fixed array with non-existing entries (the hole). static Handle<FixedArray> NewFixedArrayWithHoles(int size); - static Handle<Dictionary> NewDictionary(int at_least_space_for); + static Handle<NumberDictionary> NewNumberDictionary(int at_least_space_for); + + static Handle<StringDictionary> NewStringDictionary(int at_least_space_for); static Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors); @@ -183,6 +186,9 @@ class Factory : public AllStatic { static Handle<JSObject> NewJSObject(Handle<JSFunction> constructor, PretenureFlag pretenure = NOT_TENURED); + // Global objects are pretenured. + static Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor); + // JS objects are pretenured when allocated by the bootstrapper and // runtime. 
static Handle<JSObject> NewJSObjectFromMap(Handle<Map> map); @@ -294,13 +300,19 @@ class Factory : public AllStatic { Handle<JSObject> instance, bool* pending_exception); -#define ROOT_ACCESSOR(type, name) \ - static Handle<type> name() { return Handle<type>(&Heap::name##_); } +#define ROOT_ACCESSOR(type, name, camel_name) \ + static inline Handle<type> name() { \ + return Handle<type>(bit_cast<type**, Object**>( \ + &Heap::roots_[Heap::k##camel_name##RootIndex])); \ + } ROOT_LIST(ROOT_ACCESSOR) #undef ROOT_ACCESSOR #define SYMBOL_ACCESSOR(name, str) \ - static Handle<String> name() { return Handle<String>(&Heap::name##_); } + static inline Handle<String> name() { \ + return Handle<String>(bit_cast<String**, Object**>( \ + &Heap::roots_[Heap::k##name##RootIndex])); \ + } SYMBOL_LIST(SYMBOL_ACCESSOR) #undef SYMBOL_ACCESSOR @@ -310,9 +322,10 @@ class Factory : public AllStatic { static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name); - static Handle<Dictionary> DictionaryAtNumberPut(Handle<Dictionary>, - uint32_t key, - Handle<Object> value); + static Handle<NumberDictionary> DictionaryAtNumberPut( + Handle<NumberDictionary>, + uint32_t key, + Handle<Object> value); #ifdef ENABLE_DEBUGGER_SUPPORT static Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared); diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 8110e1284..9c696ed66 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -110,6 +110,7 @@ DEFINE_string(expose_natives_as, NULL, "expose natives in global object") DEFINE_string(expose_debug_as, NULL, "expose debug in global object") DEFINE_string(natives_file, NULL, "alternative natives file") DEFINE_bool(expose_gc, false, "expose gc extension") +DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture") // builtins-ia32.cc DEFINE_bool(inline_new, true, "use fast inline allocation") @@ -207,8 +208,6 @@ DEFINE_bool(preemption, false, // Regexp DEFINE_bool(trace_regexps, false, "trace regexp execution") -DEFINE_bool(regexp_native, true, - "use native code regexp implementation (IA32 only)") DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") // Testing flags test/cctest/test-{flags,api,serialization}.cc diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h index 0e2adb9b1..b04cf506d 100644 --- a/deps/v8/src/frames-inl.h +++ b/deps/v8/src/frames-inl.h @@ -36,6 +36,8 @@ #include "x64/frames-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/frames-arm.h" +#else +#error Unsupported target architecture. #endif namespace v8 { namespace internal { diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index bf83d0d75..44bd52757 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -198,7 +198,8 @@ class FixedArray; class FunctionEntry; class FunctionLiteral; class FunctionTemplateInfo; -class Dictionary; +class NumberDictionary; +class StringDictionary; class FreeStoreAllocationPolicy; template <typename T> class Handle; class Heap; @@ -256,14 +257,16 @@ typedef bool (*WeakSlotCallback)(Object** pointer); // NOTE: SpaceIterator depends on AllocationSpace enumeration values being // consecutive. enum AllocationSpace { - NEW_SPACE, // Semispaces collected with copying collector. - OLD_POINTER_SPACE, // Must be first of the paged spaces - see PagedSpaces. - OLD_DATA_SPACE, // May not have pointers to new space. - CODE_SPACE, // Also one of the old spaces. Marked executable. - MAP_SPACE, // Only map objects. - LO_SPACE, // Large objects.
+ NEW_SPACE, // Semispaces collected with copying collector. + OLD_POINTER_SPACE, // May contain pointers to new space. + OLD_DATA_SPACE, // Must not have pointers to new space. + CODE_SPACE, // No pointers to new space, marked executable. + MAP_SPACE, // Only and all map objects. + CELL_SPACE, // Only and all cell objects. + LO_SPACE, // Promoted large objects. + FIRST_SPACE = NEW_SPACE, - LAST_SPACE = LO_SPACE // <= 5 (see kSpaceBits and kLOSpacePointer) + LAST_SPACE = LO_SPACE }; const int kSpaceTagSize = 3; const int kSpaceTagMask = (1 << kSpaceTagSize) - 1; diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 44ca60221..510ea95b8 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -289,10 +289,11 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj, // hidden symbol's hash code is zero (and no other string has hash // code zero) it will always occupy the first entry if present. DescriptorArray* descriptors = obj->map()->instance_descriptors(); - DescriptorReader r(descriptors, 0); // Explicitly position reader at zero. - if (!r.eos() && (r.GetKey() == *key) && r.IsProperty()) { - ASSERT(r.type() == FIELD); - return Handle<Object>(obj->FastPropertyAt(r.GetFieldIndex())); + if ((descriptors->number_of_descriptors() > 0) && + (descriptors->GetKey(0) == *key) && + descriptors->IsProperty(0)) { + ASSERT(descriptors->GetType(0) == FIELD); + return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0))); } } @@ -372,10 +373,10 @@ static void ClearWrapperCache(Persistent<v8::Value> handle, void*) { Handle<JSValue> GetScriptWrapper(Handle<Script> script) { - Handle<Object> cache(reinterpret_cast<Object**>(script->wrapper()->proxy())); - if (!cache.is_null()) { + if (script->wrapper()->proxy() != NULL) { // Return the script wrapper directly from the cache. - return Handle<JSValue>(JSValue::cast(*cache)); + return Handle<JSValue>( + reinterpret_cast<JSValue**>(script->wrapper()->proxy())); } // Construct a new script wrapper.
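The handles.cc hunk above, like the factory.cc changes earlier and the GetEnumPropertyKeys hunk that follows, retires the stateful DescriptorReader/DescriptorWriter cursors in favor of plain index-based access to DescriptorArray. A minimal sketch of the new iteration pattern: the accessors number_of_descriptors(), GetType(i), GetKey(i), IsProperty(i), and IsDontEnum(i) are the ones visible in the patch, while the enclosing helper CollectEnumKeys is hypothetical and for illustration only.

// Hypothetical helper showing the index-based descriptor iteration
// this patch switches to; it is not part of the change itself.
static void CollectEnumKeys(DescriptorArray* descs, FixedArray* storage) {
  int index = 0;
  for (int i = 0; i < descs->number_of_descriptors(); i++) {
    // NULL_DESCRIPTOR entries are holes left behind by removed
    // properties; the patched factory.cc code skips them the same way.
    if (descs->GetType(i) == NULL_DESCRIPTOR) continue;
    // Skip non-properties and non-enumerable properties, as the
    // patched GetEnumPropertyKeys does.
    if (!descs->IsProperty(i) || descs->IsDontEnum(i)) continue;
    storage->set(index++, descs->GetKey(i));
  }
}

Compared with the reader/writer objects, the indexed form carries no cursor state, and each call site states its skip conditions explicitly.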
@@ -588,12 +589,13 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) { int num_enum = object->NumberOfEnumProperties(); Handle<FixedArray> storage = Factory::NewFixedArray(num_enum); Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum); - for (DescriptorReader r(object->map()->instance_descriptors()); - !r.eos(); - r.advance()) { - if (r.IsProperty() && !r.IsDontEnum()) { - (*storage)->set(index, r.GetKey()); - (*sort_array)->set(index, Smi::FromInt(r.GetDetails().index())); + Handle<DescriptorArray> descs = + Handle<DescriptorArray>(object->map()->instance_descriptors()); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->IsProperty(i) && !descs->IsDontEnum(i)) { + (*storage)->set(index, descs->GetKey(i)); + PropertyDetails details(descs->GetDetails(i)); + (*sort_array)->set(index, Smi::FromInt(details.index())); index++; } } diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index af638b804..a86dc9632 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -42,7 +42,7 @@ namespace internal { template<class T> class Handle { public: - INLINE(Handle(T** location)) { location_ = location; } + INLINE(Handle(T** location)) { location_ = location; } INLINE(explicit Handle(T* obj)); INLINE(Handle()) : location_(NULL) {} @@ -59,7 +59,7 @@ class Handle { location_ = reinterpret_cast<T**>(handle.location()); } - INLINE(T* operator ->() const) { return operator*(); } + INLINE(T* operator ->() const) { return operator*(); } // Check if this handle refers to the exact same object as the other handle. bool is_identical_to(const Handle<T> other) const { diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 810d3d42a..36c6f4bfa 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -82,6 +82,8 @@ Object* Heap::AllocateRaw(int size_in_bytes, result = code_space_->AllocateRaw(size_in_bytes); } else if (LO_SPACE == space) { result = lo_space_->AllocateRaw(size_in_bytes); + } else if (CELL_SPACE == space) { + result = cell_space_->AllocateRaw(size_in_bytes); } else { ASSERT(MAP_SPACE == space); result = map_space_->AllocateRaw(size_in_bytes); @@ -107,12 +109,23 @@ Object* Heap::NumberFromUint32(uint32_t value) { } -Object* Heap::AllocateRawMap(int size_in_bytes) { +Object* Heap::AllocateRawMap() { #ifdef DEBUG Counters::objs_since_last_full.Increment(); Counters::objs_since_last_young.Increment(); #endif - Object* result = map_space_->AllocateRaw(size_in_bytes); + Object* result = map_space_->AllocateRaw(Map::kSize); + if (result->IsFailure()) old_gen_exhausted_ = true; + return result; +} + + +Object* Heap::AllocateRawCell() { +#ifdef DEBUG + Counters::objs_since_last_full.Increment(); + Counters::objs_since_last_young.Increment(); +#endif + Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize); if (result->IsFailure()) old_gen_exhausted_ = true; return result; } @@ -216,7 +229,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { void Heap::SetLastScriptId(Object* last_script_id) { - last_script_id_ = last_script_id; + roots_[kLastScriptIdRootIndex] = last_script_id; } diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index bf6fccd9f..0af3d90ef 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -43,27 +43,17 @@ namespace v8 { namespace internal { -#define ROOT_ALLOCATION(type, name) type* Heap::name##_; - ROOT_LIST(ROOT_ALLOCATION) -#undef ROOT_ALLOCATION - - -#define STRUCT_ALLOCATION(NAME, Name, name) Map* Heap::name##_map_; - STRUCT_LIST(STRUCT_ALLOCATION) 
-#undef STRUCT_ALLOCATION - - -#define SYMBOL_ALLOCATION(name, string) String* Heap::name##_; - SYMBOL_LIST(SYMBOL_ALLOCATION) -#undef SYMBOL_ALLOCATION String* Heap::hidden_symbol_; +Object* Heap::roots_[Heap::kRootListLength]; + NewSpace Heap::new_space_; OldSpace* Heap::old_pointer_space_ = NULL; OldSpace* Heap::old_data_space_ = NULL; OldSpace* Heap::code_space_ = NULL; MapSpace* Heap::map_space_ = NULL; +CellSpace* Heap::cell_space_ = NULL; LargeObjectSpace* Heap::lo_space_ = NULL; static const int kMinimumPromotionLimit = 2*MB; @@ -79,7 +69,7 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0; // semispace_size_ should be a power of 2 and old_generation_size_ should be // a multiple of Page::kPageSize. -#if V8_HOST_ARCH_ARM +#if V8_TARGET_ARCH_ARM int Heap::semispace_size_ = 512*KB; int Heap::old_generation_size_ = 128*MB; int Heap::initial_semispace_size_ = 128*KB; @@ -121,7 +111,8 @@ int Heap::Capacity() { old_pointer_space_->Capacity() + old_data_space_->Capacity() + code_space_->Capacity() + - map_space_->Capacity(); + map_space_->Capacity() + + cell_space_->Capacity(); } @@ -132,7 +123,8 @@ int Heap::Available() { old_pointer_space_->Available() + old_data_space_->Available() + code_space_->Available() + - map_space_->Available(); + map_space_->Available() + + cell_space_->Available(); } @@ -141,6 +133,7 @@ bool Heap::HasBeenSetup() { old_data_space_ != NULL && code_space_ != NULL && map_space_ != NULL && + cell_space_ != NULL && lo_space_ != NULL; } @@ -221,6 +214,7 @@ void Heap::ReportStatisticsAfterGC() { // NewSpace statistics are logged exactly once when --log-gc is turned on. #if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING) if (FLAG_heap_stats) { + new_space_.CollectStatistics(); ReportHeapStatistics("After GC"); } else if (FLAG_log_gc) { new_space_.ReportStatistics(); @@ -283,9 +277,8 @@ void Heap::GarbageCollectionEpilogue() { Counters::alive_after_last_gc.Set(SizeOfObjects()); - SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table_); - Counters::symbol_table_capacity.Set(symbol_table->Capacity()); - Counters::number_of_symbols.Set(symbol_table->NumberOfElements()); + Counters::symbol_table_capacity.Set(symbol_table()->Capacity()); + Counters::number_of_symbols.Set(symbol_table()->NumberOfElements()); #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) ReportStatisticsAfterGC(); #endif @@ -370,6 +363,8 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) { return code_space_->Available() >= requested_size; case MAP_SPACE: return map_space_->Available() >= requested_size; + case CELL_SPACE: + return cell_space_->Available() >= requested_size; case LO_SPACE: return lo_space_->Available() >= requested_size; } @@ -404,8 +399,7 @@ class SymbolTableVerifier : public ObjectVisitor { static void VerifySymbolTable() { #ifdef DEBUG SymbolTableVerifier verifier; - SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table()); - symbol_table->IterateElements(&verifier); + Heap::symbol_table()->IterateElements(&verifier); #endif // DEBUG } @@ -428,22 +422,8 @@ void Heap::PerformGarbageCollection(AllocationSpace space, old_gen_allocation_limit_ = old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2); old_gen_exhausted_ = false; - - // If we have used the mark-compact collector to collect the new - // space, and it has not compacted the new space, we force a - // separate scavenge collection. This is a hack. 
It covers the - // case where (1) a new space collection was requested, (2) the - // collector selection policy selected the mark-compact collector, - // and (3) the mark-compact collector policy selected not to - // compact the new space. In that case, there is no more (usable) - // free space in the new space after the collection compared to - // before. - if (space == NEW_SPACE && !MarkCompactCollector::HasCompacted()) { - Scavenge(); - } - } else { - Scavenge(); } + Scavenge(); Counters::objs_since_last_young.Set(0); PostGarbageCollectionProcessing(); @@ -621,6 +601,7 @@ static void VerifyNonPointerSpacePointers() { } #endif + void Heap::Scavenge() { #ifdef DEBUG if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); @@ -679,7 +660,7 @@ void Heap::Scavenge() { // Copy objects reachable from weak pointers. GlobalHandles::IterateWeakRoots(&scavenge_visitor); -#if V8_HOST_ARCH_64_BIT +#ifdef V8_HOST_ARCH_64_BIT // TODO(X64): Make this go away again. We currently disable RSets for // 64-bit-mode. HeapObjectIterator old_pointer_iterator(old_pointer_space_); @@ -699,13 +680,25 @@ void Heap::Scavenge() { heap_object->Iterate(&scavenge_visitor); } } -#else // V8_HOST_ARCH_64_BIT +#else // !defined(V8_HOST_ARCH_64_BIT) // Copy objects reachable from the old generation. By definition, // there are no intergenerational pointers in code or data spaces. IterateRSet(old_pointer_space_, &ScavengePointer); IterateRSet(map_space_, &ScavengePointer); lo_space_->IterateRSet(&ScavengePointer); -#endif // V8_HOST_ARCH_64_BIT +#endif + + // Copy objects reachable from cells by scavenging cell values directly. + HeapObjectIterator cell_iterator(cell_space_); + while (cell_iterator.has_next()) { + HeapObject* cell = cell_iterator.next(); + if (cell->IsJSGlobalPropertyCell()) { + Address value_address = + reinterpret_cast<Address>(cell) + + (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); + scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); + } + } do { ASSERT(new_space_front <= new_space_.top()); @@ -845,8 +838,8 @@ int Heap::UpdateRSet(HeapObject* obj) { void Heap::RebuildRSets() { - // By definition, we do not care about remembered set bits in code or data - // spaces. + // By definition, we do not care about remembered set bits in code, + // data, or cell spaces. map_space_->ClearRSet(); RebuildRSets(map_space_); @@ -1021,11 +1014,11 @@ void Heap::ScavengePointer(HeapObject** p) { Object* Heap::AllocatePartialMap(InstanceType instance_type, int instance_size) { - Object* result = AllocateRawMap(Map::kSize); + Object* result = AllocateRawMap(); if (result->IsFailure()) return result; // Map::cast cannot be used due to uninitialized map field. 
- reinterpret_cast<Map*>(result)->set_map(meta_map()); + reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); reinterpret_cast<Map*>(result)->set_instance_type(instance_type); reinterpret_cast<Map*>(result)->set_instance_size(instance_size); reinterpret_cast<Map*>(result)->set_inobject_properties(0); @@ -1035,7 +1028,7 @@ Object* Heap::AllocatePartialMap(InstanceType instance_type, Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) { - Object* result = AllocateRawMap(Map::kSize); + Object* result = AllocateRawMap(); if (result->IsFailure()) return result; Map* map = reinterpret_cast<Map*>(result); @@ -1054,36 +1047,59 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) { } +const Heap::StringTypeTable Heap::string_type_table[] = { +#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \ + {type, size, k##camel_name##MapRootIndex}, + STRING_TYPE_LIST(STRING_TYPE_ELEMENT) +#undef STRING_TYPE_ELEMENT +}; + + +const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = { +#define CONSTANT_SYMBOL_ELEMENT(name, contents) \ + {contents, k##name##RootIndex}, + SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT) +#undef CONSTANT_SYMBOL_ELEMENT +}; + + +const Heap::StructTable Heap::struct_table[] = { +#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \ + { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex }, + STRUCT_LIST(STRUCT_TABLE_ELEMENT) +#undef STRUCT_TABLE_ELEMENT +}; + + bool Heap::CreateInitialMaps() { Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize); if (obj->IsFailure()) return false; - // Map::cast cannot be used due to uninitialized map field. - meta_map_ = reinterpret_cast<Map*>(obj); - meta_map()->set_map(meta_map()); + Map* new_meta_map = reinterpret_cast<Map*>(obj); + set_meta_map(new_meta_map); + new_meta_map->set_map(new_meta_map); obj = AllocatePartialMap(FIXED_ARRAY_TYPE, FixedArray::kHeaderSize); if (obj->IsFailure()) return false; - fixed_array_map_ = Map::cast(obj); + set_fixed_array_map(Map::cast(obj)); obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize); if (obj->IsFailure()) return false; - oddball_map_ = Map::cast(obj); + set_oddball_map(Map::cast(obj)); // Allocate the empty array obj = AllocateEmptyFixedArray(); if (obj->IsFailure()) return false; - empty_fixed_array_ = FixedArray::cast(obj); + set_empty_fixed_array(FixedArray::cast(obj)); obj = Allocate(oddball_map(), OLD_DATA_SPACE); if (obj->IsFailure()) return false; - null_value_ = obj; + set_null_value(obj); - // Allocate the empty descriptor array. AllocateMap can now be used. + // Allocate the empty descriptor array. obj = AllocateEmptyFixedArray(); if (obj->IsFailure()) return false; - // There is a check against empty_descriptor_array() in cast(). - empty_descriptor_array_ = reinterpret_cast<DescriptorArray*>(obj); + set_empty_descriptor_array(DescriptorArray::cast(obj)); // Fix the instance_descriptors for the existing maps. 
meta_map()->set_instance_descriptors(empty_descriptor_array()); @@ -1101,100 +1117,106 @@ bool Heap::CreateInitialMaps() { fixed_array_map()->set_prototype(null_value()); fixed_array_map()->set_constructor(null_value()); + oddball_map()->set_prototype(null_value()); oddball_map()->set_constructor(null_value()); obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize); if (obj->IsFailure()) return false; - heap_number_map_ = Map::cast(obj); + set_heap_number_map(Map::cast(obj)); obj = AllocateMap(PROXY_TYPE, Proxy::kSize); if (obj->IsFailure()) return false; - proxy_map_ = Map::cast(obj); + set_proxy_map(Map::cast(obj)); -#define ALLOCATE_STRING_MAP(type, size, name) \ - obj = AllocateMap(type, size); \ - if (obj->IsFailure()) return false; \ - name##_map_ = Map::cast(obj); - STRING_TYPE_LIST(ALLOCATE_STRING_MAP); -#undef ALLOCATE_STRING_MAP + for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) { + const StringTypeTable& entry = string_type_table[i]; + obj = AllocateMap(entry.type, entry.size); + if (obj->IsFailure()) return false; + roots_[entry.index] = Map::cast(obj); + } obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize); if (obj->IsFailure()) return false; - undetectable_short_string_map_ = Map::cast(obj); - undetectable_short_string_map_->set_is_undetectable(); + set_undetectable_short_string_map(Map::cast(obj)); + Map::cast(obj)->set_is_undetectable(); obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize); if (obj->IsFailure()) return false; - undetectable_medium_string_map_ = Map::cast(obj); - undetectable_medium_string_map_->set_is_undetectable(); + set_undetectable_medium_string_map(Map::cast(obj)); + Map::cast(obj)->set_is_undetectable(); obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize); if (obj->IsFailure()) return false; - undetectable_long_string_map_ = Map::cast(obj); - undetectable_long_string_map_->set_is_undetectable(); + set_undetectable_long_string_map(Map::cast(obj)); + Map::cast(obj)->set_is_undetectable(); obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize); if (obj->IsFailure()) return false; - undetectable_short_ascii_string_map_ = Map::cast(obj); - undetectable_short_ascii_string_map_->set_is_undetectable(); + set_undetectable_short_ascii_string_map(Map::cast(obj)); + Map::cast(obj)->set_is_undetectable(); obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize); if (obj->IsFailure()) return false; - undetectable_medium_ascii_string_map_ = Map::cast(obj); - undetectable_medium_ascii_string_map_->set_is_undetectable(); + set_undetectable_medium_ascii_string_map(Map::cast(obj)); + Map::cast(obj)->set_is_undetectable(); obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize); if (obj->IsFailure()) return false; - undetectable_long_ascii_string_map_ = Map::cast(obj); - undetectable_long_ascii_string_map_->set_is_undetectable(); + set_undetectable_long_ascii_string_map(Map::cast(obj)); + Map::cast(obj)->set_is_undetectable(); obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize); if (obj->IsFailure()) return false; - byte_array_map_ = Map::cast(obj); + set_byte_array_map(Map::cast(obj)); obj = AllocateMap(CODE_TYPE, Code::kHeaderSize); if (obj->IsFailure()) return false; - code_map_ = Map::cast(obj); + set_code_map(Map::cast(obj)); + + obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE, + JSGlobalPropertyCell::kSize); + if (obj->IsFailure()) return false; + set_global_property_cell_map(Map::cast(obj)); obj = AllocateMap(FILLER_TYPE, kPointerSize); if 
(obj->IsFailure()) return false; - one_word_filler_map_ = Map::cast(obj); + set_one_pointer_filler_map(Map::cast(obj)); obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize); if (obj->IsFailure()) return false; - two_word_filler_map_ = Map::cast(obj); + set_two_pointer_filler_map(Map::cast(obj)); -#define ALLOCATE_STRUCT_MAP(NAME, Name, name) \ - obj = AllocateMap(NAME##_TYPE, Name::kSize); \ - if (obj->IsFailure()) return false; \ - name##_map_ = Map::cast(obj); - STRUCT_LIST(ALLOCATE_STRUCT_MAP) -#undef ALLOCATE_STRUCT_MAP + for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) { + const StructTable& entry = struct_table[i]; + obj = AllocateMap(entry.type, entry.size); + if (obj->IsFailure()) return false; + roots_[entry.index] = Map::cast(obj); + } obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); if (obj->IsFailure()) return false; - hash_table_map_ = Map::cast(obj); + set_hash_table_map(Map::cast(obj)); obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); if (obj->IsFailure()) return false; - context_map_ = Map::cast(obj); + set_context_map(Map::cast(obj)); obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); if (obj->IsFailure()) return false; - catch_context_map_ = Map::cast(obj); + set_catch_context_map(Map::cast(obj)); obj = AllocateMap(FIXED_ARRAY_TYPE, HeapObject::kHeaderSize); if (obj->IsFailure()) return false; - global_context_map_ = Map::cast(obj); + set_global_context_map(Map::cast(obj)); obj = AllocateMap(JS_FUNCTION_TYPE, JSFunction::kSize); if (obj->IsFailure()) return false; - boilerplate_function_map_ = Map::cast(obj); + set_boilerplate_function_map(Map::cast(obj)); obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize); if (obj->IsFailure()) return false; - shared_function_info_map_ = Map::cast(obj); + set_shared_function_info_map(Map::cast(obj)); ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array())); return true; @@ -1230,6 +1252,15 @@ Object* Heap::AllocateHeapNumber(double value) { } +Object* Heap::AllocateJSGlobalPropertyCell(Object* value) { + Object* result = AllocateRawCell(); + if (result->IsFailure()) return result; + HeapObject::cast(result)->set_map(global_property_cell_map()); + JSGlobalPropertyCell::cast(result)->set_value(value); + return result; +} + + Object* Heap::CreateOddball(Map* map, const char* to_string, Object* to_number) { @@ -1244,41 +1275,62 @@ bool Heap::CreateApiObjects() { obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); if (obj->IsFailure()) return false; - neander_map_ = Map::cast(obj); + set_neander_map(Map::cast(obj)); - obj = Heap::AllocateJSObjectFromMap(neander_map_); + obj = Heap::AllocateJSObjectFromMap(neander_map()); if (obj->IsFailure()) return false; Object* elements = AllocateFixedArray(2); if (elements->IsFailure()) return false; FixedArray::cast(elements)->set(0, Smi::FromInt(0)); JSObject::cast(obj)->set_elements(FixedArray::cast(elements)); - message_listeners_ = JSObject::cast(obj); + set_message_listeners(JSObject::cast(obj)); return true; } + +void Heap::CreateCEntryStub() { + CEntryStub stub; + set_c_entry_code(*stub.GetCode()); +} + + +void Heap::CreateCEntryDebugBreakStub() { + CEntryDebugBreakStub stub; + set_c_entry_debug_break_code(*stub.GetCode()); +} + + +void Heap::CreateJSEntryStub() { + JSEntryStub stub; + set_js_entry_code(*stub.GetCode()); +} + + +void Heap::CreateJSConstructEntryStub() { + JSConstructEntryStub stub; + set_js_construct_entry_code(*stub.GetCode()); +} + + void Heap::CreateFixedStubs() { // Here we create roots for fixed stubs. 
They are needed at GC // for cooking and uncooking (check out frames.cc). // This eliminates the need for doing dictionary lookup in the // stub cache for these stubs. HandleScope scope; - { - CEntryStub stub; - c_entry_code_ = *stub.GetCode(); - } - { - CEntryDebugBreakStub stub; - c_entry_debug_break_code_ = *stub.GetCode(); - } - { - JSEntryStub stub; - js_entry_code_ = *stub.GetCode(); - } - { - JSConstructEntryStub stub; - js_construct_entry_code_ = *stub.GetCode(); - } + // gcc-4.4 has problems generating correct code for the following snippet: + // { CEntryStub stub; + // c_entry_code_ = *stub.GetCode(); + // } + // { CEntryDebugBreakStub stub; + // c_entry_debug_break_code_ = *stub.GetCode(); + // } + // To work around the problem, make separate functions without inlining. + Heap::CreateCEntryStub(); + Heap::CreateCEntryDebugBreakStub(); + Heap::CreateJSEntryStub(); + Heap::CreateJSConstructEntryStub(); } @@ -1288,34 +1340,35 @@ bool Heap::CreateInitialObjects() { // The -0 value must be set before NumberFromDouble works. obj = AllocateHeapNumber(-0.0, TENURED); if (obj->IsFailure()) return false; - minus_zero_value_ = obj; - ASSERT(signbit(minus_zero_value_->Number()) != 0); + set_minus_zero_value(obj); + ASSERT(signbit(minus_zero_value()->Number()) != 0); obj = AllocateHeapNumber(OS::nan_value(), TENURED); if (obj->IsFailure()) return false; - nan_value_ = obj; + set_nan_value(obj); obj = Allocate(oddball_map(), OLD_DATA_SPACE); if (obj->IsFailure()) return false; - undefined_value_ = obj; + set_undefined_value(obj); ASSERT(!InNewSpace(undefined_value())); // Allocate initial symbol table. obj = SymbolTable::Allocate(kInitialSymbolTableSize); if (obj->IsFailure()) return false; - symbol_table_ = obj; + // Don't use set_symbol_table() due to asserts. + roots_[kSymbolTableRootIndex] = obj; // Assign the print strings for oddballs after creating the symbol table. Object* symbol = LookupAsciiSymbol("undefined"); if (symbol->IsFailure()) return false; - Oddball::cast(undefined_value_)->set_to_string(String::cast(symbol)); - Oddball::cast(undefined_value_)->set_to_number(nan_value_); + Oddball::cast(undefined_value())->set_to_string(String::cast(symbol)); + Oddball::cast(undefined_value())->set_to_number(nan_value()); // Assign the print strings for oddballs after creating the symbol table. symbol = LookupAsciiSymbol("null"); if (symbol->IsFailure()) return false; - Oddball::cast(null_value_)->set_to_string(String::cast(symbol)); - Oddball::cast(null_value_)->set_to_number(Smi::FromInt(0)); + Oddball::cast(null_value())->set_to_string(String::cast(symbol)); + Oddball::cast(null_value())->set_to_number(Smi::FromInt(0)); // Allocate the null_value obj = Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0)); @@ -1323,32 +1376,31 @@ bool Heap::CreateInitialObjects() { obj = CreateOddball(oddball_map(), "true", Smi::FromInt(1)); if (obj->IsFailure()) return false; - true_value_ = obj; + set_true_value(obj); obj = CreateOddball(oddball_map(), "false", Smi::FromInt(0)); if (obj->IsFailure()) return false; - false_value_ = obj; + set_false_value(obj); obj = CreateOddball(oddball_map(), "hole", Smi::FromInt(-1)); if (obj->IsFailure()) return false; - the_hole_value_ = obj; + set_the_hole_value(obj); // Allocate the empty string.
obj = AllocateRawAsciiString(0, TENURED); if (obj->IsFailure()) return false; - empty_string_ = String::cast(obj); + set_empty_string(String::cast(obj)); -#define SYMBOL_INITIALIZE(name, string) \ - obj = LookupAsciiSymbol(string); \ - if (obj->IsFailure()) return false; \ - (name##_) = String::cast(obj); - SYMBOL_LIST(SYMBOL_INITIALIZE) -#undef SYMBOL_INITIALIZE + for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) { + obj = LookupAsciiSymbol(constant_symbol_table[i].contents); + if (obj->IsFailure()) return false; + roots_[constant_symbol_table[i].index] = String::cast(obj); + } // Allocate the hidden symbol which is used to identify the hidden properties // in JSObjects. The hash code has a special value so that it will not match // the empty string when searching for the property. It cannot be part of the - // SYMBOL_LIST because it needs to be allocated manually with the special + // loop above because it needs to be allocated manually with the special // hash code in place. The hash code for the hidden_symbol is zero to ensure // that it will always be at the first entry in property descriptors. obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask); @@ -1358,37 +1410,37 @@ bool Heap::CreateInitialObjects() { // Allocate the proxy for __proto__. obj = AllocateProxy((Address) &Accessors::ObjectPrototype); if (obj->IsFailure()) return false; - prototype_accessors_ = Proxy::cast(obj); + set_prototype_accessors(Proxy::cast(obj)); // Allocate the code_stubs dictionary. - obj = Dictionary::Allocate(4); + obj = NumberDictionary::Allocate(4); if (obj->IsFailure()) return false; - code_stubs_ = Dictionary::cast(obj); + set_code_stubs(NumberDictionary::cast(obj)); // Allocate the non_monomorphic_cache used in stub-cache.cc - obj = Dictionary::Allocate(4); + obj = NumberDictionary::Allocate(4); if (obj->IsFailure()) return false; - non_monomorphic_cache_ = Dictionary::cast(obj); + set_non_monomorphic_cache(NumberDictionary::cast(obj)); CreateFixedStubs(); // Allocate the number->string conversion cache obj = AllocateFixedArray(kNumberStringCacheSize * 2); if (obj->IsFailure()) return false; - number_string_cache_ = FixedArray::cast(obj); + set_number_string_cache(FixedArray::cast(obj)); // Allocate cache for single character strings. obj = AllocateFixedArray(String::kMaxAsciiCharCode+1); if (obj->IsFailure()) return false; - single_character_string_cache_ = FixedArray::cast(obj); + set_single_character_string_cache(FixedArray::cast(obj)); // Allocate cache for external strings pointing to native source code. obj = AllocateFixedArray(Natives::GetBuiltinsCount()); if (obj->IsFailure()) return false; - natives_source_cache_ = FixedArray::cast(obj); + set_natives_source_cache(FixedArray::cast(obj)); // Handling of script id generation is in Factory::NewScript. - last_script_id_ = undefined_value(); + set_last_script_id(undefined_value()); // Initialize keyed lookup cache. 
KeyedLookupCache::Clear(); @@ -1426,13 +1478,13 @@ Object* Heap::GetNumberStringCache(Object* number) { } else { hash = double_get_hash(number->Number()); } - Object* key = number_string_cache_->get(hash * 2); + Object* key = number_string_cache()->get(hash * 2); if (key == number) { - return String::cast(number_string_cache_->get(hash * 2 + 1)); + return String::cast(number_string_cache()->get(hash * 2 + 1)); } else if (key->IsHeapNumber() && number->IsHeapNumber() && key->Number() == number->Number()) { - return String::cast(number_string_cache_->get(hash * 2 + 1)); + return String::cast(number_string_cache()->get(hash * 2 + 1)); } return undefined_value(); } @@ -1442,12 +1494,12 @@ void Heap::SetNumberStringCache(Object* number, String* string) { int hash; if (number->IsSmi()) { hash = smi_get_hash(Smi::cast(number)); - number_string_cache_->set(hash * 2, number, SKIP_WRITE_BARRIER); + number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER); } else { hash = double_get_hash(number->Number()); - number_string_cache_->set(hash * 2, number); + number_string_cache()->set(hash * 2, number); } - number_string_cache_->set(hash * 2 + 1, string); + number_string_cache()->set(hash * 2 + 1, string); } @@ -1460,19 +1512,19 @@ Object* Heap::SmiOrNumberFromDouble(double value, static const DoubleRepresentation plus_zero(0.0); static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation nan(OS::nan_value()); - ASSERT(minus_zero_value_ != NULL); + ASSERT(minus_zero_value() != NULL); ASSERT(sizeof(plus_zero.value) == sizeof(plus_zero.bits)); DoubleRepresentation rep(value); if (rep.bits == plus_zero.bits) return Smi::FromInt(0); // not uncommon if (rep.bits == minus_zero.bits) { return new_object ? AllocateHeapNumber(-0.0, pretenure) - : minus_zero_value_; + : minus_zero_value(); } if (rep.bits == nan.bits) { return new_object ? AllocateHeapNumber(OS::nan_value(), pretenure) - : nan_value_; + : nan_value(); } // Try to represent the value as a tagged small integer. @@ -1514,7 +1566,7 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) { Object* Heap::AllocateSharedFunctionInfo(Object* name) { - Object* result = Allocate(shared_function_info_map(), NEW_SPACE); + Object* result = Allocate(shared_function_info_map(), OLD_POINTER_SPACE); if (result->IsFailure()) return result; SharedFunctionInfo* share = SharedFunctionInfo::cast(result); @@ -1778,7 +1830,7 @@ void Heap::CreateFillerObjectAt(Address addr, int size) { if (size == 0) return; HeapObject* filler = HeapObject::FromAddress(addr); if (size == kPointerSize) { - filler->set_map(Heap::one_word_filler_map()); + filler->set_map(Heap::one_pointer_filler_map()); } else { filler->set_map(Heap::byte_array_map()); ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size)); } @@ -2006,7 +2058,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) { // Allocate the backing storage for the properties. int prop_size = map->unused_property_fields() - map->inobject_properties(); - Object* properties = AllocateFixedArray(prop_size); + Object* properties = AllocateFixedArray(prop_size, pretenure); if (properties->IsFailure()) return properties; // Allocate the JSObject. @@ -2034,7 +2086,39 @@ Object* Heap::AllocateJSObject(JSFunction* constructor, Map::cast(initial_map)->set_constructor(constructor); } // Allocate the object based on the constructor's initial map.
- return AllocateJSObjectFromMap(constructor->initial_map(), pretenure); + Object* result = + AllocateJSObjectFromMap(constructor->initial_map(), pretenure); + // Make sure result is NOT a global object if valid. + ASSERT(result->IsFailure() || !result->IsGlobalObject()); + return result; +} + + +Object* Heap::AllocateGlobalObject(JSFunction* constructor) { + ASSERT(constructor->has_initial_map()); + // Make sure no field properties are described in the initial map. + // This guarantees us that normalizing the properties does not + // require us to change property values to JSGlobalPropertyCells. + ASSERT(constructor->initial_map()->NextFreePropertyIndex() == 0); + + // Make sure we don't have a ton of pre-allocated slots in the + // global objects. They will be unused once we normalize the object. + ASSERT(constructor->initial_map()->unused_property_fields() == 0); + ASSERT(constructor->initial_map()->inobject_properties() == 0); + + // Allocate the object based on the constructor's initial map. + Object* result = AllocateJSObjectFromMap(constructor->initial_map(), TENURED); + if (result->IsFailure()) return result; + + // Normalize the result. + JSObject* global = JSObject::cast(result); + result = global->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + if (result->IsFailure()) return result; + + // Make sure result is a global object with properties in dictionary. + ASSERT(global->IsGlobalObject()); + ASSERT(!global->HasFastProperties()); + return global; } @@ -2111,7 +2195,7 @@ Object* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor, // Allocate the backing storage for the properties. int prop_size = map->unused_property_fields() - map->inobject_properties(); - Object* properties = AllocateFixedArray(prop_size); + Object* properties = AllocateFixedArray(prop_size, TENURED); if (properties->IsFailure()) return properties; // Reset the map for the object.
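The AllocateGlobalObject path above allocates the global from its initial map and then immediately normalizes it, so every global property ends up behind a JSGlobalPropertyCell (allocated in the new cell space by AllocateJSGlobalPropertyCell earlier in this file, and scavenged directly in Heap::Scavenge). A rough, hypothetical sketch of what a lookup sees afterwards; LoadGlobalValue is not part of the patch, and the dictionary accessors FindEntry/ValueAt are assumed from the StringDictionary interface introduced by this change:

// Hedged sketch, not V8 code: reading a global property once the
// object has been normalized (error handling elided).
static Object* LoadGlobalValue(GlobalObject* global, String* name) {
  // After normalization the properties live in a StringDictionary
  // (assumed accessor names).
  StringDictionary* dict = global->property_dictionary();
  int entry = dict->FindEntry(name);
  // Each value is boxed in a JSGlobalPropertyCell from CELL_SPACE,
  // so the cell is a stable location that callers could remember.
  JSGlobalPropertyCell* cell =
      JSGlobalPropertyCell::cast(dict->ValueAt(entry));
  return cell->value();
}

Because the cell, rather than the dictionary slot, holds the value, the cell's address stays valid as the value changes, which is presumably what makes it attractive as a cacheable location.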
@@ -2500,7 +2584,7 @@ Object* Heap::AllocateHashTable(int length) { Object* result = Heap::AllocateFixedArray(length); if (result->IsFailure()) return result; reinterpret_cast<Array*>(result)->set_map(hash_table_map()); - ASSERT(result->IsDictionary()); + ASSERT(result->IsHashTable()); return result; } @@ -2622,6 +2706,8 @@ void Heap::ReportHeapStatistics(const char* title) { code_space_->ReportStatistics(); PrintF("Map space : "); map_space_->ReportStatistics(); + PrintF("Cell space : "); + cell_space_->ReportStatistics(); PrintF("Large object space : "); lo_space_->ReportStatistics(); PrintF(">>>>>> ========================================= >>>>>>\n"); @@ -2642,6 +2728,7 @@ bool Heap::Contains(Address addr) { old_data_space_->Contains(addr) || code_space_->Contains(addr) || map_space_->Contains(addr) || + cell_space_->Contains(addr) || lo_space_->SlowContains(addr)); } @@ -2666,6 +2753,8 @@ bool Heap::InSpace(Address addr, AllocationSpace space) { return code_space_->Contains(addr); case MAP_SPACE: return map_space_->Contains(addr); + case CELL_SPACE: + return cell_space_->Contains(addr); case LO_SPACE: return lo_space_->SlowContains(addr); } @@ -2679,22 +2768,31 @@ void Heap::Verify() { ASSERT(HasBeenSetup()); VerifyPointersVisitor visitor; - Heap::IterateRoots(&visitor); + IterateRoots(&visitor); - AllSpaces spaces; - while (Space* space = spaces.next()) { - space->Verify(); - } + new_space_.Verify(); + + VerifyPointersAndRSetVisitor rset_visitor; + old_pointer_space_->Verify(&rset_visitor); + map_space_->Verify(&rset_visitor); + + VerifyPointersVisitor no_rset_visitor; + old_data_space_->Verify(&no_rset_visitor); + code_space_->Verify(&no_rset_visitor); + cell_space_->Verify(&no_rset_visitor); + + lo_space_->Verify(); } #endif // DEBUG Object* Heap::LookupSymbol(Vector<const char> string) { Object* symbol = NULL; - Object* new_table = - SymbolTable::cast(symbol_table_)->LookupSymbol(string, &symbol); + Object* new_table = symbol_table()->LookupSymbol(string, &symbol); if (new_table->IsFailure()) return new_table; - symbol_table_ = new_table; + // Can't use set_symbol_table because SymbolTable::cast knows that + // SymbolTable is a singleton and checks for identity. + roots_[kSymbolTableRootIndex] = new_table; ASSERT(symbol != NULL); return symbol; } @@ -2703,10 +2801,11 @@ Object* Heap::LookupSymbol(Vector<const char> string) { Object* Heap::LookupSymbol(String* string) { if (string->IsSymbol()) return string; Object* symbol = NULL; - Object* new_table = - SymbolTable::cast(symbol_table_)->LookupString(string, &symbol); + Object* new_table = symbol_table()->LookupString(string, &symbol); if (new_table->IsFailure()) return new_table; - symbol_table_ = new_table; + // Can't use set_symbol_table because SymbolTable::cast knows that + // SymbolTable is a singleton and checks for identity. 
+ roots_[kSymbolTableRootIndex] = new_table; ASSERT(symbol != NULL); return symbol; } @@ -2717,8 +2816,7 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) { *symbol = string; return true; } - SymbolTable* table = SymbolTable::cast(symbol_table_); - return table->LookupSymbolIfExists(string, symbol); + return symbol_table()->LookupSymbolIfExists(string, symbol); } @@ -2805,28 +2903,15 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { void Heap::IterateRoots(ObjectVisitor* v) { IterateStrongRoots(v); - v->VisitPointer(reinterpret_cast<Object**>(&symbol_table_)); + v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); SYNCHRONIZE_TAG("symbol_table"); } void Heap::IterateStrongRoots(ObjectVisitor* v) { -#define ROOT_ITERATE(type, name) \ - v->VisitPointer(bit_cast<Object**, type**>(&name##_)); - STRONG_ROOT_LIST(ROOT_ITERATE); -#undef ROOT_ITERATE + v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]); SYNCHRONIZE_TAG("strong_root_list"); -#define STRUCT_MAP_ITERATE(NAME, Name, name) \ - v->VisitPointer(bit_cast<Object**, Map**>(&name##_map_)); - STRUCT_LIST(STRUCT_MAP_ITERATE); -#undef STRUCT_MAP_ITERATE - SYNCHRONIZE_TAG("struct_map"); - -#define SYMBOL_ITERATE(name, string) \ - v->VisitPointer(bit_cast<Object**, String**>(&name##_)); - SYMBOL_LIST(SYMBOL_ITERATE) -#undef SYMBOL_ITERATE v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_)); SYNCHRONIZE_TAG("symbol"); @@ -2901,6 +2986,7 @@ int Heap::PromotedSpaceSize() { + old_data_space_->Size() + code_space_->Size() + map_space_->Size() + + cell_space_->Size() + lo_space_->Size(); } @@ -2978,6 +3064,13 @@ bool Heap::Setup(bool create_heap_objects) { // enough to hold at least a page will cause it to allocate. if (!map_space_->Setup(NULL, 0)) return false; + // Initialize global property cell space. + cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE); + if (cell_space_ == NULL) return false; + // Setting up a paged space without giving it a virtual memory range big + // enough to hold at least a page will cause it to allocate. + if (!cell_space_->Setup(NULL, 0)) return false; + // The large object code space may contain code or data. We set the memory // to be non-executable here for safety, but this means we need to enable it // explicitly when allocating large code objects. @@ -3030,6 +3123,12 @@ void Heap::TearDown() { map_space_ = NULL; } + if (cell_space_ != NULL) { + cell_space_->TearDown(); + delete cell_space_; + cell_space_ = NULL; + } + if (lo_space_ != NULL) { lo_space_->TearDown(); delete lo_space_; @@ -3041,11 +3140,9 @@ void Heap::TearDown() { void Heap::Shrink() { - // Try to shrink map, old, and code spaces. - map_space_->Shrink(); - old_pointer_space_->Shrink(); - old_data_space_->Shrink(); - code_space_->Shrink(); + // Try to shrink all paged spaces. 
+ PagedSpaces spaces; + while (PagedSpace* space = spaces.next()) space->Shrink(); } @@ -3053,24 +3150,16 @@ void Heap::Shrink() { void Heap::Protect() { if (HasBeenSetup()) { - new_space_.Protect(); - map_space_->Protect(); - old_pointer_space_->Protect(); - old_data_space_->Protect(); - code_space_->Protect(); - lo_space_->Protect(); + AllSpaces spaces; + while (Space* space = spaces.next()) space->Protect(); } } void Heap::Unprotect() { if (HasBeenSetup()) { - new_space_.Unprotect(); - map_space_->Unprotect(); - old_pointer_space_->Unprotect(); - old_data_space_->Unprotect(); - code_space_->Unprotect(); - lo_space_->Unprotect(); + AllSpaces spaces; + while (Space* space = spaces.next()) space->Unprotect(); } } @@ -3108,6 +3197,8 @@ Space* AllSpaces::next() { return Heap::code_space(); case MAP_SPACE: return Heap::map_space(); + case CELL_SPACE: + return Heap::cell_space(); case LO_SPACE: return Heap::lo_space(); default: @@ -3126,6 +3217,8 @@ PagedSpace* PagedSpaces::next() { return Heap::code_space(); case MAP_SPACE: return Heap::map_space(); + case CELL_SPACE: + return Heap::cell_space(); default: return NULL; } @@ -3199,6 +3292,9 @@ ObjectIterator* SpaceIterator::CreateIterator() { case MAP_SPACE: iterator_ = new HeapObjectIterator(Heap::map_space()); break; + case CELL_SPACE: + iterator_ = new HeapObjectIterator(Heap::cell_space()); + break; case LO_SPACE: iterator_ = new LargeObjectIterator(Heap::lo_space()); break; @@ -3303,8 +3399,8 @@ void HeapProfiler::WriteSample() { // Lump all the string types together. int string_number = 0; int string_bytes = 0; -#define INCREMENT_SIZE(type, size, name) \ - string_number += info[type].number(); \ +#define INCREMENT_SIZE(type, size, name, camel_name) \ + string_number += info[type].number(); \ string_bytes += info[type].bytes(); STRING_TYPE_LIST(INCREMENT_SIZE) #undef INCREMENT_SIZE diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 31adcbdba..f3959882d 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -34,104 +34,107 @@ namespace v8 { namespace internal { // Defines all the roots in Heap. 
-#define STRONG_ROOT_LIST(V) \ - V(Map, meta_map) \ - V(Map, heap_number_map) \ - V(Map, short_string_map) \ - V(Map, medium_string_map) \ - V(Map, long_string_map) \ - V(Map, short_ascii_string_map) \ - V(Map, medium_ascii_string_map) \ - V(Map, long_ascii_string_map) \ - V(Map, short_symbol_map) \ - V(Map, medium_symbol_map) \ - V(Map, long_symbol_map) \ - V(Map, short_ascii_symbol_map) \ - V(Map, medium_ascii_symbol_map) \ - V(Map, long_ascii_symbol_map) \ - V(Map, short_cons_symbol_map) \ - V(Map, medium_cons_symbol_map) \ - V(Map, long_cons_symbol_map) \ - V(Map, short_cons_ascii_symbol_map) \ - V(Map, medium_cons_ascii_symbol_map) \ - V(Map, long_cons_ascii_symbol_map) \ - V(Map, short_sliced_symbol_map) \ - V(Map, medium_sliced_symbol_map) \ - V(Map, long_sliced_symbol_map) \ - V(Map, short_sliced_ascii_symbol_map) \ - V(Map, medium_sliced_ascii_symbol_map) \ - V(Map, long_sliced_ascii_symbol_map) \ - V(Map, short_external_symbol_map) \ - V(Map, medium_external_symbol_map) \ - V(Map, long_external_symbol_map) \ - V(Map, short_external_ascii_symbol_map) \ - V(Map, medium_external_ascii_symbol_map) \ - V(Map, long_external_ascii_symbol_map) \ - V(Map, short_cons_string_map) \ - V(Map, medium_cons_string_map) \ - V(Map, long_cons_string_map) \ - V(Map, short_cons_ascii_string_map) \ - V(Map, medium_cons_ascii_string_map) \ - V(Map, long_cons_ascii_string_map) \ - V(Map, short_sliced_string_map) \ - V(Map, medium_sliced_string_map) \ - V(Map, long_sliced_string_map) \ - V(Map, short_sliced_ascii_string_map) \ - V(Map, medium_sliced_ascii_string_map) \ - V(Map, long_sliced_ascii_string_map) \ - V(Map, short_external_string_map) \ - V(Map, medium_external_string_map) \ - V(Map, long_external_string_map) \ - V(Map, short_external_ascii_string_map) \ - V(Map, medium_external_ascii_string_map) \ - V(Map, long_external_ascii_string_map) \ - V(Map, undetectable_short_string_map) \ - V(Map, undetectable_medium_string_map) \ - V(Map, undetectable_long_string_map) \ - V(Map, undetectable_short_ascii_string_map) \ - V(Map, undetectable_medium_ascii_string_map) \ - V(Map, undetectable_long_ascii_string_map) \ - V(Map, byte_array_map) \ - V(Map, fixed_array_map) \ - V(Map, hash_table_map) \ - V(Map, context_map) \ - V(Map, catch_context_map) \ - V(Map, global_context_map) \ - V(Map, code_map) \ - V(Map, oddball_map) \ - V(Map, boilerplate_function_map) \ - V(Map, shared_function_info_map) \ - V(Map, proxy_map) \ - V(Map, one_word_filler_map) \ - V(Map, two_word_filler_map) \ - V(Object, nan_value) \ - V(Object, undefined_value) \ - V(Object, minus_zero_value) \ - V(Object, null_value) \ - V(Object, true_value) \ - V(Object, false_value) \ - V(String, empty_string) \ - V(FixedArray, empty_fixed_array) \ - V(DescriptorArray, empty_descriptor_array) \ - V(Object, the_hole_value) \ - V(Map, neander_map) \ - V(JSObject, message_listeners) \ - V(Proxy, prototype_accessors) \ - V(Dictionary, code_stubs) \ - V(Dictionary, non_monomorphic_cache) \ - V(Code, js_entry_code) \ - V(Code, js_construct_entry_code) \ - V(Code, c_entry_code) \ - V(Code, c_entry_debug_break_code) \ - V(FixedArray, number_string_cache) \ - V(FixedArray, single_character_string_cache) \ - V(FixedArray, natives_source_cache) \ - V(Object, last_script_id) +#define STRONG_ROOT_LIST(V) \ + V(Map, meta_map, MetaMap) \ + V(Map, heap_number_map, HeapNumberMap) \ + V(Map, short_string_map, ShortStringMap) \ + V(Map, medium_string_map, MediumStringMap) \ + V(Map, long_string_map, LongStringMap) \ + V(Map, short_ascii_string_map, 
ShortAsciiStringMap) \ + V(Map, medium_ascii_string_map, MediumAsciiStringMap) \ + V(Map, long_ascii_string_map, LongAsciiStringMap) \ + V(Map, short_symbol_map, ShortSymbolMap) \ + V(Map, medium_symbol_map, MediumSymbolMap) \ + V(Map, long_symbol_map, LongSymbolMap) \ + V(Map, short_ascii_symbol_map, ShortAsciiSymbolMap) \ + V(Map, medium_ascii_symbol_map, MediumAsciiSymbolMap) \ + V(Map, long_ascii_symbol_map, LongAsciiSymbolMap) \ + V(Map, short_cons_symbol_map, ShortConsSymbolMap) \ + V(Map, medium_cons_symbol_map, MediumConsSymbolMap) \ + V(Map, long_cons_symbol_map, LongConsSymbolMap) \ + V(Map, short_cons_ascii_symbol_map, ShortConsAsciiSymbolMap) \ + V(Map, medium_cons_ascii_symbol_map, MediumConsAsciiSymbolMap) \ + V(Map, long_cons_ascii_symbol_map, LongConsAsciiSymbolMap) \ + V(Map, short_sliced_symbol_map, ShortSlicedSymbolMap) \ + V(Map, medium_sliced_symbol_map, MediumSlicedSymbolMap) \ + V(Map, long_sliced_symbol_map, LongSlicedSymbolMap) \ + V(Map, short_sliced_ascii_symbol_map, ShortSlicedAsciiSymbolMap) \ + V(Map, medium_sliced_ascii_symbol_map, MediumSlicedAsciiSymbolMap) \ + V(Map, long_sliced_ascii_symbol_map, LongSlicedAsciiSymbolMap) \ + V(Map, short_external_symbol_map, ShortExternalSymbolMap) \ + V(Map, medium_external_symbol_map, MediumExternalSymbolMap) \ + V(Map, long_external_symbol_map, LongExternalSymbolMap) \ + V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \ + V(Map, medium_external_ascii_symbol_map, MediumExternalAsciiSymbolMap) \ + V(Map, long_external_ascii_symbol_map, LongExternalAsciiSymbolMap) \ + V(Map, short_cons_string_map, ShortConsStringMap) \ + V(Map, medium_cons_string_map, MediumConsStringMap) \ + V(Map, long_cons_string_map, LongConsStringMap) \ + V(Map, short_cons_ascii_string_map, ShortConsAsciiStringMap) \ + V(Map, medium_cons_ascii_string_map, MediumConsAsciiStringMap) \ + V(Map, long_cons_ascii_string_map, LongConsAsciiStringMap) \ + V(Map, short_sliced_string_map, ShortSlicedStringMap) \ + V(Map, medium_sliced_string_map, MediumSlicedStringMap) \ + V(Map, long_sliced_string_map, LongSlicedStringMap) \ + V(Map, short_sliced_ascii_string_map, ShortSlicedAsciiStringMap) \ + V(Map, medium_sliced_ascii_string_map, MediumSlicedAsciiStringMap) \ + V(Map, long_sliced_ascii_string_map, LongSlicedAsciiStringMap) \ + V(Map, short_external_string_map, ShortExternalStringMap) \ + V(Map, medium_external_string_map, MediumExternalStringMap) \ + V(Map, long_external_string_map, LongExternalStringMap) \ + V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \ + V(Map, medium_external_ascii_string_map, MediumExternalAsciiStringMap) \ + V(Map, long_external_ascii_string_map, LongExternalAsciiStringMap) \ + V(Map, undetectable_short_string_map, UndetectableShortStringMap) \ + V(Map, undetectable_medium_string_map, UndetectableMediumStringMap) \ + V(Map, undetectable_long_string_map, UndetectableLongStringMap) \ + V(Map, undetectable_short_ascii_string_map, UndetectableShortAsciiStringMap) \ + V(Map, \ + undetectable_medium_ascii_string_map, \ + UndetectableMediumAsciiStringMap) \ + V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \ + V(Map, byte_array_map, ByteArrayMap) \ + V(Map, fixed_array_map, FixedArrayMap) \ + V(Map, hash_table_map, HashTableMap) \ + V(Map, context_map, ContextMap) \ + V(Map, catch_context_map, CatchContextMap) \ + V(Map, global_context_map, GlobalContextMap) \ + V(Map, code_map, CodeMap) \ + V(Map, oddball_map, OddballMap) \ + V(Map, global_property_cell_map, 
GlobalPropertyCellMap) \ + V(Map, boilerplate_function_map, BoilerplateFunctionMap) \ + V(Map, shared_function_info_map, SharedFunctionInfoMap) \ + V(Map, proxy_map, ProxyMap) \ + V(Map, one_pointer_filler_map, OnePointerFillerMap) \ + V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ + V(Object, nan_value, NanValue) \ + V(Object, undefined_value, UndefinedValue) \ + V(Object, minus_zero_value, MinusZeroValue) \ + V(Object, null_value, NullValue) \ + V(Object, true_value, TrueValue) \ + V(Object, false_value, FalseValue) \ + V(String, empty_string, EmptyString) \ + V(FixedArray, empty_fixed_array, EmptyFixedArray) \ + V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ + V(Object, the_hole_value, TheHoleValue) \ + V(Map, neander_map, NeanderMap) \ + V(JSObject, message_listeners, MessageListeners) \ + V(Proxy, prototype_accessors, PrototypeAccessors) \ + V(NumberDictionary, code_stubs, CodeStubs) \ + V(NumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \ + V(Code, js_entry_code, JsEntryCode) \ + V(Code, js_construct_entry_code, JsConstructEntryCode) \ + V(Code, c_entry_code, CEntryCode) \ + V(Code, c_entry_debug_break_code, CEntryDebugBreakCode) \ + V(FixedArray, number_string_cache, NumberStringCache) \ + V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ + V(FixedArray, natives_source_cache, NativesSourceCache) \ + V(Object, last_script_id, LastScriptId) #define ROOT_LIST(V) \ STRONG_ROOT_LIST(V) \ - V(Object, symbol_table) + V(SymbolTable, symbol_table, SymbolTable) #define SYMBOL_LIST(V) \ V(Array_symbol, "Array") \ @@ -260,6 +263,7 @@ class Heap : public AllStatic { static OldSpace* old_data_space() { return old_data_space_; } static OldSpace* code_space() { return code_space_; } static MapSpace* map_space() { return map_space_; } + static CellSpace* cell_space() { return cell_space_; } static LargeObjectSpace* lo_space() { return lo_space_; } static bool always_allocate() { return always_allocate_scope_depth_ != 0; } @@ -288,6 +292,12 @@ class Heap : public AllStatic { static Object* AllocateJSObject(JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED); + // Allocates and initializes a new global object based on a constructor. + // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation + // failed. + // Please note this does not perform a garbage collection. + static Object* AllocateGlobalObject(JSFunction* constructor); + // Returns a deep copy of the JavaScript object. // Properties and elements are copied too. // Returns failure if allocation failed. @@ -408,6 +418,12 @@ class Heap : public AllStatic { // Please note this does not perform a garbage collection. static Object* AllocateByteArray(int length); + // Allocate a tenured JS global property cell. + // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation + // failed. + // Please note this does not perform a garbage collection. + static Object* AllocateJSGlobalPropertyCell(Object* value); + // Allocates a fixed array initialized with undefined values // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -623,18 +639,29 @@ class Heap : public AllStatic { global_gc_epilogue_callback_ = callback; } - // Heap roots -#define ROOT_ACCESSOR(type, name) static type* name() { return name##_; } + // Heap root getters. We have versions with and without type::cast() here. + // You can't use type::cast during GC because the assert fails. 
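NOTE: the root-list macros above and the accessor macro that follows are two
halves of one refactor: every root moves out of its own static field into a
single Object* roots_[] array, with the new third CamelCase parameter feeding
the generation of enum index names. A toy, compilable illustration of the
scheme (types and names invented; per the comment above, the real code pairs
each checked type::cast accessor with a raw_unchecked_ one for use while the
GC is moving objects):

    #include <cassert>

    struct Object {};
    struct Map : Object {};
    struct String : Object {};

    #define ROOT_LIST(V)                \
      V(Map, meta_map, MetaMap)         \
      V(String, empty_string, EmptyString)

    class Heap {
     public:
      enum RootListIndex {
    #define ROOT_INDEX(type, name, camel) k##camel##RootIndex,
        ROOT_LIST(ROOT_INDEX)
    #undef ROOT_INDEX
        kRootListLength
      };

    #define ROOT_ACCESSOR(type, name, camel)                    \
      static type* name() {                                     \
        return static_cast<type*>(roots_[k##camel##RootIndex]); \
      }
      ROOT_LIST(ROOT_ACCESSOR)
    #undef ROOT_ACCESSOR

      template <typename Visitor>
      static void IterateRoots(Visitor* v) {
        // One range visit covers every root: the point of the array form.
        v->VisitPointers(&roots_[0], &roots_[kRootListLength]);
      }

     private:
      static Object* roots_[kRootListLength];
    };

    Object* Heap::roots_[Heap::kRootListLength];

    struct CountingVisitor {
      int seen = 0;
      void VisitPointers(Object** start, Object** end) {
        seen += static_cast<int>(end - start);
      }
    };

    int main() {
      CountingVisitor v;
      Heap::IterateRoots(&v);
      assert(v.seen == Heap::kRootListLength);
    }

The payoff appeared earlier in heap.cc: IterateStrongRoots collapses three
per-field macro expansions into one VisitPointers over &roots_[0] ..
&roots_[kStrongRootListLength], and the symbol-table entry is deliberately
placed after that length so it can be visited separately. The real list and
accessors follow.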
+#define ROOT_ACCESSOR(type, name, camel_name) \ + static inline type* name() { \ + return type::cast(roots_[k##camel_name##RootIndex]); \ + } \ + static inline type* raw_unchecked_##name() { \ + return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \ + } ROOT_LIST(ROOT_ACCESSOR) #undef ROOT_ACCESSOR // Utility type maps -#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \ - static Map* name##_map() { return name##_map_; } +#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \ + static inline Map* name##_map() { \ + return Map::cast(roots_[k##Name##MapRootIndex]); \ + } STRUCT_LIST(STRUCT_MAP_ACCESSOR) #undef STRUCT_MAP_ACCESSOR -#define SYMBOL_ACCESSOR(name, str) static String* name() { return name##_; } +#define SYMBOL_ACCESSOR(name, str) static inline String* name() { \ + return String::cast(roots_[k##name##RootIndex]); \ + } SYMBOL_LIST(SYMBOL_ACCESSOR) #undef SYMBOL_ACCESSOR @@ -679,11 +706,13 @@ class Heap : public AllStatic { static inline AllocationSpace TargetSpaceId(InstanceType type); // Sets the stub_cache_ (only used when expanding the dictionary). - static void set_code_stubs(Dictionary* value) { code_stubs_ = value; } + static void public_set_code_stubs(NumberDictionary* value) { + roots_[kCodeStubsRootIndex] = value; + } // Sets the non_monomorphic_cache_ (only used when expanding the dictionary). - static void set_non_monomorphic_cache(Dictionary* value) { - non_monomorphic_cache_ = value; + static void public_set_non_monomorphic_cache(NumberDictionary* value) { + roots_[kNonMonomorphicCacheRootIndex] = value; } // Update the next script id. @@ -824,6 +853,7 @@ class Heap : public AllStatic { static OldSpace* old_data_space_; static OldSpace* code_space_; static MapSpace* map_space_; + static CellSpace* cell_space_; static LargeObjectSpace* lo_space_; static HeapState gc_state_; @@ -836,6 +866,13 @@ class Heap : public AllStatic { static int mc_count_; // how many mark-compact collections happened static int gc_count_; // how many gc happened +#define ROOT_ACCESSOR(type, name, camel_name) \ + static inline void set_##name(type* value) { \ + roots_[k##camel_name##RootIndex] = value; \ + } + ROOT_LIST(ROOT_ACCESSOR) +#undef ROOT_ACCESSOR + #ifdef DEBUG static bool allocation_allowed_; @@ -870,20 +907,49 @@ class Heap : public AllStatic { // last GC. static int old_gen_exhausted_; - // Declare all the roots -#define ROOT_DECLARATION(type, name) static type* name##_; - ROOT_LIST(ROOT_DECLARATION) -#undef ROOT_DECLARATION + // Declare all the root indices. 
+ enum RootListIndex { +#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex, + STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) +#undef ROOT_INDEX_DECLARATION // Utility type maps -#define DECLARE_STRUCT_MAP(NAME, Name, name) static Map* name##_map_; +#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, STRUCT_LIST(DECLARE_STRUCT_MAP) #undef DECLARE_STRUCT_MAP -#define SYMBOL_DECLARATION(name, str) static String* name##_; - SYMBOL_LIST(SYMBOL_DECLARATION) +#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex, + SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) #undef SYMBOL_DECLARATION + kSymbolTableRootIndex, + kStrongRootListLength = kSymbolTableRootIndex, + kRootListLength + }; + + static Object* roots_[kRootListLength]; + + struct StringTypeTable { + InstanceType type; + int size; + RootListIndex index; + }; + + struct ConstantSymbolTable { + const char* contents; + RootListIndex index; + }; + + struct StructTable { + InstanceType type; + int size; + RootListIndex index; + }; + + static const StringTypeTable string_type_table[]; + static const ConstantSymbolTable constant_symbol_table[]; + static const StructTable struct_table[]; + // The special hidden symbol which is an empty string, but does not match // any string when looked up in properties. static String* hidden_symbol_; @@ -911,7 +977,10 @@ class Heap : public AllStatic { // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't // have to test the allocation space argument and (b) can reduce code size // (since both AllocateRaw and AllocateRawMap are inlined). - static inline Object* AllocateRawMap(int size_in_bytes); + static inline Object* AllocateRawMap(); + + // Allocate an uninitialized object in the global property cell space. + static inline Object* AllocateRawCell(); // Initializes a JSObject based on its map. static void InitializeJSObjectFromMap(JSObject* obj, @@ -920,7 +989,15 @@ class Heap : public AllStatic { static bool CreateInitialMaps(); static bool CreateInitialObjects(); + + // These four Create*EntryStub functions are here because of a gcc-4.4 bug + // that assigns wrong vtable entries. 
+ static void CreateCEntryStub(); + static void CreateCEntryDebugBreakStub(); + static void CreateJSEntryStub(); + static void CreateJSConstructEntryStub(); static void CreateFixedStubs(); + static Object* CreateOddball(Map* map, const char* to_string, Object* to_number); @@ -1042,9 +1119,11 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor { HeapObject* object = HeapObject::cast(*current); ASSERT(Heap::Contains(object)); ASSERT(object->map()->IsMap()); +#ifndef V8_TARGET_ARCH_X64 if (Heap::InNewSpace(object)) { ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0)); } +#endif } } } diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 596861009..02bde2ae3 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -114,8 +114,10 @@ void CpuFeatures::Probe() { CodeDesc desc; assm.GetCode(&desc); - Object* code = - Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL); + Object* code = Heap::CreateCode(desc, + NULL, + Code::ComputeFlags(Code::STUB), + Handle<Code>::null()); if (!code->IsCode()) return; LOG(CodeCreateEvent(Logger::BUILTIN_TAG, Code::cast(code), "CpuFeatures::Probe")); @@ -919,6 +921,14 @@ void Assembler::idiv(Register src) { } +void Assembler::imul(Register reg) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + EMIT(0xF7); + EMIT(0xE8 | reg.code()); +} + + void Assembler::imul(Register dst, const Operand& src) { EnsureSpace ensure_space(this); last_pc_ = pc_; diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 92c390cfb..70b510e72 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -544,15 +544,18 @@ class Assembler : public Malloced { void idiv(Register src); - void imul(Register dst, const Operand& src); - void imul(Register dst, Register src, int32_t imm32); + // Signed multiply instructions. + void imul(Register src); // edx:eax = eax * src. + void imul(Register dst, const Operand& src); // dst = dst * src. + void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32. void inc(Register dst); void inc(const Operand& dst); void lea(Register dst, const Operand& src); - void mul(Register src); + // Unsigned multiply instruction. + void mul(Register src); // edx:eax = eax * reg. void neg(Register dst); diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 59c1d4540..6d1dc2dee 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -1856,40 +1856,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, } -class CompareStub: public CodeStub { - public: - CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } - - void Generate(MacroAssembler* masm); - - private: - Condition cc_; - bool strict_; - - Major MajorKey() { return Compare; } - - int MinorKey() { - // Encode the three parameters in a unique 16 bit value. - ASSERT(static_cast<int>(cc_) < (1 << 15)); - return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0); - } - - // Branch to the label if the given object isn't a symbol. - void BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch); - -#ifdef DEBUG - void Print() { - PrintF("CompareStub (cc %d), (strict %s)\n", - static_cast<int>(cc_), - strict_ ? 
"true" : "false"); - } -#endif -}; - - void CodeGenerator::Comparison(Condition cc, bool strict, ControlDestination* dest) { @@ -4987,6 +4953,29 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { } +void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { + ASSERT(args->length() == 0); + + // Get the frame pointer for the calling frame. + Result fp = allocator()->Allocate(); + __ mov(fp.reg(), Operand(ebp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset), + Immediate(ArgumentsAdaptorFrame::SENTINEL)); + __ j(not_equal, &check_frame_marker); + __ mov(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset), + Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); + fp.Unuse(); + destination()->Split(equal); +} + + void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { ASSERT(args->length() == 0); // ArgumentsAccessStub takes the parameter count as an input argument @@ -4999,6 +4988,70 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { } +void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + JumpTarget leave, null, function, non_function_constructor; + Load(args->at(0)); // Load the object. + Result obj = frame_->Pop(); + obj.ToRegister(); + frame_->Spill(obj.reg()); + + // If the object is a smi, we return null. + __ test(obj.reg(), Immediate(kSmiTagMask)); + null.Branch(zero); + + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. + { Result tmp = allocator()->Allocate(); + __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset)); + __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset)); + __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE); + null.Branch(less); + + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ cmp(tmp.reg(), JS_FUNCTION_TYPE); + function.Branch(equal); + } + + // Check if the constructor in the map is a function. + { Result tmp = allocator()->Allocate(); + __ mov(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset)); + __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, tmp.reg()); + non_function_constructor.Branch(not_equal); + } + + // The map register now contains the constructor function. Grab the + // instance class name from there. + __ mov(obj.reg(), + FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset)); + __ mov(obj.reg(), + FieldOperand(obj.reg(), SharedFunctionInfo::kInstanceClassNameOffset)); + frame_->Push(&obj); + leave.Jump(); + + // Functions have class 'Function'. + function.Bind(); + frame_->Push(Factory::function_class_symbol()); + leave.Jump(); + + // Objects with a non-function constructor have class 'Object'. + non_function_constructor.Bind(); + frame_->Push(Factory::Object_symbol()); + leave.Jump(); + + // Non-JS objects have class null. + null.Bind(); + frame_->Push(Factory::null_value()); + + // All done. 
+ leave.Bind(); +} + + void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { ASSERT(args->length() == 1); JumpTarget leave; @@ -7538,6 +7591,16 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ dec(Operand::StaticVariable(scope_depth)); } + // Make sure we're not trying to return 'the hole' from the runtime + // call as this may lead to crashes in the IC code later. + if (FLAG_debug_code) { + Label okay; + __ cmp(eax, Factory::the_hole_value()); + __ j(not_equal, &okay); + __ int3(); + __ bind(&okay); + } + // Check for failure result. Label failure_returned; ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); @@ -7856,6 +7919,12 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } +int CompareStub::MinorKey() { + // Encode the two parameters in a unique 16 bit value. + ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); + return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); +} + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index d25d07c7e..5cd50b8b8 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -522,11 +522,15 @@ class CodeGenerator: public AstVisitor { void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args); void GenerateIsArray(ZoneList<Expression*>* args); + // Support for construct call checks. + void GenerateIsConstructCall(ZoneList<Expression*>* args); + // Support for arguments.length and arguments[?]. void GenerateArgumentsLength(ZoneList<Expression*>* args); void GenerateArgumentsAccess(ZoneList<Expression*>* args); - // Support for accessing the value field of an object (used by Date). + // Support for accessing the class and value fields of an object. + void GenerateClassOf(ZoneList<Expression*>* args); void GenerateValueOf(ZoneList<Expression*>* args); void GenerateSetValueOf(ZoneList<Expression*>* args); diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index 1ba475779..90e0fd1b4 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -66,9 +66,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, // Test the has_named_interceptor bit in the map. __ test(FieldOperand(r0, Map::kInstanceAttributesOffset), Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8)))); + // Jump to miss if the interceptor bit is set. __ j(not_zero, miss_label, not_taken); + // Bail out if we have a JS global proxy object. + __ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset)); + __ cmp(r0, JS_GLOBAL_PROXY_TYPE); + __ j(equal, miss_label, not_taken); + + // Possible work-around for http://crbug.com/16276. + __ cmp(r0, JS_GLOBAL_OBJECT_TYPE); + __ j(equal, miss_label, not_taken); + __ cmp(r0, JS_BUILTINS_OBJECT_TYPE); + __ j(equal, miss_label, not_taken); + // Check that the properties array is a dictionary. __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset)); __ cmp(FieldOperand(r0, HeapObject::kMapOffset), @@ -77,7 +89,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, // Compute the capacity mask. const int kCapacityOffset = - Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize; + Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize; __ mov(r2, FieldOperand(r0, kCapacityOffset)); __ shr(r2, kSmiTagSize); // convert smi to int __ dec(r2); @@ -87,18 +99,18 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, // cover ~93% of loads from dictionaries. 
static const int kProbes = 4; const int kElementsStartOffset = - Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize; + Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; for (int i = 0; i < kProbes; i++) { // Compute the masked index: (hash + i + i * i) & mask. __ mov(r1, FieldOperand(name, String::kLengthOffset)); __ shr(r1, String::kHashShift); if (i > 0) { - __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i))); + __ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i))); } __ and_(r1, Operand(r2)); - // Scale the index by multiplying by the element size. - ASSERT(Dictionary::kElementSize == 3); + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3 // Check if the key is identical to the name. @@ -431,7 +443,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // Probe the stub cache. Code::Flags flags = Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc); - StubCache::GenerateProbe(masm, flags, edx, ecx, ebx); + StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, eax); // If the stub cache probing failed, the receiver might be a value. // For value objects, we use the map of the prototype objects for @@ -468,7 +480,7 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { // Probe the stub cache for the value object. __ bind(&probe); - StubCache::GenerateProbe(masm, flags, edx, ecx, ebx); + StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg); // Cache miss: Jump to runtime. __ bind(&miss); @@ -642,7 +654,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, NOT_IN_LOOP, MONOMORPHIC); - StubCache::GenerateProbe(masm, flags, eax, ecx, ebx); + StubCache::GenerateProbe(masm, flags, eax, ecx, ebx, edx); // Cache miss: Jump to runtime. Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss))); @@ -872,7 +884,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, NOT_IN_LOOP, MONOMORPHIC); - StubCache::GenerateProbe(masm, flags, edx, ecx, ebx); + StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg); // Cache miss: Jump to runtime. Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss))); diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index b31f7062c..0a887d5fa 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -41,39 +41,61 @@ static void ProbeTable(MacroAssembler* masm, Code::Flags flags, StubCache::Table table, Register name, - Register offset) { + Register offset, + Register extra) { ExternalReference key_offset(SCTableReference::keyReference(table)); ExternalReference value_offset(SCTableReference::valueReference(table)); Label miss; - // Save the offset on the stack. - __ push(offset); + if (extra.is_valid()) { + // Get the code entry from the cache. + __ mov(extra, Operand::StaticArray(offset, times_2, value_offset)); - // Check that the key in the entry matches the name. - __ cmp(name, Operand::StaticArray(offset, times_2, key_offset)); - __ j(not_equal, &miss, not_taken); + // Check that the key in the entry matches the name. + __ cmp(name, Operand::StaticArray(offset, times_2, key_offset)); + __ j(not_equal, &miss, not_taken); - // Get the code entry from the cache. 
- __ mov(offset, Operand::StaticArray(offset, times_2, value_offset)); + // Check that the flags match what we're looking for. + __ mov(offset, FieldOperand(extra, Code::kFlagsOffset)); + __ and_(offset, ~Code::kFlagsNotUsedInLookup); + __ cmp(offset, flags); + __ j(not_equal, &miss); - // Check that the flags match what we're looking for. - __ mov(offset, FieldOperand(offset, Code::kFlagsOffset)); - __ and_(offset, ~Code::kFlagsNotUsedInLookup); - __ cmp(offset, flags); - __ j(not_equal, &miss); + // Jump to the first instruction in the code stub. + __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(Operand(extra)); - // Restore offset and re-load code entry from cache. - __ pop(offset); - __ mov(offset, Operand::StaticArray(offset, times_2, value_offset)); + __ bind(&miss); + } else { + // Save the offset on the stack. + __ push(offset); - // Jump to the first instruction in the code stub. - __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(offset)); + // Check that the key in the entry matches the name. + __ cmp(name, Operand::StaticArray(offset, times_2, key_offset)); + __ j(not_equal, &miss, not_taken); - // Miss: Restore offset and fall through. - __ bind(&miss); - __ pop(offset); + // Get the code entry from the cache. + __ mov(offset, Operand::StaticArray(offset, times_2, value_offset)); + + // Check that the flags match what we're looking for. + __ mov(offset, FieldOperand(offset, Code::kFlagsOffset)); + __ and_(offset, ~Code::kFlagsNotUsedInLookup); + __ cmp(offset, flags); + __ j(not_equal, &miss); + + // Restore offset and re-load code entry from cache. + __ pop(offset); + __ mov(offset, Operand::StaticArray(offset, times_2, value_offset)); + + // Jump to the first instruction in the code stub. + __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(Operand(offset)); + + // Pop at miss. + __ bind(&miss); + __ pop(offset); + } } @@ -81,7 +103,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, Register receiver, Register name, - Register scratch) { + Register scratch, + Register extra) { Label miss; // Make sure that code is valid. The shifting code relies on the @@ -94,6 +117,9 @@ void StubCache::GenerateProbe(MacroAssembler* masm, // Make sure that there are no register conflicts. ASSERT(!scratch.is(receiver)); ASSERT(!scratch.is(name)); + ASSERT(!extra.is(receiver)); + ASSERT(!extra.is(name)); + ASSERT(!extra.is(scratch)); // Check that the receiver isn't a smi. __ test(receiver, Immediate(kSmiTagMask)); @@ -106,15 +132,19 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize); // Probe the primary table. - ProbeTable(masm, flags, kPrimary, name, scratch); + ProbeTable(masm, flags, kPrimary, name, scratch, extra); // Primary miss: Compute hash for secondary probe. + __ mov(scratch, FieldOperand(name, String::kLengthOffset)); + __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xor_(scratch, flags); + __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize); __ sub(scratch, Operand(name)); __ add(Operand(scratch), Immediate(flags)); __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize); // Probe the secondary table. - ProbeTable(masm, flags, kSecondary, name, scratch); + ProbeTable(masm, flags, kSecondary, name, scratch, extra); // Cache miss: Fall-through and let caller handle the miss by // entering the runtime system. 
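NOTE: both ProbeTable variants above implement the same two-level megamorphic
stub cache; the new branch just exploits a caller-provided spare register
("extra") so the cached code entry can be loaded and its flags checked without
the push/pop spill of "offset" that the old path needs. The shape of the probe
itself, reduced to portable C++ with invented hashing (the real code mixes the
receiver map, the name, and the code flags in assembly):

    #include <cassert>
    #include <cstdint>

    struct Entry {
      uint32_t key = 0;            // hash of (map, name)
      const void* code = nullptr;  // compiled stub, or a stale entry
    };

    const int kPrimarySize = 2048;    // table sizes invented for the sketch
    const int kSecondarySize = 512;
    Entry primary[kPrimarySize];
    Entry secondary[kSecondarySize];

    const void* Probe(uint32_t key) {
      Entry* p = &primary[key % kPrimarySize];      // primary probe
      if (p->key == key) return p->code;
      Entry* s = &secondary[(key * 2654435761u) % kSecondarySize];
      if (s->key == key) return s->code;            // secondary probe
      return nullptr;                               // miss: enter the runtime
    }

    int main() {
      static const char stub[] = "stub";
      uint32_t key = 0x1234u;
      primary[key % kPrimarySize] = Entry{key, stub};
      assert(Probe(key) == stub);
      assert(Probe(0x9999u) == nullptr);
    }

Only probes that really have a register to spare get the fast variant: per the
GenerateProbe call sites above, LoadIC passes edx and CallIC passes eax, while
StoreIC and the value-object re-probe pass no_reg and keep the spill version.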
@@ -243,114 +273,6 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, } -void StubCompiler::GenerateLoadField(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register scratch1, - Register scratch2, - int index, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ test(receiver, Immediate(kSmiTagMask)); - __ j(zero, miss_label, not_taken); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - - // Get the value from the properties. - GenerateFastPropertyLoad(masm, eax, reg, holder, index); - __ ret(0); -} - - -void StubCompiler::GenerateLoadCallback(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register name, - Register scratch1, - Register scratch2, - AccessorInfo* callback, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ test(receiver, Immediate(kSmiTagMask)); - __ j(zero, miss_label, not_taken); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - - // Push the arguments on the JS stack of the caller. - __ pop(scratch2); // remove return address - __ push(receiver); // receiver - __ push(Immediate(Handle<AccessorInfo>(callback))); // callback data - __ push(name); // name - __ push(reg); // holder - __ push(scratch2); // restore return address - - // Do tail-call to the runtime system. - ExternalReference load_callback_property = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 4); -} - - -void StubCompiler::GenerateLoadConstant(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register scratch1, - Register scratch2, - Object* value, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ test(receiver, Immediate(kSmiTagMask)); - __ j(zero, miss_label, not_taken); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - - // Return the constant value. - __ mov(eax, Handle<Object>(value)); - __ ret(0); -} - - -void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Smi* lookup_hint, - Register receiver, - Register name, - Register scratch1, - Register scratch2, - Label* miss_label) { - // Check that the receiver isn't a smi. - __ test(receiver, Immediate(kSmiTagMask)); - __ j(zero, miss_label, not_taken); - - // Check that the maps haven't changed. - Register reg = - masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); - - // Push the arguments on the JS stack of the caller. - __ pop(scratch2); // remove return address - __ push(receiver); // receiver - __ push(reg); // holder - __ push(name); // name - // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or - // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method? - __ push(Immediate(lookup_hint)); - __ push(scratch2); // restore return address - - // Do tail-call to the runtime system. 
- ExternalReference load_ic_property = - ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)); - __ TailCallRuntime(load_ic_property, 4); -} void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { @@ -444,10 +366,159 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, #undef __ - #define __ ACCESS_MASM(masm()) +Register StubCompiler::CheckPrototypes(JSObject* object, + Register object_reg, + JSObject* holder, + Register holder_reg, + Register scratch, + String* name, + Label* miss) { + // Check that the maps haven't changed. + Register result = + masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss); + + // If we've skipped any global objects, it's not enough to verify + // that their maps haven't changed. + while (object != holder) { + if (object->IsGlobalObject()) { + GlobalObject* global = GlobalObject::cast(object); + Object* probe = global->EnsurePropertyCell(name); + if (probe->IsFailure()) { + set_failure(Failure::cast(probe)); + return result; + } + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe); + ASSERT(cell->value()->IsTheHole()); + __ mov(scratch, Immediate(Handle<Object>(cell))); + __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset), + Immediate(Factory::the_hole_value())); + __ j(not_equal, miss, not_taken); + } + object = JSObject::cast(object->GetPrototype()); + } + + // Return the register containin the holder. + return result; +} + + +void StubCompiler::GenerateLoadField(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + int index, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ test(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss, not_taken); + + // Check the prototype chain. + Register reg = + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + // Get the value from the properties. + GenerateFastPropertyLoad(masm(), eax, reg, holder, index); + __ ret(0); +} + + +void StubCompiler::GenerateLoadCallback(JSObject* object, + JSObject* holder, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + AccessorInfo* callback, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ test(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss, not_taken); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + // Push the arguments on the JS stack of the caller. + __ pop(scratch2); // remove return address + __ push(receiver); // receiver + __ push(Immediate(Handle<AccessorInfo>(callback))); // callback data + __ push(name_reg); // name + __ push(reg); // holder + __ push(scratch2); // restore return address + + // Do tail-call to the runtime system. + ExternalReference load_callback_property = + ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); + __ TailCallRuntime(load_callback_property, 4); +} + + +void StubCompiler::GenerateLoadConstant(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + Object* value, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ test(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss, not_taken); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + // Return the constant value. 
+ __ mov(eax, Handle<Object>(value)); + __ ret(0); +} + + +void StubCompiler::GenerateLoadInterceptor(JSObject* object, + JSObject* holder, + Smi* lookup_hint, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ test(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss, not_taken); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + // Push the arguments on the JS stack of the caller. + __ pop(scratch2); // remove return address + __ push(receiver); // receiver + __ push(reg); // holder + __ push(name_reg); // name + // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or + // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method? + __ push(Immediate(lookup_hint)); + __ push(scratch2); // restore return address + + // Do tail-call to the runtime system. + ExternalReference load_ic_property = + ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)); + __ TailCallRuntime(load_ic_property, 4); +} + + // TODO(1241006): Avoid having lazy compile stubs specialized by the // number of arguments. It is not needed anymore. Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { @@ -475,9 +546,7 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { Object* CallStubCompiler::CompileCallField(Object* object, JSObject* holder, int index, - String* name, - Code::Flags flags) { - ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags)); + String* name) { // ----------- S t a t e ------------- // ----------------------------------- Label miss; @@ -492,7 +561,8 @@ Object* CallStubCompiler::CompileCallField(Object* object, // Do the right check and compute the holder register. Register reg = - masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss); + CheckPrototypes(JSObject::cast(object), edx, holder, + ebx, ecx, name, &miss); GenerateFastPropertyLoad(masm(), edi, reg, holder, index); @@ -518,16 +588,15 @@ Object* CallStubCompiler::CompileCallField(Object* object, __ jmp(ic, RelocInfo::CODE_TARGET); // Return the generated code. - return GetCodeWithFlags(flags, name); + return GetCode(FIELD, name); } Object* CallStubCompiler::CompileCallConstant(Object* object, JSObject* holder, JSFunction* function, - CheckType check, - Code::Flags flags) { - ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags)); + String* name, + CheckType check) { // ----------- S t a t e ------------- // ----------------------------------- Label miss; @@ -549,7 +618,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, switch (check) { case RECEIVER_MAP_CHECK: // Check that the maps haven't changed. - __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss); + CheckPrototypes(JSObject::cast(object), edx, holder, + ebx, ecx, name, &miss); // Patch the receiver on the stack with the global proxy if // necessary. 
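NOTE: CheckPrototypes, introduced a few hunks up, is what makes these stubs
safe across prototype chains that contain global objects. A map check alone is
not enough there, since a dictionary-mode global can gain a property without
ever changing its map. So for each global object skipped during the walk, the
compiler ensures a property cell for the name and emits a check that the cell
still holds the hole. The guard, as a standalone sketch with invented names:

    #include <cassert>

    struct Cell { bool is_hole = true; };  // stand-in for JSGlobalPropertyCell

    // Compiled fast path: valid only while the guard cell stays the hole.
    bool FastPathStillValid(const Cell* guard) { return guard->is_hole; }

    int main() {
      Cell guard;                           // made by EnsurePropertyCell(name)
      assert(FastPathStillValid(&guard));
      guard.is_hole = false;                // property later defined on global
      assert(!FastPathStillValid(&guard));  // stub must now miss
    }

If the property is defined later, the cell is filled in, the compiled cmp
against Factory::the_hole_value() fails, and the stub falls through to the
miss handler instead of returning a stale answer.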
@@ -569,8 +639,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateLoadGlobalFunctionPrototype(masm(), Context::STRING_FUNCTION_INDEX, ecx); - __ CheckMaps(JSObject::cast(object->GetPrototype()), - ecx, holder, ebx, edx, &miss); + CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder, + ebx, edx, name, &miss); break; case NUMBER_CHECK: { @@ -585,8 +655,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateLoadGlobalFunctionPrototype(masm(), Context::NUMBER_FUNCTION_INDEX, ecx); - __ CheckMaps(JSObject::cast(object->GetPrototype()), - ecx, holder, ebx, edx, &miss); + CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder, + ebx, edx, name, &miss); break; } @@ -602,13 +672,14 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, GenerateLoadGlobalFunctionPrototype(masm(), Context::BOOLEAN_FUNCTION_INDEX, ecx); - __ CheckMaps(JSObject::cast(object->GetPrototype()), - ecx, holder, ebx, edx, &miss); + CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder, + ebx, edx, name, &miss); break; } case JSARRAY_HAS_FAST_ELEMENTS_CHECK: - __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss); + CheckPrototypes(JSObject::cast(object), edx, holder, + ebx, ecx, name, &miss); // Make sure object->elements()->map() != Heap::dictionary_array_map() // Get the elements array of the object. __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); @@ -627,6 +698,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Jump to the cached code (tail call). + ASSERT(function->is_compiled()); Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); __ InvokeCode(code, expected, arguments(), @@ -642,7 +714,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, if (function->shared()->name()->IsString()) { function_name = String::cast(function->shared()->name()); } - return GetCodeWithFlags(flags, function_name); + return GetCode(CONSTANT_FUNCTION, function_name); } @@ -665,7 +737,8 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object, // Check that maps have not changed and compute the holder register. Register reg = - masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss); + CheckPrototypes(JSObject::cast(object), edx, holder, + ebx, ecx, name, &miss); // Enter an internal frame. __ EnterInternalFrame(); @@ -718,6 +791,70 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object, } +Object* CallStubCompiler::CompileCallGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name) { + // ----------- S t a t e ------------- + // ----------------------------------- + Label miss; + + __ IncrementCounter(&Counters::call_global_inline, 1); + + // Get the number of arguments. + const int argc = arguments().immediate(); + + // Get the receiver from the stack. + __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); + + // If the object is the holder then we know that it's a global + // object which can only happen for contextual calls. In this case, + // the receiver cannot be a smi. + if (object != holder) { + __ test(edx, Immediate(kSmiTagMask)); + __ j(zero, &miss, not_taken); + } + + // Check that the maps haven't changed. + CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss); + + // Get the value from the cell. 
+ __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell))); + __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset)); + + // Check that the cell contains the same function. + __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function))); + __ j(not_equal, &miss, not_taken); + + // Patch the receiver on the stack with the global proxy. + if (object->IsGlobalObject()) { + __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset)); + __ mov(Operand(esp, (argc + 1) * kPointerSize), edx); + } + + // Setup the context (function already in edi). + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + // Jump to the cached code (tail call). + ASSERT(function->is_compiled()); + Handle<Code> code(function->code()); + ParameterCount expected(function->shared()->formal_parameter_count()); + __ InvokeCode(code, expected, arguments(), + RelocInfo::CODE_TARGET, JUMP_FUNCTION); + + // Handle call cache miss. + __ bind(&miss); + __ DecrementCounter(&Counters::call_global_inline, 1); + __ IncrementCounter(&Counters::call_global_inline_miss, 1); + Handle<Code> ic = ComputeCallMiss(arguments().immediate()); + __ jmp(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, name); +} + + Object* StoreStubCompiler::CompileStoreField(JSObject* object, int index, Map* transition, @@ -861,6 +998,44 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, } +Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, + JSGlobalPropertyCell* cell, + String* name) { + // ----------- S t a t e ------------- + // -- eax : value + // -- ecx : name + // -- esp[0] : return address + // -- esp[4] : receiver + // ----------------------------------- + Label miss; + + __ IncrementCounter(&Counters::named_store_global_inline, 1); + + // Check that the map of the global has not changed. + __ mov(ebx, (Operand(esp, kPointerSize))); + __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), + Immediate(Handle<Map>(object->map()))); + __ j(not_equal, &miss, not_taken); + + // Store the value in the cell. + __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell))); + __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax); + + // Return the value (register eax). + __ ret(0); + + // Handle store cache miss. + __ bind(&miss); + __ DecrementCounter(&Counters::named_store_global_inline, 1); + __ IncrementCounter(&Counters::named_store_global_inline_miss, 1); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); + __ jmp(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. 
+ return GetCode(NORMAL, name); +} + + Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, int index, Map* transition, @@ -904,6 +1079,7 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, } + Object* LoadStubCompiler::CompileLoadField(JSObject* object, JSObject* holder, int index, @@ -916,7 +1092,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, Label miss; __ mov(eax, (Operand(esp, kPointerSize))); - GenerateLoadField(masm(), object, holder, eax, ebx, edx, index, &miss); + GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -937,8 +1113,8 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object, Label miss; __ mov(eax, (Operand(esp, kPointerSize))); - GenerateLoadCallback(masm(), object, holder, eax, ecx, ebx, - edx, callback, &miss); + GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, + callback, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -959,7 +1135,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, Label miss; __ mov(eax, (Operand(esp, kPointerSize))); - GenerateLoadConstant(masm(), object, holder, eax, ebx, edx, value, &miss); + GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -981,14 +1157,14 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, __ mov(eax, (Operand(esp, kPointerSize))); // TODO(368): Compile in the whole chain: all the interceptors in // prototypes and ultimate answer. - GenerateLoadInterceptor(masm(), - receiver, + GenerateLoadInterceptor(receiver, holder, holder->InterceptorPropertyLookupHint(name), eax, ecx, edx, ebx, + name, &miss); __ bind(&miss); @@ -999,6 +1175,59 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, } +Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + String* name, + bool is_dont_delete) { + // ----------- S t a t e ------------- + // -- ecx : name + // -- esp[0] : return address + // -- esp[4] : receiver + // ----------------------------------- + Label miss; + + __ IncrementCounter(&Counters::named_load_global_inline, 1); + + // Get the receiver from the stack. + __ mov(eax, (Operand(esp, kPointerSize))); + + // If the object is the holder then we know that it's a global + // object which can only happen for contextual loads. In this case, + // the receiver cannot be a smi. + if (object != holder) { + __ test(eax, Immediate(kSmiTagMask)); + __ j(zero, &miss, not_taken); + } + + // Check that the maps haven't changed. + CheckPrototypes(object, eax, holder, ebx, edx, name, &miss); + + // Get the value from the cell. + __ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell))); + __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset)); + + // Check for deleted property if property can actually be deleted. + if (!is_dont_delete) { + __ cmp(eax, Factory::the_hole_value()); + __ j(equal, &miss, not_taken); + } else if (FLAG_debug_code) { + __ cmp(eax, Factory::the_hole_value()); + __ Check(not_equal, "DontDelete cells can't contain the hole"); + } + + __ ret(0); + + __ bind(&miss); + __ DecrementCounter(&Counters::named_load_global_inline, 1); + __ IncrementCounter(&Counters::named_load_global_inline_miss, 1); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. 
+ return GetCode(NORMAL, name); +} + + Object* KeyedLoadStubCompiler::CompileLoadField(String* name, JSObject* receiver, JSObject* holder, @@ -1018,7 +1247,8 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, __ cmp(Operand(eax), Immediate(Handle<String>(name))); __ j(not_equal, &miss, not_taken); - GenerateLoadField(masm(), receiver, holder, ecx, ebx, edx, index, &miss); + GenerateLoadField(receiver, holder, ecx, ebx, edx, index, name, &miss); + __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_field, 1); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1047,8 +1277,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, __ cmp(Operand(eax), Immediate(Handle<String>(name))); __ j(not_equal, &miss, not_taken); - GenerateLoadCallback(masm(), receiver, holder, ecx, eax, ebx, edx, - callback, &miss); + GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx, + callback, name, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_callback, 1); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1077,7 +1307,8 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ cmp(Operand(eax), Immediate(Handle<String>(name))); __ j(not_equal, &miss, not_taken); - GenerateLoadConstant(masm(), receiver, holder, ecx, ebx, edx, value, &miss); + GenerateLoadConstant(receiver, holder, ecx, ebx, edx, + value, name, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_constant_function, 1); GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); @@ -1105,14 +1336,14 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, __ cmp(Operand(eax), Immediate(Handle<String>(name))); __ j(not_equal, &miss, not_taken); - GenerateLoadInterceptor(masm(), - receiver, + GenerateLoadInterceptor(receiver, holder, Smi::FromInt(JSObject::kLookupInHolder), ecx, eax, edx, ebx, + name, &miss); __ bind(&miss); __ DecrementCounter(&Counters::keyed_load_interceptor, 1); diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 35c40366e..7e82295c8 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -265,6 +265,39 @@ void KeyedStoreIC::Clear(Address address, Code* target) { } +static bool HasInterceptorGetter(JSObject* object) { + return !object->GetNamedInterceptor()->getter()->IsUndefined(); +} + + +static void LookupForRead(Object* object, + String* name, + LookupResult* lookup) { + object->Lookup(name, lookup); + if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR) { + return; + } + + JSObject* holder = lookup->holder(); + if (HasInterceptorGetter(holder)) { + return; + } + + // There is no getter, just skip it and lookup down the proto chain + holder->LocalLookupRealNamedProperty(name, lookup); + if (lookup->IsValid()) { + return; + } + + Object* proto = holder->GetPrototype(); + if (proto == Heap::null_value()) { + return; + } + + LookupForRead(proto, name, lookup); +} + + Object* CallIC::TryCallAsFunction(Object* object) { HandleScope scope; Handle<Object> target(object); @@ -294,13 +327,11 @@ Object* CallIC::LoadFunction(State state, return TypeError("non_object_property_call", object, name); } - Object* result = Heap::the_hole_value(); - // Check if the name is trivially convertible to an index and get // the element if so. uint32_t index; if (name->AsArrayIndex(&index)) { - result = object->GetElement(index); + Object* result = object->GetElement(index); if (result->IsJSFunction()) return result; // Try to find a suitable function delegate for the object at hand. 
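NOTE: LookupForRead above lets the IC machinery look straight past named
interceptors that cannot actually intercept reads: when the interceptor's
getter is undefined, the holder is re-examined for a real named property and
otherwise the walk continues down the prototype chain. An iterative rendering
with toy types (the real version recurses and works on a LookupResult):

    #include <cassert>

    struct Obj {
      bool has_interceptor = false;
      bool interceptor_has_getter = false;
      bool has_own_property = false;
      Obj* prototype = nullptr;
    };

    const Obj* LookupForRead(const Obj* o) {
      while (o != nullptr) {
        if (o->has_interceptor && o->interceptor_has_getter)
          return o;                        // interceptor handles the read
        if (o->has_own_property) return o; // real property on the holder
        o = o->prototype;                  // skip getter-less interceptors
      }
      return nullptr;
    }

    int main() {
      Obj proto;
      proto.has_own_property = true;
      Obj holder;
      holder.has_interceptor = true;       // interceptor without a getter
      holder.prototype = &proto;
      assert(LookupForRead(&holder) == &proto);
    }

The ASSERT(HasInterceptorGetter(lookup->holder())) checks added to the
INTERCEPTOR cases below rely on this invariant: by the time the caches are
updated, any interceptor holder is guaranteed to have a real getter.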
@@ -312,7 +343,7 @@ Object* CallIC::LoadFunction(State state, // Lookup the property in the object. LookupResult lookup; - object->Lookup(*name, &lookup); + LookupForRead(*object, *name, &lookup); if (!lookup.IsValid()) { // If the object does not have the requested property, check which @@ -328,11 +359,11 @@ Object* CallIC::LoadFunction(State state, UpdateCaches(&lookup, state, object, name); } + // Get the property. + PropertyAttributes attr; + Object* result = object->GetProperty(*object, &lookup, *name, &attr); + if (result->IsFailure()) return result; if (lookup.type() == INTERCEPTOR) { - // Get the property. - PropertyAttributes attr; - result = object->GetProperty(*name, &attr); - if (result->IsFailure()) return result; // If the object does not have the requested property, check which // exception we need to throw. if (attr == ABSENT) { @@ -341,11 +372,6 @@ Object* CallIC::LoadFunction(State state, } return TypeError("undefined_method", object, name); } - } else { - // Lookup is valid and no interceptors are involved. Get the - // property. - result = object->GetProperty(*name); - if (result->IsFailure()) return result; } ASSERT(result != Heap::the_hole_value()); @@ -369,7 +395,7 @@ Object* CallIC::LoadFunction(State state, // cause GC. HandleScope scope; Handle<JSFunction> function(JSFunction::cast(result)); - Debug::HandleStepIn(function, fp(), false); + Debug::HandleStepIn(function, object, fp(), false); return *function; } #endif @@ -423,17 +449,34 @@ void CallIC::UpdateCaches(LookupResult* lookup, break; } case NORMAL: { - // There is only one shared stub for calling normalized - // properties. It does not traverse the prototype chain, so the - // property must be found in the receiver for the stub to be - // applicable. if (!object->IsJSObject()) return; Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (lookup->holder() != *receiver) return; - code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver); + + if (lookup->holder()->IsGlobalObject()) { + GlobalObject* global = GlobalObject::cast(lookup->holder()); + JSGlobalPropertyCell* cell = + JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup)); + if (!cell->value()->IsJSFunction()) return; + JSFunction* function = JSFunction::cast(cell->value()); + code = StubCache::ComputeCallGlobal(argc, + in_loop, + *name, + *receiver, + global, + cell, + function); + } else { + // There is only one shared stub for calling normalized + // properties. It does not traverse the prototype chain, so the + // property must be found in the receiver for the stub to be + // applicable. + if (lookup->holder() != *receiver) return; + code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver); + } break; } case INTERCEPTOR: { + ASSERT(HasInterceptorGetter(lookup->holder())); code = StubCache::ComputeCallInterceptor(argc, *name, *object, lookup->holder()); break; @@ -445,7 +488,7 @@ void CallIC::UpdateCaches(LookupResult* lookup, // If we're unable to compute the stub (not enough memory left), we // simply avoid updating the caches. - if (code->IsFailure()) return; + if (code == NULL || code->IsFailure()) return; // Patch the call site depending on the state of the cache. if (state == UNINITIALIZED || @@ -520,7 +563,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) { // Named lookup in the object. LookupResult lookup; - object->Lookup(*name, &lookup); + LookupForRead(*object, *name, &lookup); // If lookup is invalid, check if we need to throw an exception. 
if (!lookup.IsValid()) { @@ -614,12 +657,23 @@ void LoadIC::UpdateCaches(LookupResult* lookup, break; } case NORMAL: { - // There is only one shared stub for loading normalized - // properties. It does not traverse the prototype chain, so the - // property must be found in the receiver for the stub to be - // applicable. - if (lookup->holder() != *receiver) return; - code = StubCache::ComputeLoadNormal(*name, *receiver); + if (lookup->holder()->IsGlobalObject()) { + GlobalObject* global = GlobalObject::cast(lookup->holder()); + JSGlobalPropertyCell* cell = + JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup)); + code = StubCache::ComputeLoadGlobal(*name, + *receiver, + global, + cell, + lookup->IsDontDelete()); + } else { + // There is only one shared stub for loading normalized + // properties. It does not traverse the prototype chain, so the + // property must be found in the receiver for the stub to be + // applicable. + if (lookup->holder() != *receiver) return; + code = StubCache::ComputeLoadNormal(*name, *receiver); + } break; } case CALLBACKS: { @@ -632,6 +686,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup, break; } case INTERCEPTOR: { + ASSERT(HasInterceptorGetter(lookup->holder())); code = StubCache::ComputeLoadInterceptor(*name, *receiver, lookup->holder()); break; @@ -643,7 +698,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup, // If we're unable to compute the stub (not enough memory left), we // simply avoid updating the caches. - if (code->IsFailure()) return; + if (code == NULL || code->IsFailure()) return; // Patch the call site depending on the state of the cache. if (state == UNINITIALIZED || state == PREMONOMORPHIC || @@ -723,7 +778,7 @@ Object* KeyedLoadIC::Load(State state, // Named lookup. LookupResult lookup; - object->Lookup(*name, &lookup); + LookupForRead(*object, *name, &lookup); // If lookup is invalid, check if we need to throw an exception. if (!lookup.IsValid()) { @@ -817,6 +872,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state, break; } case INTERCEPTOR: { + ASSERT(HasInterceptorGetter(lookup->holder())); code = StubCache::ComputeKeyedLoadInterceptor(*name, *receiver, lookup->holder()); break; @@ -832,7 +888,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state, // If we're unable to compute the stub (not enough memory left), we // simply avoid updating the caches. - if (code->IsFailure()) return; + if (code == NULL || code->IsFailure()) return; // Patch the call site depending on the state of the cache. Make // sure to always rewrite from monomorphic to megamorphic. @@ -863,9 +919,9 @@ static bool StoreICableLookup(LookupResult* lookup) { } -static bool LookupForStoreIC(JSObject* object, - String* name, - LookupResult* lookup) { +static bool LookupForWrite(JSObject* object, + String* name, + LookupResult* lookup) { object->LocalLookup(name, lookup); if (!StoreICableLookup(lookup)) { return false; @@ -908,7 +964,7 @@ Object* StoreIC::Store(State state, // Lookup the property locally in the receiver. 
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) { LookupResult lookup; - if (LookupForStoreIC(*receiver, *name, &lookup)) { + if (LookupForWrite(*receiver, *name, &lookup)) { UpdateCaches(&lookup, state, receiver, name, value); } } @@ -953,6 +1009,19 @@ void StoreIC::UpdateCaches(LookupResult* lookup, code = StubCache::ComputeStoreField(*name, *receiver, index, *transition); break; } + case NORMAL: { + if (!receiver->IsGlobalObject()) { + return; + } + // The stub generated for the global object picks the value directly + // from the property cell. So the property must be directly on the + // global object. + Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver); + JSGlobalPropertyCell* cell = + JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup)); + code = StubCache::ComputeStoreGlobal(*name, *global, cell); + break; + } case CALLBACKS: { if (!lookup->GetCallbackObject()->IsAccessorInfo()) return; AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); @@ -961,6 +1030,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup, break; } case INTERCEPTOR: { + ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined()); code = StubCache::ComputeStoreInterceptor(*name, *receiver); break; } @@ -970,7 +1040,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup, // If we're unable to compute the stub (not enough memory left), we // simply avoid updating the caches. - if (code->IsFailure()) return; + if (code == NULL || code->IsFailure()) return; // Patch the call site depending on the state of the cache. if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) { @@ -1092,7 +1162,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, // If we're unable to compute the stub (not enough memory left), we // simply avoid updating the caches. - if (code->IsFailure()) return; + if (code == NULL || code->IsFailure()) return; // Patch the call site depending on the state of the cache. Make // sure to always rewrite from monomorphic to megamorphic. diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc index 355fae467..0a8ae8cce 100644 --- a/deps/v8/src/interpreter-irregexp.cc +++ b/deps/v8/src/interpreter-irregexp.cc @@ -115,17 +115,17 @@ static void TraceInterpreter(const byte* code_base, } -#define BYTECODE(name) \ - case BC_##name: \ - TraceInterpreter(code_base, \ - pc, \ - backtrack_sp - backtrack_stack, \ - current, \ - current_char, \ - BC_##name##_LENGTH, \ +#define BYTECODE(name) \ + case BC_##name: \ + TraceInterpreter(code_base, \ + pc, \ + backtrack_sp - backtrack_stack_base, \ + current, \ + current_char, \ + BC_##name##_LENGTH, \ #name); #else -#define BYTECODE(name) \ +#define BYTECODE(name) \ case BC_##name: #endif @@ -142,6 +142,49 @@ static int32_t Load16Aligned(const byte* pc) { } + +// A simple abstraction over the backtracking stack used by the interpreter. +// This backtracking stack does not grow automatically, but it ensures that the +// memory held by the stack is released or remembered in a cache when the +// matching terminates. +class BacktrackStack { + public: + explicit BacktrackStack() { + if (cache_ != NULL) { + // If the cache is not empty reuse the previously allocated stack. + data_ = cache_; + cache_ = NULL; + } else { + // Cache was empty. Allocate a new backtrack stack. + data_ = NewArray<int>(kBacktrackStackSize); + } + } + + ~BacktrackStack() { + if (cache_ == NULL) { + // The cache is empty. Keep this backtrack stack around.
+ cache_ = data_; + } else { + // A backtrack stack was already cached, just release this one. + DeleteArray(data_); + } + } + + int* data() const { return data_; } + + int max_size() const { return kBacktrackStackSize; } + + private: + static const int kBacktrackStackSize = 10000; + + int* data_; + static int* cache_; + + DISALLOW_COPY_AND_ASSIGN(BacktrackStack); +}; + +int* BacktrackStack::cache_ = NULL; + + template <typename Char> static bool RawMatch(const byte* code_base, Vector<const Char> subject, @@ -149,10 +192,13 @@ static bool RawMatch(const byte* code_base, int current, uint32_t current_char) { const byte* pc = code_base; - static const int kBacktrackStackSize = 10000; - int backtrack_stack[kBacktrackStackSize]; - int backtrack_stack_space = kBacktrackStackSize; - int* backtrack_sp = backtrack_stack; + // BacktrackStack ensures that the memory allocated for the backtracking stack + // is returned to the system or cached if there is no stack being cached at + // the moment. + BacktrackStack backtrack_stack; + int* backtrack_stack_base = backtrack_stack.data(); + int* backtrack_sp = backtrack_stack_base; + int backtrack_stack_space = backtrack_stack.max_size(); #ifdef DEBUG if (FLAG_trace_regexp_bytecodes) { PrintF("\n\nStart bytecode interpreter\n\n"); @@ -202,13 +248,13 @@ static bool RawMatch(const byte* code_base, pc += BC_SET_CP_TO_REGISTER_LENGTH; break; BYTECODE(SET_REGISTER_TO_SP) - registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack; + registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base; pc += BC_SET_REGISTER_TO_SP_LENGTH; break; BYTECODE(SET_SP_TO_REGISTER) - backtrack_sp = backtrack_stack + registers[insn >> BYTECODE_SHIFT]; - backtrack_stack_space = kBacktrackStackSize - - (backtrack_sp - backtrack_stack); + backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT]; + backtrack_stack_space = backtrack_stack.max_size() - + (backtrack_sp - backtrack_stack_base); pc += BC_SET_SP_TO_REGISTER_LENGTH; break; BYTECODE(POP_CP) diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index 7b294ec45..852d431be 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -51,6 +51,8 @@ #include "x64/regexp-macro-assembler-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/regexp-macro-assembler-arm.h" +#else +#error Unsupported target architecture. #endif #include "interpreter-irregexp.h" @@ -261,7 +263,6 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, // Irregexp implementation. - // Ensures that the regexp object contains a compiled version of the // source for either ASCII or non-ASCII strings. // If the compiled version doesn't already exist, it is compiled @@ -269,25 +270,26 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, // If compilation fails, an exception is thrown and this function // returns false. bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) { - int index; - if (is_ascii) { - index = JSRegExp::kIrregexpASCIICodeIndex; - } else { - index = JSRegExp::kIrregexpUC16CodeIndex; - } - Object* entry = re->DataAt(index); - if (!entry->IsTheHole()) { - // A value has already been compiled. - if (entry->IsJSObject()) { - // If it's a JS value, it's an error. - Top::Throw(entry); - return false; - } - return true; - } +#ifdef V8_NATIVE_REGEXP + if (re->DataAt(JSRegExp::code_index(is_ascii))->IsCode()) return true; +#else // ! 
V8_NATIVE_REGEXP (RegExp interpreter code) + if (re->DataAt(JSRegExp::code_index(is_ascii))->IsByteArray()) return true; +#endif + return CompileIrregexp(re, is_ascii); +} + +bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re, bool is_ascii) { // Compile the RegExp. CompilationZoneScope zone_scope(DELETE_ON_EXIT); + Object* entry = re->DataAt(JSRegExp::code_index(is_ascii)); + if (entry->IsJSObject()) { + // If it's a JSObject, a previous compilation failed and threw this object. + // Re-throw the object without trying again. + Top::Throw(entry); + return false; + } + ASSERT(entry->IsTheHole()); JSRegExp::Flags flags = re->GetFlags(); @@ -300,7 +302,7 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) { FlatStringReader reader(pattern); if (!ParseRegExp(&reader, flags.is_multiline(), &compile_data)) { // Throw an exception if we fail to parse the pattern. - // THIS SHOULD NOT HAPPEN. We already parsed it successfully once. + // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once. ThrowRegExpException(re, pattern, compile_data.error, @@ -323,17 +325,15 @@ bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii) { Handle<Object> regexp_err = Factory::NewSyntaxError("malformed_regexp", array); Top::Throw(*regexp_err); - re->SetDataAt(index, *regexp_err); + re->SetDataAt(JSRegExp::code_index(is_ascii), *regexp_err); return false; } - NoHandleAllocation no_handles; - - FixedArray* data = FixedArray::cast(re->data()); - data->set(index, result.code); - int register_max = IrregexpMaxRegisterCount(data); + Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data())); + data->set(JSRegExp::code_index(is_ascii), result.code); + int register_max = IrregexpMaxRegisterCount(*data); if (result.num_registers > register_max) { - SetIrregexpMaxRegisterCount(data, result.num_registers); + SetIrregexpMaxRegisterCount(*data, result.num_registers); } return true; @@ -362,24 +362,12 @@ int RegExpImpl::IrregexpNumberOfRegisters(FixedArray* re) { ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) { - int index; - if (is_ascii) { - index = JSRegExp::kIrregexpASCIICodeIndex; - } else { - index = JSRegExp::kIrregexpUC16CodeIndex; - } - return ByteArray::cast(re->get(index)); + return ByteArray::cast(re->get(JSRegExp::code_index(is_ascii))); } Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) { - int index; - if (is_ascii) { - index = JSRegExp::kIrregexpASCIICodeIndex; - } else { - index = JSRegExp::kIrregexpUC16CodeIndex; - } - return Code::cast(re->get(index)); + return Code::cast(re->get(JSRegExp::code_index(is_ascii))); } @@ -406,6 +394,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, int number_of_capture_registers = (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2; +#ifndef V8_NATIVE_REGEXP #ifdef DEBUG if (FLAG_trace_regexp_bytecodes) { String* pattern = jsregexp->Pattern(); @@ -413,6 +402,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString())); } #endif +#endif if (!subject->IsFlat()) { FlattenString(subject); @@ -420,88 +410,83 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead); - bool rc; - // We have to initialize this with something to make gcc happy but we can't - // initialize it with its real value until after the GC-causing things are - // over. 
- FixedArray* array = NULL; + Handle<FixedArray> array; // Dispatch to the correct RegExp implementation. - Handle<String> original_subject = subject; Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data())); - if (UseNativeRegexp()) { +#ifdef V8_NATIVE_REGEXP #if V8_TARGET_ARCH_IA32 - OffsetsVector captures(number_of_capture_registers); - int* captures_vector = captures.vector(); - RegExpMacroAssemblerIA32::Result res; - do { - bool is_ascii = subject->IsAsciiRepresentation(); - if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) { - return Handle<Object>::null(); - } - Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii)); - res = RegExpMacroAssemblerIA32::Match(code, - subject, - captures_vector, - captures.length(), - previous_index); - // If result is RETRY, the string has changed representation, and we - // must restart from scratch. - } while (res == RegExpMacroAssemblerIA32::RETRY); - if (res == RegExpMacroAssemblerIA32::EXCEPTION) { - ASSERT(Top::has_pending_exception()); - return Handle<Object>::null(); - } - ASSERT(res == RegExpMacroAssemblerIA32::SUCCESS - || res == RegExpMacroAssemblerIA32::FAILURE); - - rc = (res == RegExpMacroAssemblerIA32::SUCCESS); - if (!rc) return Factory::null_value(); - - array = last_match_info->elements(); - ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead); - // The captures come in (start, end+1) pairs. - for (int i = 0; i < number_of_capture_registers; i += 2) { - SetCapture(array, i, captures_vector[i]); - SetCapture(array, i + 1, captures_vector[i + 1]); - } -#else // !V8_TARGET_ARCH_IA32 - UNREACHABLE(); -#endif - } else { + OffsetsVector captures(number_of_capture_registers); + int* captures_vector = captures.vector(); + RegExpMacroAssemblerIA32::Result res; + do { bool is_ascii = subject->IsAsciiRepresentation(); if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) { return Handle<Object>::null(); } - // Now that we have done EnsureCompiledIrregexp we can get the number of - // registers. - int number_of_registers = - IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data())); - OffsetsVector registers(number_of_registers); - int* register_vector = registers.vector(); - for (int i = number_of_capture_registers - 1; i >= 0; i--) { - register_vector[i] = -1; - } - Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii)); - - rc = IrregexpInterpreter::Match(byte_codes, - subject, - register_vector, - previous_index); - if (!rc) return Factory::null_value(); - - array = last_match_info->elements(); - ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead); - // The captures come in (start, end+1) pairs. - for (int i = 0; i < number_of_capture_registers; i += 2) { - SetCapture(array, i, register_vector[i]); - SetCapture(array, i + 1, register_vector[i + 1]); - } + Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii)); + res = RegExpMacroAssemblerIA32::Match(code, + subject, + captures_vector, + captures.length(), + previous_index); + // If result is RETRY, the string has changed representation, and we + // must restart from scratch.
+ } while (res == RegExpMacroAssemblerIA32::RETRY); + if (res == RegExpMacroAssemblerIA32::EXCEPTION) { + ASSERT(Top::has_pending_exception()); + return Handle<Object>::null(); + } + ASSERT(res == RegExpMacroAssemblerIA32::SUCCESS + || res == RegExpMacroAssemblerIA32::FAILURE); + + if (res != RegExpMacroAssemblerIA32::SUCCESS) return Factory::null_value(); + + array = Handle<FixedArray>(last_match_info->elements()); + ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead); + // The captures come in (start, end+1) pairs. + for (int i = 0; i < number_of_capture_registers; i += 2) { + SetCapture(*array, i, captures_vector[i]); + SetCapture(*array, i + 1, captures_vector[i + 1]); + } +#else // !V8_TARGET_ARCH_IA32 + UNREACHABLE(); +#endif // V8_TARGET_ARCH_IA32 +#else // !V8_NATIVE_REGEXP + bool is_ascii = subject->IsAsciiRepresentation(); + if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) { + return Handle<Object>::null(); + } + // Now that we have done EnsureCompiledIrregexp we can get the number of + // registers. + int number_of_registers = + IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data())); + OffsetsVector registers(number_of_registers); + int* register_vector = registers.vector(); + for (int i = number_of_capture_registers - 1; i >= 0; i--) { + register_vector[i] = -1; + } + Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii)); + + if (!IrregexpInterpreter::Match(byte_codes, + subject, + register_vector, + previous_index)) { + return Factory::null_value(); + } + + array = Handle<FixedArray>(last_match_info->elements()); + ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead); + // The captures come in (start, end+1) pairs. + for (int i = 0; i < number_of_capture_registers; i += 2) { + SetCapture(*array, i, register_vector[i]); + SetCapture(*array, i + 1, register_vector[i + 1]); } +#endif // V8_NATIVE_REGEXP - SetLastCaptureCount(array, number_of_capture_registers); - SetLastSubject(array, *original_subject); - SetLastInput(array, *original_subject); + SetLastCaptureCount(*array, number_of_capture_registers); + SetLastSubject(*array, *subject); + SetLastInput(*array, *subject); return last_match_info; } @@ -4472,35 +4457,38 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data, NodeInfo info = *node->info(); - if (RegExpImpl::UseNativeRegexp()) { +#ifdef V8_NATIVE_REGEXP #ifdef V8_TARGET_ARCH_ARM - UNREACHABLE(); + // ARM native regexp not implemented yet. + UNREACHABLE(); #endif #ifdef V8_TARGET_ARCH_X64 - UNREACHABLE(); + // X64 native regexp not implemented yet. + UNREACHABLE(); #endif #ifdef V8_TARGET_ARCH_IA32 - RegExpMacroAssemblerIA32::Mode mode; - if (is_ascii) { - mode = RegExpMacroAssemblerIA32::ASCII; - } else { - mode = RegExpMacroAssemblerIA32::UC16; - } - RegExpMacroAssemblerIA32 macro_assembler(mode, - (data->capture_count + 1) * 2); - return compiler.Assemble(¯o_assembler, - node, - data->capture_count, - pattern); -#endif + RegExpMacroAssemblerIA32::Mode mode; + if (is_ascii) { + mode = RegExpMacroAssemblerIA32::ASCII; + } else { + mode = RegExpMacroAssemblerIA32::UC16; } + RegExpMacroAssemblerIA32 macro_assembler(mode, + (data->capture_count + 1) * 2); + return compiler.Assemble(¯o_assembler, + node, + data->capture_count, + pattern); +#endif +#else // ! V8_NATIVE_REGEXP + // Interpreted regexp. 
EmbeddedVector<byte, 1024> codes; RegExpMacroAssemblerIrregexp macro_assembler(codes); return compiler.Assemble(¯o_assembler, node, data->capture_count, pattern); +#endif // V8_NATIVE_REGEXP } - }} // namespace v8::internal diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index a86f7e648..0e7965c2a 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -37,13 +37,15 @@ class RegExpMacroAssembler; class RegExpImpl { public: - static inline bool UseNativeRegexp() { -#ifdef V8_TARGET_ARCH_IA32 - return FLAG_regexp_native; + // Whether V8 is compiled with native regexp support or not. + static bool UsesNativeRegExp() { +#ifdef V8_NATIVE_REGEXP + return true; #else - return false; + return false; #endif } + // Creates a regular expression literal in the old space. // This function calls the garbage collector if necessary. static Handle<Object> CreateRegExpLiteral(Handle<JSFunction> constructor, @@ -148,7 +150,8 @@ class RegExpImpl { static String* last_ascii_string_; static String* two_byte_cached_string_; - static bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii); + static bool CompileIrregexp(Handle<JSRegExp> re, bool is_ascii); + static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re, bool is_ascii); // Set the subject cache. The previous string buffer is not deleted, so the diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 0dba08dae..2ca89dd46 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -176,8 +176,11 @@ class Ticker: public Sampler { ~Ticker() { if (IsActive()) Stop(); } + void SampleStack(TickSample* sample) { + StackTracer::Trace(sample); + } + void Tick(TickSample* sample) { - if (IsProfiling()) StackTracer::Trace(sample); if (profiler_) profiler_->Insert(sample); if (window_) window_->AddState(sample->state); } diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h index 116381bb3..983802e6a 100644 --- a/deps/v8/src/macro-assembler.h +++ b/deps/v8/src/macro-assembler.h @@ -47,6 +47,8 @@ #include "arm/assembler-arm-inl.h" #include "code.h" // must be after assembler_*.h #include "arm/macro-assembler-arm.h" +#else +#error Unsupported target architecture. 
#endif #endif // V8_MACRO_ASSEMBLER_H_ diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py index fdbdb58b7..c75f0ea48 100644 --- a/deps/v8/src/macros.py +++ b/deps/v8/src/macros.py @@ -82,13 +82,16 @@ macro IS_NUMBER(arg) = (typeof(arg) === 'number'); macro IS_STRING(arg) = (typeof(arg) === 'string'); macro IS_OBJECT(arg) = (typeof(arg) === 'object'); macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean'); -macro IS_REGEXP(arg) = %HasRegExpClass(arg); -macro IS_ARRAY(arg) = %HasArrayClass(arg); -macro IS_DATE(arg) = %HasDateClass(arg); -macro IS_NUMBER_WRAPPER(arg) = %HasNumberClass(arg); -macro IS_STRING_WRAPPER(arg) = %HasStringClass(arg); -macro IS_ERROR(arg) = (%ClassOf(arg) === 'Error'); -macro IS_SCRIPT(arg) = (%ClassOf(arg) === 'Script'); +macro IS_ARRAY(arg) = (%_IsArray(arg)); +macro IS_REGEXP(arg) = (%_ClassOf(arg) === 'RegExp'); +macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date'); +macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number'); +macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String'); +macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean'); +macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error'); +macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script'); +macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments'); +macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global'); macro FLOOR(arg) = %Math_floor(arg); # Inline macros. Use %IS_VAR to make sure arg is evaluated only once. @@ -111,6 +114,10 @@ const REGEXP_FIRST_CAPTURE = 3; # REGEXP_NUMBER_OF_CAPTURES macro NUMBER_OF_CAPTURES(array) = ((array)[0]); +# Gets the value of a Date object. If arg is not a Date object +# a type error is thrown. +macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError()); + # Last input and last subject are after the captures so we can omit them on # results returned from global searches. Beware - these evaluate their # arguments twice. diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 89d97e925..6e823b3d9 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -56,6 +56,7 @@ int MarkCompactCollector::live_old_data_objects_ = 0; int MarkCompactCollector::live_old_pointer_objects_ = 0; int MarkCompactCollector::live_code_objects_ = 0; int MarkCompactCollector::live_map_objects_ = 0; +int MarkCompactCollector::live_cell_objects_ = 0; int MarkCompactCollector::live_lo_objects_ = 0; #endif @@ -155,6 +156,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { live_old_data_objects_ = 0; live_code_objects_ = 0; live_map_objects_ = 0; + live_cell_objects_ = 0; live_lo_objects_ = 0; #endif } @@ -224,7 +226,9 @@ static inline HeapObject* ShortCircuitConsString(Object** p) { if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); - if (reinterpret_cast<String*>(second) != Heap::empty_string()) return object; + if (second != Heap::raw_unchecked_empty_string()) { + return object; + } // Since we don't have the object's start, it is impossible to update the // remembered set. Therefore, we only replace the string with its left @@ -421,7 +425,7 @@ class SymbolTableCleaner : public ObjectVisitor { } } // Set the entry to null_value (as deleted). 
- *p = Heap::null_value(); + *p = Heap::raw_unchecked_null_value(); pointers_removed_++; } } @@ -475,7 +479,7 @@ void MarkCompactCollector::MarkDescriptorArray( DescriptorArray* descriptors) { if (descriptors->IsMarked()) return; // Empty descriptor array is marked as a root before any maps are marked. - ASSERT(descriptors != Heap::empty_descriptor_array()); + ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array()); SetMark(descriptors); FixedArray* contents = reinterpret_cast<FixedArray*>( @@ -590,7 +594,7 @@ void MarkCompactCollector::MarkSymbolTable() { // and if it is a sliced string or a cons string backed by an // external string (even indirectly), then the external string does // not receive a weak reference callback. - SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table()); + SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); // Mark the symbol table itself. SetMark(symbol_table); // Explicitly mark the prefix. @@ -708,6 +712,10 @@ void MarkCompactCollector::RefillMarkingStack() { ScanOverflowedObjects(&map_it); if (marking_stack.is_full()) return; + HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize); + ScanOverflowedObjects(&cell_it); + if (marking_stack.is_full()) return; + LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize); ScanOverflowedObjects(&lo_it); if (marking_stack.is_full()) return; @@ -780,10 +788,9 @@ void MarkCompactCollector::MarkLiveObjects() { ProcessObjectGroups(root_visitor.stack_visitor()); // Prune the symbol table removing all symbols only pointed to by the - // symbol table. Cannot use SymbolTable::cast here because the symbol + // symbol table. Cannot use symbol_table() here because the symbol // table is marked. - SymbolTable* symbol_table = - reinterpret_cast<SymbolTable*>(Heap::symbol_table()); + SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); SymbolTableCleaner v; symbol_table->IterateElements(&v); symbol_table->ElementsRemoved(v.PointersRemoved()); @@ -808,6 +815,9 @@ void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) { } else if (Heap::map_space()->Contains(obj)) { ASSERT(obj->IsMap()); live_map_objects_++; + } else if (Heap::cell_space()->Contains(obj)) { + ASSERT(obj->IsJSGlobalPropertyCell()); + live_cell_objects_++; } else if (Heap::old_pointer_space()->Contains(obj)) { live_old_pointer_objects_++; } else if (Heap::old_data_space()->Contains(obj)) { @@ -967,27 +977,32 @@ inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) { // Allocation functions for the paged spaces call the space's MCAllocateRaw. 
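One JavaScript-visible consequence of the DATE_VALUE macro added to macros.py further up: Date builtins that route their receiver through it throw a TypeError as soon as they are applied to a non-Date, instead of reading a bogus value. A small sketch, assuming the Date prototype functions take their 'this' value through the macro:

  var d = new Date(0);
  d.getTime();                      // 0, since %_ClassOf(d) === 'Date'
  Date.prototype.getTime.call({});  // throws TypeError via ThrowDateTypeError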
-inline Object* MCAllocateFromOldPointerSpace(HeapObject* object, +inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore, int object_size) { return Heap::old_pointer_space()->MCAllocateRaw(object_size); } -inline Object* MCAllocateFromOldDataSpace(HeapObject* object, int object_size) { +inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) { return Heap::old_data_space()->MCAllocateRaw(object_size); } -inline Object* MCAllocateFromCodeSpace(HeapObject* object, int object_size) { +inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) { return Heap::code_space()->MCAllocateRaw(object_size); } -inline Object* MCAllocateFromMapSpace(HeapObject* object, int object_size) { +inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) { return Heap::map_space()->MCAllocateRaw(object_size); } +inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) { + return Heap::cell_space()->MCAllocateRaw(object_size); +} + + // The forwarding address is encoded at the same offset as the current // to-space object, but in from space. inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object, @@ -1141,12 +1156,12 @@ static void SweepSpace(NewSpace* space) { // We give non-live objects a map that will correctly give their size, // since their existing map might not be live after the collection. int size = object->Size(); - if (size >= Array::kHeaderSize) { - object->set_map(Heap::byte_array_map()); + if (size >= ByteArray::kHeaderSize) { + object->set_map(Heap::raw_unchecked_byte_array_map()); ByteArray::cast(object)->set_length(ByteArray::LengthFor(size)); } else { ASSERT(size == kPointerSize); - object->set_map(Heap::one_word_filler_map()); + object->set_map(Heap::raw_unchecked_one_pointer_filler_map()); } ASSERT(object->Size() == size); } @@ -1196,8 +1211,8 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { // loop. } - // If the last region was not live we need to from free_start to the - // allocation top in the page. + // If the last region was not live we need to deallocate from + // free_start to the allocation top in the page. if (!is_previous_alive) { int free_size = p->AllocationTop() - free_start; if (free_size > 0) { @@ -1241,6 +1256,21 @@ void MarkCompactCollector::DeallocateMapBlock(Address start, } +void MarkCompactCollector::DeallocateCellBlock(Address start, + int size_in_bytes) { + // Free-list elements in cell space are assumed to have a fixed size. + // We break the free block into chunks and add them to the free list + // individually. + int size = Heap::cell_space()->object_size_in_bytes(); + ASSERT(size_in_bytes % size == 0); + Heap::ClearRSetRange(start, size_in_bytes); + Address end = start + size_in_bytes; + for (Address a = start; a < end; a += size) { + Heap::cell_space()->Free(a); + } +} + + void MarkCompactCollector::EncodeForwardingAddresses() { ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); // Objects in the active semispace of the young generation may be @@ -1261,6 +1291,11 @@ void MarkCompactCollector::EncodeForwardingAddresses() { LogNonLiveCodeObject>( Heap::code_space()); + EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace, + IgnoreNonLiveObject>( + Heap::cell_space()); + + // Compute new space next to last after the old and code spaces have been // compacted. Objects in new space can be promoted to old or code space. 
EncodeForwardingAddressesInNewSpace(); @@ -1279,6 +1314,7 @@ void MarkCompactCollector::EncodeForwardingAddresses() { Heap::old_data_space()->MCWriteRelocationInfoToPage(); Heap::code_space()->MCWriteRelocationInfoToPage(); Heap::map_space()->MCWriteRelocationInfoToPage(); + Heap::cell_space()->MCWriteRelocationInfoToPage(); } @@ -1293,6 +1329,7 @@ void MarkCompactCollector::SweepSpaces() { SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock); SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock); SweepSpace(Heap::code_space(), &DeallocateCodeBlock); + SweepSpace(Heap::cell_space(), &DeallocateCellBlock); SweepSpace(Heap::new_space()); SweepSpace(Heap::map_space(), &DeallocateMapBlock); } @@ -1371,15 +1408,16 @@ class UpdatingVisitor: public ObjectVisitor { ASSERT(!Heap::InFromSpace(obj)); if (Heap::new_space()->Contains(obj)) { - Address f_addr = Heap::new_space()->FromSpaceLow() + - Heap::new_space()->ToSpaceOffsetForAddress(old_addr); - new_addr = Memory::Address_at(f_addr); + Address forwarding_pointer_addr = + Heap::new_space()->FromSpaceLow() + + Heap::new_space()->ToSpaceOffsetForAddress(old_addr); + new_addr = Memory::Address_at(forwarding_pointer_addr); #ifdef DEBUG ASSERT(Heap::old_pointer_space()->Contains(new_addr) || Heap::old_data_space()->Contains(new_addr) || - Heap::code_space()->Contains(new_addr) || - Heap::new_space()->FromSpaceContains(new_addr)); + Heap::new_space()->FromSpaceContains(new_addr) || + Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr))); if (Heap::new_space()->FromSpaceContains(new_addr)) { ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <= @@ -1392,32 +1430,19 @@ class UpdatingVisitor: public ObjectVisitor { return; } else { - ASSERT(Heap::old_pointer_space()->Contains(obj) || - Heap::old_data_space()->Contains(obj) || - Heap::code_space()->Contains(obj) || - Heap::map_space()->Contains(obj)); - - new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj); - ASSERT(Heap::old_pointer_space()->Contains(new_addr) || - Heap::old_data_space()->Contains(new_addr) || - Heap::code_space()->Contains(new_addr) || - Heap::map_space()->Contains(new_addr)); - #ifdef DEBUG - if (Heap::old_pointer_space()->Contains(obj)) { - ASSERT(Heap::old_pointer_space()->MCSpaceOffsetForAddress(new_addr) <= - Heap::old_pointer_space()->MCSpaceOffsetForAddress(old_addr)); - } else if (Heap::old_data_space()->Contains(obj)) { - ASSERT(Heap::old_data_space()->MCSpaceOffsetForAddress(new_addr) <= - Heap::old_data_space()->MCSpaceOffsetForAddress(old_addr)); - } else if (Heap::code_space()->Contains(obj)) { - ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <= - Heap::code_space()->MCSpaceOffsetForAddress(old_addr)); - } else { - ASSERT(Heap::map_space()->MCSpaceOffsetForAddress(new_addr) <= - Heap::map_space()->MCSpaceOffsetForAddress(old_addr)); + PagedSpaces spaces; + PagedSpace* original_space = spaces.next(); + while (original_space != NULL) { + if (original_space->Contains(obj)) break; + original_space = spaces.next(); } + ASSERT(original_space != NULL); #endif + new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj); + ASSERT(original_space->Contains(new_addr)); + ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <= + original_space->MCSpaceOffsetForAddress(old_addr)); } *p = HeapObject::FromAddress(new_addr); @@ -1449,6 +1474,8 @@ void MarkCompactCollector::UpdatePointers() { &UpdatePointersInOldObject); int live_codes = IterateLiveObjects(Heap::code_space(), &UpdatePointersInOldObject); + 
int live_cells = IterateLiveObjects(Heap::cell_space(), + &UpdatePointersInOldObject); int live_news = IterateLiveObjects(Heap::new_space(), &UpdatePointersInNewObject); @@ -1460,15 +1487,14 @@ void MarkCompactCollector::UpdatePointers() { USE(live_pointer_olds); USE(live_data_olds); USE(live_codes); + USE(live_cells); USE(live_news); - -#ifdef DEBUG ASSERT(live_maps == live_map_objects_); ASSERT(live_data_olds == live_old_data_objects_); ASSERT(live_pointer_olds == live_old_pointer_objects_); ASSERT(live_codes == live_code_objects_); + ASSERT(live_cells == live_cell_objects_); ASSERT(live_news == live_young_objects_); -#endif } @@ -1589,30 +1615,31 @@ void MarkCompactCollector::RelocateObjects() { int live_data_olds = IterateLiveObjects(Heap::old_data_space(), &RelocateOldDataObject); int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject); + int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject); int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject); USE(live_maps); USE(live_data_olds); USE(live_pointer_olds); USE(live_codes); + USE(live_cells); USE(live_news); -#ifdef DEBUG ASSERT(live_maps == live_map_objects_); ASSERT(live_data_olds == live_old_data_objects_); ASSERT(live_pointer_olds == live_old_pointer_objects_); ASSERT(live_codes == live_code_objects_); + ASSERT(live_cells == live_cell_objects_); ASSERT(live_news == live_young_objects_); -#endif // Notify code object in LO to convert IC target to address // This must happen after lo_space_->Compact LargeObjectIterator it(Heap::lo_space()); while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); } - // Flips from and to spaces + // Flip from and to spaces Heap::new_space()->Flip(); - // Sets age_mark to bottom in to space + // Set age_mark to bottom in to space Address mark = Heap::new_space()->bottom(); Heap::new_space()->set_age_mark(mark); @@ -1636,7 +1663,7 @@ int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) { int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { - // decode map pointer (forwarded address) + // Recover map pointer. MapWord encoding = obj->map_word(); Address map_addr = encoding.DecodeMapAddress(Heap::map_space()); ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr))); @@ -1644,10 +1671,10 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { // Get forwarding address before resetting map pointer Address new_addr = GetForwardingAddressInOldSpace(obj); - // recover map pointer + // Reset map pointer. The meta map object may not be copied yet so + // Map::cast does not yet work. obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); - // The meta map object may not be copied yet. Address old_addr = obj->address(); if (new_addr != old_addr) { @@ -1664,23 +1691,23 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) { } -static inline int RelocateOldObject(HeapObject* obj, - OldSpace* space, - Address new_addr, - Address map_addr) { - // recover map pointer - obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr))); +static inline int RestoreMap(HeapObject* obj, + PagedSpace* space, + Address new_addr, + Address map_addr) { + // This must be a non-map object, and the function relies on the + // assumption that the Map space is compacted before the other paged + // spaces (see RelocateObjects). + + // Reset map pointer. 
+ obj->set_map(Map::cast(HeapObject::FromAddress(map_addr))); - // This is a non-map object, it relies on the assumption that the Map space - // is compacted before the Old space (see RelocateObjects). int obj_size = obj->Size(); ASSERT_OBJECT_SIZE(obj_size); ASSERT(space->MCSpaceOffsetForAddress(new_addr) <= space->MCSpaceOffsetForAddress(obj->address())); - space->MCAdjustRelocationEnd(new_addr, obj_size); - #ifdef DEBUG if (FLAG_gc_verbose) { PrintF("relocate %p -> %p\n", obj->address(), new_addr); @@ -1692,21 +1719,22 @@ static inline int RelocateOldObject(HeapObject* obj, int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj, - OldSpace* space) { - // decode map pointer (forwarded address) + PagedSpace* space) { + // Recover map pointer. MapWord encoding = obj->map_word(); Address map_addr = encoding.DecodeMapAddress(Heap::map_space()); ASSERT(Heap::map_space()->Contains(map_addr)); - // Get forwarding address before resetting map pointer + // Get forwarding address before resetting map pointer. Address new_addr = GetForwardingAddressInOldSpace(obj); - int obj_size = RelocateOldObject(obj, space, new_addr, map_addr); + // Reset the map pointer. + int obj_size = RestoreMap(obj, space, new_addr, map_addr); Address old_addr = obj->address(); if (new_addr != old_addr) { - memmove(new_addr, old_addr, obj_size); // copy contents + memmove(new_addr, old_addr, obj_size); // Copy contents } ASSERT(!HeapObject::FromAddress(new_addr)->IsCode()); @@ -1725,8 +1753,13 @@ int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) { } +int MarkCompactCollector::RelocateCellObject(HeapObject* obj) { + return RelocateOldNonCodeObject(obj, Heap::cell_space()); +} + + int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) { - // decode map pointer (forwarded address) + // Recover map pointer. MapWord encoding = obj->map_word(); Address map_addr = encoding.DecodeMapAddress(Heap::map_space()); ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr))); @@ -1734,23 +1767,23 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) { // Get forwarding address before resetting map pointer Address new_addr = GetForwardingAddressInOldSpace(obj); - int obj_size = RelocateOldObject(obj, Heap::code_space(), new_addr, map_addr); + // Reset the map pointer. + int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr); - // convert inline cache target to address using old address + // Convert inline cache target to address using old address. if (obj->IsCode()) { - // convert target to address first related to old_address Code::cast(obj)->ConvertICTargetsFromObjectToAddress(); } Address old_addr = obj->address(); if (new_addr != old_addr) { - memmove(new_addr, old_addr, obj_size); // copy contents + memmove(new_addr, old_addr, obj_size); // Copy contents. } HeapObject* copied_to = HeapObject::FromAddress(new_addr); if (copied_to->IsCode()) { - // may also update inline cache target. + // May also update inline cache target. Code::cast(copied_to)->Relocate(new_addr - old_addr); // Notify the logger that compiled code has moved. 
LOG(CodeMoveEvent(old_addr, new_addr)); @@ -1770,15 +1803,15 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) { Address new_addr = Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset); +#ifdef DEBUG if (Heap::new_space()->FromSpaceContains(new_addr)) { ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <= Heap::new_space()->ToSpaceOffsetForAddress(old_addr)); } else { - OldSpace* target_space = Heap::TargetSpace(obj); - ASSERT(target_space == Heap::old_pointer_space() || - target_space == Heap::old_data_space()); - target_space->MCAdjustRelocationEnd(new_addr, obj_size); + ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() || + Heap::TargetSpace(obj) == Heap::old_data_space()); } +#endif // New and old addresses cannot overlap. memcpy(reinterpret_cast<void*>(new_addr), diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index d7ad63013..bd9e4a028 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -293,6 +293,7 @@ class MarkCompactCollector: public AllStatic { static void DeallocateOldDataBlock(Address start, int size_in_bytes); static void DeallocateCodeBlock(Address start, int size_in_bytes); static void DeallocateMapBlock(Address start, int size_in_bytes); + static void DeallocateCellBlock(Address start, int size_in_bytes); // If we are not compacting the heap, we simply sweep the spaces except // for the large object space, clearing mark bits and adding unmarked @@ -352,8 +353,12 @@ class MarkCompactCollector: public AllStatic { static int RelocateOldPointerObject(HeapObject* obj); static int RelocateOldDataObject(HeapObject* obj); + // Relocate a property cell object. + static int RelocateCellObject(HeapObject* obj); + // Helper function. - static inline int RelocateOldNonCodeObject(HeapObject* obj, OldSpace* space); + static inline int RelocateOldNonCodeObject(HeapObject* obj, + PagedSpace* space); // Relocates an object in the code space. static int RelocateCodeObject(HeapObject* obj); @@ -393,6 +398,9 @@ class MarkCompactCollector: public AllStatic { // Number of live objects in Heap::map_space_. static int live_map_objects_; + // Number of live objects in Heap::cell_space_. + static int live_cell_objects_; + // Number of live objects in Heap::lo_space_. static int live_lo_objects_; diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js index 1f5ce87af..db75cb28a 100644 --- a/deps/v8/src/math.js +++ b/deps/v8/src/math.js @@ -68,10 +68,12 @@ function MathAtan(x) { } // ECMA 262 - 15.8.2.5 -function MathAtan2(x, y) { - if (!IS_NUMBER(x)) x = ToNumber(x); +// The naming of y and x matches the spec, as does the order in which +// ToNumber (valueOf) is called. +function MathAtan2(y, x) { if (!IS_NUMBER(y)) y = ToNumber(y); - return %Math_atan2(x, y); + if (!IS_NUMBER(x)) x = ToNumber(x); + return %Math_atan2(y, x); } // ECMA 262 - 15.8.2.6 @@ -95,7 +97,9 @@ function MathExp(x) { // ECMA 262 - 15.8.2.9 function MathFloor(x) { if (!IS_NUMBER(x)) x = ToNumber(x); - if (0 < x && x <= 0x7FFFFFFF) { + // It's more common to call this with a positive number that's out + // of range than negative numbers; check the upper bound first. + if (x <= 0x7FFFFFFF && x > 0) { // Numbers in the range [0, 2^31) can be floored by converting // them to an unsigned 32-bit value using the shift operator. 
// We avoid doing so for -0, because the result of Math.floor(-0) @@ -115,11 +119,12 @@ function MathLog(x) { // ECMA 262 - 15.8.2.11 function MathMax(arg1, arg2) { // length == 2 var r = -$Infinity; - for (var i = %_ArgumentsLength() - 1; i >= 0; --i) { + var length = %_ArgumentsLength(); + for (var i = 0; i < length; i++) { var n = ToNumber(%_Arguments(i)); if (NUMBER_IS_NAN(n)) return n; - // Make sure +0 is consider greater than -0. - if (n > r || (n === 0 && r === 0 && (1 / n) > (1 / r))) r = n; + // Make sure +0 is considered greater than -0. + if (n > r || (r === 0 && n === 0 && !%_IsSmi(r))) r = n; } return r; } @@ -127,11 +132,12 @@ function MathMax(arg1, arg2) { // length == 2 // ECMA 262 - 15.8.2.12 function MathMin(arg1, arg2) { // length == 2 var r = $Infinity; - for (var i = %_ArgumentsLength() - 1; i >= 0; --i) { + var length = %_ArgumentsLength(); + for (var i = 0; i < length; i++) { var n = ToNumber(%_Arguments(i)); if (NUMBER_IS_NAN(n)) return n; - // Make sure -0 is consider less than +0. - if (n < r || (n === 0 && r === 0 && (1 / n) < (1 / r))) r = n; + // Make sure -0 is considered less than +0. + if (n < r || (r === 0 && n === 0 && !%_IsSmi(n))) r = n; } return r; } diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index ec4b3528b..870c969f9 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -60,10 +60,8 @@ const kMessages = { unexpected_token_string: "Unexpected string", unexpected_token_identifier: "Unexpected identifier", unexpected_eos: "Unexpected end of input", - expected_label: "Expected label", malformed_regexp: "Invalid regular expression: /%0/: %1", unterminated_regexp: "Invalid regular expression: missing /", - pcre_error: "PCRE function %0, error code %1", regexp_flags: "Cannot supply flags when constructing one RegExp from another", invalid_lhs_in_assignment: "Invalid left-hand side in assignment", invalid_lhs_in_for_in: "Invalid left-hand side in for-in", @@ -74,21 +72,17 @@ const kMessages = { redeclaration: "%0 '%1' has already been declared", no_catch_or_finally: "Missing catch or finally after try", unknown_label: "Undefined label '%0'", - invalid_break: "Invalid break statement", - invalid_continue: "Invalid continue statement", uncaught_exception: "Uncaught %0", stack_trace: "Stack Trace:\n%0", called_non_callable: "%0 is not a function", undefined_method: "Object %1 has no method '%0'", property_not_function: "Property '%0' of object %1 is not a function", - null_or_undefined: "Cannot access property of null or undefined", cannot_convert_to_primitive: "Cannot convert object to primitive value", not_constructor: "%0 is not a constructor", not_defined: "%0 is not defined", non_object_property_load: "Cannot read property '%0' of %1", non_object_property_store: "Cannot set property '%0' of %1", non_object_property_call: "Cannot call method '%0' of %1", - illegal_eval: "Unsupported indirect eval() call", with_expression: "%0 has no properties", illegal_invocation: "Illegal invocation", no_setter_in_callback: "Cannot set property %0 of %1 which has only a getter", @@ -101,13 +95,11 @@ const kMessages = { reduce_no_initial: "Reduce of empty array with no initial value", // RangeError invalid_array_length: "Invalid array length", - invalid_array_apply_length: "Function.prototype.apply supports only up to 1024 arguments", stack_overflow: "Maximum call stack size exceeded", apply_overflow: "Function.prototype.apply cannot support %0 arguments", // SyntaxError unable_to_parse: "Parse error", duplicate_regexp_flag: "Duplicate 
RegExp flag %0", - unrecognized_regexp_flag: "Unrecognized RegExp flag %0", invalid_regexp: "Invalid RegExp pattern /%0/", illegal_break: "Illegal break statement", illegal_continue: "Illegal continue statement", @@ -557,55 +549,9 @@ function MakeMessage(type, args, startPos, endPos, script, stackTrace) { function GetStackTraceLine(recv, fun, pos, isGlobal) { - try { - return UnsafeGetStackTraceLine(recv, fun, pos, isGlobal); - } catch (e) { - return "<error: " + e + ">"; - } + return FormatSourcePosition(new CallSite(recv, fun, pos)); } - -function GetFunctionName(fun, recv) { - var name = %FunctionGetName(fun); - if (name) return name; - for (var prop in recv) { - if (recv[prop] === fun) - return prop; - } - return "[anonymous]"; -} - - -function UnsafeGetStackTraceLine(recv, fun, pos, isTopLevel) { - var result = ""; - // The global frame has no meaningful function or receiver - if (!isTopLevel) { - // If the receiver is not the global object then prefix the - // message send - if (recv !== global) - result += ToDetailString(recv) + "."; - result += GetFunctionName(fun, recv); - } - if (pos != -1) { - var script = %FunctionGetScript(fun); - var file; - if (script) { - file = %FunctionGetScript(fun).data; - } - if (file) { - var location = %FunctionGetScript(fun).locationFromPosition(pos, true); - if (!isTopLevel) result += "("; - result += file; - if (location != null) { - result += ":" + (location.line + 1) + ":" + (location.column + 1); - } - if (!isTopLevel) result += ")"; - } - } - return (result) ? " at " + result : result; -} - - // ---------------------------------------------------------------------------- // Error implementation @@ -632,6 +578,226 @@ function DefineOneShotAccessor(obj, name, fun) { }); } +function CallSite(receiver, fun, pos) { + this.receiver = receiver; + this.fun = fun; + this.pos = pos; +} + +CallSite.prototype.getThis = function () { + return this.receiver; +}; + +CallSite.prototype.getTypeName = function () { + var constructor = this.receiver.constructor; + if (!constructor) + return $Object.prototype.toString.call(this.receiver); + var constructorName = constructor.name; + if (!constructorName) + return $Object.prototype.toString.call(this.receiver); + return constructorName; +}; + +CallSite.prototype.isToplevel = function () { + if (this.receiver == null) + return true; + return IS_GLOBAL(this.receiver); +}; + +CallSite.prototype.isEval = function () { + var script = %FunctionGetScript(this.fun); + return script && script.compilation_type == 1; +}; + +CallSite.prototype.getEvalOrigin = function () { + var script = %FunctionGetScript(this.fun); + if (!script || script.compilation_type != 1) + return null; + return new CallSite(null, script.eval_from_function, + script.eval_from_position); +}; + +CallSite.prototype.getFunction = function () { + return this.fun; +}; + +CallSite.prototype.getFunctionName = function () { + // See if the function knows its own name + var name = this.fun.name; + if (name) { + return name; + } else { + return %FunctionGetInferredName(this.fun); + } + // Maybe this is an evaluation? + var script = %FunctionGetScript(this.fun); + if (script && script.compilation_type == 1) + return "eval"; + return null; +}; + +CallSite.prototype.getMethodName = function () { + // See if we can find a unique property on the receiver that holds + // this function. 
+ var ownName = this.fun.name; + if (ownName && this.receiver && this.receiver[ownName] === this.fun) + // To handle DontEnum properties we guess that the method has + // the same name as the function. + return ownName; + var name = null; + for (var prop in this.receiver) { + if (this.receiver[prop] === this.fun) { + // If we find more than one match bail out to avoid confusion + if (name) + return null; + name = prop; + } + } + if (name) + return name; + return null; +}; + +CallSite.prototype.getFileName = function () { + var script = %FunctionGetScript(this.fun); + return script ? script.name : null; +}; + +CallSite.prototype.getLineNumber = function () { + if (this.pos == -1) + return null; + var script = %FunctionGetScript(this.fun); + var location = null; + if (script) { + location = script.locationFromPosition(this.pos, true); + } + return location ? location.line + 1 : null; +}; + +CallSite.prototype.getColumnNumber = function () { + if (this.pos == -1) + return null; + var script = %FunctionGetScript(this.fun); + var location = null; + if (script) { + location = script.locationFromPosition(this.pos, true); + } + return location ? location.column : null; +}; + +CallSite.prototype.isNative = function () { + var script = %FunctionGetScript(this.fun); + return script ? (script.type == 0) : false; +}; + +CallSite.prototype.getPosition = function () { + return this.pos; +}; + +CallSite.prototype.isConstructor = function () { + var constructor = this.receiver ? this.receiver.constructor : null; + if (!constructor) + return false; + return this.fun === constructor; +}; + +function FormatSourcePosition(frame) { + var fileLocation = ""; + if (frame.isNative()) { + fileLocation = "native"; + } else if (frame.isEval()) { + fileLocation = "eval at " + FormatSourcePosition(frame.getEvalOrigin()); + } else { + var fileName = frame.getFileName(); + if (fileName) { + fileLocation += fileName; + var lineNumber = frame.getLineNumber(); + if (lineNumber != null) { + fileLocation += ":" + lineNumber; + var columnNumber = frame.getColumnNumber(); + if (columnNumber) { + fileLocation += ":" + columnNumber; + } + } + } + } + if (!fileLocation) { + fileLocation = "unknown source"; + } + var line = ""; + var functionName = frame.getFunction().name; + var methodName = frame.getMethodName(); + var addPrefix = true; + var isConstructor = frame.isConstructor(); + var isMethodCall = !(frame.isToplevel() || isConstructor); + if (isMethodCall) { + line += frame.getTypeName() + "."; + if (functionName) { + line += functionName; + if (methodName && (methodName != functionName)) { + line += " [as " + methodName + "]"; + } + } else { + line += methodName || "<anonymous>"; + } + } else if (isConstructor) { + line += "new " + (functionName || "<anonymous>"); + } else if (functionName) { + line += functionName; + } else { + line += fileLocation; + addPrefix = false; + } + if (addPrefix) { + line += " (" + fileLocation + ")"; + } + return line; +} + +function FormatStackTrace(error, frames) { + var lines = []; + try { + lines.push(error.toString()); + } catch (e) { + try { + lines.push("<error: " + e + ">"); + } catch (ee) { + lines.push("<error>"); + } + } + for (var i = 0; i < frames.length; i++) { + var frame = frames[i]; + try { + var line = FormatSourcePosition(frame); + } catch (e) { + try { + var line = "<error: " + e + ">"; + } catch (ee) { + // Any code that reaches this point is seriously nasty! 
+ var line = "<error>"; + } + } + lines.push(" at " + line); + } + return lines.join("\n"); +} + +function FormatRawStackTrace(error, raw_stack) { + var frames = [ ]; + for (var i = 0; i < raw_stack.length; i += 3) { + var recv = raw_stack[i]; + var fun = raw_stack[i+1]; + var pc = raw_stack[i+2]; + var pos = %FunctionGetPositionForOffset(fun, pc); + frames.push(new CallSite(recv, fun, pos)); + } + if (IS_FUNCTION($Error.prepareStackTrace)) { + return $Error.prepareStackTrace(error, frames); + } else { + return FormatStackTrace(error, frames); + } +} + function DefineError(f) { // Store the error function in both the global object // and the runtime object. The function is fetched @@ -659,7 +825,7 @@ function DefineError(f) { %SetProperty(f.prototype, 'constructor', f, DONT_ENUM); f.prototype.name = name; %SetCode(f, function(m) { - if (%IsConstructCall()) { + if (%_IsConstructCall()) { if (m === kAddMessageAccessorsMarker) { DefineOneShotAccessor(this, 'message', function (obj) { return FormatMessage({type: obj.type, args: obj.arguments}); @@ -667,6 +833,16 @@ function DefineError(f) { } else if (!IS_UNDEFINED(m)) { this.message = ToString(m); } + var stackTraceLimit = $Error.stackTraceLimit; + if (stackTraceLimit) { + // Cap the limit to avoid extremely big traces + if (stackTraceLimit < 0 || stackTraceLimit > 10000) + stackTraceLimit = 10000; + var raw_stack = %CollectStackTrace(f, stackTraceLimit); + DefineOneShotAccessor(this, 'stack', function (obj) { + return FormatRawStackTrace(obj, raw_stack); + }); + } } else { return new f(m); } diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js index d0e8aa44e..76ae75bf6 100644 --- a/deps/v8/src/mirror-delay.js +++ b/deps/v8/src/mirror-delay.js @@ -580,7 +580,7 @@ inherits(ObjectMirror, ValueMirror); ObjectMirror.prototype.className = function() { - return %ClassOf(this.value_); + return %_ClassOf(this.value_); }; diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index ba07af7e0..d54f74183 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -152,7 +152,9 @@ void HeapObject::HeapObjectPrint() { case SHARED_FUNCTION_INFO_TYPE: SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(); break; - + case JS_GLOBAL_PROPERTY_CELL_TYPE: + JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellPrint(); + break; #define MAKE_STRUCT_CASE(NAME, Name, name) \ case NAME##_TYPE: \ Name::cast(this)->Name##Print(); \ @@ -214,6 +216,9 @@ void HeapObject::HeapObjectVerify() { case JS_BUILTINS_OBJECT_TYPE: JSBuiltinsObject::cast(this)->JSBuiltinsObjectVerify(); break; + case JS_GLOBAL_PROPERTY_CELL_TYPE: + JSGlobalPropertyCell::cast(this)->JSGlobalPropertyCellVerify(); + break; case JS_ARRAY_TYPE: JSArray::cast(this)->JSArrayVerify(); break; @@ -266,29 +271,38 @@ void ByteArray::ByteArrayVerify() { void JSObject::PrintProperties() { if (HasFastProperties()) { - for (DescriptorReader r(map()->instance_descriptors()); - !r.eos(); - r.advance()) { + DescriptorArray* descs = map()->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { PrintF(" "); - r.GetKey()->StringPrint(); + descs->GetKey(i)->StringPrint(); PrintF(": "); - if (r.type() == FIELD) { - FastPropertyAt(r.GetFieldIndex())->ShortPrint(); - PrintF(" (field at offset %d)\n", r.GetFieldIndex()); - } else if (r.type() == CONSTANT_FUNCTION) { - r.GetConstantFunction()->ShortPrint(); - PrintF(" (constant function)\n"); - } else if (r.type() == CALLBACKS) { - r.GetCallbacksObject()->ShortPrint(); - PrintF(" 
(callback)\n"); - } else if (r.type() == MAP_TRANSITION) { - PrintF(" (map transition)\n"); - } else if (r.type() == CONSTANT_TRANSITION) { - PrintF(" (constant transition)\n"); - } else if (r.type() == NULL_DESCRIPTOR) { - PrintF(" (null descriptor)\n"); - } else { - UNREACHABLE(); + switch (descs->GetType(i)) { + case FIELD: { + int index = descs->GetFieldIndex(i); + FastPropertyAt(index)->ShortPrint(); + PrintF(" (field at offset %d)\n", index); + break; + } + case CONSTANT_FUNCTION: + descs->GetConstantFunction(i)->ShortPrint(); + PrintF(" (constant function)\n"); + break; + case CALLBACKS: + descs->GetCallbacksObject(i)->ShortPrint(); + PrintF(" (callback)\n"); + break; + case MAP_TRANSITION: + PrintF(" (map transition)\n"); + break; + case CONSTANT_TRANSITION: + PrintF(" (constant transition)\n"); + break; + case NULL_DESCRIPTOR: + PrintF(" (null descriptor)\n"); + break; + default: + UNREACHABLE(); + break; } } } else { @@ -392,6 +406,7 @@ static const char* TypeToString(InstanceType type) { case JS_OBJECT_TYPE: return "JS_OBJECT"; case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT"; case ODDBALL_TYPE: return "ODDBALL"; + case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL"; case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO"; case JS_FUNCTION_TYPE: return "JS_FUNCTION"; case CODE_TYPE: return "CODE"; @@ -428,6 +443,9 @@ void Map::MapPrint() { if (is_undetectable()) { PrintF(" - undetectable\n"); } + if (needs_loading()) { + PrintF(" - needs_loading\n"); + } if (has_instance_call_handler()) { PrintF(" - instance_call_handler\n"); } @@ -653,6 +671,17 @@ void Oddball::OddballVerify() { } +void JSGlobalPropertyCell::JSGlobalPropertyCellVerify() { + CHECK(IsJSGlobalPropertyCell()); + VerifyObjectField(kValueOffset); +} + + +void JSGlobalPropertyCell::JSGlobalPropertyCellPrint() { + HeapObject::PrintHeader("JSGlobalPropertyCell"); +} + + void Code::CodePrint() { HeapObject::PrintHeader("Code"); #ifdef ENABLE_DISASSEMBLER @@ -694,7 +723,7 @@ void JSRegExp::JSRegExpVerify() { break; } case JSRegExp::IRREGEXP: { - bool is_native = RegExpImpl::UseNativeRegexp(); + bool is_native = RegExpImpl::UsesNativeRegExp(); FixedArray* arr = FixedArray::cast(data()); Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex); @@ -725,25 +754,6 @@ void Proxy::ProxyVerify() { } -void Dictionary::Print() { - int capacity = Capacity(); - for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k)) { - PrintF(" "); - if (k->IsString()) { - String::cast(k)->StringPrint(); - } else { - k->ShortPrint(); - } - PrintF(": "); - ValueAt(i)->ShortPrint(); - PrintF("\n"); - } - } -} - - void AccessorInfo::AccessorInfoVerify() { CHECK(IsAccessorInfo()); VerifyPointer(getter()); @@ -997,7 +1007,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) { info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex(); info->number_of_fast_unused_fields_ += map()->unused_property_fields(); } else { - Dictionary* dict = property_dictionary(); + StringDictionary* dict = property_dictionary(); info->number_of_slow_used_properties_ += dict->NumberOfElements(); info->number_of_slow_unused_properties_ += dict->Capacity() - dict->NumberOfElements(); @@ -1014,7 +1024,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) { info->number_of_fast_used_elements_ += len - holes; info->number_of_fast_unused_elements_ += holes; } else { - Dictionary* dict = element_dictionary(); + NumberDictionary* dict = element_dictionary(); 
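+    // (Elements now live in a NumberDictionary keyed by uint32 indices,
+    // while named properties above use a StringDictionary; both are
+    // instantiations of the new Dictionary<Shape, Key> template.)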
info->number_of_slow_used_elements_ += dict->NumberOfElements(); info->number_of_slow_unused_elements_ += dict->Capacity() - dict->NumberOfElements(); @@ -1061,11 +1071,10 @@ void JSObject::SpillInformation::Print() { void DescriptorArray::PrintDescriptors() { PrintF("Descriptor array %d\n", number_of_descriptors()); - int number = 0; - for (DescriptorReader r(this); !r.eos(); r.advance()) { + for (int i = 0; i < number_of_descriptors(); i++) { + PrintF(" %d: ", i); Descriptor desc; - r.Get(&desc); - PrintF(" %d: ", number++); + Get(i, &desc); desc.Print(); } PrintF("\n"); @@ -1075,14 +1084,14 @@ void DescriptorArray::PrintDescriptors() { bool DescriptorArray::IsSortedNoDuplicates() { String* current_key = NULL; uint32_t current = 0; - for (DescriptorReader r(this); !r.eos(); r.advance()) { - String* key = r.GetKey(); + for (int i = 0; i < number_of_descriptors(); i++) { + String* key = GetKey(i); if (key == current_key) { PrintDescriptors(); return false; } current_key = key; - uint32_t hash = r.GetKey()->Hash(); + uint32_t hash = GetKey(i)->Hash(); if (hash < current) { PrintDescriptors(); return false; diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 8c8371564..37c9b8b61 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -53,6 +53,13 @@ Smi* PropertyDetails::AsSmi() { } +PropertyDetails PropertyDetails::AsDeleted() { + PropertyDetails d(DONT_ENUM, NORMAL); + Smi* smi = Smi::FromInt(AsSmi()->value() | DeletedField::encode(1)); + return PropertyDetails(smi); +} + + #define CAST_ACCESSOR(type) \ type* type::cast(Object* object) { \ ASSERT(object->Is##type()); \ @@ -409,6 +416,13 @@ bool Object::IsOddball() { } +bool Object::IsJSGlobalPropertyCell() { + return Object::IsHeapObject() + && HeapObject::cast(this)->map()->instance_type() + == JS_GLOBAL_PROPERTY_CELL_TYPE; +} + + bool Object::IsSharedFunctionInfo() { return Object::IsHeapObject() && (HeapObject::cast(this)->map()->instance_type() == @@ -467,7 +481,7 @@ bool Object::IsDictionary() { bool Object::IsSymbolTable() { - return IsHashTable() && this == Heap::symbol_table(); + return IsHashTable() && this == Heap::raw_unchecked_symbol_table(); } @@ -685,7 +699,7 @@ Object** HeapObject::RawField(HeapObject* obj, int byte_offset) { int Smi::value() { - return static_cast<int>(reinterpret_cast<intptr_t>(this) >> kSmiTagSize); + return static_cast<int>(reinterpret_cast<intptr_t>(this)) >> kSmiTagSize; } @@ -754,6 +768,8 @@ int Failure::value() const { Failure* Failure::RetryAfterGC(int requested_bytes) { + // Assert that the space encoding fits in the three bytes allotted for it. + ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0); int requested = requested_bytes >> kObjectAlignmentBits; int value = (requested << kSpaceTagSize) | NEW_SPACE; ASSERT(value >> kSpaceTagSize == requested); @@ -1046,6 +1062,18 @@ ACCESSORS(Oddball, to_string, String, kToStringOffset) ACCESSORS(Oddball, to_number, Object, kToNumberOffset) +Object* JSGlobalPropertyCell::value() { + return READ_FIELD(this, kValueOffset); +} + + +void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) { + // The write barrier is not used for global property cells. 
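+  // A cell holds the property's value directly; it must never be made
+  // to point at another cell.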
+ ASSERT(!val->IsJSGlobalPropertyCell()); + WRITE_FIELD(this, kValueOffset, val); +} + + int JSObject::GetHeaderSize() { switch (map()->instance_type()) { case JS_GLOBAL_PROXY_TYPE: @@ -1323,6 +1351,56 @@ Smi* DescriptorArray::GetDetails(int descriptor_number) { } +PropertyType DescriptorArray::GetType(int descriptor_number) { + ASSERT(descriptor_number < number_of_descriptors()); + return PropertyDetails(GetDetails(descriptor_number)).type(); +} + + +int DescriptorArray::GetFieldIndex(int descriptor_number) { + return Descriptor::IndexFromValue(GetValue(descriptor_number)); +} + + +JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) { + return JSFunction::cast(GetValue(descriptor_number)); +} + + +Object* DescriptorArray::GetCallbacksObject(int descriptor_number) { + ASSERT(GetType(descriptor_number) == CALLBACKS); + return GetValue(descriptor_number); +} + + +AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) { + ASSERT(GetType(descriptor_number) == CALLBACKS); + Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number)); + return reinterpret_cast<AccessorDescriptor*>(p->proxy()); +} + + +bool DescriptorArray::IsProperty(int descriptor_number) { + return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE; +} + + +bool DescriptorArray::IsTransition(int descriptor_number) { + PropertyType t = GetType(descriptor_number); + return t == MAP_TRANSITION || t == CONSTANT_TRANSITION; +} + + +bool DescriptorArray::IsNullDescriptor(int descriptor_number) { + return GetType(descriptor_number) == NULL_DESCRIPTOR; +} + + +bool DescriptorArray::IsDontEnum(int descriptor_number) { + return PropertyDetails(GetDetails(descriptor_number)).IsDontEnum(); +} + + void DescriptorArray::Get(int descriptor_number, Descriptor* desc) { desc->Init(GetKey(descriptor_number), GetValue(descriptor_number), @@ -1346,6 +1424,13 @@ void DescriptorArray::Set(int descriptor_number, Descriptor* desc) { } +void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) { + Descriptor desc; + src->Get(src_index, &desc); + Set(index, &desc); +} + + void DescriptorArray::Swap(int first, int second) { fast_swap(this, ToKeyIndex(first), ToKeyIndex(second)); FixedArray* content_array = GetContentArray(); @@ -1354,15 +1439,14 @@ void DescriptorArray::Swap(int first, int second) { } -bool Dictionary::requires_slow_elements() { +bool NumberDictionary::requires_slow_elements() { Object* max_index_object = get(kMaxNumberKeyIndex); if (!max_index_object->IsSmi()) return false; return 0 != (Smi::cast(max_index_object)->value() & kRequiresSlowElementsMask); } - -uint32_t Dictionary::max_number_key() { +uint32_t NumberDictionary::max_number_key() { ASSERT(!requires_slow_elements()); Object* max_index_object = get(kMaxNumberKeyIndex); if (!max_index_object->IsSmi()) return 0; @@ -1370,8 +1454,7 @@ uint32_t Dictionary::max_number_key() { return value >> kRequiresSlowElementsTagSize; } - -void Dictionary::set_requires_slow_elements() { +void NumberDictionary::set_requires_slow_elements() { set(kMaxNumberKeyIndex, Smi::FromInt(kRequiresSlowElementsMask), SKIP_WRITE_BARRIER); @@ -1384,7 +1467,6 @@ void Dictionary::set_requires_slow_elements() { CAST_ACCESSOR(FixedArray) CAST_ACCESSOR(DescriptorArray) -CAST_ACCESSOR(Dictionary) CAST_ACCESSOR(SymbolTable) CAST_ACCESSOR(CompilationCacheTable) CAST_ACCESSOR(MapCache) @@ -1403,6 +1485,7 @@ CAST_ACCESSOR(Failure) CAST_ACCESSOR(HeapObject) CAST_ACCESSOR(HeapNumber) CAST_ACCESSOR(Oddball) +CAST_ACCESSOR(JSGlobalPropertyCell) 
CAST_ACCESSOR(SharedFunctionInfo) CAST_ACCESSOR(Map) CAST_ACCESSOR(JSFunction) @@ -1422,9 +1505,9 @@ CAST_ACCESSOR(Struct) STRUCT_LIST(MAKE_STRUCT_CAST) #undef MAKE_STRUCT_CAST -template <int prefix_size, int elem_size> -HashTable<prefix_size, elem_size>* HashTable<prefix_size, elem_size>::cast( - Object* obj) { + +template <typename Shape, typename Key> +HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) { ASSERT(obj->IsHashTable()); return reinterpret_cast<HashTable*>(obj); } @@ -2451,15 +2534,15 @@ bool JSObject::HasIndexedInterceptor() { } -Dictionary* JSObject::property_dictionary() { +StringDictionary* JSObject::property_dictionary() { ASSERT(!HasFastProperties()); - return Dictionary::cast(properties()); + return StringDictionary::cast(properties()); } -Dictionary* JSObject::element_dictionary() { +NumberDictionary* JSObject::element_dictionary() { ASSERT(!HasFastElements()); - return Dictionary::cast(elements()); + return NumberDictionary::cast(elements()); } @@ -2623,16 +2706,17 @@ void AccessorInfo::set_property_attributes(PropertyAttributes attributes) { set_flag(Smi::FromInt(rest_value | AttributesField::encode(attributes))); } -void Dictionary::SetEntry(int entry, - Object* key, - Object* value, - PropertyDetails details) { - ASSERT(!key->IsString() || details.index() > 0); - int index = EntryToIndex(entry); - WriteBarrierMode mode = GetWriteBarrierMode(); - set(index, key, mode); - set(index+1, value, mode); - fast_set(this, index+2, details.AsSmi()); +template<typename Shape, typename Key> +void Dictionary<Shape, Key>::SetEntry(int entry, + Object* key, + Object* value, + PropertyDetails details) { + ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0); + int index = HashTable<Shape, Key>::EntryToIndex(entry); + WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(); + FixedArray::set(index, key, mode); + FixedArray::set(index+1, value, mode); + FixedArray::fast_set(this, index+2, details.AsSmi()); } @@ -2640,8 +2724,8 @@ void Map::ClearCodeCache() { // No write barrier is needed since empty_fixed_array is not in new space. // Please note this function is used during marking: // - MarkCompactCollector::MarkUnmarkedObject - ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array())); - WRITE_FIELD(this, kCodeCacheOffset, Heap::empty_fixed_array()); + ASSERT(!Heap::InNewSpace(Heap::raw_unchecked_empty_fixed_array())); + WRITE_FIELD(this, kCodeCacheOffset, Heap::raw_unchecked_empty_fixed_array()); } diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index ad57d17fb..a9004c924 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -41,6 +41,7 @@ #include "disassembler.h" #endif + namespace v8 { namespace internal { @@ -138,7 +139,7 @@ void Object::Lookup(String* name, LookupResult* result) { } else if (IsBoolean()) { holder = global_context->boolean_function()->instance_prototype(); } - ASSERT(holder != NULL); // cannot handle null or undefined. + ASSERT(holder != NULL); // Cannot handle null or undefined. 
JSObject::cast(holder)->Lookup(name, result); } @@ -399,6 +400,94 @@ Object* JSObject::DeleteLazyProperty(LookupResult* result, } +Object* JSObject::GetNormalizedProperty(LookupResult* result) { + ASSERT(!HasFastProperties()); + Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry()); + if (IsGlobalObject()) { + value = JSGlobalPropertyCell::cast(value)->value(); + } + ASSERT(!value->IsJSGlobalPropertyCell()); + return value; +} + + +Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) { + ASSERT(!HasFastProperties()); + if (IsGlobalObject()) { + JSGlobalPropertyCell* cell = + JSGlobalPropertyCell::cast( + property_dictionary()->ValueAt(result->GetDictionaryEntry())); + cell->set_value(value); + } else { + property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value); + } + return value; +} + + +Object* JSObject::SetNormalizedProperty(String* name, + Object* value, + PropertyDetails details) { + ASSERT(!HasFastProperties()); + int entry = property_dictionary()->FindEntry(name); + if (entry == StringDictionary::kNotFound) { + Object* store_value = value; + if (IsGlobalObject()) { + store_value = Heap::AllocateJSGlobalPropertyCell(value); + if (store_value->IsFailure()) return store_value; + } + Object* dict = property_dictionary()->Add(name, store_value, details); + if (dict->IsFailure()) return dict; + set_properties(StringDictionary::cast(dict)); + return value; + } + // Preserve enumeration index. + details = PropertyDetails(details.attributes(), + details.type(), + property_dictionary()->DetailsAt(entry).index()); + if (IsGlobalObject()) { + JSGlobalPropertyCell* cell = + JSGlobalPropertyCell::cast(property_dictionary()->ValueAt(entry)); + cell->set_value(value); + // Please note we have to update the property details. + property_dictionary()->DetailsAtPut(entry, details); + } else { + property_dictionary()->SetEntry(entry, name, value, details); + } + return value; +} + + +Object* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) { + ASSERT(!HasFastProperties()); + StringDictionary* dictionary = property_dictionary(); + int entry = dictionary->FindEntry(name); + if (entry != StringDictionary::kNotFound) { + // If we have a global object set the cell to the hole. + if (IsGlobalObject()) { + PropertyDetails details = dictionary->DetailsAt(entry); + if (details.IsDontDelete()) { + if (mode != FORCE_DELETION) return Heap::false_value(); + // When forced to delete global properties, we have to make a + // map change to invalidate any ICs that think they can load + // from the DontDelete cell without checking if it contains + // the hole value. + Object* new_map = map()->CopyDropDescriptors(); + if (new_map->IsFailure()) return new_map; + set_map(Map::cast(new_map)); + } + JSGlobalPropertyCell* cell = + JSGlobalPropertyCell::cast(dictionary->ValueAt(entry)); + cell->set_value(Heap::the_hole_value()); + dictionary->DetailsAtPut(entry, details.AsDeleted()); + } else { + return dictionary->DeleteProperty(entry, mode); + } + } + return Heap::true_value(); +} + + Object* Object::GetProperty(Object* receiver, LookupResult* result, String* name, @@ -449,8 +538,7 @@ Object* Object::GetProperty(Object* receiver, JSObject* holder = result->holder(); switch (result->type()) { case NORMAL: - value = - holder->property_dictionary()->ValueAt(result->GetDictionaryEntry()); + value = holder->GetNormalizedProperty(result); ASSERT(!value->IsTheHole() || result->IsReadOnly()); return value->IsTheHole() ? 
Heap::undefined_value() : value; case FIELD: @@ -949,6 +1037,10 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { case PROXY_TYPE: accumulator->Add("<Proxy>"); break; + case JS_GLOBAL_PROPERTY_CELL_TYPE: + accumulator->Add("Cell for "); + JSGlobalPropertyCell::cast(this)->value()->ShortPrint(accumulator); + break; default: accumulator->Add("<Other heap object (%d)>", map()->instance_type()); break; @@ -1042,6 +1134,10 @@ void HeapObject::IterateBody(InstanceType type, int object_size, case CODE_TYPE: reinterpret_cast<Code*>(this)->CodeIterateBody(v); break; + case JS_GLOBAL_PROPERTY_CELL_TYPE: + reinterpret_cast<JSGlobalPropertyCell*>(this) + ->JSGlobalPropertyCellIterateBody(v); + break; case HEAP_NUMBER_TYPE: case FILLER_TYPE: case BYTE_ARRAY_TYPE: @@ -1250,12 +1346,31 @@ Object* JSObject::AddConstantFunctionProperty(String* name, Object* JSObject::AddSlowProperty(String* name, Object* value, PropertyAttributes attributes) { + ASSERT(!HasFastProperties()); + StringDictionary* dict = property_dictionary(); + Object* store_value = value; + if (IsGlobalObject()) { + // In case name is an orphaned property reuse the cell. + int entry = dict->FindEntry(name); + if (entry != StringDictionary::kNotFound) { + store_value = dict->ValueAt(entry); + JSGlobalPropertyCell::cast(store_value)->set_value(value); + // Assign an enumeration index to the property and update + // SetNextEnumerationIndex. + int index = dict->NextEnumerationIndex(); + PropertyDetails details = PropertyDetails(attributes, NORMAL, index); + dict->SetNextEnumerationIndex(index + 1); + dict->SetEntry(entry, name, store_value, details); + return value; + } + store_value = Heap::AllocateJSGlobalPropertyCell(value); + if (store_value->IsFailure()) return store_value; + JSGlobalPropertyCell::cast(store_value)->set_value(value); + } PropertyDetails details = PropertyDetails(attributes, NORMAL); - Object* result = property_dictionary()->AddStringEntry(name, value, details); + Object* result = dict->Add(name, store_value, details); if (result->IsFailure()) return result; - if (property_dictionary() != result) { - set_properties(Dictionary::cast(result)); - } + if (dict != result) set_properties(StringDictionary::cast(result)); return value; } @@ -1301,8 +1416,8 @@ Object* JSObject::SetPropertyPostInterceptor(String* name, Object* JSObject::ReplaceSlowProperty(String* name, Object* value, PropertyAttributes attributes) { - Dictionary* dictionary = property_dictionary(); - int old_index = dictionary->FindStringEntry(name); + StringDictionary* dictionary = property_dictionary(); + int old_index = dictionary->FindEntry(name); int new_enumeration_index = 0; // 0 means "Use the next available index." if (old_index != -1) { // All calls to ReplaceSlowProperty have had all transitions removed. 
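(Stepping back from the object-model hunks for a moment: the messages.js changes earlier in this diff are what expose the collected stack traces to scripts. Below is a minimal sketch of the resulting API, assuming a shell-provided print() function; e.stack, Error.stackTraceLimit, Error.prepareStackTrace, and the CallSite accessors are all defined by this patch. Note that in this revision a trace is only collected while Error.stackTraceLimit is set to a nonzero value, and values above 10000 are capped.)

    // Collect at most 20 frames per Error object.
    Error.stackTraceLimit = 20;

    function inner() { throw new Error("boom"); }
    function outer() { inner(); }

    try {
      outer();
    } catch (e) {
      // Formatted lazily by FormatRawStackTrace through a one-shot
      // accessor on first access.
      print(e.stack);  // "Error: boom" followed by one "at ..." line per frame.
    }

    // A user-installed Error.prepareStackTrace receives the raw CallSite
    // frames instead of the default formatting; it applies to any trace
    // that has not been formatted yet.
    Error.prepareStackTrace = function (error, frames) {
      var names = [];
      for (var i = 0; i < frames.length; i++) {
        names.push(frames[i].getFunctionName() || "<anonymous>");
      }
      return names.join(" < ");
    };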
@@ -1311,13 +1426,7 @@ Object* JSObject::ReplaceSlowProperty(String* name, } PropertyDetails new_details(attributes, NORMAL, new_enumeration_index); - Object* result = - dictionary->SetOrAddStringEntry(name, value, new_details); - if (result->IsFailure()) return result; - if (dictionary != result) { - set_properties(Dictionary::cast(result)); - } - return value; + return SetNormalizedProperty(name, value, new_details); } Object* JSObject::ConvertDescriptorToFieldAndMapTransition( @@ -1523,6 +1632,7 @@ Object* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter, return *value_handle; } + void JSObject::LookupCallbackSetterInPrototypes(String* name, LookupResult* result) { for (Object* pt = GetPrototype(); @@ -1548,9 +1658,9 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) { pt != Heap::null_value(); pt = pt->GetPrototype()) { if (JSObject::cast(pt)->HasFastElements()) continue; - Dictionary* dictionary = JSObject::cast(pt)->element_dictionary(); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) { + NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS) { @@ -1600,18 +1710,26 @@ void JSObject::LocalLookupRealNamedProperty(String* name, return; } } else { - int entry = property_dictionary()->FindStringEntry(name); - if (entry != DescriptorArray::kNotFound) { + int entry = property_dictionary()->FindEntry(name); + if (entry != StringDictionary::kNotFound) { + Object* value = property_dictionary()->ValueAt(entry); + if (IsGlobalObject()) { + PropertyDetails d = property_dictionary()->DetailsAt(entry); + if (d.IsDeleted()) { + result->NotFound(); + return; + } + value = JSGlobalPropertyCell::cast(value)->value(); + ASSERT(result->IsLoaded()); + } // Make sure to disallow caching for uninitialized constants // found in the dictionary-mode objects. - if (property_dictionary()->ValueAt(entry)->IsTheHole()) { - result->DisallowCaching(); - } + if (value->IsTheHole()) result->DisallowCaching(); result->DictionaryResult(this, entry); return; } // Slow case object skipped during lookup. Do not use inline caching. - result->DisallowCaching(); + if (!IsGlobalObject()) result->DisallowCaching(); } result->NotFound(); } @@ -1736,8 +1854,7 @@ Object* JSObject::SetProperty(LookupResult* result, // transition or null descriptor and there are no setters in the prototypes. switch (result->type()) { case NORMAL: - property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value); - return value; + return SetNormalizedProperty(result, value); case FIELD: return FastPropertyAtPut(result->GetFieldIndex(), value); case MAP_TRANSITION: @@ -1753,7 +1870,7 @@ Object* JSObject::SetProperty(LookupResult* result, if (value == result->GetConstantFunction()) return value; // Preserve the attributes of this existing property. attributes = result->GetAttributes(); - return ConvertDescriptorToFieldAndMapTransition(name, value, attributes); + return ConvertDescriptorToField(name, value, attributes); case CALLBACKS: return SetPropertyWithCallback(result->GetCallbackObject(), name, @@ -1816,11 +1933,10 @@ Object* JSObject::IgnoreAttributesAndSetLocalProperty( if (!result->IsLoaded()) { return SetLazyProperty(result, name, value, attributes); } - // Check of IsReadOnly removed from here in clone. 
+ // Check of IsReadOnly removed from here in clone. switch (result->type()) { case NORMAL: - property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value); - return value; + return SetNormalizedProperty(result, value); case FIELD: return FastPropertyAtPut(result->GetFieldIndex(), value); case MAP_TRANSITION: @@ -1836,7 +1952,7 @@ Object* JSObject::IgnoreAttributesAndSetLocalProperty( if (value == result->GetConstantFunction()) return value; // Preserve the attributes of this existing property. attributes = result->GetAttributes(); - return ConvertDescriptorToFieldAndMapTransition(name, value, attributes); + return ConvertDescriptorToField(name, value, attributes); case CALLBACKS: case INTERCEPTOR: // Override callback in clone @@ -1995,40 +2111,51 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) { // Allocate new content Object* obj = - Dictionary::Allocate(map()->NumberOfDescribedProperties() * 2 + 4); + StringDictionary::Allocate(map()->NumberOfDescribedProperties() * 2 + 4); if (obj->IsFailure()) return obj; - Dictionary* dictionary = Dictionary::cast(obj); + StringDictionary* dictionary = StringDictionary::cast(obj); - for (DescriptorReader r(map()->instance_descriptors()); - !r.eos(); - r.advance()) { - PropertyDetails details = r.GetDetails(); + DescriptorArray* descs = map()->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + PropertyDetails details = descs->GetDetails(i); switch (details.type()) { case CONSTANT_FUNCTION: { PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, details.index()); - Object* value = r.GetConstantFunction(); - Object* result = dictionary->AddStringEntry(r.GetKey(), value, d); + Object* value = descs->GetConstantFunction(i); + if (IsGlobalObject()) { + value = Heap::AllocateJSGlobalPropertyCell(value); + if (value->IsFailure()) return value; + } + Object* result = dictionary->Add(descs->GetKey(i), value, d); if (result->IsFailure()) return result; - dictionary = Dictionary::cast(result); + dictionary = StringDictionary::cast(result); break; } case FIELD: { PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, details.index()); - Object* value = FastPropertyAt(r.GetFieldIndex()); - Object* result = dictionary->AddStringEntry(r.GetKey(), value, d); + Object* value = FastPropertyAt(descs->GetFieldIndex(i)); + if (IsGlobalObject()) { + value = Heap::AllocateJSGlobalPropertyCell(value); + if (value->IsFailure()) return value; + } + Object* result = dictionary->Add(descs->GetKey(i), value, d); if (result->IsFailure()) return result; - dictionary = Dictionary::cast(result); + dictionary = StringDictionary::cast(result); break; } case CALLBACKS: { PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, details.index()); - Object* value = r.GetCallbacksObject(); - Object* result = dictionary->AddStringEntry(r.GetKey(), value, d); + Object* value = descs->GetCallbacksObject(i); + if (IsGlobalObject()) { + value = Heap::AllocateJSGlobalPropertyCell(value); + if (value->IsFailure()) return value; + } + Object* result = dictionary->Add(descs->GetKey(i), value, d); if (result->IsFailure()) return result; - dictionary = Dictionary::cast(result); + dictionary = StringDictionary::cast(result); break; } case MAP_TRANSITION: @@ -2085,8 +2212,9 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) { Object* JSObject::TransformToFastProperties(int unused_property_fields) { if (HasFastProperties()) return this; + ASSERT(!IsGlobalObject()); return 
property_dictionary()-> - TransformPropertiesToFastFor(this, unused_property_fields); + TransformPropertiesToFastFor(this, unused_property_fields); } @@ -2100,9 +2228,9 @@ Object* JSObject::NormalizeElements() { int length = IsJSArray() ? Smi::cast(JSArray::cast(this)->length())->value() : array->length(); - Object* obj = Dictionary::Allocate(length); + Object* obj = NumberDictionary::Allocate(length); if (obj->IsFailure()) return obj; - Dictionary* dictionary = Dictionary::cast(obj); + NumberDictionary* dictionary = NumberDictionary::cast(obj); // Copy entries. for (int i = 0; i < length; i++) { Object* value = array->get(i); @@ -2110,7 +2238,7 @@ Object* JSObject::NormalizeElements() { PropertyDetails details = PropertyDetails(NONE, NORMAL); Object* result = dictionary->AddNumberEntry(i, array->get(i), details); if (result->IsFailure()) return result; - dictionary = Dictionary::cast(result); + dictionary = NumberDictionary::cast(result); } } // Switch to using the dictionary as the backing storage for elements. @@ -2139,12 +2267,7 @@ Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) { Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); if (obj->IsFailure()) return obj; - ASSERT(!HasFastProperties()); - // Attempt to remove the property from the property dictionary. - Dictionary* dictionary = property_dictionary(); - int entry = dictionary->FindStringEntry(name); - if (entry != -1) return dictionary->DeleteProperty(entry, mode); - return Heap::true_value(); + return DeleteNormalizedProperty(name, mode); } @@ -2192,9 +2315,11 @@ Object* JSObject::DeleteElementPostInterceptor(uint32_t index, return Heap::true_value(); } ASSERT(!HasFastElements()); - Dictionary* dictionary = element_dictionary(); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) return dictionary->DeleteProperty(entry, mode); + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + return dictionary->DeleteProperty(entry, mode); + } return Heap::true_value(); } @@ -2264,9 +2389,11 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) { } return Heap::true_value(); } else { - Dictionary* dictionary = element_dictionary(); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) return dictionary->DeleteProperty(entry, mode); + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + return dictionary->DeleteProperty(entry, mode); + } } return Heap::true_value(); } @@ -2318,10 +2445,7 @@ Object* JSObject::DeleteProperty(String* name, DeleteMode mode) { Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); if (obj->IsFailure()) return obj; // Make sure the properties are normalized before removing the entry. 
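+      // DeleteNormalizedProperty (defined earlier in this file) replaces
+      // the open-coded removal below; for global objects it clears the
+      // property cell instead of removing the dictionary entry.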
- Dictionary* dictionary = property_dictionary(); - int entry = dictionary->FindStringEntry(name); - if (entry != -1) return dictionary->DeleteProperty(entry, mode); - return Heap::true_value(); + return DeleteNormalizedProperty(name, mode); } } @@ -2443,35 +2567,44 @@ bool JSObject::IsSimpleEnum() { int Map::NumberOfDescribedProperties() { int result = 0; - for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) { - if (r.IsProperty()) result++; + DescriptorArray* descs = instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->IsProperty(i)) result++; } return result; } int Map::PropertyIndexFor(String* name) { - for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) { - if (r.Equals(name) && !r.IsNullDescriptor()) return r.GetFieldIndex(); + DescriptorArray* descs = instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (name->Equals(descs->GetKey(i)) && !descs->IsNullDescriptor(i)) { + return descs->GetFieldIndex(i); + } } return -1; } int Map::NextFreePropertyIndex() { - int index = -1; - for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) { - if (r.type() == FIELD) { - if (r.GetFieldIndex() > index) index = r.GetFieldIndex(); + int max_index = -1; + DescriptorArray* descs = instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->GetType(i) == FIELD) { + int current_index = descs->GetFieldIndex(i); + if (current_index > max_index) max_index = current_index; } } - return index+1; + return max_index + 1; } AccessorDescriptor* Map::FindAccessor(String* name) { - for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) { - if (r.Equals(name) && r.type() == CALLBACKS) return r.GetCallbacks(); + DescriptorArray* descs = instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (name->Equals(descs->GetKey(i)) && descs->GetType(i) == CALLBACKS) { + return descs->GetCallbacks(i); + } } return NULL; } @@ -2572,9 +2705,9 @@ Object* JSObject::DefineGetterSetter(String* name, if (is_element) { // Lookup the index. if (!HasFastElements()) { - Dictionary* dictionary = element_dictionary(); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { Object* result = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.IsReadOnly()) return Heap::undefined_value(); @@ -2610,26 +2743,29 @@ Object* JSObject::DefineGetterSetter(String* name, // Update the dictionary with the new CALLBACKS property. Object* dict = - element_dictionary()->SetOrAddNumberEntry(index, structure, details); + element_dictionary()->Set(index, structure, details); if (dict->IsFailure()) return dict; // If name is an index we need to stay in slow case. - Dictionary* elements = Dictionary::cast(dict); + NumberDictionary* elements = NumberDictionary::cast(dict); elements->set_requires_slow_elements(); // Set the potential new dictionary on the object. - set_elements(Dictionary::cast(dict)); + set_elements(NumberDictionary::cast(dict)); } else { // Normalize object to make this operation simple. Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); if (ok->IsFailure()) return ok; - // Update the dictionary with the new CALLBACKS property. 
- Object* dict = - property_dictionary()->SetOrAddStringEntry(name, structure, details); - if (dict->IsFailure()) return dict; + // For the global object allocate a new map to invalidate the global inline + // caches which have a global property cell reference directly in the code. + if (IsGlobalObject()) { + Object* new_map = map()->CopyDropDescriptors(); + if (new_map->IsFailure()) return new_map; + set_map(Map::cast(new_map)); + } - // Set the potential new dictionary on the object. - set_properties(Dictionary::cast(dict)); + // Update the dictionary with the new CALLBACKS property. + return SetNormalizedProperty(name, structure, details); } return structure; @@ -2681,9 +2817,9 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) { obj = JSObject::cast(obj)->GetPrototype()) { JSObject* jsObject = JSObject::cast(obj); if (!jsObject->HasFastElements()) { - Dictionary* dictionary = jsObject->element_dictionary(); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) { + NumberDictionary* dictionary = jsObject->element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS) { @@ -2716,16 +2852,15 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) { Object* JSObject::SlowReverseLookup(Object* value) { if (HasFastProperties()) { - for (DescriptorReader r(map()->instance_descriptors()); - !r.eos(); - r.advance()) { - if (r.type() == FIELD) { - if (FastPropertyAt(r.GetFieldIndex()) == value) { - return r.GetKey(); + DescriptorArray* descs = map()->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->GetType(i) == FIELD) { + if (FastPropertyAt(descs->GetFieldIndex(i)) == value) { + return descs->GetKey(i); } - } else if (r.type() == CONSTANT_FUNCTION) { - if (r.GetConstantFunction() == value) { - return r.GetKey(); + } else if (descs->GetType(i) == CONSTANT_FUNCTION) { + if (descs->GetConstantFunction(i) == value) { + return descs->GetKey(i); } } } @@ -2886,7 +3021,7 @@ Object* FixedArray::AddKeysFromJSArray(JSArray* array) { return UnionOfKeys(array->elements()); } ASSERT(!array->HasFastElements()); - Dictionary* dict = array->element_dictionary(); + NumberDictionary* dict = array->element_dictionary(); int size = dict->NumberOfElements(); // Allocate a temporary fixed array. @@ -3039,13 +3174,13 @@ Object* DescriptorArray::CopyInsert(Descriptor* descriptor, int transitions = 0; int null_descriptors = 0; if (remove_transitions) { - for (DescriptorReader r(this); !r.eos(); r.advance()) { - if (r.IsTransition()) transitions++; - if (r.IsNullDescriptor()) null_descriptors++; + for (int i = 0; i < number_of_descriptors(); i++) { + if (IsTransition(i)) transitions++; + if (IsNullDescriptor(i)) null_descriptors++; } } else { - for (DescriptorReader r(this); !r.eos(); r.advance()) { - if (r.IsNullDescriptor()) null_descriptors++; + for (int i = 0; i < number_of_descriptors(); i++) { + if (IsNullDescriptor(i)) null_descriptors++; } } int new_size = number_of_descriptors() - transitions - null_descriptors; @@ -3093,32 +3228,31 @@ Object* DescriptorArray::CopyInsert(Descriptor* descriptor, // Copy the descriptors, filtering out transitions and null descriptors, // and inserting or replacing a descriptor. 
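+  // The DescriptorReader/DescriptorWriter pair below is replaced, here
+  // and throughout this patch, by plain index loops over the
+  // DescriptorArray using the new CopyFrom helper.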
- DescriptorWriter w(new_descriptors); - DescriptorReader r(this); uint32_t descriptor_hash = descriptor->GetKey()->Hash(); + int from_index = 0; + int to_index = 0; - for (; !r.eos(); r.advance()) { - if (r.GetKey()->Hash() > descriptor_hash || - r.GetKey() == descriptor->GetKey()) break; - if (r.IsNullDescriptor()) continue; - if (remove_transitions && r.IsTransition()) continue; - w.WriteFrom(&r); - } - w.Write(descriptor); - if (replacing) { - ASSERT(r.GetKey() == descriptor->GetKey()); - r.advance(); - } else { - ASSERT(r.eos() || - r.GetKey()->Hash() > descriptor_hash || - r.IsNullDescriptor()); + for (; from_index < number_of_descriptors(); from_index++) { + String* key = GetKey(from_index); + if (key->Hash() > descriptor_hash || key == descriptor->GetKey()) { + break; + } + if (IsNullDescriptor(from_index)) continue; + if (remove_transitions && IsTransition(from_index)) continue; + new_descriptors->CopyFrom(to_index++, this, from_index); } - for (; !r.eos(); r.advance()) { - if (r.IsNullDescriptor()) continue; - if (remove_transitions && r.IsTransition()) continue; - w.WriteFrom(&r); + + new_descriptors->Set(to_index++, descriptor); + if (replacing) from_index++; + + for (; from_index < number_of_descriptors(); from_index++) { + if (IsNullDescriptor(from_index)) continue; + if (remove_transitions && IsTransition(from_index)) continue; + new_descriptors->CopyFrom(to_index++, this, from_index); } - ASSERT(w.eos()); + + ASSERT(to_index == new_descriptors->number_of_descriptors()); + SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates()); return new_descriptors; } @@ -3131,8 +3265,8 @@ Object* DescriptorArray::RemoveTransitions() { // Compute the size of the map transition entries to be removed. int num_removed = 0; - for (DescriptorReader r(this); !r.eos(); r.advance()) { - if (!r.IsProperty()) num_removed++; + for (int i = 0; i < number_of_descriptors(); i++) { + if (!IsProperty(i)) num_removed++; } // Allocate the new descriptor array. @@ -3141,11 +3275,11 @@ Object* DescriptorArray::RemoveTransitions() { DescriptorArray* new_descriptors = DescriptorArray::cast(result); // Copy the content. - DescriptorWriter w(new_descriptors); - for (DescriptorReader r(this); !r.eos(); r.advance()) { - if (r.IsProperty()) w.WriteFrom(&r); + int next_descriptor = 0; + for (int i = 0; i < number_of_descriptors(); i++) { + if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i); } - ASSERT(w.eos()); + ASSERT(next_descriptor == new_descriptors->number_of_descriptors()); return new_descriptors; } @@ -3974,6 +4108,11 @@ void ConsString::ConsStringIterateBody(ObjectVisitor* v) { } +void JSGlobalPropertyCell::JSGlobalPropertyCellIterateBody(ObjectVisitor* v) { + IteratePointers(v, kValueOffset, kValueOffset + kPointerSize); +} + + uint16_t ConsString::ConsStringGet(int index) { ASSERT(index >= 0 && index < this->length()); @@ -4447,10 +4586,10 @@ void String::PrintOn(FILE* file) { void Map::CreateBackPointers() { DescriptorArray* descriptors = instance_descriptors(); - for (DescriptorReader r(descriptors); !r.eos(); r.advance()) { - if (r.type() == MAP_TRANSITION) { + for (int i = 0; i < descriptors->number_of_descriptors(); i++) { + if (descriptors->GetType(i) == MAP_TRANSITION) { // Get target. - Map* target = Map::cast(r.GetValue()); + Map* target = Map::cast(descriptors->GetValue(i)); #ifdef DEBUG // Verify target. 
Object* source_prototype = prototype(); @@ -4461,7 +4600,7 @@ void Map::CreateBackPointers() { ASSERT(target_prototype->IsJSObject() || target_prototype->IsNull()); ASSERT(source_prototype->IsMap() || - source_prototype == target_prototype); + source_prototype == target_prototype); #endif // Point target back to source. set_prototype() will not let us set // the prototype to a map, as we do here. @@ -4476,7 +4615,7 @@ void Map::ClearNonLiveTransitions(Object* real_prototype) { // low-level accessors to get and modify their data. DescriptorArray* d = reinterpret_cast<DescriptorArray*>( *RawField(this, Map::kInstanceDescriptorsOffset)); - if (d == Heap::empty_descriptor_array()) return; + if (d == Heap::raw_unchecked_empty_descriptor_array()) return; Smi* NullDescriptorDetails = PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi(); FixedArray* contents = reinterpret_cast<FixedArray*>( @@ -4897,8 +5036,30 @@ const char* Code::ICState2String(InlineCacheState state) { } +const char* Code::PropertyType2String(PropertyType type) { + switch (type) { + case NORMAL: return "NORMAL"; + case FIELD: return "FIELD"; + case CONSTANT_FUNCTION: return "CONSTANT_FUNCTION"; + case CALLBACKS: return "CALLBACKS"; + case INTERCEPTOR: return "INTERCEPTOR"; + case MAP_TRANSITION: return "MAP_TRANSITION"; + case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION"; + case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR"; + } + UNREACHABLE(); + return NULL; +} + void Code::Disassemble(const char* name) { PrintF("kind = %s\n", Kind2String(kind())); + if (is_inline_cache_stub()) { + PrintF("ic_state = %s\n", ICState2String(ic_state())); + PrintF("ic_in_loop = %d\n", ic_in_loop() == IN_LOOP); + if (ic_state() == MONOMORPHIC) { + PrintF("type = %s\n", PropertyType2String(type())); + } + } if ((name != NULL) && (name[0] != '\0')) { PrintF("name = %s\n", name); } @@ -4930,7 +5091,7 @@ void JSObject::SetFastElements(FixedArray* elems) { elems->set(i, old_elements->get(i), mode); } } else { - Dictionary* dictionary = Dictionary::cast(elements()); + NumberDictionary* dictionary = NumberDictionary::cast(elements()); for (int i = 0; i < dictionary->Capacity(); i++) { Object* key = dictionary->KeyAt(i); if (key->IsNumber()) { @@ -5095,7 +5256,10 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) { return true; } } else { - if (element_dictionary()->FindNumberEntry(index) != -1) return true; + if (element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound) { + return true; + } } // Handle [] on String objects. @@ -5170,7 +5334,8 @@ bool JSObject::HasLocalElement(uint32_t index) { return (index < length) && !FixedArray::cast(elements())->get(index)->IsTheHole(); } else { - return element_dictionary()->FindNumberEntry(index) != -1; + return element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound; } } @@ -5196,7 +5361,10 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) { if ((index < length) && !FixedArray::cast(elements())->get(index)->IsTheHole()) return true; } else { - if (element_dictionary()->FindNumberEntry(index) != -1) return true; + if (element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound) { + return true; + } } // Handle [] on String objects. @@ -5329,10 +5497,10 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) { // Insert element in the dictionary. 
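+  // Misses are now reported through the NumberDictionary::kNotFound
+  // sentinel rather than the bare -1 returned by the old FindNumberEntry.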
FixedArray* elms = FixedArray::cast(elements()); - Dictionary* dictionary = Dictionary::cast(elms); + NumberDictionary* dictionary = NumberDictionary::cast(elms); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) { + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS) { @@ -5381,7 +5549,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) { CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &new_length)); JSArray::cast(this)->set_length(Smi::FromInt(new_length)); } else { - new_length = Dictionary::cast(elements())->max_number_key() + 1; + new_length = NumberDictionary::cast(elements())->max_number_key() + 1; } Object* obj = Heap::AllocateFixedArrayWithHoles(new_length); if (obj->IsFailure()) return obj; @@ -5424,9 +5592,9 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver, if (!value->IsTheHole()) return value; } } else { - Dictionary* dictionary = element_dictionary(); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS) { @@ -5508,9 +5676,9 @@ Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) { if (!value->IsTheHole()) return value; } } else { - Dictionary* dictionary = element_dictionary(); - int entry = dictionary->FindNumberEntry(index); - if (entry != -1) { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS) { @@ -5546,7 +5714,7 @@ bool JSObject::HasDenseElements() { if (!elms->get(i)->IsTheHole()) number_of_elements++; } } else { - Dictionary* dictionary = Dictionary::cast(elements()); + NumberDictionary* dictionary = NumberDictionary::cast(elements()); capacity = dictionary->Capacity(); number_of_elements = dictionary->NumberOfElements(); } @@ -5568,7 +5736,7 @@ bool JSObject::ShouldConvertToSlowElements(int new_capacity) { bool JSObject::ShouldConvertToFastElements() { ASSERT(!HasFastElements()); - Dictionary* dictionary = Dictionary::cast(elements()); + NumberDictionary* dictionary = NumberDictionary::cast(elements()); // If the elements are sparse, we should not go back to fast case. if (!HasDenseElements()) return false; // If an element has been added at a very high index in the elements @@ -5587,17 +5755,47 @@ bool JSObject::ShouldConvertToFastElements() { length = dictionary->max_number_key(); } return static_cast<uint32_t>(dictionary->Capacity()) >= - (length / (2 * Dictionary::kElementSize)); + (length / (2 * NumberDictionary::kEntrySize)); } -void Dictionary::CopyValuesTo(FixedArray* elements) { +// Certain compilers request function template instantiation when they +// see the definition of the other template functions in the +// class. This requires us to have the template functions put +// together, so even though this function belongs in objects-debug.cc, +// we keep it here instead to satisfy certain compilers. 
+#ifdef DEBUG +template<typename Shape, typename Key> +void Dictionary<Shape, Key>::Print() { + int capacity = HashTable<Shape, Key>::Capacity(); + for (int i = 0; i < capacity; i++) { + Object* k = HashTable<Shape, Key>::KeyAt(i); + if (HashTable<Shape, Key>::IsKey(k)) { + PrintF(" "); + if (k->IsString()) { + String::cast(k)->StringPrint(); + } else { + k->ShortPrint(); + } + PrintF(": "); + ValueAt(i)->ShortPrint(); + PrintF("\n"); + } + } +} +#endif + + +template<typename Shape, typename Key> +void Dictionary<Shape, Key>::CopyValuesTo(FixedArray* elements) { int pos = 0; - int capacity = Capacity(); + int capacity = HashTable<Shape, Key>::Capacity(); WriteBarrierMode mode = elements->GetWriteBarrierMode(); for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k)) elements->set(pos++, ValueAt(i), mode); + Object* k = Dictionary<Shape, Key>::KeyAt(i); + if (Dictionary<Shape, Key>::IsKey(k)) { + elements->set(pos++, ValueAt(i), mode); + } } ASSERT(pos == elements->length()); } @@ -5638,11 +5836,10 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver, } -bool JSObject::GetPropertyWithInterceptorProper( +Object* JSObject::GetPropertyWithInterceptorProper( JSObject* receiver, String* name, - PropertyAttributes* attributes, - Object** result_object) { + PropertyAttributes* attributes) { HandleScope scope; Handle<InterceptorInfo> interceptor(GetNamedInterceptor()); Handle<JSObject> receiver_handle(receiver); @@ -5663,17 +5860,14 @@ bool JSObject::GetPropertyWithInterceptorProper( VMState state(EXTERNAL); result = getter(v8::Utils::ToLocal(name_handle), info); } - if (Top::has_scheduled_exception()) { - return false; - } - if (!result.IsEmpty()) { + if (!Top::has_scheduled_exception() && !result.IsEmpty()) { *attributes = NONE; - *result_object = *v8::Utils::OpenHandle(*result); - return true; + return *v8::Utils::OpenHandle(*result); } } - return false; + *attributes = ABSENT; + return Heap::undefined_value(); } @@ -5687,12 +5881,13 @@ Object* JSObject::GetInterceptorPropertyWithLookupHint( Handle<JSObject> holder_handle(this); Handle<String> name_handle(name); - Object* result = NULL; - if (GetPropertyWithInterceptorProper(receiver, name, attributes, &result)) { + Object* result = GetPropertyWithInterceptorProper(receiver, + name, + attributes); + if (*attributes != ABSENT) { return result; - } else { - RETURN_IF_SCHEDULED_EXCEPTION(); } + RETURN_IF_SCHEDULED_EXCEPTION(); int property_index = lookup_hint->value(); if (property_index >= 0) { @@ -5737,12 +5932,11 @@ Object* JSObject::GetPropertyWithInterceptor( Handle<JSObject> holder_handle(this); Handle<String> name_handle(name); - Object* result = NULL; - if (GetPropertyWithInterceptorProper(receiver, name, attributes, &result)) { + Object* result = GetPropertyWithInterceptorProper(receiver, name, attributes); + if (*attributes != ABSENT) { return result; - } else { - RETURN_IF_SCHEDULED_EXCEPTION(); } + RETURN_IF_SCHEDULED_EXCEPTION(); result = holder_handle->GetPropertyPostInterceptor( *receiver_handle, @@ -5803,7 +5997,8 @@ bool JSObject::HasRealElementProperty(uint32_t index) { return (index < length) && !FixedArray::cast(elements())->get(index)->IsTheHole(); } - return element_dictionary()->FindNumberEntry(index) != -1; + return element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound; } @@ -5823,13 +6018,11 @@ bool JSObject::HasRealNamedCallbackProperty(String* key) { int JSObject::NumberOfLocalProperties(PropertyAttributes filter) { if (HasFastProperties()) { + DescriptorArray* descs = 
map()->instance_descriptors(); int result = 0; - for (DescriptorReader r(map()->instance_descriptors()); - !r.eos(); - r.advance()) { - PropertyDetails details = r.GetDetails(); - if (details.IsProperty() && - (details.attributes() & filter) == 0) { + for (int i = 0; i < descs->number_of_descriptors(); i++) { + PropertyDetails details = descs->GetDetails(i); + if (details.IsProperty() && (details.attributes() & filter) == 0) { result++; } } @@ -5962,16 +6155,11 @@ void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) { // purpose of this function is to provide reflection information for the object // mirrors. void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) { - ASSERT(storage->length() >= - NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE)) - - index); + ASSERT(storage->length() >= (NumberOfLocalProperties(NONE) - index)); if (HasFastProperties()) { - for (DescriptorReader r(map()->instance_descriptors()); - !r.eos(); - r.advance()) { - if (r.IsProperty()) { - storage->set(index++, r.GetKey()); - } + DescriptorArray* descs = map()->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->IsProperty(i)) storage->set(index++, descs->GetKey(i)); } ASSERT(storage->length() >= index); } else { @@ -6036,38 +6224,49 @@ int JSObject::GetEnumElementKeys(FixedArray* storage) { } -// The NumberKey uses carries the uint32_t as key. -// This avoids allocation in HasProperty. -class NumberKey : public HashTableKey { - public: - explicit NumberKey(uint32_t number) : number_(number) { } +bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) { + ASSERT(other->IsNumber()); + return key == static_cast<uint32_t>(other->Number()); +} - bool IsMatch(Object* number) { - return number_ == ToUint32(number); - } - uint32_t Hash() { return ComputeIntegerHash(number_); } +uint32_t NumberDictionaryShape::Hash(uint32_t key) { + return ComputeIntegerHash(key); +} - HashFunction GetHashFunction() { return NumberHash; } - Object* GetObject() { - return Heap::NumberFromDouble(number_); - } +uint32_t NumberDictionaryShape::HashForObject(uint32_t key, Object* other) { + ASSERT(other->IsNumber()); + return ComputeIntegerHash(static_cast<uint32_t>(other->Number())); +} - bool IsStringKey() { return false; } - private: - static uint32_t NumberHash(Object* obj) { - return ComputeIntegerHash(ToUint32(obj)); - } +Object* NumberDictionaryShape::AsObject(uint32_t key) { + return Heap::NumberFromUint32(key); +} - static uint32_t ToUint32(Object* obj) { - ASSERT(obj->IsNumber()); - return static_cast<uint32_t>(obj->Number()); - } - uint32_t number_; -}; +bool StringDictionaryShape::IsMatch(String* key, Object* other) { + // We know that all entries in a hash table had their hash keys created. + // Use that knowledge to have fast failure. + if (key->Hash() != String::cast(other)->Hash()) return false; + return key->Equals(String::cast(other)); +} + + +uint32_t StringDictionaryShape::Hash(String* key) { + return key->Hash(); +} + + +uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) { + return String::cast(other)->Hash(); +} + + +Object* StringDictionaryShape::AsObject(String* key) { + return key; +} // StringKey simply carries a string object as key. 
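(The ShouldConvertToFastElements heuristic above, which compares the dictionary capacity against length / (2 * NumberDictionary::kEntrySize), is what ultimately decides whether an array keeps a flat backing store or stays in dictionary mode. A hedged JavaScript sketch of the two regimes; the exact thresholds are internal and version-dependent:)

    var dense = [];
    for (var i = 0; i < 1000; i++) dense[i] = i;  // flat FixedArray backing

    var sparse = [];
    sparse[0] = 0;
    sparse[1000000] = 1;  // far beyond the used range: the backing store
                          // is normalized to a NumberDictionary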
@@ -6075,12 +6274,12 @@ class StringKey : public HashTableKey { public: explicit StringKey(String* string) : string_(string), - hash_(StringHash(string)) { } + hash_(HashForObject(string)) { } bool IsMatch(Object* string) { // We know that all entries in a hash table had their hash keys created. // Use that knowledge to have fast failure. - if (hash_ != StringHash(string)) { + if (hash_ != HashForObject(string)) { return false; } return string_->Equals(String::cast(string)); @@ -6088,15 +6287,9 @@ class StringKey : public HashTableKey { uint32_t Hash() { return hash_; } - HashFunction GetHashFunction() { return StringHash; } + uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); } - Object* GetObject() { return string_; } - - static uint32_t StringHash(Object* obj) { - return String::cast(obj)->Hash(); - } - - bool IsStringKey() { return true; } + Object* AsObject() { return string_; } String* string_; uint32_t hash_; @@ -6118,10 +6311,6 @@ class StringSharedKey : public HashTableKey { return source->Equals(source_); } - typedef uint32_t (*HashFunction)(Object* obj); - - virtual HashFunction GetHashFunction() { return StringSharedHash; } - static uint32_t StringSharedHashHelper(String* source, SharedFunctionInfo* shared) { uint32_t hash = source->Hash(); @@ -6138,18 +6327,18 @@ class StringSharedKey : public HashTableKey { return hash; } - static uint32_t StringSharedHash(Object* obj) { + uint32_t Hash() { + return StringSharedHashHelper(source_, shared_); + } + + uint32_t HashForObject(Object* obj) { FixedArray* pair = FixedArray::cast(obj); SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0)); String* source = String::cast(pair->get(1)); return StringSharedHashHelper(source, shared); } - virtual uint32_t Hash() { - return StringSharedHashHelper(source_, shared_); - } - - virtual Object* GetObject() { + Object* AsObject() { Object* obj = Heap::AllocateFixedArray(2); if (obj->IsFailure()) return obj; FixedArray* pair = FixedArray::cast(obj); @@ -6158,8 +6347,6 @@ class StringSharedKey : public HashTableKey { return pair; } - virtual bool IsStringKey() { return false; } - private: String* source_; SharedFunctionInfo* shared_; @@ -6181,16 +6368,14 @@ class RegExpKey : public HashTableKey { uint32_t Hash() { return RegExpHash(string_, flags_); } - HashFunction GetHashFunction() { return RegExpObjectHash; } - - Object* GetObject() { + Object* AsObject() { // Plain hash maps, which is where regexp keys are used, don't // use this function. 
UNREACHABLE(); return NULL; } - static uint32_t RegExpObjectHash(Object* obj) { + uint32_t HashForObject(Object* obj) { FixedArray* val = FixedArray::cast(obj); return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)), Smi::cast(val->get(JSRegExp::kFlagsIndex))); @@ -6200,8 +6385,6 @@ class RegExpKey : public HashTableKey { return string->Hash() + flags->value(); } - bool IsStringKey() { return false; } - String* string_; Smi* flags_; }; @@ -6216,10 +6399,6 @@ class Utf8SymbolKey : public HashTableKey { return String::cast(string)->IsEqualTo(string_); } - HashFunction GetHashFunction() { - return StringHash; - } - uint32_t Hash() { if (length_field_ != 0) return length_field_ >> String::kHashShift; unibrow::Utf8InputBuffer<> buffer(string_.start(), @@ -6231,17 +6410,15 @@ class Utf8SymbolKey : public HashTableKey { return result; } - Object* GetObject() { - if (length_field_ == 0) Hash(); - return Heap::AllocateSymbol(string_, chars_, length_field_); + uint32_t HashForObject(Object* other) { + return String::cast(other)->Hash(); } - static uint32_t StringHash(Object* obj) { - return String::cast(obj)->Hash(); + Object* AsObject() { + if (length_field_ == 0) Hash(); + return Heap::AllocateSymbol(string_, chars_, length_field_); } - bool IsStringKey() { return true; } - Vector<const char> string_; uint32_t length_field_; int chars_; // Caches the number of characters when computing the hash code. @@ -6253,17 +6430,17 @@ class SymbolKey : public HashTableKey { public: explicit SymbolKey(String* string) : string_(string) { } - HashFunction GetHashFunction() { - return StringHash; - } - bool IsMatch(Object* string) { return String::cast(string)->Equals(string_); } uint32_t Hash() { return string_->Hash(); } - Object* GetObject() { + uint32_t HashForObject(Object* other) { + return String::cast(other)->Hash(); + } + + Object* AsObject() { // If the string is a cons string, attempt to flatten it so that // symbols will most often be flat strings. if (StringShape(string_).IsCons()) { @@ -6291,28 +6468,27 @@ class SymbolKey : public HashTableKey { return String::cast(obj)->Hash(); } - bool IsStringKey() { return true; } - String* string_; }; -template<int prefix_size, int element_size> -void HashTable<prefix_size, element_size>::IteratePrefix(ObjectVisitor* v) { +template<typename Shape, typename Key> +void HashTable<Shape, Key>::IteratePrefix(ObjectVisitor* v) { IteratePointers(v, 0, kElementsStartOffset); } -template<int prefix_size, int element_size> -void HashTable<prefix_size, element_size>::IterateElements(ObjectVisitor* v) { +template<typename Shape, typename Key> +void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) { IteratePointers(v, kElementsStartOffset, kHeaderSize + length() * kPointerSize); } -template<int prefix_size, int element_size> -Object* HashTable<prefix_size, element_size>::Allocate(int at_least_space_for) { +template<typename Shape, typename Key> +Object* HashTable<Shape, Key>::Allocate( + int at_least_space_for) { int capacity = RoundUpToPowerOf2(at_least_space_for); if (capacity < 4) capacity = 4; // Guarantee min capacity. Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity)); @@ -6324,37 +6500,37 @@ Object* HashTable<prefix_size, element_size>::Allocate(int at_least_space_for) { } + // Find entry for key otherwise return -1. 
-template <int prefix_size, int element_size> -int HashTable<prefix_size, element_size>::FindEntry(HashTableKey* key) { +template<typename Shape, typename Key> +int HashTable<Shape, Key>::FindEntry(Key key) { uint32_t nof = NumberOfElements(); - if (nof == 0) return -1; // Bail out if empty. + if (nof == 0) return kNotFound; // Bail out if empty. uint32_t capacity = Capacity(); - uint32_t hash = key->Hash(); + uint32_t hash = Shape::Hash(key); uint32_t entry = GetProbe(hash, 0, capacity); Object* element = KeyAt(entry); uint32_t passed_elements = 0; if (!element->IsNull()) { - if (!element->IsUndefined() && key->IsMatch(element)) return entry; - if (++passed_elements == nof) return -1; + if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry; + if (++passed_elements == nof) return kNotFound; } for (uint32_t i = 1; !element->IsUndefined(); i++) { entry = GetProbe(hash, i, capacity); element = KeyAt(entry); if (!element->IsNull()) { - if (!element->IsUndefined() && key->IsMatch(element)) return entry; - if (++passed_elements == nof) return kNotFound; } } - return -1; + return kNotFound; } -template<int prefix_size, int element_size> -Object* HashTable<prefix_size, element_size>::EnsureCapacity( - int n, HashTableKey* key) { +template<typename Shape, typename Key> +Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) { int capacity = Capacity(); int nof = NumberOfElements() + n; // Make sure 50% is free @@ -6366,18 +6542,20 @@ Object* HashTable<prefix_size, element_size>::EnsureCapacity( WriteBarrierMode mode = table->GetWriteBarrierMode(); // Copy prefix to new array. - for (int i = kPrefixStartIndex; i < kPrefixStartIndex + prefix_size; i++) { + for (int i = kPrefixStartIndex; + i < kPrefixStartIndex + Shape::kPrefixSize; + i++) { table->set(i, get(i), mode); } // Rehash the elements. - uint32_t (*Hash)(Object* key) = key->GetHashFunction(); for (int i = 0; i < capacity; i++) { uint32_t from_index = EntryToIndex(i); - Object* key = get(from_index); - if (IsKey(key)) { + Object* k = get(from_index); + if (IsKey(k)) { + uint32_t hash = Shape::HashForObject(key, k); uint32_t insertion_index = - EntryToIndex(table->FindInsertionEntry(key, Hash(key))); - for (int j = 0; j < element_size; j++) { + EntryToIndex(table->FindInsertionEntry(hash)); + for (int j = 0; j < Shape::kEntrySize; j++) { table->set(insertion_index + j, get(from_index + j), mode); } } @@ -6387,10 +6565,8 @@ Object* HashTable<prefix_size, element_size>::EnsureCapacity( } -template<int prefix_size, int element_size> -uint32_t HashTable<prefix_size, element_size>::FindInsertionEntry( - Object* key, - uint32_t hash) { +template<typename Shape, typename Key> +uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) { uint32_t capacity = Capacity(); uint32_t entry = GetProbe(hash, 0, capacity); Object* element = KeyAt(entry); @@ -6403,18 +6579,80 @@ uint32_t HashTable<prefix_size, element_size>::FindInsertionEntry( return entry; } +// Force instantiation of template instance classes. +// Please note this list is compiler dependent.
+ +template class HashTable<SymbolTableShape, HashTableKey*>; + +template class HashTable<CompilationCacheShape, HashTableKey*>; + +template class HashTable<MapCacheShape, HashTableKey*>; + +template class Dictionary<StringDictionaryShape, String*>; + +template class Dictionary<NumberDictionaryShape, uint32_t>; + +template Object* Dictionary<NumberDictionaryShape, uint32_t>::Allocate( + int); + +template Object* Dictionary<StringDictionaryShape, String*>::Allocate( + int); + +template Object* Dictionary<NumberDictionaryShape, uint32_t>::AtPut( + uint32_t, Object*); + +template Object* Dictionary<NumberDictionaryShape, uint32_t>::SlowReverseLookup( + Object*); + +template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup( + Object*); + +template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo( + FixedArray*, PropertyAttributes); + +template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty( + int, JSObject::DeleteMode); + +template Object* Dictionary<NumberDictionaryShape, uint32_t>::DeleteProperty( + int, JSObject::DeleteMode); + +template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo( + FixedArray*); + +template int +Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes( + PropertyAttributes); + +template Object* Dictionary<StringDictionaryShape, String*>::Add( + String*, Object*, PropertyDetails); + +template Object* +Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices(); -// Force instantiation of SymbolTable's base class -template class HashTable<0, 1>; +template int +Dictionary<NumberDictionaryShape, uint32_t>::NumberOfElementsFilterAttributes( + PropertyAttributes); +template Object* Dictionary<NumberDictionaryShape, uint32_t>::Add( + uint32_t, Object*, PropertyDetails); -// Force instantiation of Dictionary's base class -template class HashTable<2, 3>; +template Object* Dictionary<NumberDictionaryShape, uint32_t>::EnsureCapacity( + int, uint32_t); +template Object* Dictionary<StringDictionaryShape, String*>::EnsureCapacity( + int, String*); -// Force instantiation of EvalCache's base class -template class HashTable<0, 2>; +template Object* Dictionary<NumberDictionaryShape, uint32_t>::AddEntry( + uint32_t, Object*, PropertyDetails, uint32_t); +template Object* Dictionary<StringDictionaryShape, String*>::AddEntry( + String*, Object*, PropertyDetails, uint32_t); + +template +int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements(); + +template +int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements(); // Collates undefined and unexisting elements below limit from position // zero of the elements. The object stays in Dictionary mode. @@ -6423,7 +6661,7 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) { // Must stay in dictionary mode, either because of requires_slow_elements, // or because we are not going to sort (and therefore compact) all of the // elements. - Dictionary* dict = element_dictionary(); + NumberDictionary* dict = element_dictionary(); HeapNumber* result_double = NULL; if (limit > static_cast<uint32_t>(Smi::kMaxValue)) { // Allocate space for result before we start mutating the object. 
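The explicit instantiation list a few hunks above exists because these templates are defined in objects.cc rather than in a header: other translation units see only declarations, so the needed specializations must be emitted here (and, as the comment warns, exactly which ones are needed can vary by compiler). A hedged two-file sketch of the mechanism, with hypothetical file and class names:

// --- table.h (hypothetical) ---
template <typename T>
class Table {
 public:
  int Count();  // Declared here; the definition lives in table.cc.
};

// --- table.cc (hypothetical) ---
// #include "table.h"
template <typename T>
int Table<T>::Count() { return 0; }

// Without the next line, no object code for Table<int>::Count() is ever
// emitted, and any other .cc file calling it fails to link with an
// undefined-symbol error.
template class Table<int>;  // Explicit instantiation.

// --- user.cc (hypothetical) ---
// #include "table.h"
int UseTable() {
  Table<int> t;
  return t.Count();  // Resolved by the instantiation in table.cc.
}

The per-member instantiations in the list (for Add, AtPut, SlowReverseLookup, and so on) work the same way, just at function granularity rather than whole-class granularity.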
@@ -6433,9 +6671,9 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) { } int capacity = dict->Capacity(); - Object* obj = Dictionary::Allocate(dict->Capacity()); + Object* obj = NumberDictionary::Allocate(dict->Capacity()); if (obj->IsFailure()) return obj; - Dictionary* new_dict = Dictionary::cast(obj); + NumberDictionary* new_dict = NumberDictionary::cast(obj); AssertNoAllocation no_alloc; @@ -6496,7 +6734,7 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) { if (!HasFastElements()) { // Convert to fast elements containing only the existing properties. // Ordering is irrelevant, since we are going to sort anyway. - Dictionary* dict = element_dictionary(); + NumberDictionary* dict = element_dictionary(); if (IsJSArray() || dict->requires_slow_elements() || dict->max_number_key() >= limit) { return PrepareSlowElementsForSort(limit); @@ -6588,6 +6826,34 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) { } +Object* GlobalObject::GetPropertyCell(LookupResult* result) { + ASSERT(!HasFastProperties()); + Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry()); + ASSERT(value->IsJSGlobalPropertyCell()); + return value; +} + + +Object* GlobalObject::EnsurePropertyCell(String* name) { + ASSERT(!HasFastProperties()); + int entry = property_dictionary()->FindEntry(name); + if (entry == StringDictionary::kNotFound) { + Object* cell = Heap::AllocateJSGlobalPropertyCell(Heap::the_hole_value()); + if (cell->IsFailure()) return cell; + PropertyDetails details(NONE, NORMAL); + details = details.AsDeleted(); + Object* dictionary = property_dictionary()->Add(name, cell, details); + if (dictionary->IsFailure()) return dictionary; + set_properties(StringDictionary::cast(dictionary)); + return cell; + } else { + Object* value = property_dictionary()->ValueAt(entry); + ASSERT(value->IsJSGlobalPropertyCell()); + return value; + } +} + + Object* SymbolTable::LookupString(String* string, Object** s) { SymbolKey key(string); return LookupKey(&key, s); @@ -6597,7 +6863,7 @@ Object* SymbolTable::LookupString(String* string, Object** s) { bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) { SymbolKey key(string); int entry = FindEntry(&key); - if (entry == -1) { + if (entry == kNotFound) { return false; } else { String* result = String::cast(KeyAt(entry)); @@ -6618,7 +6884,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) { int entry = FindEntry(key); // Symbol already in table. - if (entry != -1) { + if (entry != kNotFound) { *s = KeyAt(entry); return this; } @@ -6628,7 +6894,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) { if (obj->IsFailure()) return obj; // Create symbol object. - Object* symbol = key->GetObject(); + Object* symbol = key->AsObject(); if (symbol->IsFailure()) return symbol; // If the symbol table grew as part of EnsureCapacity, obj is not @@ -6637,7 +6903,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) { SymbolTable* table = reinterpret_cast<SymbolTable*>(obj); // Add the new symbol and return it along with the symbol table. 
- entry = table->FindInsertionEntry(symbol, key->Hash()); + entry = table->FindInsertionEntry(key->Hash()); table->set(EntryToIndex(entry), symbol); table->ElementAdded(); *s = symbol; @@ -6648,7 +6914,7 @@ Object* SymbolTable::LookupKey(HashTableKey* key, Object** s) { Object* CompilationCacheTable::Lookup(String* src) { StringKey key(src); int entry = FindEntry(&key); - if (entry == -1) return Heap::undefined_value(); + if (entry == kNotFound) return Heap::undefined_value(); return get(EntryToIndex(entry) + 1); } @@ -6656,7 +6922,7 @@ Object* CompilationCacheTable::Lookup(String* src) { Object* CompilationCacheTable::LookupEval(String* src, Context* context) { StringSharedKey key(src, context->closure()->shared()); int entry = FindEntry(&key); - if (entry == -1) return Heap::undefined_value(); + if (entry == kNotFound) return Heap::undefined_value(); return get(EntryToIndex(entry) + 1); } @@ -6665,7 +6931,7 @@ Object* CompilationCacheTable::LookupRegExp(String* src, JSRegExp::Flags flags) { RegExpKey key(src, flags); int entry = FindEntry(&key); - if (entry == -1) return Heap::undefined_value(); + if (entry == kNotFound) return Heap::undefined_value(); return get(EntryToIndex(entry) + 1); } @@ -6677,7 +6943,7 @@ Object* CompilationCacheTable::Put(String* src, Object* value) { CompilationCacheTable* cache = reinterpret_cast<CompilationCacheTable*>(obj); - int entry = cache->FindInsertionEntry(src, key.Hash()); + int entry = cache->FindInsertionEntry(key.Hash()); cache->set(EntryToIndex(entry), src); cache->set(EntryToIndex(entry) + 1, value); cache->ElementAdded(); @@ -6694,9 +6960,9 @@ Object* CompilationCacheTable::PutEval(String* src, CompilationCacheTable* cache = reinterpret_cast<CompilationCacheTable*>(obj); - int entry = cache->FindInsertionEntry(src, key.Hash()); + int entry = cache->FindInsertionEntry(key.Hash()); - Object* k = key.GetObject(); + Object* k = key.AsObject(); if (k->IsFailure()) return k; cache->set(EntryToIndex(entry), k); @@ -6715,7 +6981,7 @@ Object* CompilationCacheTable::PutRegExp(String* src, CompilationCacheTable* cache = reinterpret_cast<CompilationCacheTable*>(obj); - int entry = cache->FindInsertionEntry(value, key.Hash()); + int entry = cache->FindInsertionEntry(key.Hash()); cache->set(EntryToIndex(entry), value); cache->set(EntryToIndex(entry) + 1, value); cache->ElementAdded(); @@ -6738,13 +7004,9 @@ class SymbolsKey : public HashTableKey { return true; } - uint32_t Hash() { return SymbolsHash(symbols_); } + uint32_t Hash() { return HashForObject(symbols_); } - HashFunction GetHashFunction() { return SymbolsHash; } - - Object* GetObject() { return symbols_; } - - static uint32_t SymbolsHash(Object* obj) { + uint32_t HashForObject(Object* obj) { FixedArray* symbols = FixedArray::cast(obj); int len = symbols->length(); uint32_t hash = 0; @@ -6754,7 +7016,7 @@ class SymbolsKey : public HashTableKey { return hash; } - bool IsStringKey() { return false; } + Object* AsObject() { return symbols_; } private: FixedArray* symbols_; @@ -6764,7 +7026,7 @@ class SymbolsKey : public HashTableKey { Object* MapCache::Lookup(FixedArray* array) { SymbolsKey key(array); int entry = FindEntry(&key); - if (entry == -1) return Heap::undefined_value(); + if (entry == kNotFound) return Heap::undefined_value(); return get(EntryToIndex(entry) + 1); } @@ -6775,7 +7037,7 @@ Object* MapCache::Put(FixedArray* array, Map* value) { if (obj->IsFailure()) return obj; MapCache* cache = reinterpret_cast<MapCache*>(obj); - int entry = cache->FindInsertionEntry(array, key.Hash()); + int 
entry = cache->FindInsertionEntry(key.Hash()); cache->set(EntryToIndex(entry), array); cache->set(EntryToIndex(entry) + 1, value); cache->ElementAdded(); @@ -6783,19 +7045,21 @@ Object* MapCache::Put(FixedArray* array, Map* value) { } -Object* Dictionary::Allocate(int at_least_space_for) { - Object* obj = DictionaryBase::Allocate(at_least_space_for); +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::Allocate(int at_least_space_for) { + Object* obj = HashTable<Shape, Key>::Allocate(at_least_space_for); // Initialize the next enumeration index. if (!obj->IsFailure()) { - Dictionary::cast(obj)-> + Dictionary<Shape, Key>::cast(obj)-> SetNextEnumerationIndex(PropertyDetails::kInitialIndex); } return obj; } -Object* Dictionary::GenerateNewEnumerationIndices() { - int length = NumberOfElements(); +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::GenerateNewEnumerationIndices() { + int length = HashTable<Shape, Key>::NumberOfElements(); // Allocate and initialize iteration order array. Object* obj = Heap::AllocateFixedArray(length); @@ -6811,10 +7075,10 @@ Object* Dictionary::GenerateNewEnumerationIndices() { FixedArray* enumeration_order = FixedArray::cast(obj); // Fill the enumeration order array with property details. - int capacity = Capacity(); + int capacity = HashTable<Shape, Key>::Capacity(); int pos = 0; for (int i = 0; i < capacity; i++) { - if (IsKey(KeyAt(i))) { + if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) { enumeration_order->set(pos++, Smi::FromInt(DetailsAt(i).index()), SKIP_WRITE_BARRIER); @@ -6834,10 +7098,10 @@ Object* Dictionary::GenerateNewEnumerationIndices() { } // Update the dictionary with new indices. - capacity = Capacity(); + capacity = HashTable<Shape, Key>::Capacity(); pos = 0; for (int i = 0; i < capacity; i++) { - if (IsKey(KeyAt(i))) { + if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) { int enum_index = Smi::cast(enumeration_order->get(pos++))->value(); PropertyDetails details = DetailsAt(i); PropertyDetails new_details = @@ -6851,20 +7115,20 @@ Object* Dictionary::GenerateNewEnumerationIndices() { return this; } - -Object* Dictionary::EnsureCapacity(int n, HashTableKey* key) { +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::EnsureCapacity(int n, Key key) { // Check whether there are enough enumeration indices to add n elements. - if (key->IsStringKey() && + if (Shape::kIsEnumerable && !PropertyDetails::IsValidIndex(NextEnumerationIndex() + n)) { // If not, we generate new indices for the properties. Object* result = GenerateNewEnumerationIndices(); if (result->IsFailure()) return result; } - return DictionaryBase::EnsureCapacity(n, key); + return HashTable<Shape, Key>::EnsureCapacity(n, key); } -void Dictionary::RemoveNumberEntries(uint32_t from, uint32_t to) { +void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) { // Do nothing if the interval [from, to) is empty. if (from >= to) return; @@ -6887,35 +7151,26 @@ void Dictionary::RemoveNumberEntries(uint32_t from, uint32_t to) { } -Object* Dictionary::DeleteProperty(int entry, JSObject::DeleteMode mode) { +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::DeleteProperty(int entry, + JSObject::DeleteMode mode) { PropertyDetails details = DetailsAt(entry); // Ignore attributes if forcing a deletion. 
if (details.IsDontDelete() && mode == JSObject::NORMAL_DELETION) { return Heap::false_value(); } SetEntry(entry, Heap::null_value(), Heap::null_value(), Smi::FromInt(0)); - ElementRemoved(); + HashTable<Shape, Key>::ElementRemoved(); return Heap::true_value(); } -int Dictionary::FindStringEntry(String* key) { - StringKey k(key); - return FindEntry(&k); -} - - -int Dictionary::FindNumberEntry(uint32_t index) { - NumberKey k(index); - return FindEntry(&k); -} - - -Object* Dictionary::AtPut(HashTableKey* key, Object* value) { +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::AtPut(Key key, Object* value) { int entry = FindEntry(key); // If the entry is present set the value. - if (entry != -1) { + if (entry != Dictionary<Shape, Key>::kNotFound) { ValueAtPut(entry, value); return this; } @@ -6923,48 +7178,57 @@ // Check whether the dictionary should be extended. Object* obj = EnsureCapacity(1, key); if (obj->IsFailure()) return obj; - Object* k = key->GetObject(); + + Object* k = Shape::AsObject(key); if (k->IsFailure()) return k; PropertyDetails details = PropertyDetails(NONE, NORMAL); - Dictionary::cast(obj)->AddEntry(k, value, details, key->Hash()); - return obj; + return Dictionary<Shape, Key>::cast(obj)-> + AddEntry(key, value, details, Shape::Hash(key)); } -Object* Dictionary::Add(HashTableKey* key, Object* value, - PropertyDetails details) { +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::Add(Key key, + Object* value, + PropertyDetails details) { + // Validate key is absent. + SLOW_ASSERT((FindEntry(key) == Dictionary<Shape, Key>::kNotFound)); // Check whether the dictionary should be extended. Object* obj = EnsureCapacity(1, key); if (obj->IsFailure()) return obj; - // Compute the key object. - Object* k = key->GetObject(); - if (k->IsFailure()) return k; - Dictionary::cast(obj)->AddEntry(k, value, details, key->Hash()); - return obj; + return Dictionary<Shape, Key>::cast(obj)-> + AddEntry(key, value, details, Shape::Hash(key)); } // Add a key, value pair to the dictionary. -void Dictionary::AddEntry(Object* key, - Object* value, - PropertyDetails details, - uint32_t hash) { - uint32_t entry = FindInsertionEntry(key, hash); +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::AddEntry(Key key, + Object* value, + PropertyDetails details, + uint32_t hash) { + // Compute the key object. + Object* k = Shape::AsObject(key); + if (k->IsFailure()) return k; + + uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash); // Insert element at empty or deleted entry - if (details.index() == 0 && key->IsString()) { + if (!details.IsDeleted() && details.index() == 0 && Shape::kIsEnumerable) { // Assign an enumeration index to the property and update // SetNextEnumerationIndex. int index = NextEnumerationIndex(); details = PropertyDetails(details.attributes(), details.type(), index); SetNextEnumerationIndex(index + 1); } - SetEntry(entry, key, value, details); - ASSERT(KeyAt(entry)->IsNumber() || KeyAt(entry)->IsString()); - ElementAdded(); + SetEntry(entry, k, value, details); + ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber() + || Dictionary<Shape, Key>::KeyAt(entry)->IsString())); + HashTable<Shape, Key>::ElementAdded(); + return this; } -void Dictionary::UpdateMaxNumberKey(uint32_t key) { +void NumberDictionary::UpdateMaxNumberKey(uint32_t key) { // If the dictionary requires slow elements an element has already // been added at a high index.
if (requires_slow_elements()) return; @@ -6977,82 +7241,54 @@ void Dictionary::UpdateMaxNumberKey(uint32_t key) { // Update max key value. Object* max_index_object = get(kMaxNumberKeyIndex); if (!max_index_object->IsSmi() || max_number_key() < key) { - set(kMaxNumberKeyIndex, - Smi::FromInt(key << kRequiresSlowElementsTagSize), - SKIP_WRITE_BARRIER); + FixedArray::set(kMaxNumberKeyIndex, + Smi::FromInt(key << kRequiresSlowElementsTagSize), + SKIP_WRITE_BARRIER); } } -Object* Dictionary::AddStringEntry(String* key, - Object* value, - PropertyDetails details) { - StringKey k(key); - SLOW_ASSERT(FindEntry(&k) == -1); - return Add(&k, value, details); -} - - -Object* Dictionary::AddNumberEntry(uint32_t key, - Object* value, - PropertyDetails details) { - NumberKey k(key); +Object* NumberDictionary::AddNumberEntry(uint32_t key, + Object* value, + PropertyDetails details) { UpdateMaxNumberKey(key); - SLOW_ASSERT(FindEntry(&k) == -1); - return Add(&k, value, details); + SLOW_ASSERT(FindEntry(key) == kNotFound); + return Add(key, value, details); } -Object* Dictionary::AtStringPut(String* key, Object* value) { - StringKey k(key); - return AtPut(&k, value); -} - - -Object* Dictionary::AtNumberPut(uint32_t key, Object* value) { - NumberKey k(key); +Object* NumberDictionary::AtNumberPut(uint32_t key, Object* value) { UpdateMaxNumberKey(key); - return AtPut(&k, value); + return AtPut(key, value); } -Object* Dictionary::SetOrAddStringEntry(String* key, - Object* value, - PropertyDetails details) { - StringKey k(key); - int entry = FindEntry(&k); - if (entry == -1) return AddStringEntry(key, value, details); +Object* NumberDictionary::Set(uint32_t key, + Object* value, + PropertyDetails details) { + int entry = FindEntry(key); + if (entry == kNotFound) return AddNumberEntry(key, value, details); // Preserve enumeration index. details = PropertyDetails(details.attributes(), details.type(), DetailsAt(entry).index()); - SetEntry(entry, key, value, details); + SetEntry(entry, NumberDictionaryShape::AsObject(key), value, details); return this; } -Object* Dictionary::SetOrAddNumberEntry(uint32_t key, - Object* value, - PropertyDetails details) { - NumberKey k(key); - int entry = FindEntry(&k); - if (entry == -1) return AddNumberEntry(key, value, details); - // Preserve enumeration index. 
- details = PropertyDetails(details.attributes(), - details.type(), - DetailsAt(entry).index()); - SetEntry(entry, k.GetObject(), value, details); - return this; -} - -int Dictionary::NumberOfElementsFilterAttributes(PropertyAttributes filter) { - int capacity = Capacity(); +template<typename Shape, typename Key> +int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes( + PropertyAttributes filter) { + int capacity = HashTable<Shape, Key>::Capacity(); int result = 0; for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k)) { - PropertyAttributes attr = DetailsAt(i).attributes(); + Object* k = HashTable<Shape, Key>::KeyAt(i); + if (HashTable<Shape, Key>::IsKey(k)) { + PropertyDetails details = DetailsAt(i); + if (details.IsDeleted()) continue; + PropertyAttributes attr = details.attributes(); if ((attr & filter) == 0) result++; } } @@ -7060,20 +7296,25 @@ int Dictionary::NumberOfElementsFilterAttributes(PropertyAttributes filter) { } -int Dictionary::NumberOfEnumElements() { +template<typename Shape, typename Key> +int Dictionary<Shape, Key>::NumberOfEnumElements() { return NumberOfElementsFilterAttributes( static_cast<PropertyAttributes>(DONT_ENUM)); } -void Dictionary::CopyKeysTo(FixedArray* storage, PropertyAttributes filter) { +template<typename Shape, typename Key> +void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage, + PropertyAttributes filter) { ASSERT(storage->length() >= NumberOfEnumElements()); - int capacity = Capacity(); + int capacity = HashTable<Shape, Key>::Capacity(); int index = 0; for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k)) { - PropertyAttributes attr = DetailsAt(i).attributes(); + Object* k = HashTable<Shape, Key>::KeyAt(i); + if (HashTable<Shape, Key>::IsKey(k)) { + PropertyDetails details = DetailsAt(i); + if (details.IsDeleted()) continue; + PropertyAttributes attr = details.attributes(); if ((attr & filter) == 0) storage->set(index++, k); } } @@ -7082,7 +7323,8 @@ void Dictionary::CopyKeysTo(FixedArray* storage, PropertyAttributes filter) { } -void Dictionary::CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array) { +void StringDictionary::CopyEnumKeysTo(FixedArray* storage, + FixedArray* sort_array) { ASSERT(storage->length() >= NumberOfEnumElements()); int capacity = Capacity(); int index = 0; @@ -7090,13 +7332,12 @@ void Dictionary::CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array) { Object* k = KeyAt(i); if (IsKey(k)) { PropertyDetails details = DetailsAt(i); - if (!details.IsDontEnum()) { - storage->set(index, k); - sort_array->set(index, - Smi::FromInt(details.index()), - SKIP_WRITE_BARRIER); - index++; - } + if (details.IsDeleted() || details.IsDontEnum()) continue; + storage->set(index, k); + sort_array->set(index, + Smi::FromInt(details.index()), + SKIP_WRITE_BARRIER); + index++; } } storage->SortPairs(sort_array, sort_array->length()); @@ -7104,14 +7345,17 @@ void Dictionary::CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array) { } -void Dictionary::CopyKeysTo(FixedArray* storage) { +template<typename Shape, typename Key> +void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) { ASSERT(storage->length() >= NumberOfElementsFilterAttributes( static_cast<PropertyAttributes>(NONE))); - int capacity = Capacity(); + int capacity = HashTable<Shape, Key>::Capacity(); int index = 0; for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k)) { + Object* k = HashTable<Shape, Key>::KeyAt(i); + if (HashTable<Shape, Key>::IsKey(k)) { + PropertyDetails details = 
DetailsAt(i); + if (details.IsDeleted()) continue; storage->set(index++, k); } } @@ -7120,20 +7364,25 @@ void Dictionary::CopyKeysTo(FixedArray* storage) { // Backwards lookup (slow). -Object* Dictionary::SlowReverseLookup(Object* value) { - int capacity = Capacity(); +template<typename Shape, typename Key> +Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) { + int capacity = HashTable<Shape, Key>::Capacity(); for (int i = 0; i < capacity; i++) { - Object* k = KeyAt(i); - if (IsKey(k) && ValueAt(i) == value) { - return k; + Object* k = HashTable<Shape, Key>::KeyAt(i); + if (Dictionary<Shape, Key>::IsKey(k)) { + Object* e = ValueAt(i); + if (e->IsJSGlobalPropertyCell()) { + e = JSGlobalPropertyCell::cast(e)->value(); + } + if (e == value) return k; } } return Heap::undefined_value(); } -Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj, - int unused_property_fields) { +Object* StringDictionary::TransformPropertiesToFastFor( + JSObject* obj, int unused_property_fields) { // Make sure we preserve dictionary representation if there are too many // descriptors. if (NumberOfElements() > DescriptorArray::kMaxNumberOfDescriptors) return obj; @@ -7141,7 +7390,8 @@ Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj, // Figure out if it is necessary to generate new enumeration indices. int max_enumeration_index = NextEnumerationIndex() + - (DescriptorArray::kMaxNumberOfDescriptors - NumberOfElements()); + (DescriptorArray::kMaxNumberOfDescriptors - + NumberOfElements()); if (!PropertyDetails::IsValidIndex(max_enumeration_index)) { Object* result = GenerateNewEnumerationIndices(); if (result->IsFailure()) return result; @@ -7178,7 +7428,7 @@ Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj, if (fields->IsFailure()) return fields; // Fill in the instance descriptor and the fields. - DescriptorWriter w(descriptors); + int next_descriptor = 0; int current_offset = 0; for (int i = 0; i < capacity; i++) { Object* k = KeyAt(i); @@ -7195,7 +7445,7 @@ Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj, JSFunction::cast(value), details.attributes(), details.index()); - w.Write(&d); + descriptors->Set(next_descriptor++, &d); } else if (type == NORMAL) { if (current_offset < inobject_props) { obj->InObjectPropertyAtPut(current_offset, @@ -7209,13 +7459,13 @@ Object* Dictionary::TransformPropertiesToFastFor(JSObject* obj, current_offset++, details.attributes(), details.index()); - w.Write(&d); + descriptors->Set(next_descriptor++, &d); } else if (type == CALLBACKS) { CallbacksDescriptor d(String::cast(key), value, details.attributes(), details.index()); - w.Write(&d); + descriptors->Set(next_descriptor++, &d); } else { UNREACHABLE(); } @@ -7490,4 +7740,5 @@ int BreakPointInfo::GetBreakPointCount() { } #endif + } } // namespace v8::internal diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index fd9af38c3..5c76e4a51 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -153,20 +153,23 @@ class PropertyDetails BASE_EMBEDDED { int index() { return IndexField::decode(value_); } + inline PropertyDetails AsDeleted(); + static bool IsValidIndex(int index) { return IndexField::is_valid(index); } bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; } bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; } bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; } + bool IsDeleted() { return DeletedField::decode(value_) != 0;} // Bit fields in value_ (type, shift, size). 
Must be public so the // constants can be embedded in generated code. class TypeField: public BitField<PropertyType, 0, 3> {}; class AttributesField: public BitField<PropertyAttributes, 3, 3> {}; - class IndexField: public BitField<uint32_t, 6, 32-6> {}; + class DeletedField: public BitField<uint32_t, 6, 1> {}; + class IndexField: public BitField<uint32_t, 7, 31-7> {}; static const int kInitialIndex = 1; - private: uint32_t value_; }; @@ -263,6 +266,7 @@ enum PropertyNormalizationMode { V(HEAP_NUMBER_TYPE) \ V(FIXED_ARRAY_TYPE) \ V(CODE_TYPE) \ + V(JS_GLOBAL_PROPERTY_CELL_TYPE) \ V(ODDBALL_TYPE) \ V(PROXY_TYPE) \ V(BYTE_ARRAY_TYPE) \ @@ -293,97 +297,202 @@ enum PropertyNormalizationMode { V(JS_FUNCTION_TYPE) \ + // Since string types are not consecutive, this macro is used to // iterate over them. #define STRING_TYPE_LIST(V) \ - V(SHORT_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, short_symbol) \ - V(MEDIUM_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, medium_symbol) \ - V(LONG_SYMBOL_TYPE, SeqTwoByteString::kAlignedSize, long_symbol) \ - V(SHORT_ASCII_SYMBOL_TYPE, SeqAsciiString::kAlignedSize, short_ascii_symbol) \ + V(SHORT_SYMBOL_TYPE, \ + SeqTwoByteString::kAlignedSize, \ + short_symbol, \ + ShortSymbol) \ + V(MEDIUM_SYMBOL_TYPE, \ + SeqTwoByteString::kAlignedSize, \ + medium_symbol, \ + MediumSymbol) \ + V(LONG_SYMBOL_TYPE, \ + SeqTwoByteString::kAlignedSize, \ + long_symbol, \ + LongSymbol) \ + V(SHORT_ASCII_SYMBOL_TYPE, \ + SeqAsciiString::kAlignedSize, \ + short_ascii_symbol, \ + ShortAsciiSymbol) \ V(MEDIUM_ASCII_SYMBOL_TYPE, \ SeqAsciiString::kAlignedSize, \ - medium_ascii_symbol) \ - V(LONG_ASCII_SYMBOL_TYPE, SeqAsciiString::kAlignedSize, long_ascii_symbol) \ - V(SHORT_CONS_SYMBOL_TYPE, ConsString::kSize, short_cons_symbol) \ - V(MEDIUM_CONS_SYMBOL_TYPE, ConsString::kSize, medium_cons_symbol) \ - V(LONG_CONS_SYMBOL_TYPE, ConsString::kSize, long_cons_symbol) \ - V(SHORT_CONS_ASCII_SYMBOL_TYPE, ConsString::kSize, short_cons_ascii_symbol) \ - V(MEDIUM_CONS_ASCII_SYMBOL_TYPE, ConsString::kSize, medium_cons_ascii_symbol)\ - V(LONG_CONS_ASCII_SYMBOL_TYPE, ConsString::kSize, long_cons_ascii_symbol) \ - V(SHORT_SLICED_SYMBOL_TYPE, SlicedString::kSize, short_sliced_symbol) \ - V(MEDIUM_SLICED_SYMBOL_TYPE, SlicedString::kSize, medium_sliced_symbol) \ - V(LONG_SLICED_SYMBOL_TYPE, SlicedString::kSize, long_sliced_symbol) \ + medium_ascii_symbol, \ + MediumAsciiSymbol) \ + V(LONG_ASCII_SYMBOL_TYPE, \ + SeqAsciiString::kAlignedSize, \ + long_ascii_symbol, \ + LongAsciiSymbol) \ + V(SHORT_CONS_SYMBOL_TYPE, \ + ConsString::kSize, \ + short_cons_symbol, \ + ShortConsSymbol) \ + V(MEDIUM_CONS_SYMBOL_TYPE, \ + ConsString::kSize, \ + medium_cons_symbol, \ + MediumConsSymbol) \ + V(LONG_CONS_SYMBOL_TYPE, \ + ConsString::kSize, \ + long_cons_symbol, \ + LongConsSymbol) \ + V(SHORT_CONS_ASCII_SYMBOL_TYPE, \ + ConsString::kSize, \ + short_cons_ascii_symbol, \ + ShortConsAsciiSymbol) \ + V(MEDIUM_CONS_ASCII_SYMBOL_TYPE, \ + ConsString::kSize, \ + medium_cons_ascii_symbol, \ + MediumConsAsciiSymbol) \ + V(LONG_CONS_ASCII_SYMBOL_TYPE, \ + ConsString::kSize, \ + long_cons_ascii_symbol, \ + LongConsAsciiSymbol) \ + V(SHORT_SLICED_SYMBOL_TYPE, \ + SlicedString::kSize, \ + short_sliced_symbol, \ + ShortSlicedSymbol) \ + V(MEDIUM_SLICED_SYMBOL_TYPE, \ + SlicedString::kSize, \ + medium_sliced_symbol, \ + MediumSlicedSymbol) \ + V(LONG_SLICED_SYMBOL_TYPE, \ + SlicedString::kSize, \ + long_sliced_symbol, \ + LongSlicedSymbol) \ V(SHORT_SLICED_ASCII_SYMBOL_TYPE, \ SlicedString::kSize, \ - short_sliced_ascii_symbol) 
\ + short_sliced_ascii_symbol, \ + ShortSlicedAsciiSymbol) \ V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE, \ SlicedString::kSize, \ - medium_sliced_ascii_symbol) \ + medium_sliced_ascii_symbol, \ + MediumSlicedAsciiSymbol) \ V(LONG_SLICED_ASCII_SYMBOL_TYPE, \ SlicedString::kSize, \ - long_sliced_ascii_symbol) \ + long_sliced_ascii_symbol, \ + LongSlicedAsciiSymbol) \ V(SHORT_EXTERNAL_SYMBOL_TYPE, \ ExternalTwoByteString::kSize, \ - short_external_symbol) \ + short_external_symbol, \ + ShortExternalSymbol) \ V(MEDIUM_EXTERNAL_SYMBOL_TYPE, \ ExternalTwoByteString::kSize, \ - medium_external_symbol) \ + medium_external_symbol, \ + MediumExternalSymbol) \ V(LONG_EXTERNAL_SYMBOL_TYPE, \ ExternalTwoByteString::kSize, \ - long_external_symbol) \ + long_external_symbol, \ + LongExternalSymbol) \ V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \ ExternalAsciiString::kSize, \ - short_external_ascii_symbol) \ + short_external_ascii_symbol, \ + ShortExternalAsciiSymbol) \ V(MEDIUM_EXTERNAL_ASCII_SYMBOL_TYPE, \ ExternalAsciiString::kSize, \ - medium_external_ascii_symbol) \ + medium_external_ascii_symbol, \ + MediumExternalAsciiSymbol) \ V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE, \ ExternalAsciiString::kSize, \ - long_external_ascii_symbol) \ - V(SHORT_STRING_TYPE, SeqTwoByteString::kAlignedSize, short_string) \ - V(MEDIUM_STRING_TYPE, SeqTwoByteString::kAlignedSize, medium_string) \ - V(LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize, long_string) \ - V(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize, short_ascii_string) \ + long_external_ascii_symbol, \ + LongExternalAsciiSymbol) \ + V(SHORT_STRING_TYPE, \ + SeqTwoByteString::kAlignedSize, \ + short_string, \ + ShortString) \ + V(MEDIUM_STRING_TYPE, \ + SeqTwoByteString::kAlignedSize, \ + medium_string, \ + MediumString) \ + V(LONG_STRING_TYPE, \ + SeqTwoByteString::kAlignedSize, \ + long_string, \ + LongString) \ + V(SHORT_ASCII_STRING_TYPE, \ + SeqAsciiString::kAlignedSize, \ + short_ascii_string, \ + ShortAsciiString) \ V(MEDIUM_ASCII_STRING_TYPE, \ SeqAsciiString::kAlignedSize, \ - medium_ascii_string) \ - V(LONG_ASCII_STRING_TYPE, SeqAsciiString::kAlignedSize, long_ascii_string) \ - V(SHORT_CONS_STRING_TYPE, ConsString::kSize, short_cons_string) \ - V(MEDIUM_CONS_STRING_TYPE, ConsString::kSize, medium_cons_string) \ - V(LONG_CONS_STRING_TYPE, ConsString::kSize, long_cons_string) \ - V(SHORT_CONS_ASCII_STRING_TYPE, ConsString::kSize, short_cons_ascii_string) \ - V(MEDIUM_CONS_ASCII_STRING_TYPE, ConsString::kSize, medium_cons_ascii_string)\ - V(LONG_CONS_ASCII_STRING_TYPE, ConsString::kSize, long_cons_ascii_string) \ - V(SHORT_SLICED_STRING_TYPE, SlicedString::kSize, short_sliced_string) \ - V(MEDIUM_SLICED_STRING_TYPE, SlicedString::kSize, medium_sliced_string) \ - V(LONG_SLICED_STRING_TYPE, SlicedString::kSize, long_sliced_string) \ + medium_ascii_string, \ + MediumAsciiString) \ + V(LONG_ASCII_STRING_TYPE, \ + SeqAsciiString::kAlignedSize, \ + long_ascii_string, \ + LongAsciiString) \ + V(SHORT_CONS_STRING_TYPE, \ + ConsString::kSize, \ + short_cons_string, \ + ShortConsString) \ + V(MEDIUM_CONS_STRING_TYPE, \ + ConsString::kSize, \ + medium_cons_string, \ + MediumConsString) \ + V(LONG_CONS_STRING_TYPE, \ + ConsString::kSize, \ + long_cons_string, \ + LongConsString) \ + V(SHORT_CONS_ASCII_STRING_TYPE, \ + ConsString::kSize, \ + short_cons_ascii_string, \ + ShortConsAsciiString) \ + V(MEDIUM_CONS_ASCII_STRING_TYPE, \ + ConsString::kSize, \ + medium_cons_ascii_string, \ + MediumConsAsciiString) \ + V(LONG_CONS_ASCII_STRING_TYPE, \ + ConsString::kSize, \ + 
long_cons_ascii_string, \ + LongConsAsciiString) \ + V(SHORT_SLICED_STRING_TYPE, \ + SlicedString::kSize, \ + short_sliced_string, \ + ShortSlicedString) \ + V(MEDIUM_SLICED_STRING_TYPE, \ + SlicedString::kSize, \ + medium_sliced_string, \ + MediumSlicedString) \ + V(LONG_SLICED_STRING_TYPE, \ + SlicedString::kSize, \ + long_sliced_string, \ + LongSlicedString) \ V(SHORT_SLICED_ASCII_STRING_TYPE, \ SlicedString::kSize, \ - short_sliced_ascii_string) \ + short_sliced_ascii_string, \ + ShortSlicedAsciiString) \ V(MEDIUM_SLICED_ASCII_STRING_TYPE, \ SlicedString::kSize, \ - medium_sliced_ascii_string) \ + medium_sliced_ascii_string, \ + MediumSlicedAsciiString) \ V(LONG_SLICED_ASCII_STRING_TYPE, \ SlicedString::kSize, \ - long_sliced_ascii_string) \ + long_sliced_ascii_string, \ + LongSlicedAsciiString) \ V(SHORT_EXTERNAL_STRING_TYPE, \ ExternalTwoByteString::kSize, \ - short_external_string) \ + short_external_string, \ + ShortExternalString) \ V(MEDIUM_EXTERNAL_STRING_TYPE, \ ExternalTwoByteString::kSize, \ - medium_external_string) \ + medium_external_string, \ + MediumExternalString) \ V(LONG_EXTERNAL_STRING_TYPE, \ ExternalTwoByteString::kSize, \ - long_external_string) \ + long_external_string, \ + LongExternalString) \ V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \ ExternalAsciiString::kSize, \ - short_external_ascii_string) \ + short_external_ascii_string, \ + ShortExternalAsciiString) \ V(MEDIUM_EXTERNAL_ASCII_STRING_TYPE, \ ExternalAsciiString::kSize, \ - medium_external_ascii_string) \ + medium_external_ascii_string, \ + MediumExternalAsciiString) \ V(LONG_EXTERNAL_ASCII_STRING_TYPE, \ ExternalAsciiString::kSize, \ - long_external_ascii_string) + long_external_ascii_string, \ + LongExternalAsciiString) // A struct is a simple object a set of object-valued fields. Including an // object type in this causes the compiler to generate most of the boilerplate @@ -547,6 +656,7 @@ enum InstanceType { FIXED_ARRAY_TYPE, CODE_TYPE, ODDBALL_TYPE, + JS_GLOBAL_PROPERTY_CELL_TYPE, PROXY_TYPE, BYTE_ARRAY_TYPE, FILLER_TYPE, @@ -684,6 +794,7 @@ class Object BASE_EMBEDDED { inline bool IsJSGlobalProxy(); inline bool IsUndetectableObject(); inline bool IsAccessCheckNeeded(); + inline bool IsJSGlobalPropertyCell(); // Returns true if this object is an instance of the specified // function template. @@ -817,12 +928,14 @@ class Smi: public Object { // Failure is used for reporting out of memory situations and // propagating exceptions through the runtime system. Failure objects -// are transient and cannot occur as part of the objects graph. +// are transient and cannot occur as part of the object graph. // // Failures are a single word, encoded as follows: // +-------------------------+---+--+--+ // |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11| // +-------------------------+---+--+--+ +// 3 7 6 4 32 10 +// 1 // // The low two bits, 0-1, are the failure tag, 11. The next two bits, // 2-3, are a failure type tag 'tt' with possible values: @@ -833,18 +946,13 @@ class Smi: public Object { // // The next three bits, 4-6, are an allocation space tag 'sss'. The // allocation space tag is 000 for all failure types except -// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are -// (the encoding is found in globals.h): -// 000 NEW_SPACE -// 001 OLD_SPACE -// 010 CODE_SPACE -// 011 MAP_SPACE -// 100 LO_SPACE +// RETRY_AFTER_GC. For RETRY_AFTER_GC, the possible values are the +// allocation spaces (the encoding is found in globals.h). 
// -// The remaining bits is the number of words requested by the -// allocation request that failed, and is zeroed except for -// RETRY_AFTER_GC failures. The 25 bits (on a 32 bit platform) gives -// a representable range of 2^27 bytes (128MB). +// The remaining bits is the size of the allocation request in units +// of the pointer size, and is zeroed except for RETRY_AFTER_GC +// failures. The 25 bits (on a 32 bit platform) gives a representable +// range of 2^27 bytes (128MB). // Failure type tag info. const int kFailureTypeTagSize = 2; @@ -974,14 +1082,6 @@ class MapWord BASE_EMBEDDED { inline Address ToEncodedAddress(); - private: - // HeapObject calls the private constructor and directly reads the value. - friend class HeapObject; - - explicit MapWord(uintptr_t value) : value_(value) {} - - uintptr_t value_; - // Bits used by the marking phase of the garbage collector. // // The first word of a heap object is normally a map pointer. The last two @@ -1023,6 +1123,14 @@ class MapWord BASE_EMBEDDED { // 0xFFE00000 static const uint32_t kForwardingOffsetMask = ~(kMapPageIndexMask | kMapPageOffsetMask); + + private: + // HeapObject calls the private constructor and directly reads the value. + friend class HeapObject; + + explicit MapWord(uintptr_t value) : value_(value) {} + + uintptr_t value_; }; @@ -1193,13 +1301,15 @@ class HeapNumber: public HeapObject { // caching. class JSObject: public HeapObject { public: + enum DeleteMode { NORMAL_DELETION, FORCE_DELETION }; + // [properties]: Backing storage for properties. // properties is a FixedArray in the fast case, and a Dictionary in the // slow case. DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties. inline void initialize_properties(); inline bool HasFastProperties(); - inline Dictionary* property_dictionary(); // Gets slow properties. + inline StringDictionary* property_dictionary(); // Gets slow properties. // [elements]: The elements (properties with names that are integers). // elements is a FixedArray in the fast case, and a Dictionary in the slow @@ -1207,7 +1317,7 @@ class JSObject: public HeapObject { DECL_ACCESSORS(elements, FixedArray) // Get and set fast elements. inline void initialize_elements(); inline bool HasFastElements(); - inline Dictionary* element_dictionary(); // Gets slow elements. + inline NumberDictionary* element_dictionary(); // Gets slow elements. // Collects elements starting at index 0. // Undefined values are placed after non-undefined values. @@ -1243,6 +1353,23 @@ class JSObject: public HeapObject { Object* value, PropertyAttributes attributes); + // Retrieve a value in a normalized object given a lookup result. + // Handles the special representation of JS global objects. + Object* GetNormalizedProperty(LookupResult* result); + + // Sets the property value in a normalized object given a lookup result. + // Handles the special representation of JS global objects. + Object* SetNormalizedProperty(LookupResult* result, Object* value); + + // Sets the property value in a normalized object given (key, value, details). + // Handles the special representation of JS global objects. + Object* SetNormalizedProperty(String* name, + Object* value, + PropertyDetails details); + + // Deletes the named property in a normalized object. + Object* DeleteNormalizedProperty(String* name, DeleteMode mode); + // Sets a property that currently has lazy loading. 
Object* SetLazyProperty(LookupResult* result, String* name, @@ -1293,7 +1420,6 @@ class JSObject: public HeapObject { return GetLocalPropertyAttribute(name) != ABSENT; } - enum DeleteMode { NORMAL_DELETION, FORCE_DELETION }; Object* DeleteProperty(String* name, DeleteMode mode); Object* DeleteElement(uint32_t index, DeleteMode mode); Object* DeleteLazyProperty(LookupResult* result, @@ -1569,13 +1695,11 @@ class JSObject: public HeapObject { void LookupInDescriptor(String* name, LookupResult* result); - // Attempts to get property with a named interceptor getter. Returns - // |true| and stores result into |result| if succesful, otherwise - // returns |false| - bool GetPropertyWithInterceptorProper(JSObject* receiver, - String* name, - PropertyAttributes* attributes, - Object** result); + // Attempts to get property with a named interceptor getter. + // Sets |attributes| to ABSENT if interceptor didn't return anything + Object* GetPropertyWithInterceptorProper(JSObject* receiver, + String* name, + PropertyAttributes* attributes); DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject); }; @@ -1726,15 +1850,28 @@ class DescriptorArray: public FixedArray { // using the supplied storage for the small "bridge". void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache); - // Accessors for fetching instance descriptor at descriptor number.. + // Accessors for fetching instance descriptor at descriptor number. inline String* GetKey(int descriptor_number); inline Object* GetValue(int descriptor_number); inline Smi* GetDetails(int descriptor_number); + inline PropertyType GetType(int descriptor_number); + inline int GetFieldIndex(int descriptor_number); + inline JSFunction* GetConstantFunction(int descriptor_number); + inline Object* GetCallbacksObject(int descriptor_number); + inline AccessorDescriptor* GetCallbacks(int descriptor_number); + inline bool IsProperty(int descriptor_number); + inline bool IsTransition(int descriptor_number); + inline bool IsNullDescriptor(int descriptor_number); + inline bool IsDontEnum(int descriptor_number); // Accessor for complete descriptor. inline void Get(int descriptor_number, Descriptor* desc); inline void Set(int descriptor_number, Descriptor* desc); + // Transfer complete descriptor from another descriptor array to + // this one. + inline void CopyFrom(int index, DescriptorArray* src, int src_index); + // Copy the descriptor array, insert a new descriptor and optionally // remove map transitions. If the descriptor is already present, it is // replaced. If a replaced descriptor is a real property (not a transition @@ -1851,32 +1988,29 @@ class DescriptorArray: public FixedArray { // - Elements with key == undefined have not been used yet. // - Elements with key == null have been deleted. // -// The hash table class is parameterized with a prefix size and with -// the size, including the key size, of the elements held in the hash +// The hash table class is parameterized with a Shape and a Key. +// Shape must be a class with the following interface: +// class ExampleShape { +// public: +// // Tells whether key matches other. +// static bool IsMatch(Key key, Object* other); +// // Returns the hash value for key. +// static uint32_t Hash(Key key); +// // Returns the hash value for object. +// static uint32_t HashForObject(Key key, Object* object); +// // Convert key to an object. +// static inline Object* AsObject(Key key); +// // The prefix size indicates number of elements in the beginning +// // of the backing storage. 
+// static const int kPrefixSize = ..; +// // The Element size indicates number of elements per entry. +// static const int kEntrySize = ..; +// }; // table. The prefix size indicates an amount of memory in the // beginning of the backing storage that can be used for non-element // information by subclasses. -// HashTableKey is an abstract superclass keys. -class HashTableKey { - public: - // Returns whether the other object matches this key. - virtual bool IsMatch(Object* other) = 0; - typedef uint32_t (*HashFunction)(Object* obj); - // Returns the hash function used for this key. - virtual HashFunction GetHashFunction() = 0; - // Returns the hash value for this key. - virtual uint32_t Hash() = 0; - // Returns the key object for storing into the dictionary. - // If allocations fails a failure object is returned. - virtual Object* GetObject() = 0; - virtual bool IsStringKey() = 0; - // Required. - virtual ~HashTableKey() {} -}; - - -template<int prefix_size, int element_size> +template<typename Shape, typename Key> class HashTable: public FixedArray { public: // Returns the number of elements in the dictionary. @@ -1925,22 +2059,27 @@ class HashTable: public FixedArray { static const int kNumberOfElementsIndex = 0; static const int kCapacityIndex = 1; static const int kPrefixStartIndex = 2; - static const int kElementsStartIndex = kPrefixStartIndex + prefix_size; - static const int kElementSize = element_size; + static const int kElementsStartIndex = + kPrefixStartIndex + Shape::kPrefixSize; + static const int kEntrySize = Shape::kEntrySize; static const int kElementsStartOffset = kHeaderSize + kElementsStartIndex * kPointerSize; - protected: + // Constant used for denoting an absent entry. + static const int kNotFound = -1; + // Find entry for key otherwise return -1. - int FindEntry(HashTableKey* key); + int FindEntry(Key key); + + protected: // Find the entry at which to insert element with the given key that // has the given hash value. - uint32_t FindInsertionEntry(Object* key, uint32_t hash); + uint32_t FindInsertionEntry(uint32_t hash); // Returns the index for an entry (of the key) static inline int EntryToIndex(int entry) { - return (entry * kElementSize) + kElementsStartIndex; + return (entry * kEntrySize) + kElementsStartIndex; } // Update the number of elements in the dictionary. @@ -1965,15 +2104,51 @@ class HashTable: public FixedArray { } // Ensure enough space for n additional elements. - Object* EnsureCapacity(int n, HashTableKey* key); + Object* EnsureCapacity(int n, Key key); }; + +// HashTableKey is an abstract superclass for virtual key behavior. +class HashTableKey { + public: + // Returns whether the other object matches this key. + virtual bool IsMatch(Object* other) = 0; + // Returns the hash value for this key. + virtual uint32_t Hash() = 0; + // Returns the hash value for object. + virtual uint32_t HashForObject(Object* key) = 0; + // Returns the key object for storing into the dictionary. + // If allocation fails a failure object is returned. + virtual Object* AsObject() = 0; + // Required.
+ virtual ~HashTableKey() {} +}; + +class SymbolTableShape { + public: + static bool IsMatch(HashTableKey* key, Object* value) { + return key->IsMatch(value); + } + static uint32_t Hash(HashTableKey* key) { + return key->Hash(); + } + static uint32_t HashForObject(HashTableKey* key, Object* object) { + return key->HashForObject(object); + } + static Object* AsObject(HashTableKey* key) { + return key->AsObject(); + } + + static const int kPrefixSize = 0; + static const int kEntrySize = 1; +}; + // SymbolTable. // // No special elements in the prefix and the element size is 1 // because only the symbol itself (the key) needs to be stored. -class SymbolTable: public HashTable<0, 1> { +class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> { public: // Find symbol in the symbol table. If it is not there yet, it is // added. The return value is the symbol table which might have @@ -1997,11 +2172,33 @@ class SymbolTable: public HashTable<0, 1> { }; +class MapCacheShape { + public: + static bool IsMatch(HashTableKey* key, Object* value) { + return key->IsMatch(value); + } + static uint32_t Hash(HashTableKey* key) { + return key->Hash(); + } + + static uint32_t HashForObject(HashTableKey* key, Object* object) { + return key->HashForObject(object); + } + + static Object* AsObject(HashTableKey* key) { + return key->AsObject(); + } + + static const int kPrefixSize = 0; + static const int kEntrySize = 2; +}; + + // MapCache. // // Maps keys that are a fixed array of symbols to a map. // Used for canonicalize maps for object literals. -class MapCache: public HashTable<0, 2> { +class MapCache: public HashTable<MapCacheShape, HashTableKey*> { public: // Find cached value for a string key, otherwise return null. Object* Lookup(FixedArray* key); @@ -2013,72 +2210,42 @@ class MapCache: public HashTable<0, 2> { }; -// Dictionary for keeping properties and elements in slow case. -// -// One element in the prefix is used for storing non-element -// information about the dictionary. -// -// The rest of the array embeds triples of (key, value, details). -// if key == undefined the triple is empty. -// if key == null the triple has been deleted. -// otherwise key contains the name of a property. -class DictionaryBase: public HashTable<2, 3> {}; - -class Dictionary: public DictionaryBase { +template <typename Shape, typename Key> +class Dictionary: public HashTable<Shape, Key> { public: + + static inline Dictionary<Shape, Key>* cast(Object* obj) { + return reinterpret_cast<Dictionary<Shape, Key>*>(obj); + } + // Returns the value at entry. - Object* ValueAt(int entry) { return get(EntryToIndex(entry)+1); } + Object* ValueAt(int entry) { + return get(HashTable<Shape, Key>::EntryToIndex(entry)+1); + } // Set the value for entry. void ValueAtPut(int entry, Object* value) { - set(EntryToIndex(entry)+1, value); + set(HashTable<Shape, Key>::EntryToIndex(entry)+1, value); } // Returns the property details for the property at entry. PropertyDetails DetailsAt(int entry) { ASSERT(entry >= 0); // Not found is -1, which is not caught by get(). - return PropertyDetails(Smi::cast(get(EntryToIndex(entry) + 2))); + return PropertyDetails( + Smi::cast(get(HashTable<Shape, Key>::EntryToIndex(entry) + 2))); } // Set the details for entry. void DetailsAtPut(int entry, PropertyDetails value) { - set(EntryToIndex(entry) + 2, value.AsSmi()); + set(HashTable<Shape, Key>::EntryToIndex(entry) + 2, value.AsSmi()); } - // Remove all entries were key is a number and (from <= key && key < to). 
- void RemoveNumberEntries(uint32_t from, uint32_t to); - // Sorting support void CopyValuesTo(FixedArray* elements); - // Casting. - static inline Dictionary* cast(Object* obj); - - // Find entry for string key otherwise return -1. - int FindStringEntry(String* key); - - // Find entry for number key otherwise return -1. - int FindNumberEntry(uint32_t index); - // Delete a property from the dictionary. Object* DeleteProperty(int entry, JSObject::DeleteMode mode); - // Type specific at put (default NONE attributes is used when adding). - Object* AtStringPut(String* key, Object* value); - Object* AtNumberPut(uint32_t key, Object* value); - - Object* AddStringEntry(String* key, Object* value, PropertyDetails details); - Object* AddNumberEntry(uint32_t key, Object* value, PropertyDetails details); - - // Set an existing entry or add a new one if needed. - Object* SetOrAddStringEntry(String* key, - Object* value, - PropertyDetails details); - - Object* SetOrAddNumberEntry(uint32_t key, - Object* value, - PropertyDetails details); - // Returns the number of elements in the dictionary filtering out properties // with the specified attributes. int NumberOfElementsFilterAttributes(PropertyAttributes filter); @@ -2088,42 +2255,23 @@ class Dictionary: public DictionaryBase { // Copies keys to preallocated fixed array. void CopyKeysTo(FixedArray* storage, PropertyAttributes filter); - // Copies enumerable keys to preallocated fixed array. - void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array); // Fill in details for properties into storage. void CopyKeysTo(FixedArray* storage); - // For transforming properties of a JSObject. - Object* TransformPropertiesToFastFor(JSObject* obj, - int unused_property_fields); - - // If slow elements are required we will never go back to fast-case - // for the elements kept in this dictionary. We require slow - // elements if an element has been added at an index larger than - // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called - // when defining a getter or setter with a number key. - inline bool requires_slow_elements(); - inline void set_requires_slow_elements(); - - // Get the value of the max number key that has been added to this - // dictionary. max_number_key can only be called if - // requires_slow_elements returns false. - inline uint32_t max_number_key(); - // Accessors for next enumeration index. void SetNextEnumerationIndex(int index) { fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index)); } int NextEnumerationIndex() { - return Smi::cast(get(kNextEnumerationIndexIndex))->value(); + return Smi::cast(FixedArray::get(kNextEnumerationIndexIndex))->value(); } // Returns a new array for dictionary usage. Might return Failure. static Object* Allocate(int at_least_space_for); // Ensure enough space for n additional elements. - Object* EnsureCapacity(int n, HashTableKey* key); + Object* EnsureCapacity(int n, Key key); #ifdef DEBUG void Print(); @@ -2131,38 +2279,110 @@ class Dictionary: public DictionaryBase { // Returns the key (slow). Object* SlowReverseLookup(Object* value); - // Bit masks. - static const int kRequiresSlowElementsMask = 1; - static const int kRequiresSlowElementsTagSize = 1; - static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1; - - void UpdateMaxNumberKey(uint32_t key); - - private: - // Generic at put operation. - Object* AtPut(HashTableKey* key, Object* value); - - Object* Add(HashTableKey* key, Object* value, PropertyDetails details); - - // Add entry to dictionary. 
- void AddEntry(Object* key, - Object* value, - PropertyDetails details, - uint32_t hash); - // Sets the entry to (key, value) pair. inline void SetEntry(int entry, Object* key, Object* value, PropertyDetails details); + Object* Add(Key key, Object* value, PropertyDetails details); + + protected: + // Generic at put operation. + Object* AtPut(Key key, Object* value); + + // Add entry to dictionary. + Object* AddEntry(Key key, + Object* value, + PropertyDetails details, + uint32_t hash); + // Generate new enumeration indices to avoid enumeration index overflow. Object* GenerateNewEnumerationIndices(); - - static const int kMaxNumberKeyIndex = kPrefixStartIndex; + static const int kMaxNumberKeyIndex = + HashTable<Shape, Key>::kPrefixStartIndex; static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1; +}; + + +class StringDictionaryShape { + public: + static inline bool IsMatch(String* key, Object* other); + static inline uint32_t Hash(String* key); + static inline uint32_t HashForObject(String* key, Object* object); + static inline Object* AsObject(String* key); + static const int kPrefixSize = 2; + static const int kEntrySize = 3; + static const bool kIsEnumerable = true; +}; + + +class StringDictionary: public Dictionary<StringDictionaryShape, String*> { + public: + static inline StringDictionary* cast(Object* obj) { + ASSERT(obj->IsDictionary()); + return reinterpret_cast<StringDictionary*>(obj); + } + + // Copies enumerable keys to preallocated fixed array. + void CopyEnumKeysTo(FixedArray* storage, FixedArray* sort_array); + + // For transforming properties of a JSObject. + Object* TransformPropertiesToFastFor(JSObject* obj, + int unused_property_fields); +}; - DISALLOW_IMPLICIT_CONSTRUCTORS(Dictionary); + +class NumberDictionaryShape { + public: + static inline bool IsMatch(uint32_t key, Object* other); + static inline uint32_t Hash(uint32_t key); + static inline uint32_t HashForObject(uint32_t key, Object* object); + static inline Object* AsObject(uint32_t key); + static const int kPrefixSize = 2; + static const int kEntrySize = 3; + static const bool kIsEnumerable = false; +}; + + +class NumberDictionary: public Dictionary<NumberDictionaryShape, uint32_t> { + public: + static NumberDictionary* cast(Object* obj) { + ASSERT(obj->IsDictionary()); + return reinterpret_cast<NumberDictionary*>(obj); + } + + // Type specific at put (default NONE attributes is used when adding). + Object* AtNumberPut(uint32_t key, Object* value); + Object* AddNumberEntry(uint32_t key, + Object* value, + PropertyDetails details); + + // Set an existing entry or add a new one if needed. + Object* Set(uint32_t key, Object* value, PropertyDetails details); + + void UpdateMaxNumberKey(uint32_t key); + + // If slow elements are required we will never go back to fast-case + // for the elements kept in this dictionary. We require slow + // elements if an element has been added at an index larger than + // kRequiresSlowElementsLimit or set_requires_slow_elements() has been called + // when defining a getter or setter with a number key. + inline bool requires_slow_elements(); + inline void set_requires_slow_elements(); + + // Get the value of the max number key that has been added to this + // dictionary. max_number_key can only be called if + // requires_slow_elements returns false. + inline uint32_t max_number_key(); + + // Remove all entries where key is a number and (from <= key && key < to). + void RemoveNumberEntries(uint32_t from, uint32_t to); + + // Bit masks.
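// (Layout inferred from the accessors above: the Smi stored in the
// kMaxNumberKeyIndex prefix slot holds max_number_key shifted left by
// kRequiresSlowElementsTagSize, with the low bit, kRequiresSlowElementsMask,
// set once slow elements become mandatory; keys must therefore stay below
// kRequiresSlowElementsLimit, i.e. 2^29 - 1, for the shifted value to fit.)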
+ static const int kRequiresSlowElementsMask = 1; + static const int kRequiresSlowElementsTagSize = 1; + static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1; }; @@ -2252,6 +2472,7 @@ class Code: public HeapObject { // Printing static const char* Kind2String(Kind kind); static const char* ICState2String(InlineCacheState state); + static const char* PropertyType2String(PropertyType type); void Disassemble(const char* name); #endif // ENABLE_DISASSEMBLER @@ -2274,7 +2495,7 @@ class Code: public HeapObject { // [flags]: Access to specific code flags. inline Kind kind(); inline InlineCacheState ic_state(); // Only valid for IC stubs. - inline InLoopFlag ic_in_loop(); // Only valid for IC stubs.. + inline InLoopFlag ic_in_loop(); // Only valid for IC stubs. inline PropertyType type(); // Only valid for monomorphic IC stubs. inline int arguments_count(); // Only valid for call IC stubs. @@ -2655,16 +2876,16 @@ class Script: public Struct { public: // Script types. enum Type { - TYPE_NATIVE, - TYPE_EXTENSION, - TYPE_NORMAL + TYPE_NATIVE = 0, + TYPE_EXTENSION = 1, + TYPE_NORMAL = 2 }; // Script compilation types. enum CompilationType { - COMPILATION_TYPE_HOST, - COMPILATION_TYPE_EVAL, - COMPILATION_TYPE_JSON + COMPILATION_TYPE_HOST = 0, + COMPILATION_TYPE_EVAL = 1, + COMPILATION_TYPE_JSON = 2 }; // [source]: the script source. @@ -3029,6 +3250,12 @@ class GlobalObject: public JSObject { // [global receiver]: the global receiver object of the context DECL_ACCESSORS(global_receiver, JSObject) + // Retrieve the property cell used to store a property. + Object* GetPropertyCell(LookupResult* result); + + // Ensure that the global object has a cell for the given property name. + Object* EnsurePropertyCell(String* name); + // Casting. static inline GlobalObject* cast(Object* obj); @@ -3048,6 +3275,7 @@ class GlobalObject: public JSObject { // JavaScript global object. class JSGlobalObject: public GlobalObject { public: + // Casting. static inline JSGlobalObject* cast(Object* obj); @@ -3160,6 +3388,13 @@ class JSRegExp: public JSObject { inline Object* DataAt(int index); // Set implementation data after the object has been prepared. inline void SetDataAt(int index, Object* value); + static int code_index(bool is_ascii) { + if (is_ascii) { + return kIrregexpASCIICodeIndex; + } else { + return kIrregexpUC16CodeIndex; + } + } static inline JSRegExp* cast(Object* obj); @@ -3197,7 +3432,30 @@ class JSRegExp: public JSObject { }; -class CompilationCacheTable: public HashTable<0, 2> { +class CompilationCacheShape { + public: + static inline bool IsMatch(HashTableKey* key, Object* value) { + return key->IsMatch(value); + } + + static inline uint32_t Hash(HashTableKey* key) { + return key->Hash(); + } + + static inline uint32_t HashForObject(HashTableKey* key, Object* object) { + return key->HashForObject(object); + } + + static Object* AsObject(HashTableKey* key) { + return key->AsObject(); + } + + static const int kPrefixSize = 0; + static const int kEntrySize = 2; +}; + +class CompilationCacheTable: public HashTable<CompilationCacheShape, + HashTableKey*> { public: // Find cached value for a string key, otherwise return null. Object* Lookup(String* src); @@ -3936,6 +4194,31 @@ class Oddball: public HeapObject { }; +class JSGlobalPropertyCell: public HeapObject { + public: + // [value]: value of the global property. + DECL_ACCESSORS(value, Object) + + // Casting. + static inline JSGlobalPropertyCell* cast(Object* obj); + + // Dispatched behavior. 
+ void JSGlobalPropertyCellIterateBody(ObjectVisitor* v); +#ifdef DEBUG + void JSGlobalPropertyCellVerify(); + void JSGlobalPropertyCellPrint(); +#endif + + // Layout description. + static const int kValueOffset = HeapObject::kHeaderSize; + static const int kSize = kValueOffset + kPointerSize; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell); +}; + + + // Proxy describes objects pointing from JavaScript to C structures. // Since they cannot contain references to JS HeapObjects they can be // placed in old_data_space. diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 2b4be79a0..89d6d5b62 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -361,7 +361,7 @@ class BufferedZoneList { }; // Accumulates RegExp atoms and assertions into lists of terms and alternatives. -class RegExpBuilder { +class RegExpBuilder: public ZoneObject { public: RegExpBuilder(); void AddCharacter(uc16 character); @@ -392,7 +392,10 @@ class RegExpBuilder { RegExpBuilder::RegExpBuilder() - : pending_empty_(false), characters_(NULL), terms_(), alternatives_() + : pending_empty_(false), + characters_(NULL), + terms_(), + alternatives_() #ifdef DEBUG , last_added_(ADD_NONE) #endif @@ -594,6 +597,44 @@ class RegExpParser { static const int kMaxCaptures = 1 << 16; static const uc32 kEndMarker = (1 << 21); private: + enum SubexpressionType { + INITIAL, + CAPTURE, // All positive values represent captures. + POSITIVE_LOOKAHEAD, + NEGATIVE_LOOKAHEAD, + GROUPING + }; + + class RegExpParserState : public ZoneObject { + public: + RegExpParserState(RegExpParserState* previous_state, + SubexpressionType group_type, + int disjunction_capture_index) + : previous_state_(previous_state), + builder_(new RegExpBuilder()), + group_type_(group_type), + disjunction_capture_index_(disjunction_capture_index) {} + // Parser state of containing expression, if any. + RegExpParserState* previous_state() { return previous_state_; } + bool IsSubexpression() { return previous_state_ != NULL; } + // RegExpBuilder building this regexp's AST. + RegExpBuilder* builder() { return builder_; } + // Type of regexp being parsed (parenthesized group or entire regexp). + SubexpressionType group_type() { return group_type_; } + // Index in captures array of first capture in this sub-expression, if any. + // Also the capture index of this sub-expression itself, if group_type + // is CAPTURE. + int capture_index() { return disjunction_capture_index_; } + private: + // Linked list implementation of stack of states. + RegExpParserState* previous_state_; + // Builder for the stored disjunction. + RegExpBuilder* builder_; + // Stored disjunction type (capture, look-ahead or grouping), if any. + SubexpressionType group_type_; + // Stored disjunction's capture index (if any). + int disjunction_capture_index_; + }; uc32 current() { return current_; } bool has_more() { return has_more_; } @@ -601,7 +642,6 @@ class RegExpParser { uc32 Next(); FlatStringReader* in() { return in_; } void ScanForCaptures(); - bool CaptureAvailable(int index); uc32 current_; bool has_more_; bool multiline_; @@ -1536,10 +1576,10 @@ VariableProxy* AstBuildingParser::Declare(Handle<String> name, // to the calling function context. if (top_scope_->is_function_scope()) { // Declare the variable in the function scope. - var = top_scope_->LookupLocal(name); + var = top_scope_->LocalLookup(name); if (var == NULL) { // Declare the name. 
- var = top_scope_->Declare(name, mode); + var = top_scope_->DeclareLocal(name, mode); } else { // The name was declared before; check for conflicting // re-declarations. If the previous declaration was a const or the @@ -2005,7 +2045,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) { // 'continue' Identifier? ';' Expect(Token::CONTINUE, CHECK_OK); - Handle<String> label(static_cast<String**>(NULL)); + Handle<String> label = Handle<String>::null(); Token::Value tok = peek(); if (!scanner_.has_line_terminator_before_next() && tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) { @@ -3426,8 +3466,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name, while (!done) { Handle<String> param_name = ParseIdentifier(CHECK_OK); if (!is_pre_parsing_) { - top_scope_->AddParameter(top_scope_->Declare(param_name, - Variable::VAR)); + top_scope_->AddParameter(top_scope_->DeclareLocal(param_name, + Variable::VAR)); num_parameters++; } done = (peek() == Token::RPAREN); @@ -3808,9 +3848,7 @@ RegExpTree* RegExpParser::ReportError(Vector<const char> message) { // Disjunction RegExpTree* RegExpParser::ParsePattern() { RegExpTree* result = ParseDisjunction(CHECK_FAILED); - if (has_more()) { - ReportError(CStrVector("Unmatched ')'") CHECK_FAILED); - } + ASSERT(!has_more()); // If the result of parsing is a literal string atom, and it has the // same length as the input, then the atom is identical to the input. if (result->IsAtom() && result->AsAtom()->length() == in()->length()) { @@ -3820,14 +3858,6 @@ RegExpTree* RegExpParser::ParsePattern() { } -bool RegExpParser::CaptureAvailable(int index) { - if (captures_ == NULL) return false; - if (index >= captures_->length()) return false; - RegExpCapture* capture = captures_->at(index); - return capture != NULL && capture->available() == CAPTURE_AVAILABLE; -} - - // Disjunction :: // Alternative // Alternative | Disjunction @@ -3839,24 +3869,60 @@ bool RegExpParser::CaptureAvailable(int index) { // Atom // Atom Quantifier RegExpTree* RegExpParser::ParseDisjunction() { - RegExpBuilder builder; - int capture_start_index = captures_started(); + // Used to store current state while parsing subexpressions. + RegExpParserState initial_state(NULL, INITIAL, 0); + RegExpParserState* stored_state = &initial_state; + // Cache the builder in a local variable for quick access. + RegExpBuilder* builder = initial_state.builder(); while (true) { switch (current()) { case kEndMarker: - case ')': - return builder.ToRegExp(); - case '|': { + if (stored_state->IsSubexpression()) { + // Inside a parenthesized group when hitting end of input. + ReportError(CStrVector("Unterminated group") CHECK_FAILED); + } + ASSERT_EQ(INITIAL, stored_state->group_type()); + // Parsing completed successfully. + return builder->ToRegExp(); + case ')': { + if (!stored_state->IsSubexpression()) { + ReportError(CStrVector("Unmatched ')'") CHECK_FAILED); + } + ASSERT_NE(INITIAL, stored_state->group_type()); + Advance(); - builder.NewAlternative(); - int capture_new_alt_start_index = captures_started(); - for (int i = capture_start_index; i < capture_new_alt_start_index; i++) { - RegExpCapture* capture = captures_->at(i); - if (capture->available() == CAPTURE_AVAILABLE) { - capture->set_available(CAPTURE_UNREACHABLE); - } + // End disjunction parsing and convert builder content to new single + // regexp atom. 
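// (What follows pops the current RegExpParserState from the zone-allocated
// linked list that '(' pushes, wraps the finished body in a capture,
// lookahead or plain grouping according to the stored group_type, and
// resumes building in the enclosing RegExpBuilder; this explicit state list
// is what replaces the recursive ParseGroup path removed further down.)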
+ RegExpTree* body = builder->ToRegExp(); + + int end_capture_index = captures_started(); + + int capture_index = stored_state->capture_index(); + SubexpressionType type = stored_state->group_type(); + + // Restore previous state. + stored_state = stored_state->previous_state(); + builder = stored_state->builder(); + + // Build result of subexpression. + if (type == CAPTURE) { + RegExpCapture* capture = new RegExpCapture(body, capture_index); + captures_->at(capture_index - 1) = capture; + body = capture; + } else if (type != GROUPING) { + ASSERT(type == POSITIVE_LOOKAHEAD || type == NEGATIVE_LOOKAHEAD); + bool is_positive = (type == POSITIVE_LOOKAHEAD); + body = new RegExpLookahead(body, + is_positive, + end_capture_index - capture_index, + capture_index); } - capture_start_index = capture_new_alt_start_index; + builder->AddAtom(body); + break; + } + case '|': { + Advance(); + builder->NewAlternative(); continue; } case '*': @@ -3866,10 +3932,10 @@ RegExpTree* RegExpParser::ParseDisjunction() { case '^': { Advance(); if (multiline_) { - builder.AddAssertion( + builder->AddAssertion( new RegExpAssertion(RegExpAssertion::START_OF_LINE)); } else { - builder.AddAssertion( + builder->AddAssertion( new RegExpAssertion(RegExpAssertion::START_OF_INPUT)); set_contains_anchor(); } @@ -3880,7 +3946,7 @@ RegExpTree* RegExpParser::ParseDisjunction() { RegExpAssertion::Type type = multiline_ ? RegExpAssertion::END_OF_LINE : RegExpAssertion::END_OF_INPUT; - builder.AddAssertion(new RegExpAssertion(type)); + builder->AddAssertion(new RegExpAssertion(type)); continue; } case '.': { @@ -3889,17 +3955,47 @@ RegExpTree* RegExpParser::ParseDisjunction() { ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2); CharacterRange::AddClassEscape('.', ranges); RegExpTree* atom = new RegExpCharacterClass(ranges, false); - builder.AddAtom(atom); + builder->AddAtom(atom); break; } case '(': { - RegExpTree* atom = ParseGroup(CHECK_FAILED); - builder.AddAtom(atom); + SubexpressionType type = CAPTURE; + Advance(); + if (current() == '?') { + switch (Next()) { + case ':': + type = GROUPING; + break; + case '=': + type = POSITIVE_LOOKAHEAD; + break; + case '!': + type = NEGATIVE_LOOKAHEAD; + break; + default: + ReportError(CStrVector("Invalid group") CHECK_FAILED); + break; + } + Advance(2); + } else { + if (captures_ == NULL) { + captures_ = new ZoneList<RegExpCapture*>(2); + } + if (captures_started() >= kMaxCaptures) { + ReportError(CStrVector("Too many captures") CHECK_FAILED); + } + captures_->Add(NULL); + } + // Store current state and begin new disjunction parsing. 
+ stored_state = new RegExpParserState(stored_state, + type, + captures_started()); + builder = stored_state->builder(); break; } case '[': { RegExpTree* atom = ParseCharacterClass(CHECK_FAILED); - builder.AddAtom(atom); + builder->AddAtom(atom); break; } // Atom :: @@ -3910,12 +4006,12 @@ RegExpTree* RegExpParser::ParseDisjunction() { ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED); case 'b': Advance(2); - builder.AddAssertion( + builder->AddAssertion( new RegExpAssertion(RegExpAssertion::BOUNDARY)); continue; case 'B': Advance(2); - builder.AddAssertion( + builder->AddAssertion( new RegExpAssertion(RegExpAssertion::NON_BOUNDARY)); continue; // AtomEscape :: @@ -3929,27 +4025,29 @@ RegExpTree* RegExpParser::ParseDisjunction() { ZoneList<CharacterRange>* ranges = new ZoneList<CharacterRange>(2); CharacterRange::AddClassEscape(c, ranges); RegExpTree* atom = new RegExpCharacterClass(ranges, false); - builder.AddAtom(atom); - goto has_read_atom; // Avoid setting has_character_escapes_. + builder->AddAtom(atom); + break; } case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { int index = 0; if (ParseBackReferenceIndex(&index)) { - if (!CaptureAvailable(index - 1)) { - // Prepare to ignore a following quantifier - builder.AddEmpty(); - goto has_read_atom; + RegExpCapture* capture = NULL; + if (captures_ != NULL && index <= captures_->length()) { + capture = captures_->at(index - 1); + } + if (capture == NULL) { + builder->AddEmpty(); + break; } - RegExpCapture* capture = captures_->at(index - 1); RegExpTree* atom = new RegExpBackReference(capture); - builder.AddAtom(atom); - goto has_read_atom; // Avoid setting has_character_escapes_. + builder->AddAtom(atom); + break; } uc32 first_digit = Next(); if (first_digit == '8' || first_digit == '9') { // Treat as identity escape - builder.AddCharacter(first_digit); + builder->AddCharacter(first_digit); Advance(2); break; } @@ -3958,44 +4056,44 @@ RegExpTree* RegExpParser::ParseDisjunction() { case '0': { Advance(); uc32 octal = ParseOctalLiteral(); - builder.AddCharacter(octal); + builder->AddCharacter(octal); break; } // ControlEscape :: one of // f n r t v case 'f': Advance(2); - builder.AddCharacter('\f'); + builder->AddCharacter('\f'); break; case 'n': Advance(2); - builder.AddCharacter('\n'); + builder->AddCharacter('\n'); break; case 'r': Advance(2); - builder.AddCharacter('\r'); + builder->AddCharacter('\r'); break; case 't': Advance(2); - builder.AddCharacter('\t'); + builder->AddCharacter('\t'); break; case 'v': Advance(2); - builder.AddCharacter('\v'); + builder->AddCharacter('\v'); break; case 'c': { Advance(2); uc32 control = ParseControlLetterEscape(); - builder.AddCharacter(control); + builder->AddCharacter(control); break; } case 'x': { Advance(2); uc32 value; if (ParseHexEscape(2, &value)) { - builder.AddCharacter(value); + builder->AddCharacter(value); } else { - builder.AddCharacter('x'); + builder->AddCharacter('x'); } break; } @@ -4003,15 +4101,15 @@ RegExpTree* RegExpParser::ParseDisjunction() { Advance(2); uc32 value; if (ParseHexEscape(4, &value)) { - builder.AddCharacter(value); + builder->AddCharacter(value); } else { - builder.AddCharacter('u'); + builder->AddCharacter('u'); } break; } default: // Identity escape. 
- builder.AddCharacter(Next()); + builder->AddCharacter(Next()); Advance(2); break; } @@ -4024,12 +4122,11 @@ RegExpTree* RegExpParser::ParseDisjunction() { // fallthrough } default: - builder.AddCharacter(current()); + builder->AddCharacter(current()); Advance(); break; } // end switch(current()) - has_read_atom: int min; int max; switch (current()) { @@ -4071,7 +4168,7 @@ RegExpTree* RegExpParser::ParseDisjunction() { is_greedy = false; Advance(); } - builder.AddQuantifierToAtom(min, max, is_greedy); + builder->AddQuantifierToAtom(min, max, is_greedy); } } @@ -4382,73 +4479,6 @@ uc32 RegExpParser::ParseClassCharacterEscape() { } -RegExpTree* RegExpParser::ParseGroup() { - ASSERT_EQ(current(), '('); - char type = '('; - Advance(); - if (current() == '?') { - switch (Next()) { - case ':': case '=': case '!': - type = Next(); - Advance(2); - break; - default: - ReportError(CStrVector("Invalid group") CHECK_FAILED); - break; - } - } else { - if (captures_ == NULL) { - captures_ = new ZoneList<RegExpCapture*>(2); - } - if (captures_started() >= kMaxCaptures) { - ReportError(CStrVector("Too many captures") CHECK_FAILED); - } - captures_->Add(NULL); - } - int capture_index = captures_started(); - RegExpTree* body = ParseDisjunction(CHECK_FAILED); - if (current() != ')') { - ReportError(CStrVector("Unterminated group") CHECK_FAILED); - } - Advance(); - - int end_capture_index = captures_started(); - if (type == '!') { - // Captures inside a negative lookahead are never available outside it. - for (int i = capture_index; i < end_capture_index; i++) { - RegExpCapture* capture = captures_->at(i); - ASSERT(capture != NULL); - capture->set_available(CAPTURE_PERMANENTLY_UNREACHABLE); - } - } else { - // Captures temporarily unavailable because they are in different - // alternatives are all available after the disjunction. - for (int i = capture_index; i < end_capture_index; i++) { - RegExpCapture* capture = captures_->at(i); - ASSERT(capture != NULL); - if (capture->available() == CAPTURE_UNREACHABLE) { - capture->set_available(CAPTURE_AVAILABLE); - } - } - } - - if (type == '(') { - RegExpCapture* capture = new RegExpCapture(body, capture_index); - captures_->at(capture_index - 1) = capture; - return capture; - } else if (type == ':') { - return body; - } else { - ASSERT(type == '=' || type == '!'); - bool is_positive = (type == '='); - return new RegExpLookahead(body, - is_positive, - end_capture_index - capture_index, - capture_index); - } -} - - CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) { ASSERT_EQ(0, *char_class); uc32 first = current(); diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index acef74cc5..92d72f852 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -561,6 +561,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { sample.sp = mcontext.mc_esp; sample.fp = mcontext.mc_ebp; #endif + active_sampler_->SampleStack(&sample); } // We always sample the VM state. diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index 39495ab96..bccf9e6ae 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -639,6 +639,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { sample.fp = mcontext.arm_fp; #endif #endif + active_sampler_->SampleStack(&sample); } // We always sample the VM state. 
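The FreeBSD and Linux hunks above add the previously missing call to `active_sampler_->SampleStack(&sample)`, so the registers pulled out of the signal context now actually reach the profiler; the platform.h hunk further down introduces `SampleStack` as a pure virtual hook on `Sampler`. The platform-macos.cc diff that follows goes further and replaces the SIGPROF handler with a dedicated sampler thread that suspends the profiled thread, reads its register state through Mach, and resumes it. Below is a rough sketch of that suspend/sample/resume loop, for orientation only: `TickSample` and `RecordSample` are stand-ins rather than V8's types, the register names assume IA-32 with `__DARWIN_UNIX03` headers, and error handling is trimmed.

```cpp
#include <mach/mach.h>
#include <unistd.h>

struct TickSample { unsigned pc, sp, fp; };   // stand-in for V8's TickSample

void RecordSample(const TickSample& sample);  // stand-in for SampleStack/Tick

void SamplerLoop(thread_act_t profiled_thread, volatile bool* active,
                 int interval_ms) {
  while (*active) {  // loop until the sampler is disengaged
    if (thread_suspend(profiled_thread) == KERN_SUCCESS) {
      i386_thread_state_t state;
      mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
      if (thread_get_state(profiled_thread, i386_THREAD_STATE,
                           reinterpret_cast<natural_t*>(&state),
                           &count) == KERN_SUCCESS) {
        TickSample sample;
        sample.pc = state.__eip;  // program counter
        sample.sp = state.__esp;  // stack pointer
        sample.fp = state.__ebp;  // frame pointer
        RecordSample(sample);     // record while the target is still stopped
      }
      thread_resume(profiled_thread);
    }
    usleep(interval_ms * 1000);   // wait out the sampling interval
  }
}
```

Reading the state strictly between `thread_suspend` and `thread_resume` is what makes it safe to inspect another thread's registers; the platform-win32.cc hunk below applies the same pattern with `SuspendThread`/`GetThreadContext`/`ResumeThread`, and now also checks both calls' return values before trusting the sample.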
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 5a0eae253..880931e04 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -38,6 +38,7 @@ #include <pthread.h> #include <semaphore.h> #include <signal.h> +#include <mach/mach.h> #include <mach/semaphore.h> #include <mach/task.h> #include <sys/time.h> @@ -475,63 +476,94 @@ Semaphore* OS::CreateSemaphore(int count) { #ifdef ENABLE_LOGGING_AND_PROFILING -static Sampler* active_sampler_ = NULL; - -static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { - USE(info); - if (signal != SIGPROF) return; - if (active_sampler_ == NULL) return; - - TickSample sample; +class Sampler::PlatformData : public Malloced { + public: + explicit PlatformData(Sampler* sampler) + : sampler_(sampler), + task_self_(mach_task_self()), + profiled_thread_(0), + sampler_thread_(0) { + } - // If profiling, we extract the current pc and sp. - if (active_sampler_->IsProfiling()) { - // Extracting the sample from the context is extremely machine dependent. - ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context); - mcontext_t& mcontext = ucontext->uc_mcontext; + Sampler* sampler_; + // Note: for profiled_thread_ Mach primitives are used instead of PThread's + // because the latter doesn't provide thread manipulation primitives required. + // For details, consult "Mac OS X Internals" book, Section 7.3. + mach_port_t task_self_; + thread_act_t profiled_thread_; + pthread_t sampler_thread_; + + // Sampler thread handler. + void Runner() { + // Loop until the sampler is disengaged. + while (sampler_->IsActive()) { + TickSample sample; + + // If profiling, we record the pc and sp of the profiled thread. + if (sampler_->IsProfiling() + && KERN_SUCCESS == thread_suspend(profiled_thread_)) { +#if V8_HOST_ARCH_X64 + thread_state_flavor_t flavor = x86_THREAD_STATE64; + x86_thread_state64_t state; + mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT; +#elif V8_HOST_ARCH_IA32 + thread_state_flavor_t flavor = i386_THREAD_STATE; + i386_thread_state_t state; + mach_msg_type_number_t count = i386_THREAD_STATE_COUNT; +#else +#error Unsupported Mac OS X host architecture. +#endif // V8_TARGET_ARCH_IA32 + if (thread_get_state(profiled_thread_, + flavor, + reinterpret_cast<natural_t*>(&state), + &count) == KERN_SUCCESS) { #if V8_HOST_ARCH_X64 - UNIMPLEMENTED(); - USE(mcontext); - sample.pc = 0; - sample.sp = 0; - sample.fp = 0; + UNIMPLEMENTED(); + sample.pc = 0; + sample.sp = 0; + sample.fp = 0; #elif V8_HOST_ARCH_IA32 #if __DARWIN_UNIX03 - sample.pc = mcontext->__ss.__eip; - sample.sp = mcontext->__ss.__esp; - sample.fp = mcontext->__ss.__ebp; + sample.pc = state.__eip; + sample.sp = state.__esp; + sample.fp = state.__ebp; #else // !__DARWIN_UNIX03 - sample.pc = mcontext->ss.eip; - sample.sp = mcontext->ss.esp; - sample.fp = mcontext->ss.ebp; + sample.pc = state.eip; + sample.sp = state.esp; + sample.fp = state.ebp; #endif // __DARWIN_UNIX03 #else #error Unsupported Mac OS X host architecture. -#endif // V8_TARGET_ARCH_IA32 +#endif // V8_HOST_ARCH_IA32 + sampler_->SampleStack(&sample); + } + thread_resume(profiled_thread_); + } + + // We always sample the VM state. + sample.state = Logger::state(); + // Invoke tick handler with program counter and stack pointer. + sampler_->Tick(&sample); + + // Wait until next sampling. + usleep(sampler_->interval_ * 1000); + } } +}; - // We always sample the VM state. 
- sample.state = Logger::state(); - active_sampler_->Tick(&sample); +// Entry point for sampler thread. +static void* SamplerEntry(void* arg) { + Sampler::PlatformData* data = + reinterpret_cast<Sampler::PlatformData*>(arg); + data->Runner(); + return 0; } -class Sampler::PlatformData : public Malloced { - public: - PlatformData() { - signal_handler_installed_ = false; - } - - bool signal_handler_installed_; - struct sigaction old_signal_handler_; - struct itimerval old_timer_value_; -}; - - Sampler::Sampler(int interval, bool profiling) : interval_(interval), profiling_(profiling), active_(false) { - data_ = new PlatformData(); + data_ = new PlatformData(this); } @@ -541,43 +573,40 @@ Sampler::~Sampler() { void Sampler::Start() { - // There can only be one active sampler at the time on POSIX - // platforms. - if (active_sampler_ != NULL) return; - - // Request profiling signals. - struct sigaction sa; - sa.sa_sigaction = ProfilerSignalHandler; - sigemptyset(&sa.sa_mask); - sa.sa_flags = SA_SIGINFO; - if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return; - data_->signal_handler_installed_ = true; - - // Set the itimer to generate a tick for each interval. - itimerval itimer; - itimer.it_interval.tv_sec = interval_ / 1000; - itimer.it_interval.tv_usec = (interval_ % 1000) * 1000; - itimer.it_value.tv_sec = itimer.it_interval.tv_sec; - itimer.it_value.tv_usec = itimer.it_interval.tv_usec; - setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_); - - // Set this sampler as the active sampler. - active_sampler_ = this; + // If we are profiling, we need to be able to access the calling + // thread. + if (IsProfiling()) { + data_->profiled_thread_ = mach_thread_self(); + } + + // Create sampler thread with high priority. + // According to POSIX spec, when SCHED_FIFO policy is used, a thread + // runs until it exits or blocks. + pthread_attr_t sched_attr; + sched_param fifo_param; + pthread_attr_init(&sched_attr); + pthread_attr_setinheritsched(&sched_attr, PTHREAD_EXPLICIT_SCHED); + pthread_attr_setschedpolicy(&sched_attr, SCHED_FIFO); + fifo_param.sched_priority = sched_get_priority_max(SCHED_FIFO); + pthread_attr_setschedparam(&sched_attr, &fifo_param); + active_ = true; + pthread_create(&data_->sampler_thread_, &sched_attr, SamplerEntry, data_); } void Sampler::Stop() { - // Restore old signal handler - if (data_->signal_handler_installed_) { - setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL); - sigaction(SIGPROF, &data_->old_signal_handler_, 0); - data_->signal_handler_installed_ = false; - } - - // This sampler is no longer the active sampler. - active_sampler_ = NULL; + // Setting active to false triggers termination of the sampler + // thread. active_ = false; + + // Wait for sampler thread to terminate. + pthread_join(data_->sampler_thread_, NULL); + + // Deallocate Mach port for thread. + if (IsProfiling()) { + mach_port_deallocate(data_->task_self_, data_->profiled_thread_); + } } #endif // ENABLE_LOGGING_AND_PROFILING diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index 1b0f9b24d..a8a624321 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -1776,32 +1776,30 @@ class Sampler::PlatformData : public Malloced { TickSample sample; // If profiling, we record the pc and sp of the profiled thread. - if (sampler_->IsProfiling()) { - // Pause the profiled thread and get its context.
- SuspendThread(profiled_thread_); + if (sampler_->IsProfiling() + && SuspendThread(profiled_thread_) != (DWORD)-1) { context.ContextFlags = CONTEXT_FULL; - GetThreadContext(profiled_thread_, &context); - // Invoke tick handler with program counter and stack pointer. + if (GetThreadContext(profiled_thread_, &context) != 0) { #if V8_HOST_ARCH_X64 - UNIMPLEMENTED(); - sample.pc = context.Rip; - sample.sp = context.Rsp; - sample.fp = context.Rbp; + UNIMPLEMENTED(); + sample.pc = context.Rip; + sample.sp = context.Rsp; + sample.fp = context.Rbp; #else - sample.pc = context.Eip; - sample.sp = context.Esp; - sample.fp = context.Ebp; + sample.pc = context.Eip; + sample.sp = context.Esp; + sample.fp = context.Ebp; #endif + sampler_->SampleStack(&sample); + } + ResumeThread(profiled_thread_); } // We always sample the VM state. sample.state = Logger::state(); + // Invoke tick handler with program counter and stack pointer. sampler_->Tick(&sample); - if (sampler_->IsProfiling()) { - ResumeThread(profiled_thread_); - } - // Wait until next sampling. Sleep(sampler_->interval_); } diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index b5123c5ac..11a1e79bc 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -510,6 +510,9 @@ class Sampler { explicit Sampler(int interval, bool profiling); virtual ~Sampler(); + // Performs stack sampling. + virtual void SampleStack(TickSample* sample) = 0; + // This method is called for each sampling period with the current // program counter. virtual void Tick(TickSample* sample) = 0; @@ -527,8 +530,8 @@ class Sampler { class PlatformData; private: - int interval_; - bool profiling_; + const int interval_; + const bool profiling_; bool active_; PlatformData* data_; // Platform specific data. DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler); diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc index 2915c4abc..caa739756 100644 --- a/deps/v8/src/property.cc +++ b/deps/v8/src/property.cc @@ -31,20 +31,6 @@ namespace v8 { namespace internal { -void DescriptorWriter::Write(Descriptor* desc) { - ASSERT(desc->key_->IsSymbol()); - descriptors_->Set(pos_, desc); - advance(); -} - - -void DescriptorWriter::WriteFrom(DescriptorReader* reader) { - Descriptor desc; - reader->Get(&desc); - Write(&desc); -} - - #ifdef DEBUG void LookupResult::Print() { if (!IsValid()) { diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h index edab97ab0..1869719f1 100644 --- a/deps/v8/src/property.h +++ b/deps/v8/src/property.h @@ -95,8 +95,6 @@ class Descriptor BASE_EMBEDDED { value_(value), details_(attributes, type, index) { } - friend class DescriptorWriter; - friend class DescriptorReader; friend class DescriptorArray; }; @@ -230,6 +228,7 @@ class LookupResult BASE_EMBEDDED { bool IsReadOnly() { return details_.IsReadOnly(); } bool IsDontDelete() { return details_.IsDontDelete(); } bool IsDontEnum() { return details_.IsDontEnum(); } + bool IsDeleted() { return details_.IsDeleted(); } bool IsValid() { return lookup_type_ != NOT_FOUND; } bool IsNotFound() { return lookup_type_ == NOT_FOUND; } @@ -256,8 +255,14 @@ class LookupResult BASE_EMBEDDED { switch (type()) { case FIELD: return holder()->FastPropertyAt(GetFieldIndex()); - case NORMAL: - return holder()->property_dictionary()->ValueAt(GetDictionaryEntry()); + case NORMAL: { + Object* value; + value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry()); + if (holder()->IsGlobalObject()) { + value = JSGlobalPropertyCell::cast(value)->value(); + } + return value; + } case CONSTANT_FUNCTION: return 
GetConstantFunction(); default: @@ -306,7 +311,7 @@ class LookupResult BASE_EMBEDDED { } // In the dictionary case, the data is held in the value field. ASSERT(lookup_type_ == DICTIONARY_TYPE); - return holder()->property_dictionary()->ValueAt(GetDictionaryEntry()); + return holder()->GetNormalizedProperty(this); } private: @@ -317,92 +322,6 @@ class LookupResult BASE_EMBEDDED { }; -// The DescriptorStream is an abstraction for iterating over a map's -// instance descriptors. -class DescriptorStream BASE_EMBEDDED { - public: - explicit DescriptorStream(DescriptorArray* descriptors, int pos) { - descriptors_ = descriptors; - pos_ = pos; - limit_ = descriptors_->number_of_descriptors(); - } - - // Tells whether we have reached the end of the steam. - bool eos() { return pos_ >= limit_; } - - int next_position() { return pos_ + 1; } - void advance() { pos_ = next_position(); } - - protected: - DescriptorArray* descriptors_; - int pos_; // Current position. - int limit_; // Limit for position. -}; - - -class DescriptorReader: public DescriptorStream { - public: - explicit DescriptorReader(DescriptorArray* descriptors, int pos = 0) - : DescriptorStream(descriptors, pos) {} - - String* GetKey() { return descriptors_->GetKey(pos_); } - Object* GetValue() { return descriptors_->GetValue(pos_); } - PropertyDetails GetDetails() { - return PropertyDetails(descriptors_->GetDetails(pos_)); - } - - int GetFieldIndex() { return Descriptor::IndexFromValue(GetValue()); } - - bool IsDontEnum() { return GetDetails().IsDontEnum(); } - - PropertyType type() { return GetDetails().type(); } - - // Tells whether the type is a transition. - bool IsTransition() { - PropertyType t = type(); - ASSERT(t != INTERCEPTOR); - return t == MAP_TRANSITION || t == CONSTANT_TRANSITION; - } - - bool IsNullDescriptor() { - return type() == NULL_DESCRIPTOR; - } - - bool IsProperty() { - return type() < FIRST_PHANTOM_PROPERTY_TYPE; - } - - JSFunction* GetConstantFunction() { return JSFunction::cast(GetValue()); } - - AccessorDescriptor* GetCallbacks() { - ASSERT(type() == CALLBACKS); - Proxy* p = Proxy::cast(GetCallbacksObject()); - return reinterpret_cast<AccessorDescriptor*>(p->proxy()); - } - - Object* GetCallbacksObject() { - ASSERT(type() == CALLBACKS); - return GetValue(); - } - - bool Equals(String* name) { return name->Equals(GetKey()); } - - void Get(Descriptor* desc) { - descriptors_->Get(pos_, desc); - } -}; - -class DescriptorWriter: public DescriptorStream { - public: - explicit DescriptorWriter(DescriptorArray* descriptors) - : DescriptorStream(descriptors, 0) {} - - // Append a descriptor to this stream. - void Write(Descriptor* desc); - // Read a descriptor from the reader and append it to this stream. - void WriteFrom(DescriptorReader* reader); -}; - } } // namespace v8::internal #endif // V8_PROPERTY_H_ diff --git a/deps/v8/src/regexp-delay.js b/deps/v8/src/regexp-delay.js index 84918637e..14c364457 100644 --- a/deps/v8/src/regexp-delay.js +++ b/deps/v8/src/regexp-delay.js @@ -103,7 +103,7 @@ function DoConstructRegExp(object, pattern, flags, isConstructorCall) { function RegExpConstructor(pattern, flags) { - if (%IsConstructCall()) { + if (%_IsConstructCall()) { DoConstructRegExp(this, pattern, flags, true); } else { // RegExp : Called as function; see ECMA-262, section 15.10.3.1. 
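The property.h change just above (LookupResult::GetValue unwrapping a JSGlobalPropertyCell for global holders) and several runtime.cc hunks below follow from the same scheme: a global object's property dictionary no longer stores values directly, it stores one heap cell per name, and readers dereference the cell. A toy model of the indirection, with `Cell`, `Global` and the method names as illustrative stand-ins rather than V8's API:

```cpp
#include <string>
#include <unordered_map>

struct Cell { int value; };  // stands in for JSGlobalPropertyCell

class Global {               // stands in for GlobalObject
  // The dictionary maps a name to a cell, not to the value itself.
  std::unordered_map<std::string, Cell*> dict_;

 public:
  Cell* EnsureCell(const std::string& name) {  // cf. EnsurePropertyCell
    Cell*& cell = dict_[name];
    if (cell == nullptr) cell = new Cell{0};
    return cell;
  }

  int Load(const std::string& name) {  // slow path: hash lookup, then unwrap
    return EnsureCell(name)->value;
  }
};

// Code that has looked a name up once can cache the Cell* and reload
// cell->value directly on later accesses, skipping the dictionary walk;
// stores go through the same cell, so the cached pointer stays valid.
int CachedLoad(Cell* cached_cell) { return cached_cell->value; }
```

The read side is visible in the Runtime_KeyedGetProperty hunk below: a dictionary hit on a global object casts the stored slot to JSGlobalPropertyCell and returns its value unless that value is the hole.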
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index dcff28bc3..350d391bf 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -168,7 +168,7 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) { } } } else { - Dictionary* element_dictionary = copy->element_dictionary(); + NumberDictionary* element_dictionary = copy->element_dictionary(); int capacity = element_dictionary->Capacity(); for (int i = 0; i < capacity; i++) { Object* k = element_dictionary->KeyAt(i); @@ -413,48 +413,6 @@ static Object* Runtime_ClassOf(Arguments args) { } -static Object* Runtime_HasStringClass(Arguments args) { - return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::String_symbol())); -} - - -static Object* Runtime_HasDateClass(Arguments args) { - return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Date_symbol())); -} - - -static Object* Runtime_HasArrayClass(Arguments args) { - return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Array_symbol())); -} - - -static Object* Runtime_HasFunctionClass(Arguments args) { - return Heap::ToBoolean( - args[0]->HasSpecificClassOf(Heap::function_class_symbol())); -} - - -static Object* Runtime_HasNumberClass(Arguments args) { - return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Number_symbol())); -} - - -static Object* Runtime_HasBooleanClass(Arguments args) { - return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::Boolean_symbol())); -} - - -static Object* Runtime_HasArgumentsClass(Arguments args) { - return Heap::ToBoolean( - args[0]->HasSpecificClassOf(Heap::Arguments_symbol())); -} - - -static Object* Runtime_HasRegExpClass(Arguments args) { - return Heap::ToBoolean(args[0]->HasSpecificClassOf(Heap::RegExp_symbol())); -} - - static Object* Runtime_IsInPrototypeChain(Arguments args) { NoHandleAllocation ha; ASSERT(args.length() == 2); @@ -618,9 +576,6 @@ static Object* Runtime_DeclareGlobals(Arguments args) { // property as read-only, so we don't either. PropertyAttributes base = is_eval ? NONE : DONT_DELETE; - // Only optimize the object if we intend to add more than 5 properties. - OptimizedObjectForAddingMultipleProperties ba(global, pairs->length()/2 > 5); - // Traverse the name/value pairs and set the properties. int length = pairs->length(); for (int i = 0; i < length; i += 2) { @@ -814,17 +769,23 @@ static Object* Runtime_InitializeVarGlobal(Arguments args) { PropertyAttributes attributes = DONT_DELETE; // Lookup the property locally in the global object. If it isn't - // there, we add the property and take special precautions to always - // add it as a local property even in case of callbacks in the - // prototype chain (this rules out using SetProperty). - // We have IgnoreAttributesAndSetLocalProperty for this. + // there, there is a property with this name in the prototype chain. + // We follow Safari and Firefox behavior and only set the property + // locally if there is an explicit initialization value that we have + // to assign to the property. When adding the property we take + // special precautions to always add it as a local property even in + // case of callbacks in the prototype chain (this rules out using + // SetProperty). We have IgnoreAttributesAndSetLocalProperty for + // this. LookupResult lookup; global->LocalLookup(*name, &lookup); if (!lookup.IsProperty()) { - Object* value = (assign) ? 
args[1] : Heap::undefined_value(); - return global->IgnoreAttributesAndSetLocalProperty(*name, - value, - attributes); + if (assign) { + return global->IgnoreAttributesAndSetLocalProperty(*name, + args[1], + attributes); + } + return Heap::undefined_value(); } // Determine if this is a redeclaration of something read-only. @@ -932,10 +893,8 @@ static Object* Runtime_InitializeConstGlobal(Arguments args) { properties->set(index, *value); } } else if (type == NORMAL) { - Dictionary* dictionary = global->property_dictionary(); - int entry = lookup.GetDictionaryEntry(); - if (dictionary->ValueAt(entry)->IsTheHole()) { - dictionary->ValueAtPut(entry, *value); + if (global->GetNormalizedProperty(&lookup)->IsTheHole()) { + global->SetNormalizedProperty(&lookup, *value); } } else { // Ignore re-initialization of constants that have already been @@ -1025,10 +984,8 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) { properties->set(index, *value); } } else if (type == NORMAL) { - Dictionary* dictionary = context_ext->property_dictionary(); - int entry = lookup.GetDictionaryEntry(); - if (dictionary->ValueAt(entry)->IsTheHole()) { - dictionary->ValueAtPut(entry, *value); + if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) { + context_ext->SetNormalizedProperty(&lookup, *value); } } else { // We should not reach here. Any real, named property should be @@ -1059,16 +1016,16 @@ static Object* Runtime_RegExpExec(Arguments args) { ASSERT(args.length() == 4); CONVERT_ARG_CHECKED(JSRegExp, regexp, 0); CONVERT_ARG_CHECKED(String, subject, 1); - // Due to the way the JS files are constructed this must be less than the + // Due to the way the JS calls are constructed this must be less than the // length of a string, i.e. it is always a Smi. We check anyway for security. - CONVERT_CHECKED(Smi, index, args[2]); + CONVERT_SMI_CHECKED(index, args[2]); CONVERT_ARG_CHECKED(JSArray, last_match_info, 3); RUNTIME_ASSERT(last_match_info->HasFastElements()); - RUNTIME_ASSERT(index->value() >= 0); - RUNTIME_ASSERT(index->value() <= subject->length()); + RUNTIME_ASSERT(index >= 0); + RUNTIME_ASSERT(index <= subject->length()); Handle<Object> result = RegExpImpl::Exec(regexp, subject, - index->value(), + index, last_match_info); if (result.is_null()) return Failure::Exception(); return *result; @@ -1156,6 +1113,21 @@ static Object* Runtime_FunctionGetScriptSourcePosition(Arguments args) { } +static Object* Runtime_FunctionGetPositionForOffset(Arguments args) { + ASSERT(args.length() == 2); + + CONVERT_CHECKED(JSFunction, fun, args[0]); + CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]); + + Code* code = fun->code(); + RUNTIME_ASSERT(0 <= offset && offset < code->Size()); + + Address pc = code->address() + offset; + return Smi::FromInt(fun->code()->SourcePosition(pc)); +} + + + static Object* Runtime_FunctionSetInstanceClassName(Arguments args) { NoHandleAllocation ha; ASSERT(args.length() == 2); @@ -2626,23 +2598,25 @@ static Object* Runtime_KeyedGetProperty(Arguments args) { Object* value = receiver->FastPropertyAt(offset); return value->IsTheHole() ? Heap::undefined_value() : value; } - // Lookup cache miss. Perform lookup and update the cache if - // appropriate. + // Lookup cache miss. Perform lookup and update the cache if appropriate. 
LookupResult result; receiver->LocalLookup(key, &result); if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) { int offset = result.GetFieldIndex(); KeyedLookupCache::Update(receiver_map, key, offset); - Object* value = receiver->FastPropertyAt(offset); - return value->IsTheHole() ? Heap::undefined_value() : value; + return receiver->FastPropertyAt(offset); } } else { // Attempt dictionary lookup. - Dictionary* dictionary = receiver->property_dictionary(); - int entry = dictionary->FindStringEntry(key); - if ((entry != DescriptorArray::kNotFound) && + StringDictionary* dictionary = receiver->property_dictionary(); + int entry = dictionary->FindEntry(key); + if ((entry != StringDictionary::kNotFound) && (dictionary->DetailsAt(entry).type() == NORMAL)) { - return dictionary->ValueAt(entry); + Object* value = dictionary->ValueAt(entry); + if (!receiver->IsGlobalObject()) return value; + value = JSGlobalPropertyCell::cast(value)->value(); + if (!value->IsTheHole()) return value; + // If value is the hole do the general lookup. } } } @@ -4179,16 +4153,21 @@ static Object* Runtime_Math_pow(Arguments args) { } CONVERT_DOUBLE_CHECKED(y, args[1]); - if (y == 0.5) { - // It's not uncommon to use Math.pow(x, 0.5) to compute the square - // root of a number. To speed up such computations, we explictly - // check for this case and use the sqrt() function which is faster - // than pow(). - return Heap::AllocateHeapNumber(sqrt(x)); - } else if (y == -0.5) { - // Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5). - return Heap::AllocateHeapNumber(1.0 / sqrt(x)); - } else if (y == 0) { + + if (!isinf(x)) { + if (y == 0.5) { + // It's not uncommon to use Math.pow(x, 0.5) to compute the + // square root of a number. To speed up such computations, we + // explicitly check for this case and use the sqrt() function + // which is faster than pow(). + return Heap::AllocateHeapNumber(sqrt(x)); + } else if (y == -0.5) { + // Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5). + return Heap::AllocateHeapNumber(1.0 / sqrt(x)); + } + } + + if (y == 0) { return Smi::FromInt(1); } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) { return Heap::nan_value(); @@ -4337,7 +4316,7 @@ static Object* Runtime_NewObject(Arguments args) { #ifdef ENABLE_DEBUGGER_SUPPORT // Handle stepping into constructors if step into is active. if (Debug::StepInActive()) { - Debug::HandleStepIn(function, 0, true); + Debug::HandleStepIn(function, Handle<Object>::null(), 0, true); } #endif @@ -4522,17 +4501,25 @@ static Object* Runtime_LookupContext(Arguments args) { // compiler to do the right thing. // // TODO(1236026): This is a non-portable hack that should be removed. -// TODO(x64): Definitely! -typedef uint64_t ObjectPair; +#ifdef V8_HOST_ARCH_64_BIT +// Tested with GCC, not with MSVC. +struct ObjectPair { + Object* x; + Object* y; +}; static inline ObjectPair MakePair(Object* x, Object* y) { -#if V8_HOST_ARCH_64_BIT - UNIMPLEMENTED(); - return 0; + ObjectPair result = {x, y}; + return result; // Pointers x and y returned in rax and rdx, in AMD-x64-abi.
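// (Returning the two pointers in a struct works because the System V AMD64
// calling convention returns a 16-byte integer-class struct in the rax:rdx
// register pair; the 32-bit fallback below instead packs the two 32-bit
// pointers into a uint64_t, which the ia32 ABI likewise returns in eax:edx.)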
+} #else +typedef uint64_t ObjectPair; +static inline ObjectPair MakePair(Object* x, Object* y) { return reinterpret_cast<uint32_t>(x) | (reinterpret_cast<ObjectPair>(y) << 32); -#endif } +#endif + + static inline Object* Unhole(Object* x, PropertyAttributes attributes) { @@ -5152,8 +5139,8 @@ class ArrayConcatVisitor { storage_->set(index, *elm); } else { - Handle<Dictionary> dict = Handle<Dictionary>::cast(storage_); - Handle<Dictionary> result = + Handle<NumberDictionary> dict = Handle<NumberDictionary>::cast(storage_); + Handle<NumberDictionary> result = Factory::DictionaryAtNumberPut(dict, index, elm); if (!result.is_identical_to(dict)) storage_ = result; @@ -5201,7 +5188,7 @@ static uint32_t IterateElements(Handle<JSObject> receiver, } } else { - Handle<Dictionary> dict(receiver->element_dictionary()); + Handle<NumberDictionary> dict(receiver->element_dictionary()); uint32_t capacity = dict->Capacity(); for (uint32_t j = 0; j < capacity; j++) { Handle<Object> k(dict->KeyAt(j)); @@ -5355,7 +5342,7 @@ static Object* Runtime_ArrayConcat(Arguments args) { uint32_t at_least_space_for = estimate_nof_elements + (estimate_nof_elements >> 2); storage = Handle<FixedArray>::cast( - Factory::NewDictionary(at_least_space_for)); + Factory::NewNumberDictionary(at_least_space_for)); } Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length)); @@ -5418,7 +5405,7 @@ static Object* Runtime_EstimateNumberOfElements(Arguments args) { CONVERT_CHECKED(JSArray, array, args[0]); HeapObject* elements = array->elements(); if (elements->IsDictionary()) { - return Smi::FromInt(Dictionary::cast(elements)->NumberOfElements()); + return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements()); } else { return array->length(); } @@ -5560,15 +5547,12 @@ static Object* DebugLookupResultValue(Object* receiver, String* name, bool* caught_exception) { Object* value; switch (result->type()) { - case NORMAL: { - Dictionary* dict = - JSObject::cast(result->holder())->property_dictionary(); - value = dict->ValueAt(result->GetDictionaryEntry()); + case NORMAL: + value = result->holder()->GetNormalizedProperty(result); if (value->IsTheHole()) { return Heap::undefined_value(); } return value; - } case FIELD: value = JSObject::cast( @@ -7408,6 +7392,81 @@ static Object* Runtime_GetScript(Arguments args) { } +// Determines whether the given stack frame should be displayed in +// a stack trace. The caller is the error constructor that asked +// for the stack trace to be collected. The first time a construct +// call to this function is encountered it is skipped. The seen_caller +// in/out parameter is used to remember if the caller has been seen +// yet. +static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller, + bool* seen_caller) { + // Only display JS frames. + if (!raw_frame->is_java_script()) + return false; + JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); + Object* raw_fun = frame->function(); + // Not sure when this can happen but skip it just in case. + if (!raw_fun->IsJSFunction()) + return false; + if ((raw_fun == caller) && !(*seen_caller) && frame->IsConstructor()) { + *seen_caller = true; + return false; + } + // Skip the most obvious builtin calls. Some builtin calls (such as + // Number.ADD which is invoked using 'call') are very difficult to + // recognize so we're leaving them in for now. + return !frame->receiver()->IsJSBuiltinsObject(); +} + + +// Collect the raw data for a stack trace. 
Returns an array of three +// element segments each containing a receiver, function and native +// code offset. +static Object* Runtime_CollectStackTrace(Arguments args) { + ASSERT_EQ(args.length(), 2); + Object* caller = args[0]; + CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]); + + HandleScope scope; + + int initial_size = limit < 10 ? limit : 10; + Handle<JSArray> result = Factory::NewJSArray(initial_size * 3); + + StackFrameIterator iter; + bool seen_caller = false; + int cursor = 0; + int frames_seen = 0; + while (!iter.done() && frames_seen < limit) { + StackFrame* raw_frame = iter.frame(); + if (ShowFrameInStackTrace(raw_frame, caller, &seen_caller)) { + frames_seen++; + JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); + Object* recv = frame->receiver(); + Object* fun = frame->function(); + Address pc = frame->pc(); + Address start = frame->code()->address(); + Smi* offset = Smi::FromInt(pc - start); + FixedArray* elements = result->elements(); + if (cursor + 2 < elements->length()) { + elements->set(cursor++, recv); + elements->set(cursor++, fun); + elements->set(cursor++, offset, SKIP_WRITE_BARRIER); + } else { + HandleScope scope; + SetElement(result, cursor++, Handle<Object>(recv)); + SetElement(result, cursor++, Handle<Object>(fun)); + SetElement(result, cursor++, Handle<Smi>(offset)); + } + } + iter.Advance(); + } + + result->set_length(Smi::FromInt(cursor), SKIP_WRITE_BARRIER); + + return *result; +} + + static Object* Runtime_Abort(Arguments args) { ASSERT(args.length() == 2); OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) + diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 15dd9b491..cdf21dcab 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -169,18 +169,12 @@ namespace internal { F(FunctionGetSourceCode, 1) \ F(FunctionGetScript, 1) \ F(FunctionGetScriptSourcePosition, 1) \ + F(FunctionGetPositionForOffset, 2) \ F(FunctionIsAPIFunction, 1) \ F(GetScript, 1) \ + F(CollectStackTrace, 2) \ \ F(ClassOf, 1) \ - F(HasDateClass, 1) \ - F(HasStringClass, 1) \ - F(HasArrayClass, 1) \ - F(HasFunctionClass, 1) \ - F(HasNumberClass, 1) \ - F(HasBooleanClass, 1) \ - F(HasArgumentsClass, 1) \ - F(HasRegExpClass, 1) \ F(SetCode, 2) \ \ F(CreateApiFunction, 1) \ diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js index df26b8894..789bfdb77 100644 --- a/deps/v8/src/runtime.js +++ b/deps/v8/src/runtime.js @@ -161,14 +161,31 @@ function ADD(x) { // Left operand (this) is already a string. function STRING_ADD_LEFT(y) { - if (!IS_STRING(y)) y = %ToString(%ToPrimitive(y, NO_HINT)); + if (!IS_STRING(y)) { + if (IS_STRING_WRAPPER(y)) { + y = %_ValueOf(y); + } else { + y = IS_NUMBER(y) + ? %NumberToString(y) + : %ToString(%ToPrimitive(y, NO_HINT)); + } + } return %StringAdd(this, y); } // Right operand (y) is already a string. function STRING_ADD_RIGHT(y) { - var x = IS_STRING(this) ? this : %ToString(%ToPrimitive(this, NO_HINT)); + var x = this; + if (!IS_STRING(x)) { + if (IS_STRING_WRAPPER(x)) { + x = %_ValueOf(x); + } else { + x = IS_NUMBER(x) + ? %NumberToString(x) + : %ToString(%ToPrimitive(x, NO_HINT)); + } + } return %StringAdd(x, y); } @@ -394,7 +411,7 @@ function APPLY_PREPARE(args) { // First check whether length is a positive Smi and args is an // array. This is the fast case. If this fails, we do the slow case // that takes care of more eventualities. 
- if (%_IsArray(args)) { + if (IS_ARRAY(args)) { length = args.length; if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) { return length; @@ -415,9 +432,7 @@ function APPLY_PREPARE(args) { } // Make sure the arguments list has the right type. - if (args != null && - !%HasArrayClass(args) && - !%HasArgumentsClass(args)) { + if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) { throw %MakeTypeError('apply_wrong_args', []); } diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index 88b1c66f3..78ed03518 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -71,28 +71,28 @@ static bool Match(void* key1, void* key2) { // Dummy constructor -LocalsMap::LocalsMap(bool gotta_love_static_overloading) : HashMap() {} +VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {} -LocalsMap::LocalsMap() : HashMap(Match, &LocalsMapAllocator, 8) {} -LocalsMap::~LocalsMap() {} +VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {} +VariableMap::~VariableMap() {} -Variable* LocalsMap::Declare(Scope* scope, - Handle<String> name, - Variable::Mode mode, - bool is_valid_LHS, - Variable::Kind kind) { +Variable* VariableMap::Declare(Scope* scope, + Handle<String> name, + Variable::Mode mode, + bool is_valid_lhs, + Variable::Kind kind) { HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true); if (p->value == NULL) { // The variable has not been declared yet -> insert it. ASSERT(p->key == name.location()); - p->value = new Variable(scope, name, mode, is_valid_LHS, kind); + p->value = new Variable(scope, name, mode, is_valid_lhs, kind); } return reinterpret_cast<Variable*>(p->value); } -Variable* LocalsMap::Lookup(Handle<String> name) { +Variable* VariableMap::Lookup(Handle<String> name) { HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false); if (p != NULL) { ASSERT(*reinterpret_cast<String**>(p->key) == *name); @@ -110,7 +110,7 @@ Variable* LocalsMap::Lookup(Handle<String> name) { // Dummy constructor Scope::Scope() : inner_scopes_(0), - locals_(false), + variables_(false), temps_(0), params_(0), dynamics_(NULL), @@ -168,27 +168,26 @@ void Scope::Initialize(bool inside_with) { // instead load them directly from the stack. Currently, the only // such parameter is 'this' which is passed on the stack when // invoking scripts - { Variable* var = - locals_.Declare(this, Factory::this_symbol(), Variable::VAR, - false, Variable::THIS); - var->rewrite_ = new Slot(var, Slot::PARAMETER, -1); - receiver_ = new VariableProxy(Factory::this_symbol(), true, false); - receiver_->BindTo(var); - } + Variable* var = + variables_.Declare(this, Factory::this_symbol(), Variable::VAR, + false, Variable::THIS); + var->rewrite_ = new Slot(var, Slot::PARAMETER, -1); + receiver_ = new VariableProxy(Factory::this_symbol(), true, false); + receiver_->BindTo(var); if (is_function_scope()) { // Declare 'arguments' variable which exists in all functions. - // Note that it may never be accessed, in which case it won't - // be allocated during variable allocation. - locals_.Declare(this, Factory::arguments_symbol(), Variable::VAR, - true, Variable::ARGUMENTS); + // Note that it might never be accessed, in which case it won't be + // allocated during variable allocation. 
+ variables_.Declare(this, Factory::arguments_symbol(), Variable::VAR, + true, Variable::ARGUMENTS); } } -Variable* Scope::LookupLocal(Handle<String> name) { - return locals_.Lookup(name); +Variable* Scope::LocalLookup(Handle<String> name) { + return variables_.Lookup(name); } @@ -196,7 +195,7 @@ Variable* Scope::Lookup(Handle<String> name) { for (Scope* scope = this; scope != NULL; scope = scope->outer_scope()) { - Variable* var = scope->LookupLocal(name); + Variable* var = scope->LocalLookup(name); if (var != NULL) return var; } return NULL; @@ -210,18 +209,25 @@ Variable* Scope::DeclareFunctionVar(Handle<String> name) { } -Variable* Scope::Declare(Handle<String> name, Variable::Mode mode) { +Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) { // DYNAMIC variables are introduces during variable allocation, // INTERNAL variables are allocated explicitly, and TEMPORARY // variables are allocated via NewTemporary(). ASSERT(mode == Variable::VAR || mode == Variable::CONST); - return locals_.Declare(this, name, mode, true, Variable::NORMAL); + return variables_.Declare(this, name, mode, true, Variable::NORMAL); +} + + +Variable* Scope::DeclareGlobal(Handle<String> name) { + ASSERT(is_global_scope()); + return variables_.Declare(this, name, Variable::DYNAMIC, true, + Variable::NORMAL); } void Scope::AddParameter(Variable* var) { ASSERT(is_function_scope()); - ASSERT(LookupLocal(var->name()) == var); + ASSERT(LocalLookup(var->name()) == var); params_.Add(var); } @@ -291,7 +297,9 @@ void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) { locals->Add(var); } } - for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) { + for (VariableMap::Entry* p = variables_.Start(); + p != NULL; + p = variables_.Next(p)) { Variable* var = reinterpret_cast<Variable*>(p->value); if (var->var_uses()->is_used()) { locals->Add(var); @@ -410,8 +418,8 @@ static void PrintVar(PrettyPrinter* printer, int indent, Variable* var) { } -static void PrintMap(PrettyPrinter* printer, int indent, LocalsMap* map) { - for (LocalsMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) { +static void PrintMap(PrettyPrinter* printer, int indent, VariableMap* map) { + for (VariableMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) { Variable* var = reinterpret_cast<Variable*>(p->value); PrintVar(printer, indent, var); } @@ -478,7 +486,7 @@ void Scope::Print(int n) { } Indent(n1, "// local vars\n"); - PrintMap(&printer, n1, &locals_); + PrintMap(&printer, n1, &variables_); Indent(n1, "// dynamic vars\n"); if (dynamics_ != NULL) { @@ -502,7 +510,7 @@ void Scope::Print(int n) { Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) { if (dynamics_ == NULL) dynamics_ = new DynamicScopePart(); - LocalsMap* map = dynamics_->GetMap(mode); + VariableMap* map = dynamics_->GetMap(mode); Variable* var = map->Lookup(name); if (var == NULL) { // Declare a new non-local. @@ -530,7 +538,7 @@ Variable* Scope::LookupRecursive(Handle<String> name, bool guess = scope_calls_eval_; // Try to find the variable in this scope. - Variable* var = LookupLocal(name); + Variable* var = LocalLookup(name); if (var != NULL) { // We found a variable. If this is not an inner lookup, we are done. @@ -621,8 +629,7 @@ void Scope::ResolveVariable(Scope* global_scope, scope_calls_eval_ || outer_scope_calls_eval_)) { // We must have a global variable. 
ASSERT(global_scope != NULL); - var = new Variable(global_scope, proxy->name(), - Variable::DYNAMIC, true, Variable::NORMAL); + var = global_scope->DeclareGlobal(proxy->name()); } else if (scope_inside_with_) { // If we are inside a with statement we give up and look up @@ -706,26 +713,26 @@ bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval, bool Scope::MustAllocate(Variable* var) { - // Give var a read/write use if there is a chance it might be - // accessed via an eval() call, or if it is a global variable. - // This is only possible if the variable has a visible name. + // Give var a read/write use if there is a chance it might be accessed + // via an eval() call. This is only possible if the variable has a + // visible name. if ((var->is_this() || var->name()->length() > 0) && (var->is_accessed_from_inner_scope_ || scope_calls_eval_ || inner_scope_calls_eval_ || - scope_contains_with_ || var->is_global())) { + scope_contains_with_)) { var->var_uses()->RecordAccess(1); } - return var->var_uses()->is_used(); + // Global variables do not need to be allocated. + return !var->is_global() && var->var_uses()->is_used(); } bool Scope::MustAllocateInContext(Variable* var) { // If var is accessed from an inner scope, or if there is a - // possibility that it might be accessed from the current or - // an inner scope (through an eval() call), it must be allocated - // in the context. - // Exceptions: Global variables and temporary variables must - // never be allocated in the (FixedArray part of the) context. + // possibility that it might be accessed from the current or an inner + // scope (through an eval() call), it must be allocated in the + // context. Exception: temporary variables are not allocated in the + // context. return var->mode() != Variable::TEMPORARY && (var->is_accessed_from_inner_scope_ || @@ -755,7 +762,7 @@ void Scope::AllocateHeapSlot(Variable* var) { void Scope::AllocateParameterLocals() { ASSERT(is_function_scope()); - Variable* arguments = LookupLocal(Factory::arguments_symbol()); + Variable* arguments = LocalLookup(Factory::arguments_symbol()); ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly if (MustAllocate(arguments) && !HasArgumentsParameter()) { // 'arguments' is used. Unless there is also a parameter called @@ -865,7 +872,7 @@ void Scope::AllocateNonParameterLocal(Variable* var) { ASSERT(var->rewrite_ == NULL || (!var->IsVariable(Factory::result_symbol())) || (var->slot() == NULL || var->slot()->type() != Slot::LOCAL)); - if (MustAllocate(var) && var->rewrite_ == NULL) { + if (var->rewrite_ == NULL && MustAllocate(var)) { if (MustAllocateInContext(var)) { AllocateHeapSlot(var); } else { @@ -876,27 +883,21 @@ void Scope::AllocateNonParameterLocal(Variable* var) { void Scope::AllocateNonParameterLocals() { - // Each variable occurs exactly once in the locals_ list; all - // variables that have no rewrite yet are non-parameter locals. - - // Sort them according to use such that the locals with more uses - // get allocated first. - if (FLAG_usage_computation) { - // This is currently not implemented. - } - + // All variables that have no rewrite yet are non-parameter locals. 
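// Illustrative sketch, not from this patch: the slot decision that
// MustAllocate() and MustAllocateInContext() encode, reduced to plain
// booleans. ChooseSlot and its parameters are hypothetical stand-ins
// for the real Variable/Scope queries.
enum SlotKind { NO_SLOT, STACK_SLOT, CONTEXT_SLOT };
static SlotKind ChooseSlot(bool is_used,       // var_uses()->is_used()
                           bool is_global,     // resolved via global property cells
                           bool is_temporary,  // mode() == Variable::TEMPORARY
                           bool may_escape) {  // inner-scope, eval or with access
  if (is_global || !is_used) return NO_SLOT;
  if (may_escape && !is_temporary) return CONTEXT_SLOT;  // heap-allocated context
  return STACK_SLOT;                                     // slot in the stack frame
}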
for (int i = 0; i < temps_.length(); i++) { AllocateNonParameterLocal(temps_[i]); } - for (LocalsMap::Entry* p = locals_.Start(); p != NULL; p = locals_.Next(p)) { + for (VariableMap::Entry* p = variables_.Start(); + p != NULL; + p = variables_.Next(p)) { Variable* var = reinterpret_cast<Variable*>(p->value); AllocateNonParameterLocal(var); } - // Note: For now, function_ must be allocated at the very end. If - // it gets allocated in the context, it must be the last slot in the - // context, because of the current ScopeInfo implementation (see + // For now, function_ must be allocated at the very end. If it gets + // allocated in the context, it must be the last slot in the context, + // because of the current ScopeInfo implementation (see // ScopeInfo::ScopeInfo(FunctionScope* scope) constructor). if (function_ != NULL) { AllocateNonParameterLocal(function_); diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h index ea4e0f746..5767d9f01 100644 --- a/deps/v8/src/scopes.h +++ b/deps/v8/src/scopes.h @@ -35,19 +35,22 @@ namespace v8 { namespace internal { -// A hash map to support fast local variable declaration and lookup. -class LocalsMap: public HashMap { +// A hash map to support fast variable declaration and lookup. +class VariableMap: public HashMap { public: - LocalsMap(); + VariableMap(); // Dummy constructor. This constructor doesn't set up the map // properly so don't use it unless you have a good reason. - explicit LocalsMap(bool gotta_love_static_overloading); + explicit VariableMap(bool gotta_love_static_overloading); - virtual ~LocalsMap(); + virtual ~VariableMap(); - Variable* Declare(Scope* scope, Handle<String> name, Variable::Mode mode, - bool is_valid_LHS, Variable::Kind kind); + Variable* Declare(Scope* scope, + Handle<String> name, + Variable::Mode mode, + bool is_valid_lhs, + Variable::Kind kind); Variable* Lookup(Handle<String> name); }; @@ -59,14 +62,14 @@ class LocalsMap: public HashMap { // and setup time for scopes that don't need them. class DynamicScopePart : public ZoneObject { public: - LocalsMap* GetMap(Variable::Mode mode) { + VariableMap* GetMap(Variable::Mode mode) { int index = mode - Variable::DYNAMIC; ASSERT(index >= 0 && index < 3); return &maps_[index]; } private: - LocalsMap maps_[3]; + VariableMap maps_[3]; }; @@ -105,7 +108,7 @@ class Scope: public ZoneObject { // Declarations // Lookup a variable in this scope. Returns the variable or NULL if not found. - virtual Variable* LookupLocal(Handle<String> name); + virtual Variable* LocalLookup(Handle<String> name); // Lookup a variable in this scope or outer scopes. // Returns the variable or NULL if not found. @@ -116,9 +119,15 @@ class Scope: public ZoneObject { // outer scope. Only possible for function scopes; at most one variable. Variable* DeclareFunctionVar(Handle<String> name); - // Declare a variable in this scope. If the variable has been + // Declare a local variable in this scope. If the variable has been // declared before, the previously declared variable is returned. - virtual Variable* Declare(Handle<String> name, Variable::Mode mode); + virtual Variable* DeclareLocal(Handle<String> name, Variable::Mode mode); + + // Declare an implicit global variable in this scope which must be a + // global scope. The variable was introduced (possibly from an inner + // scope) by a reference to an unresolved variable with no intervening + // with statements or eval calls. + Variable* DeclareGlobal(Handle<String> name); // Add a parameter to the parameter list. 
The parameter must have been // declared via Declare. The same parameter may occur more then once in @@ -288,25 +297,28 @@ class Scope: public ZoneObject { Handle<String> scope_name_; // The variables declared in this scope: - // all user-declared variables (incl. parameters) - LocalsMap locals_; - // compiler-allocated (user-invisible) temporaries + // + // All user-declared variables (incl. parameters). For global scopes + // variables may be implicitly 'declared' by being used (possibly in + // an inner scope) with no intervening with statements or eval calls. + VariableMap variables_; + // Compiler-allocated (user-invisible) temporaries. ZoneList<Variable*> temps_; - // parameter list in source order + // Parameter list in source order. ZoneList<Variable*> params_; - // variables that must be looked up dynamically + // Variables that must be looked up dynamically. DynamicScopePart* dynamics_; - // unresolved variables referred to from this scope + // Unresolved variables referred to from this scope. ZoneList<VariableProxy*> unresolved_; - // declarations + // Declarations. ZoneList<Declaration*> decls_; - // convenience variable + // Convenience variable. VariableProxy* receiver_; - // function variable, if any; function scopes only + // Function variable, if any; function scopes only. Variable* function_; - // convenience variable; function scopes only + // Convenience variable; function scopes only. VariableProxy* arguments_; - // convenience variable; function scopes only + // Convenience variable; function scopes only. VariableProxy* arguments_shadow_; // Illegal redeclaration. diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index f45d65d30..592cf5aae 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -42,47 +42,44 @@ namespace v8 { namespace internal { -// Encoding: a RelativeAddress must be able to fit in a pointer: -// it is encoded as an Address with (from MS to LS bits): -// 27 bits identifying a word in the space, in one of three formats: -// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page -// - NEW space: 27 bits of word offset -// - LO space: 27 bits of page number -// 3 bits to encode the AllocationSpace (special values for code in LO space) -// 2 bits identifying this as a HeapObject +// 32-bit encoding: a RelativeAddress must be able to fit in a +// pointer: it is encoded as an Address with (from LS to MS bits): +// - 2 bits identifying this as a HeapObject. +// - 4 bits to encode the AllocationSpace (including special values for +// code and fixed arrays in LO space) +// - 27 bits identifying a word in the space, in one of three formats: +// - paged spaces: 16 bits of page number, 11 bits of word offset in page +// - NEW space: 27 bits of word offset +// - LO space: 27 bits of page number const int kSpaceShift = kHeapObjectTagSize; -const int kSpaceBits = kSpaceTagSize; -const int kSpaceMask = kSpaceTagMask; - -// These value are used instead of space numbers when serializing/ -// deserializing. They indicate an object that is in large object space, but -// should be treated specially. 
-// Make the pages executable on platforms that support it: -const int kLOSpaceExecutable = LAST_SPACE + 1; -// Reserve space for write barrier bits (for objects that can contain -// references to new space): -const int kLOSpacePointer = LAST_SPACE + 2; - +const int kSpaceBits = 4; +const int kSpaceMask = (1 << kSpaceBits) - 1; const int kOffsetShift = kSpaceShift + kSpaceBits; const int kOffsetBits = 11; const int kOffsetMask = (1 << kOffsetBits) - 1; -const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize); const int kPageShift = kOffsetShift + kOffsetBits; +const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize); const int kPageMask = (1 << kPageBits) - 1; const int kPageAndOffsetShift = kOffsetShift; const int kPageAndOffsetBits = kPageBits + kOffsetBits; const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1; +// These values are special allocation space tags used for +// serialization. +// Mark the pages executable on platforms that support it. +const int kLargeCode = LAST_SPACE + 1; +// Allocate extra remembered-set bits. +const int kLargeFixedArray = LAST_SPACE + 2; + static inline AllocationSpace GetSpace(Address addr) { const intptr_t encoded = reinterpret_cast<intptr_t>(addr); int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask); - if (space_number == kLOSpaceExecutable) space_number = LO_SPACE; - else if (space_number == kLOSpacePointer) space_number = LO_SPACE; + if (space_number > LAST_SPACE) space_number = LO_SPACE; return static_cast<AllocationSpace>(space_number); } @@ -91,7 +88,7 @@ static inline bool IsLargeExecutableObject(Address addr) { const intptr_t encoded = reinterpret_cast<intptr_t>(addr); const int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask); - return (space_number == kLOSpaceExecutable); + return (space_number == kLargeCode); } @@ -99,7 +96,7 @@ static inline bool IsLargeFixedArray(Address addr) { const intptr_t encoded = reinterpret_cast<intptr_t>(addr); const int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask); - return (space_number == kLOSpacePointer); + return (space_number == kLargeFixedArray); } @@ -147,6 +144,9 @@ class RelativeAddress { int page_index, int page_offset) : space_(space), page_index_(page_index), page_offset_(page_offset) { + // Assert that the space encoding (plus the two pseudo-spaces for + // special large objects) fits in the available bits.
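// Illustrative sketch, not from this patch: packing one paged-space address
// with the layout described above, on a 32-bit host. EncodePagedAddress is a
// hypothetical helper; the shift amounts spell out kSpaceShift (2),
// kOffsetShift (2 + 4) and kPageShift (2 + 4 + 11), and kPageBits works out
// to 32 - (11 + 4 + 2) = 15 here.
#include <stdint.h>
static inline uint32_t EncodePagedAddress(uint32_t space,         // <= kSpaceMask
                                          uint32_t page_index,    // page in space
                                          uint32_t word_offset) { // word in page
  return (page_index << (2 + 4 + 11)) |  // kPageShift
         (word_offset << (2 + 4)) |      // kOffsetShift
         (space << 2) |                  // kSpaceShift == kHeapObjectTagSize
         1;                              // heap-object tag bits
}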
+ ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0); ASSERT(space <= LAST_SPACE && space >= 0); } @@ -154,8 +154,7 @@ class RelativeAddress { Address Encode() const; AllocationSpace space() const { - if (space_ == kLOSpaceExecutable) return LO_SPACE; - if (space_ == kLOSpacePointer) return LO_SPACE; + if (space_ > LAST_SPACE) return LO_SPACE; return static_cast<AllocationSpace>(space_); } int page_index() const { return page_index_; } @@ -165,7 +164,8 @@ class RelativeAddress { return space_ == CODE_SPACE || space_ == OLD_POINTER_SPACE || space_ == OLD_DATA_SPACE || - space_ == MAP_SPACE; + space_ == MAP_SPACE || + space_ == CELL_SPACE; } void next_address(int offset) { page_offset_ += offset; } @@ -180,11 +180,11 @@ class RelativeAddress { void set_to_large_code_object() { ASSERT(space_ == LO_SPACE); - space_ = kLOSpaceExecutable; + space_ = kLargeCode; } void set_to_large_fixed_array() { ASSERT(space_ == LO_SPACE); - space_ = kLOSpacePointer; + space_ = kLargeFixedArray; } @@ -201,6 +201,7 @@ Address RelativeAddress::Encode() const { int result = 0; switch (space_) { case MAP_SPACE: + case CELL_SPACE: case OLD_POINTER_SPACE: case OLD_DATA_SPACE: case CODE_SPACE: @@ -216,8 +217,8 @@ Address RelativeAddress::Encode() const { result = word_offset << kPageAndOffsetShift; break; case LO_SPACE: - case kLOSpaceExecutable: - case kLOSpacePointer: + case kLargeCode: + case kLargeFixedArray: ASSERT_EQ(0, page_offset_); ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask); result = page_index_ << kPageAndOffsetShift; @@ -235,6 +236,7 @@ void RelativeAddress::Verify() { ASSERT(page_offset_ >= 0 && page_index_ >= 0); switch (space_) { case MAP_SPACE: + case CELL_SPACE: case OLD_POINTER_SPACE: case OLD_DATA_SPACE: case CODE_SPACE: @@ -245,8 +247,8 @@ void RelativeAddress::Verify() { ASSERT(page_index_ == 0); break; case LO_SPACE: - case kLOSpaceExecutable: - case kLOSpacePointer: + case kLargeCode: + case kLargeFixedArray: ASSERT(page_offset_ == 0); break; } @@ -291,6 +293,7 @@ class SimulatedHeapSpace { void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) { switch (space) { case MAP_SPACE: + case CELL_SPACE: case OLD_POINTER_SPACE: case OLD_DATA_SPACE: case CODE_SPACE: @@ -307,12 +310,15 @@ void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) { void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) { switch (space) { case MAP_SPACE: + case CELL_SPACE: case OLD_POINTER_SPACE: case OLD_DATA_SPACE: case CODE_SPACE: { PagedSpace* ps; if (space == MAP_SPACE) { ps = Heap::map_space(); + } else if (space == CELL_SPACE) { + ps = Heap::cell_space(); } else if (space == OLD_POINTER_SPACE) { ps = Heap::old_pointer_space(); } else if (space == OLD_DATA_SPACE) { @@ -699,6 +705,7 @@ void ExternalReferenceTable::PopulateTable() { UNCLASSIFIED, 10, "Debug::step_in_fp_addr()"); +#endif Add(ExternalReference::double_fp_operation(Token::ADD).address(), UNCLASSIFIED, 11, @@ -711,7 +718,18 @@ void ExternalReferenceTable::PopulateTable() { UNCLASSIFIED, 13, "mul_two_doubles"); -#endif + Add(ExternalReference::double_fp_operation(Token::DIV).address(), + UNCLASSIFIED, + 14, + "div_two_doubles"); + Add(ExternalReference::double_fp_operation(Token::MOD).address(), + UNCLASSIFIED, + 15, + "mod_two_doubles"); + Add(ExternalReference::compare_doubles().address(), + UNCLASSIFIED, + 16, + "compare_doubles"); } @@ -1109,6 +1127,8 @@ void Serializer::PutHeader() { writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size()); writer_->PutC('|'); writer_->PutInt(Heap::map_space()->Size()); + 
writer_->PutC('|'); + writer_->PutInt(Heap::cell_space()->Size()); writer_->PutC(']'); // Write global handles. writer_->PutC('G'); @@ -1291,6 +1311,7 @@ static const int kInitArraySize = 32; Deserializer::Deserializer(const byte* str, int len) : reader_(str, len), map_pages_(kInitArraySize), + cell_pages_(kInitArraySize), old_pointer_pages_(kInitArraySize), old_data_pages_(kInitArraySize), code_pages_(kInitArraySize), @@ -1463,6 +1484,8 @@ void Deserializer::GetHeader() { InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_); reader_.ExpectC('|'); InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_); + reader_.ExpectC('|'); + InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_); reader_.ExpectC(']'); // Create placeholders for global handles later to be fill during // IterateRoots. @@ -1595,6 +1618,9 @@ Object* Deserializer::Resolve(Address encoded) { case MAP_SPACE: return ResolvePaged(PageIndex(encoded), PageOffset(encoded), Heap::map_space(), &map_pages_); + case CELL_SPACE: + return ResolvePaged(PageIndex(encoded), PageOffset(encoded), + Heap::cell_space(), &cell_pages_); case OLD_POINTER_SPACE: return ResolvePaged(PageIndex(encoded), PageOffset(encoded), Heap::old_pointer_space(), &old_pointer_pages_); diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h index 7f4eb6321..1b24065cf 100644 --- a/deps/v8/src/serialize.h +++ b/deps/v8/src/serialize.h @@ -320,10 +320,11 @@ class Deserializer: public ObjectVisitor { bool has_log_; // The file has log information. // Resolve caches the following: - List<Page*> map_pages_; // All pages in the map space. + List<Page*> map_pages_; // All pages in the map space. + List<Page*> cell_pages_; // All pages in the cell space. List<Page*> old_pointer_pages_; // All pages in the old pointer space. - List<Page*> old_data_pages_; // All pages in the old data space. - List<Page*> code_pages_; + List<Page*> old_data_pages_; // All pages in the old data space. + List<Page*> code_pages_; // All pages in the code space. List<Object*> large_objects_; // All known large objects. // A list of global handles at deserialization time. List<Object**> global_handles_; diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h index 2f01164f8..8b2eab0c4 100644 --- a/deps/v8/src/spaces-inl.h +++ b/deps/v8/src/spaces-inl.h @@ -93,17 +93,21 @@ Address Page::AllocationTop() { void Page::ClearRSet() { -#ifndef V8_HOST_ARCH_64_BIT // This method can be called in all rset states. memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset); -#endif } -// Give an address a (32-bits): +// Given a 32-bit address, separate its bits into: // | page address | words (6) | bit offset (5) | pointer alignment (2) | -// The rset address is computed as: +// The address of the rset word containing the bit for this word is computed as: // page_address + words * 4 +// For a 64-bit address, if it is: +// | page address | quadwords(5) | bit offset(5) | pointer alignment (3) | +// The address of the rset word containing the bit for this word is computed as: +// page_address + quadwords * 4 + kRSetOffset. +// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word, +// even on the X64 architecture. 
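// Illustrative sketch, not from this patch: the 32-bit mapping just
// described. Every pointer-aligned word in a page owns one remembered-set
// bit, and the rset is read as 32-bit words. RSetPosition is a hypothetical
// stand-in for Page::ComputeRSetBitPosition below, with kRSetOffset == 0 as
// on 32-bit hosts.
#include <stdint.h>
static inline void RSetPosition(uint32_t page_start, uint32_t word_address,
                                uint32_t* rset_word_address, uint32_t* bitmask) {
  uint32_t bit_offset = (word_address - page_start) >> 2;   // one bit per 4-byte word
  *rset_word_address = page_start + (bit_offset / 32) * 4;  // 32 bits per rset word
  *bitmask = 1u << (bit_offset % 32);
}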
Address Page::ComputeRSetBitPosition(Address address, int offset, uint32_t* bitmask) { @@ -115,7 +119,7 @@ Address Page::ComputeRSetBitPosition(Address address, int offset, *bitmask = 1 << (bit_offset % kBitsPerInt); Address rset_address = - page->address() + (bit_offset / kBitsPerInt) * kIntSize; + page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize; // The remembered set address is either in the normal remembered set range // of a page or else we have a large object page. ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd()) @@ -131,8 +135,10 @@ Address Page::ComputeRSetBitPosition(Address address, int offset, // of the object: // (rset_address - page->ObjectAreaStart()). // Ie, we can just add the object size. + // In the X64 architecture, the remembered set ends before the object start, + // so we need to add an additional offset, from rset end to object start ASSERT(HeapObject::FromAddress(address)->IsFixedArray()); - rset_address += + rset_address += kObjectStartOffset - kRSetEndOffset + FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart() + Array::kLengthOffset)); } @@ -160,14 +166,9 @@ void Page::UnsetRSet(Address address, int offset) { bool Page::IsRSetSet(Address address, int offset) { -#ifdef V8_HOST_ARCH_64_BIT - // TODO(X64): Reenable when RSet works. - return true; -#else // V8_HOST_ARCH_64_BIT uint32_t bitmask = 0; Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask); return (Memory::uint32_at(rset_address) & bitmask) != 0; -#endif // V8_HOST_ARCH_64_BIT } diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index 72b028cde..2393281bd 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -37,8 +37,8 @@ namespace internal { // For contiguous spaces, top should be in the space (or at the end) and limit // should be the end of the space. #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ - ASSERT((space).low() <= (info).top \ - && (info).top <= (space).high() \ + ASSERT((space).low() <= (info).top \ + && (info).top <= (space).high() \ && (info).limit == (space).high()) @@ -786,6 +786,77 @@ void PagedSpace::Print() { } #endif +#ifdef DEBUG +// We do not assume that the PageIterator works, because it depends on the +// invariants we are checking during verification. +void PagedSpace::Verify(ObjectVisitor* visitor) { + // The allocation pointer should be valid, and it should be in a page in the + // space. + ASSERT(allocation_info_.VerifyPagedAllocation()); + Page* top_page = Page::FromAllocationTop(allocation_info_.top); + ASSERT(MemoryAllocator::IsPageInSpace(top_page, this)); + + // Loop over all the pages. + bool above_allocation_top = false; + Page* current_page = first_page_; + while (current_page->is_valid()) { + if (above_allocation_top) { + // We don't care what's above the allocation top. + } else { + // Unless this is the last page in the space containing allocated + // objects, the allocation top should be at a constant offset from the + // object area end. + Address top = current_page->AllocationTop(); + if (current_page == top_page) { + ASSERT(top == allocation_info_.top); + // The next page will be above the allocation top. + above_allocation_top = true; + } else { + ASSERT(top == current_page->ObjectAreaEnd() - page_extra_); + } + + // It should be packed with objects from the bottom to the top. 
+ Address current = current_page->ObjectAreaStart(); + while (current < top) { + HeapObject* object = HeapObject::FromAddress(current); + + // The first word should be a map, and we expect all map pointers to + // be in map space. + Map* map = object->map(); + ASSERT(map->IsMap()); + ASSERT(Heap::map_space()->Contains(map)); + + // Perform space-specific object verification. + VerifyObject(object); + + // The object itself should look OK. + object->Verify(); + + // All the interior pointers should be contained in the heap and + // have their remembered set bits set if required as determined + // by the visitor. + int size = object->Size(); + if (object->IsCode()) { + Code::cast(object)->ConvertICTargetsFromAddressToObject(); + object->IterateBody(map->instance_type(), size, visitor); + Code::cast(object)->ConvertICTargetsFromObjectToAddress(); + } else { + object->IterateBody(map->instance_type(), size, visitor); + } + + current += size; + } + + // The allocation pointer should not be in the middle of an object. + ASSERT(current == top); + } + + current_page = current_page->next_page(); + } +} +#endif + + // ----------------------------------------------------------------------------- // NewSpace implementation @@ -1141,7 +1212,7 @@ static void ReportHistogram(bool print_spill) { // Summarize string types. int string_number = 0; int string_bytes = 0; -#define INCREMENT(type, size, name) \ +#define INCREMENT(type, size, name, camel_name) \ string_number += heap_histograms[type].number(); \ string_bytes += heap_histograms[type].bytes(); STRING_TYPE_LIST(INCREMENT) @@ -1185,8 +1256,8 @@ static void DoReportStatistics(HistogramInfo* info, const char* description) { // Lump all the string types together. int string_number = 0; int string_bytes = 0; -#define INCREMENT(type, size, name) \ - string_number += info[type].number(); \ +#define INCREMENT(type, size, name, camel_name) \ + string_number += info[type].number(); \ string_bytes += info[type].bytes(); STRING_TYPE_LIST(INCREMENT) #undef INCREMENT @@ -1265,13 +1336,13 @@ void FreeListNode::set_size(int size_in_bytes) { // If the block is too small (eg, one or two words), to hold both a size // field and a next pointer, we give it a filler map that gives it the // correct size. 
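// Illustrative sketch, not from this patch: the three free-block encodings
// set_size() below chooses between, assuming 4-byte pointers.
// ClassifyFreeBlock is a hypothetical helper; the real code tags the block by
// storing the matching map in its first word.
enum FreeBlockKind {
  ONE_WORD_FILLER,   // 4 bytes: map only, no room for a next link
  TWO_WORD_FILLER,   // 8 bytes: map + next-block address
  BYTE_ARRAY_BLOCK   // > 8 bytes: map + explicit length, next link after header
};
static FreeBlockKind ClassifyFreeBlock(int size_in_bytes) {
  if (size_in_bytes == 4) return ONE_WORD_FILLER;
  if (size_in_bytes == 8) return TWO_WORD_FILLER;
  return BYTE_ARRAY_BLOCK;
}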
- if (size_in_bytes > Array::kHeaderSize) { - set_map(Heap::byte_array_map()); + if (size_in_bytes > ByteArray::kAlignedSize) { + set_map(Heap::raw_unchecked_byte_array_map()); ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes)); } else if (size_in_bytes == kPointerSize) { - set_map(Heap::one_word_filler_map()); + set_map(Heap::raw_unchecked_one_pointer_filler_map()); } else if (size_in_bytes == 2 * kPointerSize) { - set_map(Heap::two_word_filler_map()); + set_map(Heap::raw_unchecked_two_pointer_filler_map()); } else { UNREACHABLE(); } @@ -1280,16 +1351,26 @@ void FreeListNode::set_size(int size_in_bytes) { Address FreeListNode::next() { - ASSERT(map() == Heap::byte_array_map()); - ASSERT(Size() >= kNextOffset + kPointerSize); - return Memory::Address_at(address() + kNextOffset); + ASSERT(map() == Heap::raw_unchecked_byte_array_map() || + map() == Heap::raw_unchecked_two_pointer_filler_map()); + if (map() == Heap::raw_unchecked_byte_array_map()) { + ASSERT(Size() >= kNextOffset + kPointerSize); + return Memory::Address_at(address() + kNextOffset); + } else { + return Memory::Address_at(address() + kPointerSize); + } } void FreeListNode::set_next(Address next) { - ASSERT(map() == Heap::byte_array_map()); - ASSERT(Size() >= kNextOffset + kPointerSize); - Memory::Address_at(address() + kNextOffset) = next; + ASSERT(map() == Heap::raw_unchecked_byte_array_map() || + map() == Heap::raw_unchecked_two_pointer_filler_map()); + if (map() == Heap::raw_unchecked_byte_array_map()) { + ASSERT(Size() >= kNextOffset + kPointerSize); + Memory::Address_at(address() + kNextOffset) = next; + } else { + Memory::Address_at(address() + kPointerSize) = next; + } } @@ -1445,42 +1526,42 @@ bool OldSpaceFreeList::Contains(FreeListNode* node) { #endif -MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) { - owner_ = owner; +FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size) + : owner_(owner), object_size_(object_size) { Reset(); } -void MapSpaceFreeList::Reset() { +void FixedSizeFreeList::Reset() { available_ = 0; head_ = NULL; } -void MapSpaceFreeList::Free(Address start) { +void FixedSizeFreeList::Free(Address start) { #ifdef DEBUG - for (int i = 0; i < Map::kSize; i += kPointerSize) { + for (int i = 0; i < object_size_; i += kPointerSize) { Memory::Address_at(start + i) = kZapValue; } #endif ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. FreeListNode* node = FreeListNode::FromAddress(start); - node->set_size(Map::kSize); + node->set_size(object_size_); node->set_next(head_); head_ = node->address(); - available_ += Map::kSize; + available_ += object_size_; } -Object* MapSpaceFreeList::Allocate() { +Object* FixedSizeFreeList::Allocate() { if (head_ == NULL) { - return Failure::RetryAfterGC(Map::kSize, owner_); + return Failure::RetryAfterGC(object_size_, owner_); } ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. FreeListNode* node = FreeListNode::FromAddress(head_); head_ = node->next(); - available_ -= Map::kSize; + available_ -= object_size_; return node; } @@ -1494,7 +1575,6 @@ void OldSpace::PrepareForMarkCompact(bool will_compact) { // the space is considered 'available' and we will rediscover live data // and waste during the collection. 
MCResetRelocationInfo(); - mc_end_of_relocation_ = bottom(); ASSERT(Available() == Capacity()); } else { // During a non-compacting collection, everything below the linear @@ -1510,24 +1590,6 @@ void OldSpace::PrepareForMarkCompact(bool will_compact) { } -void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) { - ASSERT(Contains(address)); - Address current_top = mc_end_of_relocation_; - Page* current_page = Page::FromAllocationTop(current_top); - - // No more objects relocated to this page? Move to the next. - ASSERT(current_top <= current_page->mc_relocation_top); - if (current_top == current_page->mc_relocation_top) { - // The space should already be properly expanded. - Page* next_page = current_page->next_page(); - CHECK(next_page->is_valid()); - mc_end_of_relocation_ = next_page->ObjectAreaStart(); - } - ASSERT(mc_end_of_relocation_ == address); - mc_end_of_relocation_ += size_in_bytes; -} - - void OldSpace::MCCommitRelocationInfo() { // Update fast allocation info. allocation_info_.top = mc_forwarding_info_.top; @@ -1624,76 +1686,6 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page, #ifdef DEBUG -// We do not assume that the PageIterator works, because it depends on the -// invariants we are checking during verification. -void OldSpace::Verify() { - // The allocation pointer should be valid, and it should be in a page in the - // space. - ASSERT(allocation_info_.VerifyPagedAllocation()); - Page* top_page = Page::FromAllocationTop(allocation_info_.top); - ASSERT(MemoryAllocator::IsPageInSpace(top_page, this)); - - // Loop over all the pages. - bool above_allocation_top = false; - Page* current_page = first_page_; - while (current_page->is_valid()) { - if (above_allocation_top) { - // We don't care what's above the allocation top. - } else { - // Unless this is the last page in the space containing allocated - // objects, the allocation top should be at the object area end. - Address top = current_page->AllocationTop(); - if (current_page == top_page) { - ASSERT(top == allocation_info_.top); - // The next page will be above the allocation top. - above_allocation_top = true; - } else { - ASSERT(top == current_page->ObjectAreaEnd()); - } - - // It should be packed with objects from the bottom to the top. - Address current = current_page->ObjectAreaStart(); - while (current < top) { - HeapObject* object = HeapObject::FromAddress(current); - - // The first word should be a map, and we expect all map pointers to - // be in map space. - Map* map = object->map(); - ASSERT(map->IsMap()); - ASSERT(Heap::map_space()->Contains(map)); - - // The object should not be a map. - ASSERT(!object->IsMap()); - - // The object itself should look OK. - object->Verify(); - - // All the interior pointers should be contained in the heap and have - // their remembered set bits set if they point to new space. Code - // objects do not have remembered set bits that we care about. - VerifyPointersAndRSetVisitor rset_visitor; - VerifyPointersVisitor no_rset_visitor; - int size = object->Size(); - if (object->IsCode()) { - Code::cast(object)->ConvertICTargetsFromAddressToObject(); - object->IterateBody(map->instance_type(), size, &no_rset_visitor); - Code::cast(object)->ConvertICTargetsFromObjectToAddress(); - } else { - object->IterateBody(map->instance_type(), size, &rset_visitor); - } - - current += size; - } - - // The allocation pointer should not be in the middle of an object. 
- ASSERT(current == top); - } - - current_page = current_page->next_page(); - } -} - - struct CommentStatistic { const char* comment; int size; @@ -1856,7 +1848,7 @@ void OldSpace::ReportStatistics() { int bitpos = intoff*kBitsPerByte + bitoff; Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits); Object** obj = reinterpret_cast<Object**>(slot); - if (*obj == Heap::fixed_array_map()) { + if (*obj == Heap::raw_unchecked_fixed_array_map()) { rset_marked_arrays++; FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot)); @@ -1987,25 +1979,13 @@ void OldSpace::PrintRSet() { DoPrintRSet("old"); } #endif // ----------------------------------------------------------------------------- -// MapSpace implementation +// FixedSpace implementation -void MapSpace::PrepareForMarkCompact(bool will_compact) { +void FixedSpace::PrepareForMarkCompact(bool will_compact) { if (will_compact) { // Reset relocation info. MCResetRelocationInfo(); - // Initialize map index entry. - int page_count = 0; - PageIterator it(this, PageIterator::ALL_PAGES); - while (it.has_next()) { - ASSERT_MAP_PAGE_INDEX(page_count); - - Page* p = it.next(); - ASSERT(p->mc_page_index == page_count); - - page_addresses_[page_count++] = p->address(); - } - // During a compacting collection, everything in the space is considered // 'available' (set by the call to MCResetRelocationInfo) and we will // rediscover live and wasted bytes during the collection. @@ -2023,7 +2003,7 @@ void MapSpace::PrepareForMarkCompact(bool will_compact) { } -void MapSpace::MCCommitRelocationInfo() { +void FixedSpace::MCCommitRelocationInfo() { // Update fast allocation info. allocation_info_.top = mc_forwarding_info_.top; allocation_info_.limit = mc_forwarding_info_.limit; @@ -2053,7 +2033,8 @@ void MapSpace::MCCommitRelocationInfo() { // Slow case for normal allocation. Try in order: (1) allocate in the next // page in the space, (2) allocate off the space's free list, (3) expand the // space, (4) fail. -HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) { +HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) { + ASSERT_EQ(object_size_in_bytes_, size_in_bytes); // Linear allocation in this space has failed. If there is another page // in the space, move to that page and allocate there. This allocation // should succeed. @@ -2062,10 +2043,10 @@ HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) { return AllocateInNextPage(current_page, size_in_bytes); } - // There is no next page in this space. Try free list allocation. The - // map space free list implicitly assumes that all free blocks are map - // sized. - if (size_in_bytes == Map::kSize) { + // There is no next page in this space. Try free list allocation. + // The fixed space free list implicitly assumes that all free blocks + // are of the fixed size. + if (size_in_bytes == object_size_in_bytes_) { Object* result = free_list_.Allocate(); if (!result->IsFailure()) { accounting_stats_.AllocateBytes(size_in_bytes); @@ -2094,81 +2075,19 @@ HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) { // Move to the next page (there is assumed to be one) and allocate there. // The top of page block is always wasted, because it is too small to hold a // map. 
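// Illustrative sketch, not from this patch: the wasted tail a fixed-size
// space accepts on every page. With 8K pages and a 256-byte page header the
// object area is 8192 - 256 = 7936 bytes, so a hypothetical 40-byte object
// size leaves 7936 % 40 == 16 unusable bytes at the top of each page; this is
// the page_extra_ introduced below.
static inline int PageExtra(int object_area_size, int object_size_in_bytes) {
  return object_area_size % object_size_in_bytes;
}
// e.g. PageExtra(8192 - 256, 40) == 16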
-HeapObject* MapSpace::AllocateInNextPage(Page* current_page, - int size_in_bytes) { +HeapObject* FixedSpace::AllocateInNextPage(Page* current_page, + int size_in_bytes) { ASSERT(current_page->next_page()->is_valid()); - ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra); - accounting_stats_.WasteBytes(kPageExtra); + ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_); + ASSERT_EQ(object_size_in_bytes_, size_in_bytes); + accounting_stats_.WasteBytes(page_extra_); SetAllocationInfo(&allocation_info_, current_page->next_page()); return AllocateLinearly(&allocation_info_, size_in_bytes); } #ifdef DEBUG -// We do not assume that the PageIterator works, because it depends on the -// invariants we are checking during verification. -void MapSpace::Verify() { - // The allocation pointer should be valid, and it should be in a page in the - // space. - ASSERT(allocation_info_.VerifyPagedAllocation()); - Page* top_page = Page::FromAllocationTop(allocation_info_.top); - ASSERT(MemoryAllocator::IsPageInSpace(top_page, this)); - - // Loop over all the pages. - bool above_allocation_top = false; - Page* current_page = first_page_; - while (current_page->is_valid()) { - if (above_allocation_top) { - // We don't care what's above the allocation top. - } else { - // Unless this is the last page in the space containing allocated - // objects, the allocation top should be at a constant offset from the - // object area end. - Address top = current_page->AllocationTop(); - if (current_page == top_page) { - ASSERT(top == allocation_info_.top); - // The next page will be above the allocation top. - above_allocation_top = true; - } else { - ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra); - } - - // It should be packed with objects from the bottom to the top. - Address current = current_page->ObjectAreaStart(); - while (current < top) { - HeapObject* object = HeapObject::FromAddress(current); - - // The first word should be a map, and we expect all map pointers to - // be in map space. - Map* map = object->map(); - ASSERT(map->IsMap()); - ASSERT(Heap::map_space()->Contains(map)); - - // The object should be a map or a byte array. - ASSERT(object->IsMap() || object->IsByteArray()); - - // The object itself should look OK. - object->Verify(); - - // All the interior pointers should be contained in the heap and - // have their remembered set bits set if they point to new space. - VerifyPointersAndRSetVisitor visitor; - int size = object->Size(); - object->IterateBody(map->instance_type(), size, &visitor); - - current += size; - } - - // The allocation pointer should not be in the middle of an object. - ASSERT(current == top); - } - - current_page = current_page->next_page(); - } -} - - -void MapSpace::ReportStatistics() { +void FixedSpace::ReportStatistics() { int pct = Available() * 100 / Capacity(); PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", Capacity(), Waste(), Available(), pct); @@ -2215,7 +2134,50 @@ void MapSpace::ReportStatistics() { } -void MapSpace::PrintRSet() { DoPrintRSet("map"); } +void FixedSpace::PrintRSet() { DoPrintRSet(name_); } +#endif + + +// ----------------------------------------------------------------------------- +// MapSpace implementation + +void MapSpace::PrepareForMarkCompact(bool will_compact) { + // Call prepare of the super class. + FixedSpace::PrepareForMarkCompact(will_compact); + + if (will_compact) { + // Initialize map index entry. 
+ int page_count = 0; + PageIterator it(this, PageIterator::ALL_PAGES); + while (it.has_next()) { + ASSERT_MAP_PAGE_INDEX(page_count); + + Page* p = it.next(); + ASSERT(p->mc_page_index == page_count); + + page_addresses_[page_count++] = p->address(); + } + } +} + + +#ifdef DEBUG +void MapSpace::VerifyObject(HeapObject* object) { + // The object should be a map or a free-list node. + ASSERT(object->IsMap() || object->IsByteArray()); +} +#endif + + +// ----------------------------------------------------------------------------- +// GlobalPropertyCellSpace implementation + +#ifdef DEBUG +void CellSpace::VerifyObject(HeapObject* object) { + // The object should be a global object property cell or a free-list node. + ASSERT(object->IsJSGlobalPropertyCell() || + object->map() == Heap::two_pointer_filler_map()); +} #endif diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h index 0538c5f36..ccd1d2786 100644 --- a/deps/v8/src/spaces.h +++ b/deps/v8/src/spaces.h @@ -93,13 +93,14 @@ class AllocationInfo; // bytes are used as remembered set, and the rest of the page is the object // area. // -// Pointers are aligned to the pointer size (4 bytes), only 1 bit is needed +// Pointers are aligned to the pointer size (4), only 1 bit is needed // for a pointer in the remembered set. Given an address, its remembered set // bit position (offset from the start of the page) is calculated by dividing // its page offset by 32. Therefore, the object area in a page starts at the // 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that // the first two words (64 bits) in a page can be used for other purposes. // TODO(X64): This description only represents the 32-bit layout. +// On the 64-bit platform, we add an offset to the start of the remembered set. // // The mark-compact collector transforms a map pointer into a page index and a // page offset. The map space can have up to 1024 pages, and 8M bytes (1024 * @@ -217,15 +218,25 @@ class Page { // Page size mask. static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; + // The offset of the remembered set in a page, in addition to the empty words + // formed as the remembered bits of the remembered set itself. +#ifdef V8_TARGET_ARCH_X64 + static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers. +#else + static const int kRSetOffset = 0; +#endif // The end offset of the remembered set in a page // (heaps are aligned to pointer size). - static const int kRSetEndOffset= kPageSize / kBitsPerPointer; - - // The start offset of the remembered set in a page. - static const int kRSetStartOffset = kRSetEndOffset / kBitsPerPointer; + static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer; // The start offset of the object area in a page. - static const int kObjectStartOffset = kRSetEndOffset; + // This needs to be at least (bits per uint32_t) * kBitsPerPointer, + // to align start of rset to a uint32_t address. + static const int kObjectStartOffset = 256; + + // The start offset of the remembered set in a page. + static const int kRSetStartOffset = kRSetOffset + + kObjectStartOffset / kBitsPerPointer; // Object area size in bytes. static const int kObjectAreaSize = kPageSize - kObjectStartOffset; @@ -291,7 +302,6 @@ class Space : public Malloced { virtual int Size() = 0; #ifdef DEBUG - virtual void Verify() = 0; virtual void Print() = 0; #endif @@ -825,6 +835,13 @@ class PagedSpace : public Space { // Print meta info and objects in this space. 
virtual void Print(); + // Verify integrity of this space. + virtual void Verify(ObjectVisitor* visitor); + + // Overridden by subclasses to verify space-specific object + // properties (e.g., only maps or free-list nodes are in map space). + virtual void VerifyObject(HeapObject* obj) {} + // Report code object related statistics void CollectCodeStatistics(); static void ReportCodeStatistics(); @@ -851,6 +868,12 @@ class PagedSpace : public Space { // Relocation information during mark-compact collections. AllocationInfo mc_forwarding_info_; + // Bytes of each page that cannot be allocated. Possibly non-zero + // for pages in spaces with only fixed-size objects. Always zero + // for pages in spaces with variable sized objects (those pages are + // padded with free-list nodes). + int page_extra_; + // Sets allocation pointer to a page bottom. static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p); @@ -1270,7 +1293,7 @@ class FreeListNode: public HeapObject { inline void set_next(Address next); private: - static const int kNextOffset = Array::kHeaderSize; + static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize); DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); }; @@ -1304,7 +1327,7 @@ class OldSpaceFreeList BASE_EMBEDDED { private: // The size range of blocks, in bytes. (Smaller allocations are allowed, but // will always result in waste.) - static const int kMinBlockSize = Array::kHeaderSize + kPointerSize; + static const int kMinBlockSize = 2 * kPointerSize; static const int kMaxBlockSize = Page::kMaxHeapObjectSize; // The identity of the owning space, for building allocation Failure @@ -1379,9 +1402,9 @@ class OldSpaceFreeList BASE_EMBEDDED { // The free list for the map space. -class MapSpaceFreeList BASE_EMBEDDED { +class FixedSizeFreeList BASE_EMBEDDED { public: - explicit MapSpaceFreeList(AllocationSpace owner); + FixedSizeFreeList(AllocationSpace owner, int object_size); // Clear the free list. void Reset(); @@ -1390,12 +1413,12 @@ class MapSpaceFreeList BASE_EMBEDDED { int available() { return available_; } // Place a node on the free list. The block starting at 'start' (assumed to - // have size Map::kSize) is placed on the free list. Bookkeeping + // have size object_size_) is placed on the free list. Bookkeeping // information will be written to the block, ie, its contents will be // destroyed. The start address should be word aligned. void Free(Address start); - // Allocate a map-sized block from the free list. The block is unitialized. + // Allocate a fixed sized block from the free list. The block is unitialized. // A failure is returned if no block is available. Object* Allocate(); @@ -1410,7 +1433,10 @@ class MapSpaceFreeList BASE_EMBEDDED { // objects. AllocationSpace owner_; - DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList); + // The size of the objects in this space. + int object_size_; + + DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList); }; @@ -1425,6 +1451,7 @@ class OldSpace : public PagedSpace { AllocationSpace id, Executability executable) : PagedSpace(max_capacity, id, executable), free_list_(id) { + page_extra_ = 0; } // The bytes available on the free list (ie, not above the linear allocation @@ -1448,20 +1475,11 @@ class OldSpace : public PagedSpace { // clears the free list. virtual void PrepareForMarkCompact(bool will_compact); - // Adjust the top of relocation pointer to point to the end of the object - // given by 'address' and 'size_in_bytes'. 
Move it to the next page if - // necessary, ensure that it points to the address, then increment it by the - // size. - void MCAdjustRelocationEnd(Address address, int size_in_bytes); - // Updates the allocation pointer to the relocation top after a mark-compact // collection. virtual void MCCommitRelocationInfo(); #ifdef DEBUG - // Verify integrity of this space. - virtual void Verify(); - // Reports statistics for the space void ReportStatistics(); // Dump the remembered sets in the space to stdout. @@ -1480,39 +1498,41 @@ class OldSpace : public PagedSpace { // The space's free list. OldSpaceFreeList free_list_; - // During relocation, we keep a pointer to the most recently relocated - // object in order to know when to move to the next page. - Address mc_end_of_relocation_; - public: TRACK_MEMORY("OldSpace") }; // ----------------------------------------------------------------------------- -// Old space for all map objects +// Old space for objects of a fixed size -class MapSpace : public PagedSpace { +class FixedSpace : public PagedSpace { public: - // Creates a map space object with a maximum capacity. - explicit MapSpace(int max_capacity, AllocationSpace id) - : PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { } + FixedSpace(int max_capacity, + AllocationSpace id, + int object_size_in_bytes, + const char* name) + : PagedSpace(max_capacity, id, NOT_EXECUTABLE), + object_size_in_bytes_(object_size_in_bytes), + name_(name), + free_list_(id, object_size_in_bytes) { + page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; + } // The top of allocation in a page in this space. Undefined if page is unused. virtual Address PageAllocationTop(Page* page) { return page == TopPageOf(allocation_info_) ? top() - : page->ObjectAreaEnd() - kPageExtra; + : page->ObjectAreaEnd() - page_extra_; } - // Give a map-sized block of memory to the space's free list. + int object_size_in_bytes() { return object_size_in_bytes_; } + + // Give a fixed sized block of memory to the space's free list. void Free(Address start) { free_list_.Free(start); accounting_stats_.DeallocateBytes(Map::kSize); } - // Given an index, returns the page address. - Address PageAddress(int page_index) { return page_addresses_[page_index]; } - // Prepares for a mark-compact GC. virtual void PrepareForMarkCompact(bool will_compact); @@ -1521,21 +1541,13 @@ class MapSpace : public PagedSpace { virtual void MCCommitRelocationInfo(); #ifdef DEBUG - // Verify integrity of this space. - virtual void Verify(); - // Reports statistic info of the space void ReportStatistics(); + // Dump the remembered sets in the space to stdout. void PrintRSet(); #endif - // Constants. - static const int kMapPageIndexBits = 10; - static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1; - - static const int kPageExtra = Page::kObjectAreaSize % Map::kSize; - protected: // Virtual function in the superclass. Slow path of AllocateRaw. HeapObject* SlowAllocateRaw(int size_in_bytes); @@ -1545,9 +1557,41 @@ class MapSpace : public PagedSpace { HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); private: + // The size of objects in this space. + int object_size_in_bytes_; + + // The name of this space. + const char* name_; + // The space's free list. 
- MapSpaceFreeList free_list_; + FixedSizeFreeList free_list_; +}; + +// ----------------------------------------------------------------------------- +// Old space for all map objects + +class MapSpace : public FixedSpace { + public: + // Creates a map space object with a maximum capacity. + MapSpace(int max_capacity, AllocationSpace id) + : FixedSpace(max_capacity, id, Map::kSize, "map") {} + + // Prepares for a mark-compact GC. + virtual void PrepareForMarkCompact(bool will_compact); + + // Given an index, returns the page address. + Address PageAddress(int page_index) { return page_addresses_[page_index]; } + + // Constants. + static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1; + + protected: +#ifdef DEBUG + virtual void VerifyObject(HeapObject* obj); +#endif + + private: // An array of page start address in a map space. Address page_addresses_[kMaxMapPageIndex + 1]; @@ -1557,6 +1601,25 @@ class MapSpace : public PagedSpace { // ----------------------------------------------------------------------------- +// Old space for all global object property cell objects + +class CellSpace : public FixedSpace { + public: + // Creates a property cell space object with a maximum capacity. + CellSpace(int max_capacity, AllocationSpace id) + : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {} + + protected: +#ifdef DEBUG + virtual void VerifyObject(HeapObject* obj); +#endif + + public: + TRACK_MEMORY("MapSpace") +}; + + +// ----------------------------------------------------------------------------- // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by // the large object space. A large object is allocated from OS heap with // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc index 44ba29746..9a137e321 100644 --- a/deps/v8/src/string-stream.cc +++ b/deps/v8/src/string-stream.cc @@ -343,10 +343,11 @@ void StringStream::PrintUsingMap(JSObject* js_object) { Add("<Invalid map>\n"); return; } - for (DescriptorReader r(map->instance_descriptors()); !r.eos(); r.advance()) { - switch (r.type()) { + DescriptorArray* descs = map->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + switch (descs->GetType(i)) { case FIELD: { - Object* key = r.GetKey(); + Object* key = descs->GetKey(i); if (key->IsString() || key->IsNumber()) { int len = 3; if (key->IsString()) { @@ -360,7 +361,7 @@ void StringStream::PrintUsingMap(JSObject* js_object) { key->ShortPrint(); } Add(": "); - Object* value = js_object->FastPropertyAt(r.GetFieldIndex()); + Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i)); Add("%o\n", value); } } diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js index 3d8a11b22..263fac56c 100644 --- a/deps/v8/src/string.js +++ b/deps/v8/src/string.js @@ -35,7 +35,7 @@ // Set the String function and constructor. %SetCode($String, function(x) { var value = %_ArgumentsLength() == 0 ? 
'' : ToString(x); - if (%IsConstructCall()) { + if (%_IsConstructCall()) { %_SetValueOf(this, value); } else { return value; @@ -46,7 +46,7 @@ // ECMA-262 section 15.5.4.2 function StringToString() { - if (!IS_STRING(this) && !%HasStringClass(this)) + if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) throw new $TypeError('String.prototype.toString is not generic'); return %_ValueOf(this); } @@ -54,7 +54,7 @@ function StringToString() { // ECMA-262 section 15.5.4.3 function StringValueOf() { - if (!IS_STRING(this) && !%HasStringClass(this)) + if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) throw new $TypeError('String.prototype.valueOf is not generic'); return %_ValueOf(this); } @@ -433,7 +433,7 @@ function ApplyReplacementFunction(replace, lastMatchInfo, subject) { if (m == 1) { var s = CaptureString(subject, lastMatchInfo, 0); // Don't call directly to avoid exposing the built-in global object. - return ToString(replace.call(null, s, index, subject)); + return replace.call(null, s, index, subject); } var parameters = $Array(m + 2); for (var j = 0; j < m; j++) { @@ -441,7 +441,7 @@ function ApplyReplacementFunction(replace, lastMatchInfo, subject) { } parameters[j] = index; parameters[j + 1] = subject; - return ToString(replace.apply(null, parameters)); + return replace.apply(null, parameters); } diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index 0c80378c9..7eb8cd3c6 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -172,6 +172,29 @@ Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) { } +Object* StubCache::ComputeLoadGlobal(String* name, + JSObject* receiver, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + bool is_dont_delete) { + Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL); + Object* code = receiver->map()->FindInCodeCache(name, flags); + if (code->IsUndefined()) { + LoadStubCompiler compiler; + code = compiler.CompileLoadGlobal(receiver, + holder, + cell, + name, + is_dont_delete); + if (code->IsFailure()) return code; + LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name)); + Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); + if (result->IsFailure()) return code; + } + return Set(name, receiver->map(), Code::cast(code)); +} + + Object* StubCache::ComputeKeyedLoadField(String* name, JSObject* receiver, JSObject* holder, @@ -317,6 +340,23 @@ Object* StubCache::ComputeStoreField(String* name, } +Object* StubCache::ComputeStoreGlobal(String* name, + GlobalObject* receiver, + JSGlobalPropertyCell* cell) { + Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, NORMAL); + Object* code = receiver->map()->FindInCodeCache(name, flags); + if (code->IsUndefined()) { + StoreStubCompiler compiler; + code = compiler.CompileStoreGlobal(receiver, cell, name); + if (code->IsFailure()) return code; + LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name)); + Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); + if (result->IsFailure()) return code; + } + return Set(name, receiver->map(), Code::cast(code)); +} + + Object* StubCache::ComputeStoreCallback(String* name, JSObject* receiver, AccessorInfo* callback) { @@ -409,9 +449,10 @@ Object* StubCache::ComputeCallConstant(int argc, // caches. if (!function->is_compiled()) return Failure::InternalError(); // Compile the stub - only create stubs for fully compiled functions. 
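
Every Compute* routine above and below follows the same probe/compile/fill shape: look the stub up in the receiver map's code cache under (name, flags), compile and cache it only on a miss, then install it in the stub cache's hash tables with Set. A toy standalone model of that control flow (std::map stands in for the per-map code cache; all names are invented):

    #include <map>
    #include <string>
    #include <utility>

    using Flags = int;
    using CodeHandle = std::string;  // Stand-in for a compiled Code object.

    struct MapCodeCache {
      std::map<std::pair<std::string, Flags>, CodeHandle> entries;

      const CodeHandle* Find(const std::string& name, Flags flags) const {
        auto it = entries.find({name, flags});
        return it == entries.end() ? nullptr : &it->second;
      }

      void Update(const std::string& name, Flags flags, CodeHandle code) {
        entries.emplace(std::make_pair(name, flags), std::move(code));
      }
    };

    CodeHandle CompileStub(const std::string& name) {  // Stand-in compiler.
      return "stub-for-" + name;
    }

    CodeHandle ComputeLoad(MapCodeCache& cache, const std::string& name,
                           Flags flags) {
      if (const CodeHandle* hit = cache.Find(name, flags)) return *hit;  // Probe.
      CodeHandle code = CompileStub(name);                               // Compile.
      cache.Update(name, flags, code);                                   // Fill.
      return code;
    }
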
- CallStubCompiler compiler(argc); - code = compiler.CompileCallConstant(object, holder, function, check, flags); + CallStubCompiler compiler(argc, in_loop); + code = compiler.CompileCallConstant(object, holder, function, name, check); if (code->IsFailure()) return code; + ASSERT_EQ(flags, Code::cast(code)->flags()); LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name)); Object* result = map->UpdateCodeCache(name, Code::cast(code)); if (result->IsFailure()) return result; @@ -442,9 +483,10 @@ Object* StubCache::ComputeCallField(int argc, argc); Object* code = map->FindInCodeCache(name, flags); if (code->IsUndefined()) { - CallStubCompiler compiler(argc); - code = compiler.CompileCallField(object, holder, index, name, flags); + CallStubCompiler compiler(argc, in_loop); + code = compiler.CompileCallField(object, holder, index, name); if (code->IsFailure()) return code; + ASSERT_EQ(flags, Code::cast(code)->flags()); LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name)); Object* result = map->UpdateCodeCache(name, Code::cast(code)); if (result->IsFailure()) return result; @@ -475,9 +517,10 @@ Object* StubCache::ComputeCallInterceptor(int argc, argc); Object* code = map->FindInCodeCache(name, flags); if (code->IsUndefined()) { - CallStubCompiler compiler(argc); + CallStubCompiler compiler(argc, NOT_IN_LOOP); code = compiler.CompileCallInterceptor(object, holder, name); if (code->IsFailure()) return code; + ASSERT_EQ(flags, Code::cast(code)->flags()); LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name)); Object* result = map->UpdateCodeCache(name, Code::cast(code)); if (result->IsFailure()) return result; @@ -496,11 +539,40 @@ Object* StubCache::ComputeCallNormal(int argc, } +Object* StubCache::ComputeCallGlobal(int argc, + InLoopFlag in_loop, + String* name, + JSObject* receiver, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function) { + Code::Flags flags = + Code::ComputeMonomorphicFlags(Code::CALL_IC, NORMAL, in_loop, argc); + Object* code = receiver->map()->FindInCodeCache(name, flags); + if (code->IsUndefined()) { + // If the function hasn't been compiled yet, we cannot do it now + // because it may cause GC. To avoid this issue, we return an + // internal error which will make sure we do not update any + // caches. + if (!function->is_compiled()) return Failure::InternalError(); + CallStubCompiler compiler(argc, in_loop); + code = compiler.CompileCallGlobal(receiver, holder, cell, function, name); + if (code->IsFailure()) return code; + ASSERT_EQ(flags, Code::cast(code)->flags()); + LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name)); + Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code)); + if (result->IsFailure()) return code; + } + return Set(name, receiver->map(), Code::cast(code)); +} + + static Object* GetProbeValue(Code::Flags flags) { - Dictionary* dictionary = Heap::non_monomorphic_cache(); - int entry = dictionary->FindNumberEntry(flags); + // Use raw_unchecked... so we don't get assert failures during GC. 
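
Throughout these routines, the Code::Flags value used as the cache key folds the IC kind, property type, in-loop bit, and argument count into one integer, which is why ComputeMonomorphicFlags appears at the top of each of them and why the new ASSERT_EQ checks can compare the compiled stub's flags against it. A hedged sketch of such bit packing; the field layout here is invented, not V8's actual encoding:

    #include <cstdint>

    // Invented layout: | argc:23 | in_loop:1 | type:4 | kind:4 |
    enum Kind { LOAD_IC, STORE_IC, CALL_IC };
    enum Type { NORMAL, FIELD, CONSTANT_FUNCTION };

    constexpr uint32_t ComputeMonomorphicFlags(Kind kind, Type type,
                                               bool in_loop, int argc) {
      return static_cast<uint32_t>(kind) |
             (static_cast<uint32_t>(type) << 4) |
             (static_cast<uint32_t>(in_loop) << 8) |
             (static_cast<uint32_t>(argc) << 9);
    }

    constexpr Kind KindOf(uint32_t flags) {
      return static_cast<Kind>(flags & 0xF);
    }

    static_assert(KindOf(ComputeMonomorphicFlags(CALL_IC, NORMAL, true, 2)) ==
                      CALL_IC,
                  "kind must survive the round trip");
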
+ NumberDictionary* dictionary = Heap::raw_unchecked_non_monomorphic_cache(); + int entry = dictionary->FindEntry(flags); if (entry != -1) return dictionary->ValueAt(entry); - return Heap::undefined_value(); + return Heap::raw_unchecked_undefined_value(); } @@ -514,7 +586,7 @@ static Object* ProbeCache(Code::Flags flags) { Heap::non_monomorphic_cache()->AtNumberPut(flags, Heap::undefined_value()); if (result->IsFailure()) return result; - Heap::set_non_monomorphic_cache(Dictionary::cast(result)); + Heap::public_set_non_monomorphic_cache(NumberDictionary::cast(result)); return probe; } @@ -522,7 +594,7 @@ static Object* FillCache(Object* code) { if (code->IsCode()) { int entry = - Heap::non_monomorphic_cache()->FindNumberEntry( + Heap::non_monomorphic_cache()->FindEntry( Code::cast(code)->flags()); // The entry must be present, see comment in ProbeCache. ASSERT(entry != -1); @@ -885,6 +957,10 @@ Object* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) { Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) { + // Check for allocation failures during stub compilation. + if (failure_->IsFailure()) return failure_; + + // Create code object in the heap. CodeDesc desc; masm_.GetCode(&desc); Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject()); @@ -933,7 +1009,7 @@ Object* CallStubCompiler::GetCode(PropertyType type, String* name) { int argc = arguments_.immediate(); Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, type, - NOT_IN_LOOP, + in_loop_, argc); return GetCodeWithFlags(flags, name); } diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index b79841a36..8bee37074 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -78,6 +78,13 @@ class StubCache : public AllStatic { static Object* ComputeLoadNormal(String* name, JSObject* receiver); + static Object* ComputeLoadGlobal(String* name, + JSObject* receiver, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + bool is_dont_delete); + + // --- static Object* ComputeKeyedLoadField(String* name, @@ -112,6 +119,10 @@ class StubCache : public AllStatic { int field_index, Map* transition = NULL); + static Object* ComputeStoreGlobal(String* name, + GlobalObject* receiver, + JSGlobalPropertyCell* cell); + static Object* ComputeStoreCallback(String* name, JSObject* receiver, AccessorInfo* callback); @@ -151,6 +162,14 @@ class StubCache : public AllStatic { Object* object, JSObject* holder); + static Object* ComputeCallGlobal(int argc, + InLoopFlag in_loop, + String* name, + JSObject* receiver, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function); + // --- static Object* ComputeCallInitialize(int argc, InLoopFlag in_loop); @@ -180,11 +199,13 @@ class StubCache : public AllStatic { static void GenerateMiss(MacroAssembler* masm); // Generate code for probing the stub cache table. + // If extra != no_reg it might be used as an extra scratch register.
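
GenerateProbe, declared below, emits the assembly form of a two-table lookup: one hash of the property name and receiver map selects a primary slot, a second hash selects a fallback slot, and only if both miss does execution fall back to the runtime. Roughly, and with invented hash mixes and table size:

    #include <cstddef>
    #include <cstdint>

    struct Entry { const void* name; const void* map; const void* code; };

    constexpr size_t kTableSize = 2048;  // Invented; must be a power of two.
    Entry primary[kTableSize];
    Entry secondary[kTableSize];

    size_t PrimaryIndex(const void* name, const void* map) {
      uintptr_t h = reinterpret_cast<uintptr_t>(name) ^
                    reinterpret_cast<uintptr_t>(map);
      return (h >> 3) & (kTableSize - 1);
    }

    size_t SecondaryIndex(const void* name, const void* map) {
      uintptr_t h = reinterpret_cast<uintptr_t>(name) +
                    reinterpret_cast<uintptr_t>(map);
      return (h >> 5) & (kTableSize - 1);  // A different mix than the primary.
    }

    const void* Probe(const void* name, const void* map) {
      const Entry& p = primary[PrimaryIndex(name, map)];
      if (p.name == name && p.map == map) return p.code;
      const Entry& s = secondary[SecondaryIndex(name, map)];
      if (s.name == name && s.map == map) return s.code;
      return nullptr;  // Miss: fall through to the IC miss handler.
    }

The extra scratch register mentioned above gives the emitted probe one more register to compute these hashes in without spilling.
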
static void GenerateProbe(MacroAssembler* masm, Code::Flags flags, Register receiver, Register name, - Register scratch); + Register scratch, + Register extra); enum Table { kPrimary, @@ -303,7 +324,7 @@ class StubCompiler BASE_EMBEDDED { JSARRAY_HAS_FAST_ELEMENTS_CHECK }; - StubCompiler() : scope_(), masm_(NULL, 256) { } + StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { } Object* CompileCallInitialize(Code::Flags flags); Object* CompileCallPreMonomorphic(Code::Flags flags); @@ -323,40 +344,7 @@ class StubCompiler BASE_EMBEDDED { static void GenerateFastPropertyLoad(MacroAssembler* masm, Register dst, Register src, JSObject* holder, int index); - static void GenerateLoadField(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register scratch1, - Register scratch2, - int index, - Label* miss_label); - static void GenerateLoadCallback(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register name, - Register scratch1, - Register scratch2, - AccessorInfo* callback, - Label* miss_label); - static void GenerateLoadConstant(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Register receiver, - Register scratch1, - Register scratch2, - Object* value, - Label* miss_label); - static void GenerateLoadInterceptor(MacroAssembler* masm, - JSObject* object, - JSObject* holder, - Smi* lookup_hint, - Register receiver, - Register name, - Register scratch1, - Register scratch2, - Label* miss_label); + static void GenerateLoadArrayLength(MacroAssembler* masm, Register receiver, Register scratch, @@ -391,10 +379,60 @@ class StubCompiler BASE_EMBEDDED { Object* GetCodeWithFlags(Code::Flags flags, String* name); MacroAssembler* masm() { return &masm_; } + void set_failure(Failure* failure) { failure_ = failure; } + + // Check the integrity of the prototype chain to make sure that the + // current IC is still valid. 
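
The declaration that follows captures the invariant a monomorphic stub depends on: every object from the receiver up to the holder must still have the map it had when the stub was compiled. A toy model of that walk, with invented stand-in types:

    #include <cstddef>

    struct Map;                 // Stand-in for a hidden class.
    struct Object {
      const Map* map;
      const Object* prototype;  // nullptr terminates the chain.
    };

    // Returns false where the generated code would jump to the miss label.
    bool CheckPrototypes(const Object* object, const Object* holder,
                         const Map* const* expected_maps) {
      size_t i = 0;
      for (const Object* o = object; o != nullptr; o = o->prototype, ++i) {
        if (o->map != expected_maps[i]) return false;  // Shape changed: miss.
        if (o == holder) return true;                  // Reached the holder.
      }
      return false;  // The holder is no longer on the chain.
    }
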
+ Register CheckPrototypes(JSObject* object, + Register object_reg, + JSObject* holder, + Register holder_reg, + Register scratch, + String* name, + Label* miss); + + void GenerateLoadField(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + int index, + String* name, + Label* miss); + + void GenerateLoadCallback(JSObject* object, + JSObject* holder, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + AccessorInfo* callback, + String* name, + Label* miss); + + void GenerateLoadConstant(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + Object* value, + String* name, + Label* miss); + + void GenerateLoadInterceptor(JSObject* object, + JSObject* holder, + Smi* lookup_hint, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + String* name, + Label* miss); private: HandleScope scope_; MacroAssembler masm_; + Failure* failure_; }; @@ -416,6 +454,12 @@ class LoadStubCompiler: public StubCompiler { JSObject* holder, String* name); + Object* CompileLoadGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + String* name, + bool is_dont_delete); + private: Object* GetCode(PropertyType type, String* name); }; @@ -457,6 +501,10 @@ class StoreStubCompiler: public StubCompiler { AccessorInfo* callbacks, String* name); Object* CompileStoreInterceptor(JSObject* object, String* name); + Object* CompileStoreGlobal(GlobalObject* object, + JSGlobalPropertyCell* holder, + String* name); + private: Object* GetCode(PropertyType type, String* name); @@ -477,24 +525,30 @@ class KeyedStoreStubCompiler: public StubCompiler { class CallStubCompiler: public StubCompiler { public: - explicit CallStubCompiler(int argc) : arguments_(argc) { } + explicit CallStubCompiler(int argc, InLoopFlag in_loop) + : arguments_(argc), in_loop_(in_loop) { } Object* CompileCallField(Object* object, JSObject* holder, int index, - String* name, - Code::Flags flags); + String* name); Object* CompileCallConstant(Object* object, JSObject* holder, JSFunction* function, - CheckType check, - Code::Flags flags); + String* name, + CheckType check); Object* CompileCallInterceptor(Object* object, JSObject* holder, String* name); + Object* CompileCallGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name); private: const ParameterCount arguments_; + const InLoopFlag in_loop_; const ParameterCount& arguments() { return arguments_; } diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc index 4a9e070ac..ef1359321 100644 --- a/deps/v8/src/unicode.cc +++ b/deps/v8/src/unicode.cc @@ -194,18 +194,13 @@ static int LookupMapping(const int32_t* table, uchar Utf8::CalculateValue(const byte* str, unsigned length, unsigned* cursor) { - static const uchar kMaxOneByteChar = 0x7F; - static const uchar kMaxTwoByteChar = 0x7FF; - static const uchar kMaxThreeByteChar = 0xFFFF; - static const uchar kMaxFourByteChar = 0x1FFFFF; - // We only get called for non-ascii characters. 
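
The body continues below; as a reference point, here is a compact standalone decoder for the multi-byte cases with the same validity test in spirit (a continuation byte XORed with 0x80 must leave the top two bits clear). This sketch does not reject overlong encodings, which the real code rules out with range checks:

    #include <cstdint>

    using uchar = uint32_t;
    using byte = uint8_t;
    const uchar kBadChar = 0xFFFD;

    uchar Decode(const byte* str, unsigned length, unsigned* cursor) {
      byte first = str[0];
      if (first < 0x80) { *cursor += 1; return first; }  // ASCII fast path.
      int extra;    // Number of continuation bytes.
      uchar value;  // Payload bits accumulated so far.
      if ((first & 0xE0) == 0xC0)      { extra = 1; value = first & 0x1F; }
      else if ((first & 0xF0) == 0xE0) { extra = 2; value = first & 0x0F; }
      else if ((first & 0xF8) == 0xF0) { extra = 3; value = first & 0x07; }
      else                             { *cursor += 1; return kBadChar; }
      if (length < static_cast<unsigned>(extra) + 1) {
        *cursor += 1;
        return kBadChar;
      }
      for (int i = 1; i <= extra; i++) {
        byte next = str[i] ^ 0x80;  // Valid continuations become 00xxxxxx.
        if (next & 0xC0) { *cursor += 1; return kBadChar; }
        value = (value << 6) | next;
      }
      *cursor += extra + 1;
      return value;
    }
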
if (length == 1) { *cursor += 1; return kBadChar; } - int first = str[0]; - int second = str[1] ^ 0x80; + byte first = str[0]; + byte second = str[1] ^ 0x80; if (second & 0xC0) { *cursor += 1; return kBadChar; @@ -227,7 +222,7 @@ uchar Utf8::CalculateValue(const byte* str, *cursor += 1; return kBadChar; } - int third = str[2] ^ 0x80; + byte third = str[2] ^ 0x80; if (third & 0xC0) { *cursor += 1; return kBadChar; @@ -245,7 +240,7 @@ uchar Utf8::CalculateValue(const byte* str, *cursor += 1; return kBadChar; } - int fourth = str[3] ^ 0x80; + byte fourth = str[3] ^ 0x80; if (fourth & 0xC0) { *cursor += 1; return kBadChar; diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h index 06f116efe..a62cd7446 100644 --- a/deps/v8/src/v8-counters.h +++ b/deps/v8/src/v8-counters.h @@ -130,9 +130,15 @@ namespace internal { SC(keyed_load_inline_miss, V8.KeyedLoadInlineMiss) \ SC(named_load_inline, V8.NamedLoadInline) \ SC(named_load_inline_miss, V8.NamedLoadInlineMiss) \ + SC(named_load_global_inline, V8.NamedLoadGlobalInline) \ + SC(named_load_global_inline_miss, V8.NamedLoadGlobalInlineMiss) \ SC(keyed_store_field, V8.KeyedStoreField) \ SC(keyed_store_inline, V8.KeyedStoreInline) \ SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss) \ + SC(named_store_global_inline, V8.NamedStoreGlobalInline) \ + SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \ + SC(call_global_inline, V8.CallGlobalInline) \ + SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \ SC(for_in, V8.ForIn) \ SC(enum_cache_hits, V8.EnumCacheHits) \ SC(enum_cache_misses, V8.EnumCacheMisses) \ diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index fe463513d..841c92085 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -154,7 +154,7 @@ function SetupGlobal() { // ECMA-262 - 15.1.1.3. %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE); - + // Setup non-enumerable function on the global object. InstallFunctions(global, DONT_ENUM, $Array( "isNaN", GlobalIsNaN, @@ -174,7 +174,7 @@ SetupGlobal(); %SetCode($Boolean, function(x) { - if (%IsConstructCall()) { + if (%_IsConstructCall()) { %_SetValueOf(this, ToBoolean(x)); } else { return ToBoolean(x); @@ -192,7 +192,7 @@ $Object.prototype.constructor = $Object; // ECMA-262 - 15.2.4.2 function ObjectToString() { - var c = %ClassOf(this); + var c = %_ClassOf(this); // Hide Arguments from the outside. if (c === 'Arguments') c = 'Object'; return "[object " + c + "]"; @@ -273,7 +273,7 @@ function ObjectLookupSetter(name) { %SetCode($Object, function(x) { - if (%IsConstructCall()) { + if (%_IsConstructCall()) { if (x == null) return this; return ToObject(x); } else { @@ -311,7 +311,7 @@ SetupObject(); function BooleanToString() { // NOTE: Both Boolean objects and values can enter here as // 'this'. This is not as dictated by ECMA-262. - if (!IS_BOOLEAN(this) && !%HasBooleanClass(this)) + if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) throw new $TypeError('Boolean.prototype.toString is not generic'); return ToString(%_ValueOf(this)); } @@ -320,7 +320,7 @@ function BooleanToString() { function BooleanValueOf() { // NOTE: Both Boolean objects and values can enter here as // 'this'. This is not as dictated by ECMA-262. - if (!IS_BOOLEAN(this) && !%HasBooleanClass(this)) + if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) throw new $TypeError('Boolean.prototype.valueOf is not generic'); return %_ValueOf(this); } @@ -350,7 +350,7 @@ SetupBoolean(); // Set the Number function and constructor. 
%SetCode($Number, function(x) { var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x); - if (%IsConstructCall()) { + if (%_IsConstructCall()) { %_SetValueOf(this, value); } else { return value; @@ -365,7 +365,7 @@ function NumberToString(radix) { // 'this'. This is not as dictated by ECMA-262. var number = this; if (!IS_NUMBER(this)) { - if (!%HasNumberClass(this)) + if (!IS_NUMBER_WRAPPER(this)) throw new $TypeError('Number.prototype.toString is not generic'); // Get the value of this number in case it's an object. number = %_ValueOf(this); @@ -395,7 +395,7 @@ function NumberToLocaleString() { function NumberValueOf() { // NOTE: Both Number objects and values can enter here as // 'this'. This is not as dictated by ECMA-262. - if (!IS_NUMBER(this) && !%HasNumberClass(this)) + if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this)) throw new $TypeError('Number.prototype.valueOf is not generic'); return %_ValueOf(this); } @@ -502,10 +502,9 @@ SetupNumber(); $Function.prototype.constructor = $Function; function FunctionSourceString(func) { - // NOTE: Both Function objects and values can enter here as - // 'func'. This is not as dictated by ECMA-262. - if (!IS_FUNCTION(func) && !%HasFunctionClass(func)) + if (!IS_FUNCTION(func)) { throw new $TypeError('Function.prototype.toString is not generic'); + } var source = %FunctionGetSourceCode(func); if (!IS_STRING(source)) { @@ -570,4 +569,3 @@ function SetupFunction() { } SetupFunction(); - diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h index c0d14356f..3f1f11bcd 100644 --- a/deps/v8/src/variables.h +++ b/deps/v8/src/variables.h @@ -143,6 +143,12 @@ class Variable: public ZoneObject { ARGUMENTS }; + Variable(Scope* scope, + Handle<String> name, + Mode mode, + bool is_valid_lhs, + Kind kind); + // Printing support static const char* Mode2String(Mode mode); @@ -196,9 +202,6 @@ class Variable: public ZoneObject { SmiAnalysis* type() { return &type_; } private: - Variable(Scope* scope, Handle<String> name, Mode mode, bool is_valid_LHS, - Kind kind); - Scope* scope_; Handle<String> name_; Mode mode_; @@ -216,13 +219,10 @@ class Variable: public ZoneObject { SmiAnalysis type_; // Code generation. - // rewrite_ is usually a Slot or a Property, but maybe any expression. + // rewrite_ is usually a Slot or a Property, but may be any expression. Expression* rewrite_; - friend class VariableProxy; - friend class Scope; - friend class LocalsMap; - friend class AstBuildingParser; + friend class Scope; // Has explicit access to rewrite_. }; diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 306249ae6..f1833a593 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,7 +34,7 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 1 #define MINOR_VERSION 2 -#define BUILD_NUMBER 10 +#define BUILD_NUMBER 14 #define PATCH_LEVEL 0 #define CANDIDATE_VERSION false diff --git a/deps/v8/src/virtual-frame.h b/deps/v8/src/virtual-frame.h index 293f9e534..0bf0ca255 100644 --- a/deps/v8/src/virtual-frame.h +++ b/deps/v8/src/virtual-frame.h @@ -37,6 +37,8 @@ #include "x64/virtual-frame-x64.h" #elif V8_TARGET_ARCH_ARM #include "arm/virtual-frame-arm.h" +#else +#error Unsupported target architecture. 
#endif #endif // V8_VIRTUAL_FRAME_H_ diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index 7da6a8f66..c4ee45483 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -73,45 +73,8 @@ XMMRegister xmm14 = { 14 }; XMMRegister xmm15 = { 15 }; -Operand::Operand(Register base, int32_t disp): rex_(0) { - len_ = 1; - if (base.is(rsp) || base.is(r12)) { - // SIB byte is needed to encode (rsp + offset) or (r12 + offset). - set_sib(times_1, rsp, base); - } - - if (disp == 0 && !base.is(rbp) && !base.is(r13)) { - set_modrm(0, base); - } else if (is_int8(disp)) { - set_modrm(1, base); - set_disp8(disp); - } else { - set_modrm(2, base); - set_disp32(disp); - } -} - - -Operand::Operand(Register base, - Register index, - ScaleFactor scale, - int32_t disp): rex_(0) { - ASSERT(!index.is(rsp)); - len_ = 1; - set_sib(scale, index, base); - if (disp == 0 && !base.is(rbp) && !base.is(r13)) { - // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits - // possibly set by set_sib. - set_modrm(0, rsp); - } else if (is_int8(disp)) { - set_modrm(1, rsp); - set_disp8(disp); - } else { - set_modrm(2, rsp); - set_disp32(disp); - } -} - +// ----------------------------------------------------------------------------- +// Implementation of CpuFeatures // The required user mode extensions in X64 are (from AMD64 ABI Table A.1): // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall @@ -193,6 +156,71 @@ void CpuFeatures::Probe() { ASSERT(IsSupported(CMOV)); } + +// ----------------------------------------------------------------------------- +// Implementation of RelocInfo + +// Patch the code at the current PC with a call to the target address. +// Additional guard int3 instructions can be added if required. +void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { + // Call instruction takes up 13 bytes and int3 takes up one byte. + Address patch_site = pc_; + Memory::uint16_at(patch_site) = 0xBA49u; // movq r10, imm64 + // Write "0x00, call r10" starting at last byte of address. We overwrite + // the 0x00 later, and this lets us write a uint32. + Memory::uint32_at(patch_site + 9) = 0xD2FF4900u; // 0x00, call r10 + Memory::Address_at(patch_site + 2) = target; + + // Add the requested number of int3 instructions after the call. + for (int i = 0; i < guard_bytes; i++) { + *(patch_site + 13 + i) = 0xCC; // int3 + } +} + + +// ----------------------------------------------------------------------------- +// Implementation of Operand + +Operand::Operand(Register base, int32_t disp): rex_(0) { + len_ = 1; + if (base.is(rsp) || base.is(r12)) { + // SIB byte is needed to encode (rsp + offset) or (r12 + offset). + set_sib(times_1, rsp, base); + } + + if (disp == 0 && !base.is(rbp) && !base.is(r13)) { + set_modrm(0, base); + } else if (is_int8(disp)) { + set_modrm(1, base); + set_disp8(disp); + } else { + set_modrm(2, base); + set_disp32(disp); + } +} + + +Operand::Operand(Register base, + Register index, + ScaleFactor scale, + int32_t disp): rex_(0) { + ASSERT(!index.is(rsp)); + len_ = 1; + set_sib(scale, index, base); + if (disp == 0 && !base.is(rbp) && !base.is(r13)) { + // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits + // possibly set by set_sib. 
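
The two relocated Operand constructors below encode x64 memory operands, and the special cases they guard against are fixed by the instruction format: rsp or r12 as a base forces a SIB byte, rbp or r13 cannot use the displacement-free form (mod=00 with rm=101 means RIP-relative instead), and a displacement gets 8 bits when it fits. A standalone sketch of that mode selection, REX prefix handling omitted:

    #include <cstdint>
    #include <vector>

    // Emits ModR/M (+ optional SIB and displacement) for [base + disp], with
    // the reg field left as 0. base_low3 is the low three bits of the
    // register number: rsp/r12 -> 4, rbp/r13 -> 5.
    std::vector<uint8_t> EncodeBaseDisp(int base_low3, int32_t disp) {
      std::vector<uint8_t> out;
      bool needs_sib = (base_low3 == 4);     // rm=100 selects a SIB byte.
      bool disp_free_ok = (base_low3 != 5);  // mod=00, rm=101 is RIP-relative.
      int mod;
      if (disp == 0 && disp_free_ok)        mod = 0;  // No displacement.
      else if (disp >= -128 && disp <= 127) mod = 1;  // disp8 follows.
      else                                  mod = 2;  // disp32 follows.
      out.push_back(static_cast<uint8_t>((mod << 6) | base_low3));
      if (needs_sib)  // scale=1, index=100 (none), base as given.
        out.push_back(static_cast<uint8_t>((4 << 3) | base_low3));
      if (mod == 1) out.push_back(static_cast<uint8_t>(disp));
      if (mod == 2)
        for (int i = 0; i < 4; i++)
          out.push_back(static_cast<uint8_t>((disp >> (8 * i)) & 0xFF));
      return out;
    }
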
+ set_modrm(0, rsp); + } else if (is_int8(disp)) { + set_modrm(1, rsp); + set_disp8(disp); + } else { + set_modrm(2, rsp); + set_disp32(disp); + } +} + + // ----------------------------------------------------------------------------- // Implementation of Assembler @@ -273,6 +301,7 @@ void Assembler::GetCode(CodeDesc* desc) { desc->buffer = buffer_; desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); + ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system. desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); desc->origin = this; @@ -426,6 +455,17 @@ void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) { } + +void Assembler::arithmetic_op_32(byte opcode, + const Operand& dst, + Register src) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit_optional_rex_32(src, dst); + emit(opcode); + emit_operand(src, dst); +} + + void Assembler::immediate_arithmetic_op(byte subcode, Register dst, Immediate src) { @@ -470,8 +510,8 @@ void Assembler::immediate_arithmetic_op_32(byte subcode, EnsureSpace ensure_space(this); last_pc_ = pc_; emit_optional_rex_32(dst); - emit(0x83); if (is_int8(src.value_)) { + emit(0x83); emit_modrm(subcode, dst); emit(src.value_); } else if (dst.is(rax)) { @@ -567,6 +607,23 @@ void Assembler::shift_32(Register dst, int subcode) { } + +void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + ASSERT(is_uint6(shift_amount.value_)); // illegal shift count + if (shift_amount.value_ == 1) { + emit_optional_rex_32(dst); + emit(0xD1); + emit_modrm(subcode, dst); + } else { + emit_optional_rex_32(dst); + emit(0xC1); + emit_modrm(subcode, dst); + emit(shift_amount.value_); + } +} + + void Assembler::bt(const Operand& dst, Register src) { EnsureSpace ensure_space(this); last_pc_ = pc_; @@ -750,6 +807,15 @@ void Assembler::idiv(Register src) { } + +void Assembler::imul(Register src) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit_rex_64(src); + emit(0xF7); + emit_modrm(0x5, src); +} + + void Assembler::imul(Register dst, Register src) { EnsureSpace ensure_space(this); last_pc_ = pc_; @@ -1058,6 +1124,19 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) { void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) { + // Non-relocatable values might not need a 64-bit representation. + if (rmode == RelocInfo::NONE) { + // Sadly, there is no zero or sign extending move for 8-bit immediates. + if (is_int32(value)) { + movq(dst, Immediate(static_cast<int32_t>(value))); + return; + } else if (is_uint32(value)) { + movl(dst, Immediate(static_cast<int32_t>(value))); + return; + } + // Value cannot be represented by 32 bits, so do a full 64 bit immediate + // value. + } EnsureSpace ensure_space(this); last_pc_ = pc_; emit_rex_64(dst); @@ -1087,16 +1166,24 @@ void Assembler::movq(const Operand& dst, Immediate value) { void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) { - EnsureSpace ensure_space(this); - last_pc_ = pc_; - ASSERT(!Heap::InNewSpace(*value)); - emit_rex_64(dst); - emit(0xB8 | dst.low_bits()); - if (value->IsHeapObject()) { - emitq(reinterpret_cast<uintptr_t>(value.location()), mode); + // If there is no relocation info, emit the value of the handle efficiently + // (possibly using less than 8 bytes for the value). + if (mode == RelocInfo::NONE) { + // There is no possible reason to store a heap pointer without relocation + // info, so it must be a smi.
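
Both new movq paths above lean on the same size trick: a value that fits in 32 bits never needs the 10-byte movq imm64. A sign-extending 32-bit immediate covers int32 values, a plain 32-bit movl zero-extends and covers uint32 values, and (as the comment notes) a smi's significant bits always fit in 32. The selection, as a small self-contained helper:

    #include <cstdint>
    #include <limits>

    bool is_int32(int64_t v) {
      return v >= std::numeric_limits<int32_t>::min() &&
             v <= std::numeric_limits<int32_t>::max();
    }
    bool is_uint32(int64_t v) { return v >= 0 && v <= 0xFFFFFFFFLL; }

    enum MoveKind {
      kMovqImm32,  // Sign-extending movq with a 32-bit immediate.
      kMovlImm32,  // 32-bit movl; the hardware zero-extends to 64 bits.
      kMovqImm64   // Full 64-bit immediate as a last resort.
    };

    MoveKind ChooseMove(int64_t value) {
      if (is_int32(value)) return kMovqImm32;
      if (is_uint32(value)) return kMovlImm32;
      return kMovqImm64;
    }
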
+ ASSERT(value->IsSmi()); + // Smis never have more than 32 significant bits, but they might + // have garbage in the high bits. + movq(dst, + Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(*value)))); } else { - ASSERT_EQ(RelocInfo::NONE, mode); - emitq(reinterpret_cast<uintptr_t>(*value), RelocInfo::NONE); + EnsureSpace ensure_space(this); + last_pc_ = pc_; + ASSERT(value->IsHeapObject()); + ASSERT(!Heap::InNewSpace(*value)); + emit_rex_64(dst); + emit(0xB8 | dst.low_bits()); + emitq(reinterpret_cast<uintptr_t>(value.location()), mode); } } @@ -1439,7 +1526,7 @@ void Assembler::testb(Register reg, Immediate mask) { last_pc_ = pc_; if (reg.is(rax)) { emit(0xA8); - emit(mask); + emit(mask.value_); // Low byte emitted. } else { if (reg.code() > 3) { // Register is not one of al, bl, cl, dl. Its encoding needs REX. @@ -1463,6 +1550,15 @@ void Assembler::testb(const Operand& op, Immediate mask) { } +void Assembler::testl(Register dst, Register src) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit_optional_rex_32(dst, src); + emit(0x85); + emit_modrm(dst, src); +} + + void Assembler::testl(Register reg, Immediate mask) { EnsureSpace ensure_space(this); last_pc_ = pc_; @@ -1551,6 +1647,7 @@ void Assembler::fldz() { void Assembler::fld_s(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xD9); emit_operand(0, adr); } @@ -1559,6 +1656,7 @@ void Assembler::fld_s(const Operand& adr) { void Assembler::fld_d(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDD); emit_operand(0, adr); } @@ -1567,6 +1665,7 @@ void Assembler::fld_d(const Operand& adr) { void Assembler::fstp_s(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xD9); emit_operand(3, adr); } @@ -1575,6 +1674,7 @@ void Assembler::fstp_s(const Operand& adr) { void Assembler::fstp_d(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDD); emit_operand(3, adr); } @@ -1583,6 +1683,7 @@ void Assembler::fstp_d(const Operand& adr) { void Assembler::fild_s(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDB); emit_operand(0, adr); } @@ -1591,6 +1692,7 @@ void Assembler::fild_s(const Operand& adr) { void Assembler::fild_d(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDF); emit_operand(5, adr); } @@ -1599,6 +1701,7 @@ void Assembler::fild_d(const Operand& adr) { void Assembler::fistp_s(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDB); emit_operand(3, adr); } @@ -1608,6 +1711,7 @@ void Assembler::fisttp_s(const Operand& adr) { ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3)); EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDB); emit_operand(1, adr); } @@ -1616,6 +1720,7 @@ void Assembler::fisttp_s(const Operand& adr) { void Assembler::fist_s(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDB); emit_operand(2, adr); } @@ -1624,6 +1729,7 @@ void Assembler::fist_s(const Operand& adr) { void Assembler::fistp_d(const Operand& adr) { EnsureSpace ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDF); emit_operand(8, adr); } @@ -1678,6 +1784,7 @@ void Assembler::fsub(int i) { void Assembler::fisub_s(const Operand& adr) { EnsureSpace 
ensure_space(this); last_pc_ = pc_; + emit_optional_rex_32(adr); emit(0xDA); emit_operand(4, adr); } @@ -2001,11 +2108,11 @@ void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) { void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) { - emit(0xC0 | (dst.code() << 3) | src.code()); + emit(0xC0 | (dst.low_bits() << 3) | src.low_bits()); } void Assembler::emit_sse_operand(XMMRegister dst, Register src) { - emit(0xC0 | (dst.code() << 3) | src.code()); + emit(0xC0 | (dst.low_bits() << 3) | src.low_bits()); } diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index 0d98e5fe4..e8953329b 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -160,6 +160,17 @@ struct XMMRegister { return code_; } + // Return the high bit of the register code as a 0 or 1. Used often + // when constructing the REX prefix byte. + int high_bit() const { + return code_ >> 3; + } + // Return the 3 low bits of the register code. Used when encoding registers + // in modR/M, SIB, and opcode bytes. + int low_bits() const { + return code_ & 0x7; + } + int code_; }; @@ -522,6 +533,10 @@ class Assembler : public Malloced { immediate_arithmetic_op_32(0x0, dst, src); } + void addl(const Operand& dst, Immediate src) { + immediate_arithmetic_op_32(0x0, dst, src); + } + void addq(Register dst, const Operand& src) { arithmetic_op(0x03, dst, src); } @@ -539,10 +554,6 @@ class Assembler : public Malloced { immediate_arithmetic_op(0x0, dst, src); } - void addl(const Operand& dst, Immediate src) { - immediate_arithmetic_op_32(0x0, dst, src); - } - void cmpb(Register dst, Immediate src) { immediate_arithmetic_op_8(0x7, dst, src); } @@ -551,6 +562,26 @@ class Assembler : public Malloced { immediate_arithmetic_op_8(0x7, dst, src); } + void cmpl(Register dst, Register src) { + arithmetic_op_32(0x3B, dst, src); + } + + void cmpl(Register dst, const Operand& src) { + arithmetic_op_32(0x3B, src, dst); + } + + void cmpl(const Operand& dst, Register src) { + arithmetic_op_32(0x39, dst, src); + } + + void cmpl(Register dst, Immediate src) { + immediate_arithmetic_op_32(0x7, dst, src); + } + + void cmpl(const Operand& dst, Immediate src) { + immediate_arithmetic_op_32(0x7, dst, src); + } + void cmpq(Register dst, Register src) { arithmetic_op(0x3B, dst, src); } @@ -567,10 +598,6 @@ class Assembler : public Malloced { immediate_arithmetic_op(0x7, dst, src); } - void cmpl(Register dst, Immediate src) { - immediate_arithmetic_op_32(0x7, dst, src); - } - void cmpq(const Operand& dst, Immediate src) { immediate_arithmetic_op(0x7, dst, src); } @@ -605,12 +632,13 @@ class Assembler : public Malloced { // Divide rdx:rax by src. Quotient in rax, remainder in rdx. void idiv(Register src); - void imul(Register dst, Register src); - void imul(Register dst, const Operand& src); - // Performs the operation dst = src * imm. - void imul(Register dst, Register src, Immediate imm); + // Signed multiply instructions. + void imul(Register src); // rdx:rax = rax * src. + void imul(Register dst, Register src); // dst = dst * src. + void imul(Register dst, const Operand& src); // dst = dst * src. + void imul(Register dst, Register src, Immediate imm); // dst = src * imm. // Multiply 32 bit registers - void imull(Register dst, Register src); + void imull(Register dst, Register src); // dst = dst * src. 
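
The new 32-bit instruction variants in this header (addl, subl, cmpl, testl, sarl, imull) matter on x64 because writing the 32-bit view of a register implicitly zeroes its upper 32 bits, and smi payloads only occupy the low 32 bits anyway. The C++ analogue of that implicit zero-extension:

    #include <cassert>
    #include <cstdint>

    // Model of a 32-bit register write on x64: compute in 32 bits, then the
    // hardware zero-extends the result into the full 64-bit register.
    uint64_t AddL(uint64_t reg, uint32_t imm) {
      uint32_t low = static_cast<uint32_t>(reg) + imm;  // 32-bit wraparound.
      return static_cast<uint64_t>(low);                // Upper bits cleared.
    }

    int main() {
      // Garbage in the high bits does not survive a 32-bit operation.
      assert(AddL(0xDEADBEEF00000001ULL, 1) == 2);
      return 0;
    }
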
void incq(Register dst); void incq(const Operand& dst); @@ -662,11 +690,22 @@ class Assembler : public Malloced { shift(dst, shift_amount, 0x7); } + // Shifts dst right, duplicating sign bit, by shift_amount bits. + // Shifting by 1 is handled efficiently. + void sarl(Register dst, Immediate shift_amount) { + shift_32(dst, shift_amount, 0x7); + } + // Shifts dst right, duplicating sign bit, by cl % 64 bits. void sar(Register dst) { shift(dst, 0x7); } + // Shifts dst right, duplicating sign bit, by cl % 64 bits. + void sarl(Register dst) { + shift_32(dst, 0x7); + } + void shl(Register dst, Immediate shift_amount) { shift(dst, shift_amount, 0x4); } @@ -722,8 +761,13 @@ class Assembler : public Malloced { immediate_arithmetic_op_32(0x5, dst, src); } + void subl(Register dst, Immediate src) { + immediate_arithmetic_op_32(0x5, dst, src); + } + void testb(Register reg, Immediate mask); void testb(const Operand& op, Immediate mask); + void testl(Register dst, Register src); void testl(Register reg, Immediate mask); void testl(const Operand& op, Immediate mask); void testq(const Operand& op, Register reg); @@ -1070,6 +1114,7 @@ class Assembler : public Malloced { // ModR/M byte. void arithmetic_op(byte opcode, Register dst, Register src); void arithmetic_op_32(byte opcode, Register dst, Register src); + void arithmetic_op_32(byte opcode, const Operand& dst, Register src); void arithmetic_op(byte opcode, Register reg, const Operand& op); void immediate_arithmetic_op(byte subcode, Register dst, Immediate src); void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src); @@ -1089,6 +1134,7 @@ class Assembler : public Malloced { Immediate src); // Emit machine code for a shift operation. void shift(Register dst, Immediate shift_amount, int subcode); + void shift_32(Register dst, Immediate shift_amount, int subcode); // Shift dst by cl % 64 bits. void shift(Register dst, int subcode); void shift_32(Register dst, int subcode); diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index fc196ce79..e3e32e694 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -25,9 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// TODO(X64): Remove stdio.h when compiler test is removed. -#include <stdio.h> - #include "v8.h" #include "bootstrapper.h" @@ -38,9 +35,6 @@ #include "register-allocator-inl.h" #include "scopes.h" -// TODO(X64): Remove compiler.h when compiler test is removed. -#include "compiler.h" - namespace v8 { namespace internal { @@ -138,81 +132,6 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { } -void CodeGenerator::TestCodeGenerator() { - // Compile a function from a string, and run it. - - // Set flags appropriately for this stage of implementation. - // TODO(X64): Make ic work, and stop disabling them. - // These settings stick - remove them when we don't want them anymore. -#ifdef DEBUG - FLAG_print_builtin_source = true; - FLAG_print_builtin_ast = true; -#endif - FLAG_use_ic = false; - - // Read the file "test.js" from the current directory, compile, and run it. - // If the file is not there, use a simple script embedded here instead. - Handle<String> test_script; - FILE* file = fopen("test.js", "rb"); - if (file == NULL) { - test_script = Factory::NewStringFromAscii(CStrVector( - "// Put all code in anonymous function to avoid global scope.\n" - "(function(){" - " var x = true ? 
47 : 32;" - " return x;" - "})()")); - } else { - fseek(file, 0, SEEK_END); - int size = ftell(file); - rewind(file); - - char* chars = new char[size + 1]; - chars[size] = '\0'; - for (int i = 0; i < size;) { - int read = fread(&chars[i], 1, size - i, file); - i += read; - } - fclose(file); - test_script = Factory::NewStringFromAscii(CStrVector(chars)); - delete[] chars; - } - - Handle<JSFunction> test_function = Compiler::Compile( - test_script, - Factory::NewStringFromAscii(CStrVector("CodeGeneratorTestScript")), - 0, - 0, - NULL, - NULL); - - Code* code_object = test_function->code(); // Local for debugging ease. - USE(code_object); - - // Create a dummy function and context. - Handle<JSFunction> bridge = - Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value()); - Handle<Context> context = - Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge); - - test_function = Factory::NewFunctionFromBoilerplate( - test_function, - context); - - bool pending_exceptions; - Handle<Object> result = - Execution::Call(test_function, - Handle<Object>::cast(test_function), - 0, - NULL, - &pending_exceptions); - // Function compiles and runs, but returns a JSFunction object. -#ifdef DEBUG - PrintF("Result of test function: "); - result->Print(); -#endif -} - - void CodeGenerator::GenCode(FunctionLiteral* function) { // Record the position for debugging purposes. CodeForFunctionPosition(function); @@ -235,8 +154,7 @@ void CodeGenerator::GenCode(FunctionLiteral* function) { #ifdef DEBUG if (strlen(FLAG_stop_at) > 0 && - // fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { - false) { + function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { frame_->SpillAll(); __ int3(); } @@ -437,26 +355,22 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) { // receiver. frame_->Exit(); masm_->ret((scope_->num_parameters() + 1) * kPointerSize); + // Add padding that will be overwritten by a debugger breakpoint. + // frame_->Exit() generates "movq rsp, rbp; pop rbp" length 5. + // "ret k" has length 2. + const int kPadding = Debug::kX64JSReturnSequenceLength - 5 - 2; + for (int i = 0; i < kPadding; ++i) { + masm_->int3(); + } DeleteFrame(); - // TODO(x64): introduce kX64JSReturnSequenceLength and enable assert. - // Check that the size of the code used for returning matches what is // expected by the debugger. - // ASSERT_EQ(Debug::kIa32JSReturnSequenceLength, - // masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); + ASSERT_EQ(Debug::kX64JSReturnSequenceLength, + masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); } -void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a, - int b, - int c, - Label* d, - Vector<Label*> e, - Vector<Label> f) { - UNIMPLEMENTED(); -} - #ifdef DEBUG bool CodeGenerator::HasValidEntryRegisters() { return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0)) @@ -1358,7 +1272,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { frame_->EmitPush(rax); // <- slot 3 frame_->EmitPush(rdx); // <- slot 2 - __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); + __ movsxlq(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); __ shl(rax, Immediate(kSmiTagSize)); frame_->EmitPush(rax); // <- slot 1 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0 @@ -1370,7 +1284,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { frame_->EmitPush(rax); // <- slot 2 // Push the length of the array and the initial index onto the stack. 
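
The shl by kSmiTagSize here is the whole smi tagging scheme at this point in V8's history: tag bit 0, kSmiTag == 0, kSmiTagSize == 1, so tagging is a one-bit left shift. That is also why the loop below can scale an already-tagged index with times_4 to reach 8-byte array elements (2 * i * 4 == 8 * i). In miniature:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const intptr_t kSmiTag = 0;

    intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }
    intptr_t SmiUntag(intptr_t smi) { return smi >> kSmiTagSize; }
    bool IsSmi(intptr_t word) { return (word & 1) == kSmiTag; }

    int main() {
      assert(IsSmi(SmiTag(42)));
      assert(SmiUntag(SmiTag(42)) == 42);
      // A tagged index is 2 * i, so scaling by 4 addresses 8-byte elements.
      assert(SmiTag(3) * 4 == 3 * 8);
      return 0;
    }
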
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset)); + __ movsxlq(rax, FieldOperand(rax, FixedArray::kLengthOffset)); __ shl(rax, Immediate(kSmiTagSize)); frame_->EmitPush(rax); // <- slot 1 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0 @@ -1383,15 +1297,14 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) { node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); __ movq(rax, frame_->ElementAt(0)); // load the current count - __ cmpq(rax, frame_->ElementAt(1)); // compare to the array length + __ cmpl(rax, frame_->ElementAt(1)); // compare to the array length node->break_target()->Branch(above_equal); // Get the i'th entry of the array. __ movq(rdx, frame_->ElementAt(2)); ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // Multiplier is times_4 since rax is already a Smi. - __ movq(rbx, Operand(rdx, rax, times_4, - FixedArray::kHeaderSize - kHeapObjectTag)); + __ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize)); // Get the expected map from the stack or a zero map in the // permanent slow case rax: current iteration count rbx: i'th entry @@ -1895,7 +1808,7 @@ void CodeGenerator::VisitConditional(Conditional* node) { void CodeGenerator::VisitSlot(Slot* node) { Comment cmnt(masm_, "[ Slot"); - LoadFromSlot(node, typeof_state()); + LoadFromSlotCheckForArguments(node, typeof_state()); } @@ -2227,12 +2140,12 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { Result elements = frame_->Pop(); elements.ToRegister(); frame_->Spill(elements.reg()); - // Get the elements array. + // Get the elements FixedArray. __ movq(elements.reg(), FieldOperand(elements.reg(), JSObject::kElementsOffset)); // Write to the indexed properties array. - int offset = i * kPointerSize + Array::kHeaderSize; + int offset = i * kPointerSize + FixedArray::kHeaderSize; __ movq(FieldOperand(elements.reg(), offset), prop_value.reg()); // Update the write barrier for the array address. @@ -2300,7 +2213,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) { // or the right hand side is a different variable. TakeValue invalidates // the target, with an implicit promise that it will be written to again // before it is read. - // TODO(X64): Implement TakeValue optimization. + // TODO(X64): Implement TakeValue optimization. Check issue 150016. if (false) { // if (literal != NULL || (right_var != NULL && right_var != var)) { // target.TakeValue(NOT_INSIDE_TYPEOF); @@ -2410,9 +2323,6 @@ void CodeGenerator::VisitCall(Call* node) { frame_->SetElementAt(0, &result); } else if (var != NULL && var->slot() != NULL && var->slot()->type() == Slot::LOOKUP) { - // TODO(X64): Enable calls of non-global functions. - UNIMPLEMENTED(); - /* // ---------------------------------- // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj // ---------------------------------- @@ -2420,8 +2330,8 @@ void CodeGenerator::VisitCall(Call* node) { // Load the function from the context. Sync the frame so we can // push the arguments directly into place. frame_->SyncRange(0, frame_->element_count() - 1); - frame_->EmitPush(esi); - frame_->EmitPush(Immediate(var->name())); + frame_->EmitPush(rsi); + frame_->EmitPush(var->name()); frame_->CallRuntime(Runtime::kLoadContextSlot, 2); // The runtime call returns a pair of values in rax and rdx. The // looked-up function is in rax and the receiver is in rdx. These @@ -2437,7 +2347,6 @@ void CodeGenerator::VisitCall(Call* node) { // Call the function. 
CallWithArguments(args, node->position()); - */ } else if (property != NULL) { // Check if the key is a literal string. Literal* literal = property->key()->AsLiteral(); @@ -2545,13 +2454,13 @@ void CodeGenerator::VisitCallEval(CallEval* node) { // receiver. Use a scratch register to avoid destroying the result. Result scratch = allocator_->Allocate(); ASSERT(scratch.is_valid()); - __ movl(scratch.reg(), + __ movq(scratch.reg(), FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0))); frame_->SetElementAt(arg_count + 1, &scratch); // We can reuse the result register now. frame_->Spill(result.reg()); - __ movl(result.reg(), + __ movq(result.reg(), FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1))); frame_->SetElementAt(arg_count, &result); @@ -2820,12 +2729,6 @@ class DeferredPrefixCountOperation: public DeferredCode { void DeferredPrefixCountOperation::Generate() { - // Undo the optimistic smi operation. - if (is_increment_) { - __ subq(dst_, Immediate(Smi::FromInt(1))); - } else { - __ addq(dst_, Immediate(Smi::FromInt(1))); - } __ push(dst_); __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); __ push(rax); @@ -2861,12 +2764,6 @@ class DeferredPostfixCountOperation: public DeferredCode { void DeferredPostfixCountOperation::Generate() { - // Undo the optimistic smi operation. - if (is_increment_) { - __ subq(dst_, Immediate(Smi::FromInt(1))); - } else { - __ addq(dst_, Immediate(Smi::FromInt(1))); - } __ push(dst_); __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); @@ -2923,19 +2820,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { // Ensure the new value is writable. frame_->Spill(new_value.reg()); - // In order to combine the overflow and the smi tag check, we need - // to be able to allocate a byte register. We attempt to do so - // without spilling. If we fail, we will generate separate overflow - // and smi tag checks. - // - // We allocate and clear the temporary register before - // performing the count operation since clearing the register using - // xor will clear the overflow flag. - Result tmp = allocator_->AllocateWithoutSpilling(); - - // Clear scratch register to prepare it for setcc after the operation below. - __ xor_(kScratchRegister, kScratchRegister); - DeferredCode* deferred = NULL; if (is_postfix) { deferred = new DeferredPostfixCountOperation(new_value.reg(), @@ -2946,25 +2830,26 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { is_increment); } + Result tmp = allocator_->AllocateWithoutSpilling(); + ASSERT(kSmiTagMask == 1 && kSmiTag == 0); + __ movl(tmp.reg(), Immediate(kSmiTagMask)); + // Smi test. + __ movq(kScratchRegister, new_value.reg()); if (is_increment) { - __ addq(new_value.reg(), Immediate(Smi::FromInt(1))); + __ addl(kScratchRegister, Immediate(Smi::FromInt(1))); } else { - __ subq(new_value.reg(), Immediate(Smi::FromInt(1))); + __ subl(kScratchRegister, Immediate(Smi::FromInt(1))); } - - // If the count operation didn't overflow and the result is a valid - // smi, we're done. Otherwise, we jump to the deferred slow-case - // code. - - // We combine the overflow and the smi tag check. 
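
The removed sequence merged the two guards with setcc/or/test; the replacement below keeps the same idea with a conditional move: on overflow, kScratchRegister is replaced by kSmiTagMask itself, so a single test against the tag mask fires both for overflow and for a non-smi operand. A C++ rendering of the merged check (using the GCC/Clang __builtin_add_overflow intrinsic; adding Smi::FromInt(1) adds 2 to the tagged word):

    #include <cstdint>

    const int64_t kSmiTagMask = 1;

    bool IncrementNeedsSlowPath(int64_t word) {
      int32_t incremented;
      bool overflow = __builtin_add_overflow(static_cast<int32_t>(word), 2,
                                             &incremented);
      // On overflow, the probe becomes the tag mask itself (the cmovl below);
      // otherwise the tag bit of the result is still the tag bit of the input.
      int64_t probe = overflow ? kSmiTagMask : static_cast<int64_t>(incremented);
      return (probe & kSmiTagMask) != 0;  // Slow path: overflow or non-smi.
    }
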
- __ setcc(overflow, kScratchRegister); - __ or_(kScratchRegister, new_value.reg()); - __ testl(kScratchRegister, Immediate(kSmiTagMask)); + // deferred->Branch(overflow); + __ cmovl(overflow, kScratchRegister, tmp.reg()); + __ testl(kScratchRegister, tmp.reg()); tmp.Unuse(); deferred->Branch(not_zero); + __ movq(new_value.reg(), kScratchRegister); deferred->BindExit(); + // Postfix: store the old value in the allocated slot under the // reference. if (is_postfix) frame_->SetElementAt(target.size(), &old_value); @@ -3227,13 +3112,12 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { // It can be an undetectable object. __ movq(kScratchRegister, FieldOperand(answer.reg(), HeapObject::kMapOffset)); - __ movb(kScratchRegister, - FieldOperand(kScratchRegister, Map::kBitFieldOffset)); - __ testb(kScratchRegister, Immediate(1 << Map::kIsUndetectable)); + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), + Immediate(1 << Map::kIsUndetectable)); destination()->false_target()->Branch(not_zero); - __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE)); + __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE); destination()->false_target()->Branch(below); - __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE)); + __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); answer.Unuse(); destination()->Split(below_equal); } else { @@ -3330,6 +3214,29 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { } + +void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { + ASSERT(args->length() == 0); + + // Get the frame pointer for the calling frame. + Result fp = allocator()->Allocate(); + __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); + + // Skip the arguments adaptor frame if it exists. + Label check_frame_marker; + __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset), + Immediate(ArgumentsAdaptorFrame::SENTINEL)); + __ j(not_equal, &check_frame_marker); + __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset)); + + // Check the marker in the calling frame. + __ bind(&check_frame_marker); + __ cmpq(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset), + Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); + fp.Unuse(); + destination()->Split(equal); +} + + void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { ASSERT(args->length() == 0); // ArgumentsAccessStub takes the parameter count as an input argument @@ -3412,13 +3319,107 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) { } + +void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) { + ASSERT(args->length() == 0); + frame_->SpillAll(); -void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* a) { - UNIMPLEMENTED(); + // Make sure the frame is aligned like the OS expects. + static const int kFrameAlignment = OS::ActivationFrameAlignment(); + if (kFrameAlignment > 0) { + ASSERT(IsPowerOf2(kFrameAlignment)); + __ movq(rbx, rsp); // Save in AMD-64 abi callee-saved register. + __ and_(rsp, Immediate(-kFrameAlignment)); + } + + // Call V8::RandomPositiveSmi(). + __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY); + + // Restore stack pointer from callee-saved register rbx.
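
Before calling into C++ the generated code parks rsp in a callee-saved register and rounds it down to the platform's activation frame alignment (the restore itself follows below); the rounding is a single and with the negated alignment. As plain arithmetic:

    #include <cassert>
    #include <cstdint>

    uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
      assert((alignment & (alignment - 1)) == 0);  // Must be a power of two.
      return sp & ~(alignment - 1);  // Same as and_(rsp, -alignment).
    }

    int main() {
      assert(AlignDown(0x12345679u, 16) == 0x12345670u);
      return 0;
    }
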
+ if (kFrameAlignment > 0) { + __ movq(rsp, rbx); + } + + Result result = allocator_->Allocate(rax); + frame_->Push(&result); } + void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) { - UNIMPLEMENTED(); + // TODO(X64): Use inline floating point in the fast case. + ASSERT(args->length() == 1); + + // Load number. + Load(args->at(0)); + Result answer; + switch (op) { + case SIN: + answer = frame_->CallRuntime(Runtime::kMath_sin, 1); + break; + case COS: + answer = frame_->CallRuntime(Runtime::kMath_cos, 1); + break; + } + frame_->Push(&answer); +} + + +void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { + ASSERT(args->length() == 1); + JumpTarget leave, null, function, non_function_constructor; + Load(args->at(0)); // Load the object. + Result obj = frame_->Pop(); + obj.ToRegister(); + frame_->Spill(obj.reg()); + + // If the object is a smi, we return null. + __ testl(obj.reg(), Immediate(kSmiTagMask)); + null.Branch(zero); + + // Check that the object is a JS object but take special care of JS + // functions to make sure they have 'Function' as their class. + + __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); + null.Branch(less); + + // As long as JS_FUNCTION_TYPE is the last instance type and it is + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for + // LAST_JS_OBJECT_TYPE. + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); + __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE); + function.Branch(equal); + + // Check if the constructor in the map is a function. + __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset)); + __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); + non_function_constructor.Branch(not_equal); + + // The obj register now contains the constructor function. Grab the + // instance class name from there. + __ movq(obj.reg(), + FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset)); + __ movq(obj.reg(), + FieldOperand(obj.reg(), + SharedFunctionInfo::kInstanceClassNameOffset)); + frame_->Push(&obj); + leave.Jump(); + + // Functions have class 'Function'. + function.Bind(); + frame_->Push(Factory::function_class_symbol()); + leave.Jump(); + + // Objects with a non-function constructor have class 'Object'. + non_function_constructor.Bind(); + frame_->Push(Factory::Object_symbol()); + leave.Jump(); + + // Non-JS objects have class null. + null.Bind(); + frame_->Push(Factory::null_value()); + + // All done. + leave.Bind(); } @@ -3795,8 +3796,28 @@ Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, Result tmp, JumpTarget* slow) { - UNIMPLEMENTED(); - return Operand(rsp, 0); + ASSERT(slot->type() == Slot::CONTEXT); + ASSERT(tmp.is_register()); + Register context = rsi; + + for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + slow->Branch(not_equal, not_taken); + } + __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); + context = tmp.reg(); + } + } + // Check that last extension is NULL. 
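
Both of the context walks here (this function and LoadFromGlobalSlotCheckExtensions below) implement the same rule: a fast slot access is only sound if no context between the current one and the target carries an extension object, since an eval could have introduced a shadowing binding there. A toy model, with invented field names for what the generated code reads out of each context:

    struct Context {
      const void* extension;  // Non-null if eval added dynamic bindings here.
      Context* previous;      // Enclosing context (reached via the closure).
      bool is_global;
    };

    // Returns false where the generated code branches to the slow path.
    bool NoExtensionsUpToGlobal(const Context* current) {
      for (const Context* c = current; c != nullptr; c = c->previous) {
        if (c->extension != nullptr) return false;  // A name may be shadowed.
        if (c->is_global) return true;
      }
      return true;
    }
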
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); + slow->Branch(not_equal, not_taken); + __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX)); + return ContextOperand(tmp.reg(), slot->index()); } @@ -3906,6 +3927,44 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { } +void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, + TypeofState state) { + LoadFromSlot(slot, state); + + // Bail out quickly if we're not using lazy arguments allocation. + if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return; + + // ... or if the slot isn't a non-parameter arguments slot. + if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; + + // Pop the loaded value from the stack. + Result value = frame_->Pop(); + + // If the loaded value is a constant, we know if the arguments + // object has been lazily loaded yet. + if (value.is_constant()) { + if (value.handle()->IsTheHole()) { + Result arguments = StoreArgumentsObject(false); + frame_->Push(&arguments); + } else { + frame_->Push(&value); + } + return; + } + + // The loaded value is in a register. If it is the sentinel that + // indicates that we haven't loaded the arguments object yet, we + // need to do it now. + JumpTarget exit; + __ Cmp(value.reg(), Factory::the_hole_value()); + frame_->Push(&value); + exit.Branch(not_equal); + Result arguments = StoreArgumentsObject(false); + frame_->SetElementAt(0, &arguments); + exit.Bind(); +} + + void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { // TODO(X64): Enable more types of slot. @@ -4009,8 +4068,72 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( Slot* slot, TypeofState typeof_state, JumpTarget* slow) { - UNIMPLEMENTED(); - return Result(rax); + // Check that no extension objects have been created by calls to + // eval from the current scope to the global scope. + Register context = rsi; + Result tmp = allocator_->Allocate(); + ASSERT(tmp.is_valid()); // All non-reserved registers were available. + + Scope* s = scope(); + while (s != NULL) { + if (s->num_heap_slots() > 0) { + if (s->calls_eval()) { + // Check that extension is NULL. + __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), + Immediate(0)); + slow->Branch(not_equal, not_taken); + } + // Load next context in chain. + __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); + context = tmp.reg(); + } + // If no outer scope calls eval, we do not need to check more + // context extensions. If we have reached an eval scope, we check + // all extensions from this point. + if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; + s = s->outer_scope(); + } + + if (s->is_eval_scope()) { + // Loop up the context chain. There is no frame effect so it is + // safe to use raw labels here. + Label next, fast; + if (!context.is(tmp.reg())) { + __ movq(tmp.reg(), context); + } + // Load map for comparison into register, outside loop. + __ Move(kScratchRegister, Factory::global_context_map()); + __ bind(&next); + // Terminate at global context. + __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset)); + __ j(equal, &fast); + // Check that extension is NULL. + __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0)); + slow->Branch(not_equal); + // Load next context in chain. 
+ __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX)); + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); + __ jmp(&next); + __ bind(&fast); + } + tmp.Unuse(); + + // All extension objects were empty and it is safe to use a global + // load IC call. + LoadGlobal(); + frame_->Push(slot->var()->name()); + RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) + ? RelocInfo::CODE_TARGET + : RelocInfo::CODE_TARGET_CONTEXT; + Result answer = frame_->CallLoadIC(mode); + // A test rax instruction following the call signals that the inobject + // property case was inlined. Ensure that there is not a test eax + // instruction here. + __ nop(); + // Discard the global object. The result is in answer. + frame_->Drop(); + return answer; } @@ -4120,39 +4243,6 @@ void CodeGenerator::LoadTypeofExpression(Expression* x) { } -class CompareStub: public CodeStub { - public: - CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { } - - void Generate(MacroAssembler* masm); - - private: - Condition cc_; - bool strict_; - - Major MajorKey() { return Compare; } - - int MinorKey() { - // Encode the three parameters in a unique 16 bit value. - ASSERT(static_cast<int>(cc_) < (1 << 15)); - return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0); - } - - // Branch to the label if the given object isn't a symbol. - void BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object); - -#ifdef DEBUG - void Print() { - PrintF("CompareStub (cc %d), (strict %s)\n", - static_cast<int>(cc_), - strict_ ? "true" : "false"); - } -#endif -}; - - void CodeGenerator::Comparison(Condition cc, bool strict, ControlDestination* dest) { @@ -4239,12 +4329,8 @@ void CodeGenerator::Comparison(Condition cc, left_side = Result(left_reg); right_side = Result(right_val); // Test smi equality and comparison by signed int comparison. - if (IsUnsafeSmi(right_side.handle())) { - right_side.ToRegister(); - __ cmpq(left_side.reg(), right_side.reg()); - } else { - __ Cmp(left_side.reg(), right_side.handle()); - } + // Both sides are smis, so we can use an Immediate. + __ cmpl(left_side.reg(), Immediate(Smi::cast(*right_side.handle()))); left_side.Unuse(); right_side.Unuse(); dest->Split(cc); @@ -4296,7 +4382,8 @@ void CodeGenerator::Comparison(Condition cc, // When non-smi, call out to the compare stub. CompareStub stub(cc, strict); Result answer = frame_->CallStub(&stub, &left_side, &right_side); - __ testq(answer.reg(), answer.reg()); // Both zero and sign flag right. + // The result is a Smi, which is negative, zero, or positive. + __ testl(answer.reg(), answer.reg()); // Both zero and sign flag right. answer.Unuse(); dest->Split(cc); } else { @@ -4309,18 +4396,14 @@ void CodeGenerator::Comparison(Condition cc, Register left_reg = left_side.reg(); Register right_reg = right_side.reg(); - __ movq(kScratchRegister, left_side.reg()); - __ or_(kScratchRegister, right_side.reg()); + __ movq(kScratchRegister, left_reg); + __ or_(kScratchRegister, right_reg); __ testl(kScratchRegister, Immediate(kSmiTagMask)); is_smi.Branch(zero, taken); // When non-smi, call out to the compare stub. CompareStub stub(cc, strict); Result answer = frame_->CallStub(&stub, &left_side, &right_side); - if (cc == equal) { - __ testq(answer.reg(), answer.reg()); - } else { - __ cmpq(answer.reg(), Immediate(0)); - } + __ testl(answer.reg(), answer.reg()); // Sets both zero and sign flags. 
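The convention relied on here: the stub returns a smi that is negative, zero, or positive according to the comparison, exactly like memcmp, so one test of the result against itself reproduces the flags the original condition code expects. In C terms (a sketch; CompareStubResult is a hypothetical stand-in for the stub call):

    // The stub's result behaves like memcmp's: < 0, == 0, or > 0.
    extern int CompareStubResult(void* lhs, void* rhs);  // hypothetical

    static bool LessThanViaStub(void* lhs, void* rhs) {
      int three_way = CompareStubResult(lhs, rhs);
      return three_way < 0;  // testl answer, answer; then j(less, ...)
    }
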
answer.Unuse(); dest->true_target()->Branch(cc); dest->false_target()->Jump(); @@ -4328,7 +4411,7 @@ void CodeGenerator::Comparison(Condition cc, is_smi.Bind(); left_side = Result(left_reg); right_side = Result(right_reg); - __ cmpq(left_side.reg(), right_side.reg()); + __ cmpl(left_side.reg(), right_side.reg()); right_side.Unuse(); left_side.Unuse(); dest->Split(cc); @@ -4649,8 +4732,6 @@ class DeferredInlineSmiAdd: public DeferredCode { void DeferredInlineSmiAdd::Generate() { - // Undo the optimistic add operation and call the shared stub. - __ subq(dst_, Immediate(value_)); __ push(dst_); __ push(Immediate(value_)); GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); @@ -4681,8 +4762,6 @@ class DeferredInlineSmiAddReversed: public DeferredCode { void DeferredInlineSmiAddReversed::Generate() { - // Undo the optimistic add operation and call the shared stub. - __ subq(dst_, Immediate(value_)); __ push(Immediate(value_)); __ push(dst_); GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED); @@ -4714,8 +4793,6 @@ class DeferredInlineSmiSub: public DeferredCode { void DeferredInlineSmiSub::Generate() { - // Undo the optimistic sub operation and call the shared stub. - __ addq(dst_, Immediate(value_)); __ push(dst_); __ push(Immediate(value_)); GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED); @@ -4757,9 +4834,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, case Token::ADD: { operand->ToRegister(); frame_->Spill(operand->reg()); - - // Optimistically add. Call the specialized add stub if the - // result is not a smi or overflows. DeferredCode* deferred = NULL; if (reversed) { deferred = new DeferredInlineSmiAddReversed(operand->reg(), @@ -4770,11 +4844,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, smi_value, overwrite_mode); } - __ movq(kScratchRegister, value, RelocInfo::NONE); - __ addl(operand->reg(), kScratchRegister); - deferred->Branch(overflow); __ testl(operand->reg(), Immediate(kSmiTagMask)); deferred->Branch(not_zero); + // A smi currently fits in a 32-bit Immediate. + __ addl(operand->reg(), Immediate(smi_value)); + Label add_success; + __ j(no_overflow, &add_success); + __ subl(operand->reg(), Immediate(smi_value)); + __ movsxlq(operand->reg(), operand->reg()); + deferred->Jump(); + __ bind(&add_success); + __ movsxlq(operand->reg(), operand->reg()); deferred->BindExit(); frame_->Push(operand); break; @@ -4982,12 +5062,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, // Perform the operation. switch (op) { case Token::SAR: - __ sar(answer.reg()); + __ sarl(answer.reg()); // No checks of result necessary break; case Token::SHR: { Label result_ok; - __ shr(answer.reg()); + __ shrl(answer.reg()); // Check that the *unsigned* result fits in a smi. Neither of // the two high-order bits can be set: // * 0x80000000: high bit would be lost when smi tagging. @@ -5010,7 +5090,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, Label result_ok; __ shl(answer.reg()); // Check that the *signed* result fits in a smi. - __ cmpq(answer.reg(), Immediate(0xc0000000)); + __ cmpl(answer.reg(), Immediate(0xc0000000)); __ j(positive, &result_ok); ASSERT(kSmiTag == 0); __ shl(rcx, Immediate(kSmiTagSize)); @@ -5060,12 +5140,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, __ movq(answer.reg(), left->reg()); switch (op) { case Token::ADD: - __ addl(answer.reg(), right->reg()); // Add optimistically. 
+ __ addl(answer.reg(), right->reg()); deferred->Branch(overflow); break; case Token::SUB: - __ subl(answer.reg(), right->reg()); // Subtract optimistically. + __ subl(answer.reg(), right->reg()); deferred->Branch(overflow); break; @@ -5148,7 +5228,7 @@ void Reference::GetValue(TypeofState typeof_state) { Comment cmnt(masm, "[ Load from Slot"); Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); ASSERT(slot != NULL); - cgen_->LoadFromSlot(slot, typeof_state); + cgen_->LoadFromSlotCheckForArguments(slot, typeof_state); break; } @@ -5347,6 +5427,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ j(equal, &false_result); // Get the map and type of the heap object. + // We don't use CmpObjectType because we manipulate the type field. __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset)); @@ -5372,6 +5453,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ bind(¬_string); // HeapNumber => false iff +0, -0, or NaN. + // These three cases set C3 when compared to zero in the FPU. __ Cmp(rdx, Factory::heap_number_map()); __ j(not_equal, &true_result); // TODO(x64): Don't use fp stack, use MMX registers? @@ -5381,9 +5463,9 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ fucompp(); // Compare and pop both values. __ movq(kScratchRegister, rax); __ fnstsw_ax(); // Store fp status word in ax, no checking for exceptions. - __ testb(rax, Immediate(0x08)); // Test FP condition flag C3. + __ testl(rax, Immediate(0x4000)); // Test FP condition flag C3, bit 16. __ movq(rax, kScratchRegister); - __ j(zero, &false_result); + __ j(not_zero, &false_result); // Fall through to |true_result|. // Return 1/0 for true/false in rax. @@ -5481,12 +5563,32 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { } - - // End of CodeGenerator implementation. void UnarySubStub::Generate(MacroAssembler* masm) { - UNIMPLEMENTED(); + Label slow; + Label done; + + // Check whether the value is a smi. + __ testl(rax, Immediate(kSmiTagMask)); + // TODO(X64): Add inline code that handles floats, as on ia32 platform. + __ j(not_zero, &slow); + // Enter runtime system if the value of the smi is zero + // to make sure that we switch between 0 and -0. + // Also enter it if the value of the smi is Smi::kMinValue + __ testl(rax, Immediate(0x7FFFFFFE)); + __ j(zero, &slow); + __ neg(rax); + __ jmp(&done); + // Enter runtime system. + __ bind(&slow); + __ pop(rcx); // pop return address + __ push(rax); + __ push(rcx); // push return address + __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); + + __ bind(&done); + __ StubReturn(1); } @@ -5523,7 +5625,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // The representation of NaN values has all exponent bits (52..62) set, // and not all mantissa bits (0..51) clear. // Read double representation into rax. - __ movq(rbx, 0x7ff0000000000000, RelocInfo::NONE); + __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE); __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset)); // Test that exponent bits are all set. __ or_(rbx, rax); @@ -5533,7 +5635,8 @@ void CompareStub::Generate(MacroAssembler* masm) { __ shl(rax, Immediate(12)); // If all bits in the mantissa are zero the number is Infinity, and // we return zero. Otherwise it is a NaN, and we return non-zero. - // So just return rax. + // We cannot just return rax because only eax is tested on return. 
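What the exponent/mantissa tests above compute, as a self-contained C++ predicate over the IEEE-754 encoding (a sketch of the same logic, not the stub itself): a double is NaN when all eleven exponent bits (62..52) are set and the 52 mantissa bits are not all zero; the all-zero-mantissa case is an infinity.

    #include <stdint.h>
    #include <string.h>

    static bool IsNaNBits(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);  // raw IEEE-754 encoding
      const uint64_t kExpMask  = 0x7ff0000000000000ULL;
      const uint64_t kMantMask = 0x000fffffffffffffULL;
      return (bits & kExpMask) == kExpMask && (bits & kMantMask) != 0;
    }
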
+ __ setcc(not_zero, rax); __ ret(0); __ bind(¬_identical); @@ -5571,7 +5674,7 @@ void CompareStub::Generate(MacroAssembler* masm) { Factory::heap_number_map()); // If heap number, handle it in the slow case. __ j(equal, &slow); - // Return non-equal (ebx is not zero) + // Return non-equal. ebx (the lower half of rbx) is not zero. __ movq(rax, rbx); __ ret(0); @@ -5587,7 +5690,7 @@ void CompareStub::Generate(MacroAssembler* masm) { Label first_non_object; __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx); __ j(below, &first_non_object); - // Return non-zero (rax is not zero) + // Return non-zero (eax (not rax) is not zero) Label return_not_equal; ASSERT(kHeapObjectTag != 0); __ bind(&return_not_equal); @@ -5647,11 +5750,11 @@ void CompareStub::Generate(MacroAssembler* masm) { // Fast negative check for symbol-to-symbol equality. __ bind(&check_for_symbols); if (cc_ == equal) { - BranchIfNonSymbol(masm, &call_builtin, rax); - BranchIfNonSymbol(masm, &call_builtin, rdx); + BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister); + BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister); // We've already checked for object identity, so if both operands - // are symbols they aren't equal. Register rax already holds a + // are symbols they aren't equal. Register eax (not rax) already holds a // non-zero value, which indicates not equal, so just return. __ ret(2 * kPointerSize); } @@ -5691,14 +5794,15 @@ void CompareStub::Generate(MacroAssembler* masm) { void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, Label* label, - Register object) { + Register object, + Register scratch) { __ testl(object, Immediate(kSmiTagMask)); __ j(zero, label); - __ movq(kScratchRegister, FieldOperand(object, HeapObject::kMapOffset)); - __ movzxbq(kScratchRegister, - FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); - __ and_(kScratchRegister, Immediate(kIsSymbolMask | kIsNotStringMask)); - __ cmpb(kScratchRegister, Immediate(kSymbolTag | kStringTag)); + __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset)); + __ movzxbq(scratch, + FieldOperand(scratch, Map::kInstanceTypeOffset)); + __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask)); + __ cmpb(scratch, Immediate(kSymbolTag | kStringTag)); __ j(not_equal, label); } @@ -5728,6 +5832,62 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, void InstanceofStub::Generate(MacroAssembler* masm) { + // Implements "value instanceof function" operator. + // Expected input state: + // rsp[0] : return address + // rsp[1] : function pointer + // rsp[2] : value + + // Get the object - go slow case if it's a smi. + Label slow; + __ movq(rax, Operand(rsp, 2 * kPointerSize)); + __ testl(rax, Immediate(kSmiTagMask)); + __ j(zero, &slow); + + // Check that the left hand is a JS object. Leave its map in rax. + __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); + __ j(below, &slow); + __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE); + __ j(above, &slow); + + // Get the prototype of the function. + __ movq(rdx, Operand(rsp, 1 * kPointerSize)); + __ TryGetFunctionPrototype(rdx, rbx, &slow); + + // Check that the function prototype is a JS object. + __ testl(rbx, Immediate(kSmiTagMask)); + __ j(zero, &slow); + __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister); + __ j(below, &slow); + __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); + __ j(above, &slow); + + // Register mapping: rax is object map and rbx is function prototype. 
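The loop that follows is the standard prototype-chain walk. As a hedged C++ sketch (illustrative type; V8 loads each prototype through the object's map, and null_value plays the role of the terminating 0):

    struct JSObj {
      JSObj* prototype;  // map->kPrototypeOffset; 0 stands in for null_value
    };

    // Returns 0 ("is instance") or 1 ("is not"), matching the smi in rax.
    static int InstanceOfResult(JSObj* object, JSObj* function_prototype) {
      for (JSObj* p = object->prototype; ; p = p->prototype) {
        if (p == function_prototype) return 0;  // is_instance
        if (p == 0) return 1;                   // is_not_instance
      }
    }
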
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset)); + + // Loop through the prototype chain looking for the function prototype. + Label loop, is_instance, is_not_instance; + __ Move(kScratchRegister, Factory::null_value()); + __ bind(&loop); + __ cmpq(rcx, rbx); + __ j(equal, &is_instance); + __ cmpq(rcx, kScratchRegister); + __ j(equal, &is_not_instance); + __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); + __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset)); + __ jmp(&loop); + + __ bind(&is_instance); + __ xor_(rax, rax); + __ ret(2 * kPointerSize); + + __ bind(&is_not_instance); + __ movq(rax, Immediate(Smi::FromInt(1))); + __ ret(2 * kPointerSize); + + // Slow-case: Go through the JavaScript implementation. + __ bind(&slow); + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); } @@ -5878,13 +6038,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // rbx: pointer to C function (C callee-saved). // rbp: frame pointer (restored after C call). // rsp: stack pointer (restored after C call). - // rdi: number of arguments including receiver. + // r14: number of arguments including receiver (C callee-saved). // r15: pointer to the first argument (C callee-saved). // This pointer is reused in LeaveExitFrame(), so it is stored in a // callee-saved register. if (do_gc) { - __ movq(Operand(rsp, 0), rax); // Result. + // Pass failure code returned from last attempt as first argument to GC. +#ifdef __MSVC__ + __ movq(rcx, rax); // argc. +#else // ! defined(__MSVC__) + __ movq(rdi, rax); // argv. +#endif __ movq(kScratchRegister, FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); @@ -5901,11 +6066,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // Call C function. #ifdef __MSVC__ // MSVC passes arguments in rcx, rdx, r8, r9 - __ movq(rcx, rdi); // argc. + __ movq(rcx, r14); // argc. __ movq(rdx, r15); // argv. #else // ! defined(__MSVC__) // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9. - // First argument is already in rdi. + __ movq(rdi, r14); // argc. __ movq(rsi, r15); // argv. #endif __ call(rbx); @@ -6047,10 +6212,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { // rax: number of arguments including receiver // rbx: pointer to C function (C callee-saved) - // rbp: frame pointer (restored after C call) + // rbp: frame pointer of calling JS frame (restored after C call) // rsp: stack pointer (restored after C call) - // rsi: current context (C callee-saved) - // rdi: caller's parameter pointer pp (C callee-saved) + // rsi: current context (restored) // NOTE: Invocations of builtins may return failure objects // instead of a proper result. The builtin entry handles @@ -6064,16 +6228,16 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { // Enter the exit frame that transitions from JavaScript to C++. __ EnterExitFrame(frame_type); - // rax: result parameter for PerformGC, if any (setup below). - // Holds the result of a previous call to GenerateCore that - // returned a failure. On next call, it's used as parameter - // to Runtime::PerformGC. + // rax: Holds the context at this point, but should not be used. + // On entry to code generated by GenerateCore, it must hold + // a failure result if the collect_garbage argument to GenerateCore + // is true. This failure result can be the result of code + // generated by a previous call to GenerateCore. The value + // of rax is then passed to Runtime::PerformGC. 
// rbx: pointer to builtin function (C callee-saved). - // rbp: frame pointer (restored after C call). - // rsp: stack pointer (restored after C call). - // rdi: number of arguments including receiver (destroyed by C call). - // The rdi register is not callee-save in Unix 64-bit ABI, so - // we must treat it as volatile. + // rbp: frame pointer of exit frame (restored after C call). + // rsp: stack pointer (restored after C call). + // r14: number of arguments including receiver (C callee-saved). // r15: argv pointer (C callee-saved). Label throw_out_of_memory_exception; @@ -6338,17 +6502,18 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, __ bind(&load_smi_lhs); ASSERT(kSmiTagSize == 1); ASSERT(kSmiTag == 0); - __ lea(kScratchRegister, Operand(lhs, lhs, times_1, 0)); + __ movsxlq(kScratchRegister, lhs); + __ sar(kScratchRegister, Immediate(kSmiTagSize)); __ push(kScratchRegister); - __ fild_s(Operand(rsp, 0)); + __ fild_d(Operand(rsp, 0)); __ pop(kScratchRegister); __ jmp(&done_load_lhs); __ bind(&load_smi_rhs); - __ movq(kScratchRegister, rhs); + __ movsxlq(kScratchRegister, rhs); __ sar(kScratchRegister, Immediate(kSmiTagSize)); __ push(kScratchRegister); - __ fild_s(Operand(rsp, 0)); + __ fild_d(Operand(rsp, 0)); __ pop(kScratchRegister); __ bind(&done); @@ -6357,24 +6522,18 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, Label* non_float) { Label test_other, done; - // Test if both operands are floats or smi -> scratch=k_is_float; - // Otherwise scratch = k_not_float. + // Test if both operands are numbers (heap_numbers or smis). + // If not, jump to label non_float. __ testl(rdx, Immediate(kSmiTagMask)); __ j(zero, &test_other); // argument in rdx is OK - __ movq(kScratchRegister, - Factory::heap_number_map(), - RelocInfo::EMBEDDED_OBJECT); - __ cmpq(kScratchRegister, FieldOperand(rdx, HeapObject::kMapOffset)); - __ j(not_equal, non_float); // argument in rdx is not a number -> NaN + __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map()); + __ j(not_equal, non_float); // The argument in rdx is not a number. __ bind(&test_other); __ testl(rax, Immediate(kSmiTagMask)); __ j(zero, &done); // argument in rax is OK - __ movq(kScratchRegister, - Factory::heap_number_map(), - RelocInfo::EMBEDDED_OBJECT); - __ cmpq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset)); - __ j(not_equal, non_float); // argument in rax is not a number -> NaN + __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map()); + __ j(not_equal, non_float); // The argument in rax is not a number. // Fall-through: Both operands are numbers. __ bind(&done); @@ -6401,49 +6560,26 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { // Perform fast-case smi code for the operation (rax <op> rbx) and // leave result in register rax. - // Prepare the smi check of both operands by or'ing them together - // before checking against the smi mask. + // Smi check both operands. __ movq(rcx, rbx); __ or_(rcx, rax); + __ testl(rcx, Immediate(kSmiTagMask)); + __ j(not_zero, slow); switch (op_) { - case Token::ADD: - __ addl(rax, rbx); // add optimistically - __ j(overflow, slow); + case Token::ADD: { + __ addl(rax, rbx); + __ j(overflow, slow); // The slow case rereads operands from the stack. __ movsxlq(rax, rax); // Sign extend eax into rax. 
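In C terms the ADD fast path above is a plain checked 32-bit add. A sketch (the generated code keeps the operands tagged in the low register halves and branches to the stub's slow case instead of returning false):

    #include <stdint.h>

    // a and b are tagged smis (low bit clear); the sum of two tagged
    // smis is the tagged sum, so addl plus an overflow check suffices.
    static bool AddTaggedSmis(int32_t a, int32_t b, int64_t* result) {
      int64_t sum = (int64_t)a + (int64_t)b;
      if (sum != (int32_t)sum) return false;  // j(overflow, slow)
      *result = (int32_t)sum;                 // movsxlq: sign extend into rax
      return true;
    }
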
break; + } - case Token::SUB: - __ subl(rax, rbx); // subtract optimistically - __ j(overflow, slow); + case Token::SUB: { + __ subl(rax, rbx); + __ j(overflow, slow); // The slow case rereads operands from the stack. __ movsxlq(rax, rax); // Sign extend eax into rax. break; - - case Token::DIV: - case Token::MOD: - // Sign extend rax into rdx:rax - // (also sign extends eax into edx if eax is Smi). - __ cqo(); - // Check for 0 divisor. - __ testq(rbx, rbx); - __ j(zero, slow); - break; - - default: - // Fall-through to smi check. - break; - } - - // Perform the actual smi check. - ASSERT(kSmiTag == 0); // adjust zero check if not the case - __ testl(rcx, Immediate(kSmiTagMask)); - __ j(not_zero, slow); - - switch (op_) { - case Token::ADD: - case Token::SUB: - // Do nothing here. - break; + } case Token::MUL: // If the smi tag is 0 we can just leave the tag on one operand. @@ -6460,6 +6596,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { break; case Token::DIV: + // Sign extend rax into rdx:rax + // (also sign extends eax into edx if eax is Smi). + __ cqo(); + // Check for 0 divisor. + __ testq(rbx, rbx); + __ j(zero, slow); // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax). __ idiv(rbx); // Check that the remainder is zero. @@ -6481,6 +6623,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { break; case Token::MOD: + // Sign extend rax into rdx:rax + // (also sign extends eax into edx if eax is Smi). + __ cqo(); + // Check for 0 divisor. + __ testq(rbx, rbx); + __ j(zero, slow); // Divide rdx:rax by rbx. __ idiv(rbx); // Check for negative zero result. @@ -6508,12 +6656,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { // Move the second operand into register ecx. __ movq(rcx, rbx); // Remove tags from operands (but keep sign). - __ sar(rax, Immediate(kSmiTagSize)); - __ sar(rcx, Immediate(kSmiTagSize)); + __ sarl(rax, Immediate(kSmiTagSize)); + __ sarl(rcx, Immediate(kSmiTagSize)); // Perform the operation. switch (op_) { case Token::SAR: - __ sar(rax); + __ sarl(rax); // No checks of result necessary break; case Token::SHR: @@ -6524,19 +6672,17 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { // - 0x40000000: this number would convert to negative when // Smi tagging these two cases can only happen with shifts // by 0 or 1 when handed a valid smi. - __ testq(rax, Immediate(0xc0000000)); + __ testl(rax, Immediate(0xc0000000)); __ j(not_zero, slow); break; case Token::SHL: __ shll(rax); - // TODO(Smi): Significant change if Smi changes. // Check that the *signed* result fits in a smi. // It does, if the 30th and 31st bits are equal, since then // shifting the SmiTag in at the bottom doesn't change the sign. ASSERT(kSmiTagSize == 1); __ cmpl(rax, Immediate(0xc0000000)); __ j(sign, slow); - __ movsxlq(rax, rax); // Extend new sign of eax into rax. 
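The 0xc0000000 tests in the shift cases above both encode the 31-bit smi payload limit; as a sketch:

    #include <stdint.h>

    // SHL: the signed result fits iff bits 31 and 30 agree, so tagging
    // (one more left shift) preserves the sign. The stub checks this as
    // "result minus 0xc0000000 has the sign flag clear".
    static bool SignedResultFitsInSmi(int32_t v) {
      return v >= -0x40000000 && v < 0x40000000;
    }

    // SHR: the unsigned result must have both high bits clear; 0x80000000
    // would lose its top bit on tagging, 0x40000000 would come out negative.
    static bool UnsignedResultFitsInSmi(uint32_t v) {
      return (v & 0xc0000000u) == 0;
    }
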
break; default: UNREACHABLE(); @@ -6674,9 +6820,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { case Token::BIT_OR: __ or_(rax, rcx); break; case Token::BIT_AND: __ and_(rax, rcx); break; case Token::BIT_XOR: __ xor_(rax, rcx); break; - case Token::SAR: __ sar(rax); break; - case Token::SHL: __ shl(rax); break; - case Token::SHR: __ shr(rax); break; + case Token::SAR: __ sarl(rax); break; + case Token::SHL: __ shll(rax); break; + case Token::SHR: __ shrl(rax); break; default: UNREACHABLE(); } if (op_ == Token::SHR) { @@ -6752,8 +6898,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { // If all else fails, use the runtime system to get the correct // result. __ bind(&call_runtime); - // Disable builtin-calls until JS builtins can compile and run. - __ Abort("Disabled until builtins compile and run."); switch (op_) { case Token::ADD: __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); @@ -6794,6 +6938,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { } +int CompareStub::MinorKey() { + // Encode the two parameters in a unique 16 bit value. + ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); + return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h index af82de8ff..bb4b53880 100644 --- a/deps/v8/src/x64/codegen-x64.h +++ b/deps/v8/src/x64/codegen-x64.h @@ -294,15 +294,6 @@ class CodeGenerator: public AstVisitor { Handle<Script> script, bool is_eval); - // During implementation of CodeGenerator, this call creates a - // CodeGenerator instance, and calls GenCode on it with a null - // function literal. CodeGenerator will then construct and return - // a simple dummy function. Call this during bootstrapping before - // trying to compile any real functions, to get CodeGenerator up - // and running. - // TODO(X64): Remove once we can get through the bootstrapping process. - static void TestCodeGenerator(); - #ifdef ENABLE_LOGGING_AND_PROFILING static bool ShouldGenerateLog(Expression* type); #endif @@ -432,6 +423,7 @@ class CodeGenerator: public AstVisitor { // Read a value from a slot and leave it on top of the expression stack. void LoadFromSlot(Slot* slot, TypeofState typeof_state); + void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state); Result LoadFromGlobalSlotCheckExtensions(Slot* slot, TypeofState typeof_state, JumpTarget* slow); @@ -522,11 +514,15 @@ class CodeGenerator: public AstVisitor { void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args); void GenerateIsArray(ZoneList<Expression*>* args); + // Support for construct call checks. + void GenerateIsConstructCall(ZoneList<Expression*>* args); + // Support for arguments.length and arguments[?]. void GenerateArgumentsLength(ZoneList<Expression*>* args); void GenerateArgumentsAccess(ZoneList<Expression*>* args); - // Support for accessing the value field of an object (used by Date). + // Support for accessing the class and value fields of an object. + void GenerateClassOf(ZoneList<Expression*>* args); void GenerateValueOf(ZoneList<Expression*>* args); void GenerateSetValueOf(ZoneList<Expression*>* args); @@ -547,58 +543,6 @@ class CodeGenerator: public AstVisitor { inline void GenerateMathSin(ZoneList<Expression*>* args); inline void GenerateMathCos(ZoneList<Expression*>* args); - // Methods and constants for fast case switch statement support. 
- // - // Only allow fast-case switch if the range of labels is at most - // this factor times the number of case labels. - // Value is derived from comparing the size of code generated by the normal - // switch code for Smi-labels to the size of a single pointer. If code - // quality increases this number should be decreased to match. - static const int kFastSwitchMaxOverheadFactor = 5; - - // Minimal number of switch cases required before we allow jump-table - // optimization. - static const int kFastSwitchMinCaseCount = 5; - - // The limit of the range of a fast-case switch, as a factor of the number - // of cases of the switch. Each platform should return a value that - // is optimal compared to the default code generated for a switch statement - // on that platform. - int FastCaseSwitchMaxOverheadFactor(); - - // The minimal number of cases in a switch before the fast-case switch - // optimization is enabled. Each platform should return a value that - // is optimal compared to the default code generated for a switch statement - // on that platform. - int FastCaseSwitchMinCaseCount(); - - // Allocate a jump table and create code to jump through it. - // Should call GenerateFastCaseSwitchCases to generate the code for - // all the cases at the appropriate point. - void GenerateFastCaseSwitchJumpTable(SwitchStatement* node, - int min_index, - int range, - Label* fail_label, - Vector<Label*> case_targets, - Vector<Label> case_labels); - - // Generate the code for cases for the fast case switch. - // Called by GenerateFastCaseSwitchJumpTable. - void GenerateFastCaseSwitchCases(SwitchStatement* node, - Vector<Label> case_labels, - VirtualFrame* start_frame); - - // Fast support for constant-Smi switches. - void GenerateFastCaseSwitchStatement(SwitchStatement* node, - int min_index, - int range, - int default_index); - - // Fast support for constant-Smi switches. Tests whether switch statement - // permits optimization and calls GenerateFastCaseSwitch if it does. - // Returns true if the fast-case switch was generated, and false if not. - bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node); - // Methods used to indicate which source code is generated for. Source // positions are collected by the assembler and emitted with the relocation // information. diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc index 3b101325e..e94e781d4 100644 --- a/deps/v8/src/x64/debug-x64.cc +++ b/deps/v8/src/x64/debug-x64.cc @@ -38,8 +38,10 @@ namespace internal { #ifdef ENABLE_DEBUGGER_SUPPORT bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) { - UNIMPLEMENTED(); - return false; + ASSERT(RelocInfo::IsJSReturn(rinfo->rmode())); + // 11th byte of patch is 0x49, 11th byte of JS return is 0xCC (int3). + ASSERT(*(rinfo->pc() + 10) == 0x49 || *(rinfo->pc() + 10) == 0xCC); + return (*(rinfo->pc() + 10) == 0x49); } void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) { diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc index 767b1247a..f962c0193 100644 --- a/deps/v8/src/x64/disasm-x64.cc +++ b/deps/v8/src/x64/disasm-x64.cc @@ -25,64 +25,1408 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
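One note on the debug-x64.cc hunk above: it identifies a patched return sequence by a single byte at a fixed offset. A hedged sketch of the predicate, mirroring the diff's comment about the 11th byte (0x49 when the debug-break patch is installed, int3 0xCC padding otherwise):

    typedef unsigned char byte;

    // pc points at the start of the JS return sequence.
    static bool DebugBreakIsInstalledAtReturn(const byte* pc) {
      return pc[10] == 0x49;
    }
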
+#include <assert.h> +#include <stdio.h> +#include <stdarg.h> + #include "v8.h" #include "disasm.h" namespace disasm { -Disassembler::Disassembler(NameConverter const& converter) - : converter_(converter) { - UNIMPLEMENTED(); +enum OperandOrder { + UNSET_OP_ORDER = 0, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER +}; + +//------------------------------------------------------------------ +// Tables +//------------------------------------------------------------------ +struct ByteMnemonic { + int b; // -1 terminates, otherwise must be in range (0..255) + OperandOrder op_order_; + const char* mnem; +}; + + +static ByteMnemonic two_operands_instr[] = { + { 0x03, REG_OPER_OP_ORDER, "add" }, + { 0x21, OPER_REG_OP_ORDER, "and" }, + { 0x23, REG_OPER_OP_ORDER, "and" }, + { 0x3B, REG_OPER_OP_ORDER, "cmp" }, + { 0x8D, REG_OPER_OP_ORDER, "lea" }, + { 0x09, OPER_REG_OP_ORDER, "or" }, + { 0x0B, REG_OPER_OP_ORDER, "or" }, + { 0x1B, REG_OPER_OP_ORDER, "sbb" }, + { 0x29, OPER_REG_OP_ORDER, "sub" }, + { 0x2B, REG_OPER_OP_ORDER, "sub" }, + { 0x85, REG_OPER_OP_ORDER, "test" }, + { 0x31, OPER_REG_OP_ORDER, "xor" }, + { 0x33, REG_OPER_OP_ORDER, "xor" }, + { 0x87, REG_OPER_OP_ORDER, "xchg" }, + { 0x8A, REG_OPER_OP_ORDER, "movb" }, + { 0x8B, REG_OPER_OP_ORDER, "mov" }, + { -1, UNSET_OP_ORDER, "" } +}; + + +static ByteMnemonic zero_operands_instr[] = { + { 0xC3, UNSET_OP_ORDER, "ret" }, + { 0xC9, UNSET_OP_ORDER, "leave" }, + { 0x90, UNSET_OP_ORDER, "nop" }, + { 0xF4, UNSET_OP_ORDER, "hlt" }, + { 0xCC, UNSET_OP_ORDER, "int3" }, + { 0x60, UNSET_OP_ORDER, "pushad" }, + { 0x61, UNSET_OP_ORDER, "popad" }, + { 0x9C, UNSET_OP_ORDER, "pushfd" }, + { 0x9D, UNSET_OP_ORDER, "popfd" }, + { 0x9E, UNSET_OP_ORDER, "sahf" }, + { 0x99, UNSET_OP_ORDER, "cdq" }, + { 0x9B, UNSET_OP_ORDER, "fwait" }, + { -1, UNSET_OP_ORDER, "" } +}; + + +static ByteMnemonic call_jump_instr[] = { + { 0xE8, UNSET_OP_ORDER, "call" }, + { 0xE9, UNSET_OP_ORDER, "jmp" }, + { -1, UNSET_OP_ORDER, "" } +}; + + +static ByteMnemonic short_immediate_instr[] = { + { 0x05, UNSET_OP_ORDER, "add" }, + { 0x0D, UNSET_OP_ORDER, "or" }, + { 0x15, UNSET_OP_ORDER, "adc" }, + { 0x25, UNSET_OP_ORDER, "and" }, + { 0x2D, UNSET_OP_ORDER, "sub" }, + { 0x35, UNSET_OP_ORDER, "xor" }, + { 0x3D, UNSET_OP_ORDER, "cmp" }, + { -1, UNSET_OP_ORDER, "" } +}; + + +static const char* conditional_code_suffix[] = { + "o", "no", "c", "nc", "z", "nz", "a", "na", + "s", "ns", "pe", "po", "l", "ge", "le", "g" +}; + + +enum InstructionType { + NO_INSTR, + ZERO_OPERANDS_INSTR, + TWO_OPERANDS_INSTR, + JUMP_CONDITIONAL_SHORT_INSTR, + REGISTER_INSTR, + PUSHPOP_INSTR, // Has implicit 64-bit operand size. 
+ MOVE_REG_INSTR, + CALL_JUMP_INSTR, + SHORT_IMMEDIATE_INSTR +}; + + +struct InstructionDesc { + const char* mnem; + InstructionType type; + OperandOrder op_order_; +}; + + +class InstructionTable { + public: + InstructionTable(); + const InstructionDesc& Get(byte x) const { + return instructions_[x]; + } + + private: + InstructionDesc instructions_[256]; + void Clear(); + void Init(); + void CopyTable(ByteMnemonic bm[], InstructionType type); + void SetTableRange(InstructionType type, byte start, byte end, + const char* mnem); + void AddJumpConditionalShort(); +}; + + +InstructionTable::InstructionTable() { + Clear(); + Init(); +} + + +void InstructionTable::Clear() { + for (int i = 0; i < 256; i++) { + instructions_[i].mnem = ""; + instructions_[i].type = NO_INSTR; + instructions_[i].op_order_ = UNSET_OP_ORDER; + } +} + + +void InstructionTable::Init() { + CopyTable(two_operands_instr, TWO_OPERANDS_INSTR); + CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR); + CopyTable(call_jump_instr, CALL_JUMP_INSTR); + CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR); + AddJumpConditionalShort(); + SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, "push"); + SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, "pop"); + SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov"); +} + + +void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) { + for (int i = 0; bm[i].b >= 0; i++) { + InstructionDesc* id = &instructions_[bm[i].b]; + id->mnem = bm[i].mnem; + id->op_order_ = bm[i].op_order_; + assert(id->type == NO_INSTR); // Information already entered + id->type = type; + } +} + + +void InstructionTable::SetTableRange(InstructionType type, byte start, + byte end, const char* mnem) { + for (byte b = start; b <= end; b++) { + InstructionDesc* id = &instructions_[b]; + assert(id->type == NO_INSTR); // Information already entered + id->mnem = mnem; + id->type = type; + } +} + + +void InstructionTable::AddJumpConditionalShort() { + for (byte b = 0x70; b <= 0x7F; b++) { + InstructionDesc* id = &instructions_[b]; + assert(id->type == NO_INSTR); // Information already entered + id->mnem = NULL; // Computed depending on condition code. + id->type = JUMP_CONDITIONAL_SHORT_INSTR; + } +} + + +static InstructionTable instruction_table; + + +// The X64 disassembler implementation. +enum UnimplementedOpcodeAction { + CONTINUE_ON_UNIMPLEMENTED_OPCODE, + ABORT_ON_UNIMPLEMENTED_OPCODE +}; + + +class DisassemblerX64 { + public: + DisassemblerX64(const NameConverter& converter, + UnimplementedOpcodeAction unimplemented_action = + ABORT_ON_UNIMPLEMENTED_OPCODE) + : converter_(converter), + tmp_buffer_pos_(0), + abort_on_unimplemented_( + unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE), + rex_(0), + operand_size_(0) { + tmp_buffer_[0] = '\0'; + } + + virtual ~DisassemblerX64() { + } + + // Writes one disassembled instruction into 'buffer' (0-terminated). + // Returns the length of the disassembled machine instruction in bytes. 
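  // A sketch of typical use (the 'begin'/'end' pointers are illustrative):
  //   DisassemblerX64 d(converter);
  //   v8::internal::EmbeddedVector<char, 128> buffer;
  //   for (byte* pc = begin; pc < end; pc += d.InstructionDecode(buffer, pc)) {
  //     // buffer.start() holds the text for the instruction at pc.
  //   }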
+ int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction); + + private: + + const NameConverter& converter_; + v8::internal::EmbeddedVector<char, 128> tmp_buffer_; + unsigned int tmp_buffer_pos_; + bool abort_on_unimplemented_; + // Prefixes parsed + byte rex_; + byte operand_size_; + + void setOperandSizePrefix(byte prefix) { + ASSERT_EQ(0x66, prefix); + operand_size_ = prefix; + } + + void setRex(byte rex) { + ASSERT_EQ(0x40, rex & 0xF0); + rex_ = rex; + } + + bool rex() { return rex_ != 0; } + + bool rex_b() { return (rex_ & 0x01) != 0; } + + // Actual number of base register given the low bits and the rex.b state. + int base_reg(int low_bits) { return low_bits | ((rex_ & 0x01) << 3); } + + bool rex_x() { return (rex_ & 0x02) != 0; } + + bool rex_r() { return (rex_ & 0x04) != 0; } + + bool rex_w() { return (rex_ & 0x08) != 0; } + + int operand_size() { + return rex_w() ? 64 : (operand_size_ != 0) ? 16 : 32; + } + + char operand_size_code() { + return rex_w() ? 'q' : (operand_size_ != 0) ? 'w' : 'l'; + } + + const char* NameOfCPURegister(int reg) const { + return converter_.NameOfCPURegister(reg); + } + + const char* NameOfByteCPURegister(int reg) const { + return converter_.NameOfByteCPURegister(reg); + } + + const char* NameOfXMMRegister(int reg) const { + return converter_.NameOfXMMRegister(reg); + } + + const char* NameOfAddress(byte* addr) const { + return converter_.NameOfAddress(addr); + } + + // Disassembler helper functions. + void get_modrm(byte data, + int* mod, + int* regop, + int* rm) { + *mod = (data >> 6) & 3; + *regop = ((data & 0x38) >> 3) | (rex_r() ? 8 : 0); + *rm = (data & 7) | (rex_b() ? 8 : 0); + } + + void get_sib(byte data, + int* scale, + int* index, + int* base) { + *scale = (data >> 6) & 3; + *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0); + *base = data & 7 | (rex_b() ? 8 : 0); + } + + typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const; + + int PrintRightOperandHelper(byte* modrmp, + RegisterNameMapping register_name); + int PrintRightOperand(byte* modrmp); + int PrintRightByteOperand(byte* modrmp); + int PrintOperands(const char* mnem, + OperandOrder op_order, + byte* data); + int PrintImmediateOp(byte* data); + int F7Instruction(byte* data); + int D1D3C1Instruction(byte* data); + int JumpShort(byte* data); + int JumpConditional(byte* data); + int JumpConditionalShort(byte* data); + int SetCC(byte* data); + int FPUInstruction(byte* data); + void AppendToBuffer(const char* format, ...); + + void UnimplementedInstruction() { + if (abort_on_unimplemented_) { + UNIMPLEMENTED(); + } else { + AppendToBuffer("'Unimplemented Instruction'"); + } + } +}; + + +void DisassemblerX64::AppendToBuffer(const char* format, ...) { + v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_; + va_list args; + va_start(args, format); + int result = v8::internal::OS::VSNPrintF(buf, format, args); + va_end(args); + tmp_buffer_pos_ += result; +} + + +int DisassemblerX64::PrintRightOperandHelper( + byte* modrmp, + RegisterNameMapping register_name) { + int mod, regop, rm; + get_modrm(*modrmp, &mod, ®op, &rm); + switch (mod) { + case 0: + if ((rm & 7) == 5) { + int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1); + AppendToBuffer("[0x%x]", disp); + return 5; + } else if ((rm & 7) == 4) { + // Codes for SIB byte. + byte sib = *(modrmp + 1); + int scale, index, base; + get_sib(sib, &scale, &index, &base); + if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) { + // index == rsp means no index. 
Only use sib byte with no index for + // rsp and r12 base. + AppendToBuffer("[%s]", (this->*register_name)(base)); + return 2; + } else if (base == 5) { + // base == rbp means no base register (when mod == 0). + int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2); + AppendToBuffer("[%s*%d+0x%x]", + (this->*register_name)(index), + 1 << scale, disp); + return 6; + } else if (index != 4 && base != 5) { + // [base+index*scale] + AppendToBuffer("[%s+%s*%d]", + (this->*register_name)(base), + (this->*register_name)(index), + 1 << scale); + return 2; + } else { + UnimplementedInstruction(); + return 1; + } + } else { + AppendToBuffer("[%s]", (this->*register_name)(rm)); + return 1; + } + break; + case 1: // fall through + case 2: + if ((rm & 7) == 4) { + byte sib = *(modrmp + 1); + int scale, index, base; + get_sib(sib, &scale, &index, &base); + int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2) + : *reinterpret_cast<char*>(modrmp + 2); + if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) { + if (-disp > 0) { + AppendToBuffer("[%s-0x%x]", (this->*register_name)(base), -disp); + } else { + AppendToBuffer("[%s+0x%x]", (this->*register_name)(base), disp); + } + } else { + if (-disp > 0) { + AppendToBuffer("[%s+%s*%d-0x%x]", + (this->*register_name)(base), + (this->*register_name)(index), + 1 << scale, + -disp); + } else { + AppendToBuffer("[%s+%s*%d+0x%x]", + (this->*register_name)(base), + (this->*register_name)(index), + 1 << scale, + disp); + } + } + return mod == 2 ? 6 : 3; + } else { + // No sib. + int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1) + : *reinterpret_cast<char*>(modrmp + 1); + if (-disp > 0) { + AppendToBuffer("[%s-0x%x]", (this->*register_name)(rm), -disp); + } else { + AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp); + } + return (mod == 2) ? 5 : 2; + } + break; + case 3: + AppendToBuffer("%s", (this->*register_name)(rm)); + return 1; + default: + UnimplementedInstruction(); + return 1; + } + UNREACHABLE(); +} + + +int DisassemblerX64::PrintRightOperand(byte* modrmp) { + return PrintRightOperandHelper(modrmp, + &DisassemblerX64::NameOfCPURegister); +} + + +int DisassemblerX64::PrintRightByteOperand(byte* modrmp) { + return PrintRightOperandHelper(modrmp, + &DisassemblerX64::NameOfByteCPURegister); } -Disassembler::~Disassembler() { - UNIMPLEMENTED(); +// Returns number of bytes used including the current *data. +// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'. +int DisassemblerX64::PrintOperands(const char* mnem, + OperandOrder op_order, + byte* data) { + byte modrm = *data; + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + int advance = 0; + switch (op_order) { + case REG_OPER_OP_ORDER: { + AppendToBuffer("%s%c %s,", + mnem, + operand_size_code(), + NameOfCPURegister(regop)); + advance = PrintRightOperand(data); + break; + } + case OPER_REG_OP_ORDER: { + AppendToBuffer("%s%c ", mnem, operand_size_code()); + advance = PrintRightOperand(data); + AppendToBuffer(",%s", NameOfCPURegister(regop)); + break; + } + default: + UNREACHABLE(); + break; + } + return advance; } -const char* NameConverter::NameOfAddress(unsigned char* addr) const { - UNIMPLEMENTED(); - return NULL; +// Returns number of bytes used by machine instruction, including *data byte. +// Writes immediate instructions to 'tmp_buffer_'. 
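// Bit 1 of the opcode selects the immediate width: 0x81 carries a full
// 32-bit immediate, while 0x83 is the same ModRM-extended operation with
// a sign-extended 8-bit immediate; the reg field of the ModRM byte picks
// among add/or/adc/and/sub/xor/cmp. In C terms:
//   int imm_size = ((*data & 0x02) != 0) ? 1 : 4;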
+int DisassemblerX64::PrintImmediateOp(byte* data) { + bool sign_extension_bit = (*data & 0x02) != 0; + byte modrm = *(data + 1); + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + const char* mnem = "Imm???"; + switch (regop) { + case 0: + mnem = "add"; + break; + case 1: + mnem = "or"; + break; + case 2: + mnem = "adc"; + break; + case 4: + mnem = "and"; + break; + case 5: + mnem = "sub"; + break; + case 6: + mnem = "xor"; + break; + case 7: + mnem = "cmp"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data + 1); + if (sign_extension_bit) { + AppendToBuffer(",0x%x", *(data + 1 + count)); + return 1 + count + 1 /*int8*/; + } else { + AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count)); + return 1 + count + 4 /*int32_t*/; + } +} + + +// Returns number of bytes used, including *data. +int DisassemblerX64::F7Instruction(byte* data) { + assert(*data == 0xF7); + byte modrm = *(data + 1); + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + if (mod == 3 && regop != 0) { + const char* mnem = NULL; + switch (regop) { + case 2: + mnem = "not"; + break; + case 3: + mnem = "neg"; + break; + case 4: + mnem = "mul"; + break; + case 7: + mnem = "idiv"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s%c %s", + mnem, + operand_size_code(), + NameOfCPURegister(rm)); + return 2; + } else if (mod == 3 && regop == 0) { + int32_t imm = *reinterpret_cast<int32_t*>(data + 2); + AppendToBuffer("test%c %s,0x%x", + operand_size_code(), + NameOfCPURegister(rm), + imm); + return 6; + } else if (regop == 0) { + AppendToBuffer("test%c ", operand_size_code()); + int count = PrintRightOperand(data + 1); + int32_t imm = *reinterpret_cast<int32_t*>(data + 1 + count); + AppendToBuffer(",0x%x", imm); + return 1 + count + 4 /*int32_t*/; + } else { + UnimplementedInstruction(); + return 2; + } +} + + +int DisassemblerX64::D1D3C1Instruction(byte* data) { + byte op = *data; + assert(op == 0xD1 || op == 0xD3 || op == 0xC1); + byte modrm = *(data + 1); + int mod, regop, rm; + get_modrm(modrm, &mod, ®op, &rm); + ASSERT(regop < 8); + int imm8 = -1; + int num_bytes = 2; + if (mod == 3) { + const char* mnem = NULL; + if (op == 0xD1) { + imm8 = 1; + switch (regop) { + case 2: + mnem = "rcl"; + break; + case 7: + mnem = "sar"; + break; + case 4: + mnem = "shl"; + break; + default: + UnimplementedInstruction(); + } + } else if (op == 0xC1) { + imm8 = *(data + 2); + num_bytes = 3; + switch (regop) { + case 2: + mnem = "rcl"; + break; + case 4: + mnem = "shl"; + break; + case 5: + mnem = "shr"; + break; + case 7: + mnem = "sar"; + break; + default: + UnimplementedInstruction(); + } + } else if (op == 0xD3) { + switch (regop) { + case 4: + mnem = "shl"; + break; + case 5: + mnem = "shr"; + break; + case 7: + mnem = "sar"; + break; + default: + UnimplementedInstruction(); + } + } + assert(mnem != NULL); + AppendToBuffer("%s%c %s,", + mnem, + operand_size_code(), + NameOfCPURegister(rm)); + if (imm8 > 0) { + AppendToBuffer("%d", imm8); + } else { + AppendToBuffer("cl"); + } + } else { + UnimplementedInstruction(); + } + return num_bytes; +} + + +// Returns number of bytes used, including *data. +int DisassemblerX64::JumpShort(byte* data) { + assert(*data == 0xEB); + byte b = *(data + 1); + byte* dest = data + static_cast<int8_t>(b) + 2; + AppendToBuffer("jmp %s", NameOfAddress(dest)); + return 2; +} + + +// Returns number of bytes used, including *data. 
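// Near conditional jumps are encoded as 0x0F 0x80..0x8F followed by a
// 32-bit displacement taken relative to the end of the 6-byte instruction:
//   byte* target = instr + 6 + *reinterpret_cast<int32_t*>(instr + 2);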
+int DisassemblerX64::JumpConditional(byte* data) { + assert(*data == 0x0F); + byte cond = *(data + 1) & 0x0F; + byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6; + const char* mnem = conditional_code_suffix[cond]; + AppendToBuffer("j%s %s", mnem, NameOfAddress(dest)); + return 6; // includes 0x0F +} + + +// Returns number of bytes used, including *data. +int DisassemblerX64::JumpConditionalShort(byte* data) { + byte cond = *data & 0x0F; + byte b = *(data + 1); + byte* dest = data + static_cast<int8_t>(b) + 2; + const char* mnem = conditional_code_suffix[cond]; + AppendToBuffer("j%s %s", mnem, NameOfAddress(dest)); + return 2; +} + + +// Returns number of bytes used, including *data. +int DisassemblerX64::SetCC(byte* data) { + assert(*data == 0x0F); + byte cond = *(data + 1) & 0x0F; + const char* mnem = conditional_code_suffix[cond]; + AppendToBuffer("set%s%c ", mnem, operand_size_code()); + PrintRightByteOperand(data + 2); + return 3; // includes 0x0F +} + + +// Returns number of bytes used, including *data. +int DisassemblerX64::FPUInstruction(byte* data) { + byte b1 = *data; + byte b2 = *(data + 1); + if (b1 == 0xD9) { + const char* mnem = NULL; + switch (b2) { + case 0xE8: + mnem = "fld1"; + break; + case 0xEE: + mnem = "fldz"; + break; + case 0xE1: + mnem = "fabs"; + break; + case 0xE0: + mnem = "fchs"; + break; + case 0xF8: + mnem = "fprem"; + break; + case 0xF5: + mnem = "fprem1"; + break; + case 0xF7: + mnem = "fincstp"; + break; + case 0xE4: + mnem = "ftst"; + break; + } + if (mnem != NULL) { + AppendToBuffer("%s", mnem); + return 2; + } else if ((b2 & 0xF8) == 0xC8) { + AppendToBuffer("fxch st%d", b2 & 0x7); + return 2; + } else { + int mod, regop, rm; + get_modrm(*(data + 1), &mod, ®op, &rm); + const char* mnem = "?"; + switch (regop) { + case 0: + mnem = "fld_s"; + break; + case 3: + mnem = "fstp_s"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data + 1); + return count + 1; + } + } else if (b1 == 0xDD) { + if ((b2 & 0xF8) == 0xC0) { + AppendToBuffer("ffree st%d", b2 & 0x7); + return 2; + } else { + int mod, regop, rm; + get_modrm(*(data + 1), &mod, ®op, &rm); + const char* mnem = "?"; + switch (regop) { + case 0: + mnem = "fld_d"; + break; + case 3: + mnem = "fstp_d"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data + 1); + return count + 1; + } + } else if (b1 == 0xDB) { + int mod, regop, rm; + get_modrm(*(data + 1), &mod, ®op, &rm); + const char* mnem = "?"; + switch (regop) { + case 0: + mnem = "fild_s"; + break; + case 2: + mnem = "fist_s"; + break; + case 3: + mnem = "fistp_s"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data + 1); + return count + 1; + } else if (b1 == 0xDF) { + if (b2 == 0xE0) { + AppendToBuffer("fnstsw_ax"); + return 2; + } + int mod, regop, rm; + get_modrm(*(data + 1), &mod, ®op, &rm); + const char* mnem = "?"; + switch (regop) { + case 5: + mnem = "fild_d"; + break; + case 7: + mnem = "fistp_d"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s ", mnem); + int count = PrintRightOperand(data + 1); + return count + 1; + } else if (b1 == 0xDC || b1 == 0xDE) { + bool is_pop = (b1 == 0xDE); + if (is_pop && b2 == 0xD9) { + AppendToBuffer("fcompp"); + return 2; + } + const char* mnem = "FP0xDC"; + switch (b2 & 0xF8) { + case 0xC0: + mnem = "fadd"; + break; + case 0xE8: + mnem = "fsub"; + break; + case 
0xC8: + mnem = "fmul"; + break; + case 0xF8: + mnem = "fdiv"; + break; + default: + UnimplementedInstruction(); + } + AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7); + return 2; + } else if (b1 == 0xDA && b2 == 0xE9) { + const char* mnem = "fucompp"; + AppendToBuffer("%s", mnem); + return 2; + } + AppendToBuffer("Unknown FP instruction"); + return 2; +} + +// Mnemonics for instructions 0xF0 byte. +// Returns NULL if the instruction is not handled here. +static const char* F0Mnem(byte f0byte) { + switch (f0byte) { + case 0x1F: + return "nop"; + case 0x31: + return "rdtsc"; + case 0xA2: + return "cpuid"; + case 0xBE: + return "movsxb"; + case 0xBF: + return "movsxw"; + case 0xB6: + return "movzxb"; + case 0xB7: + return "movzxw"; + case 0xAF: + return "imul"; + case 0xA5: + return "shld"; + case 0xAD: + return "shrd"; + case 0xAB: + return "bts"; + default: + return NULL; + } +} + +// Disassembled instruction '*instr' and writes it into 'out_buffer'. +int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, + byte* instr) { + tmp_buffer_pos_ = 0; // starting to write as position 0 + byte* data = instr; + bool processed = true; // Will be set to false if the current instruction + // is not in 'instructions' table. + byte current; + + // Scan for prefixes. + while (true) { + current = *data; + if (current == 0x66) { + setOperandSizePrefix(current); + data++; + } else if ((current & 0xF0) == 0x40) { + setRex(current); + if (rex_w()) AppendToBuffer("REX.W "); + data++; + } else { + break; + } + } + + const InstructionDesc& idesc = instruction_table.Get(current); + switch (idesc.type) { + case ZERO_OPERANDS_INSTR: + AppendToBuffer(idesc.mnem); + data++; + break; + + case TWO_OPERANDS_INSTR: + data++; + data += PrintOperands(idesc.mnem, idesc.op_order_, data); + break; + + case JUMP_CONDITIONAL_SHORT_INSTR: + data += JumpConditionalShort(data); + break; + + case REGISTER_INSTR: + AppendToBuffer("%s%c %s", + idesc.mnem, + operand_size_code(), + NameOfCPURegister(base_reg(current & 0x07))); + data++; + break; + case PUSHPOP_INSTR: + AppendToBuffer("%s %s", + idesc.mnem, + NameOfCPURegister(base_reg(current & 0x07))); + data++; + break; + case MOVE_REG_INSTR: { + byte* addr = NULL; + switch (operand_size()) { + case 16: + addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1)); + data += 3; + break; + case 32: + addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1)); + data += 5; + break; + case 64: + addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1)); + data += 9; + break; + default: + UNREACHABLE(); + } + AppendToBuffer("mov%c %s,%s", + operand_size_code(), + NameOfCPURegister(base_reg(current & 0x07)), + NameOfAddress(addr)); + break; + } + + case CALL_JUMP_INSTR: { + byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5; + AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr)); + data += 5; + break; + } + + case SHORT_IMMEDIATE_INSTR: { + byte* addr = + reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1)); + AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr)); + data += 5; + break; + } + + case NO_INSTR: + processed = false; + break; + + default: + UNIMPLEMENTED(); // This type is not implemented. + } + + // The first byte didn't match any of the simple opcodes, so we + // need to do special processing on it. 
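// Note that the prefix scan above has already consumed any 0x66
// operand-size prefix and any REX byte, so *data is the first opcode
// byte proper. A REX prefix is any byte in 0x40..0x4F:
//   static bool IsRexPrefix(byte b) { return (b & 0xF0) == 0x40; }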
+ if (!processed) { + switch (*data) { + case 0xC2: + AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1)); + data += 3; + break; + + case 0x69: // fall through + case 0x6B: { + int mod, regop, rm; + get_modrm(*(data + 1), &mod, ®op, &rm); + int32_t imm = *data == 0x6B ? *(data + 2) + : *reinterpret_cast<int32_t*>(data + 2); + AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop), + NameOfCPURegister(rm), imm); + data += 2 + (*data == 0x6B ? 1 : 4); + } + break; + + case 0xF6: { + int mod, regop, rm; + get_modrm(*(data + 1), &mod, ®op, &rm); + if (mod == 3 && regop == 0) { + AppendToBuffer("testb %s,%d", NameOfCPURegister(rm), *(data + 2)); + } else { + UnimplementedInstruction(); + } + data += 3; + } + break; + + case 0x81: // fall through + case 0x83: // 0x81 with sign extension bit set + data += PrintImmediateOp(data); + break; + + case 0x0F: { + byte f0byte = *(data + 1); + const char* f0mnem = F0Mnem(f0byte); + if (f0byte == 0x1F) { + data += 1; + byte modrm = *data; + data += 1; + if (((modrm >> 3) & 7) == 4) { + // SIB byte present. + data += 1; + } + int mod = modrm >> 6; + if (mod == 1) { + // Byte displacement. + data += 1; + } else if (mod == 2) { + // 32-bit displacement. + data += 4; + } + AppendToBuffer("nop"); + } else if (f0byte == 0xA2 || f0byte == 0x31) { + AppendToBuffer("%s", f0mnem); + data += 2; + } else if ((f0byte & 0xF0) == 0x80) { + data += JumpConditional(data); + } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || f0byte + == 0xB7 || f0byte == 0xAF) { + data += 2; + data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data); + } else if ((f0byte & 0xF0) == 0x90) { + data += SetCC(data); + } else { + data += 2; + if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) { + // shrd, shld, bts + AppendToBuffer("%s ", f0mnem); + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + data += PrintRightOperand(data); + if (f0byte == 0xAB) { + AppendToBuffer(",%s", NameOfCPURegister(regop)); + } else { + AppendToBuffer(",%s,cl", NameOfCPURegister(regop)); + } + } else { + UnimplementedInstruction(); + } + } + } + break; + + case 0x8F: { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + if (regop == 0) { + AppendToBuffer("pop "); + data += PrintRightOperand(data); + } + } + break; + + case 0xFF: { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + const char* mnem = NULL; + switch (regop) { + case 0: + mnem = "inc"; + break; + case 1: + mnem = "dec"; + break; + case 2: + mnem = "call"; + break; + case 4: + mnem = "jmp"; + break; + case 6: + mnem = "push"; + break; + default: + mnem = "???"; + } + AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "), + mnem, + operand_size_code()); + data += PrintRightOperand(data); + } + break; + + case 0xC7: // imm32, fall through + case 0xC6: // imm8 + { + bool is_byte = *data == 0xC6; + data++; + + AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code()); + data += PrintRightOperand(data); + int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data); + AppendToBuffer(",0x%x", imm); + data += is_byte ? 1 : 4; + } + break; + + case 0x80: { + data++; + AppendToBuffer("cmpb "); + data += PrintRightOperand(data); + int32_t imm = *data; + AppendToBuffer(",0x%x", imm); + data++; + } + break; + + case 0x88: // 8bit, fall through + case 0x89: // 32bit + { + bool is_byte = *data == 0x88; + int mod, regop, rm; + data++; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("mov%c ", is_byte ? 
'b' : operand_size_code()); + data += PrintRightOperand(data); + AppendToBuffer(",%s", NameOfCPURegister(regop)); + } + break; + + case 0x90: + case 0x91: + case 0x92: + case 0x93: + case 0x94: + case 0x95: + case 0x96: + case 0x97: { + int reg = (current & 0x7) | (rex_b() ? 8 : 0); + if (reg == 0) { + AppendToBuffer("nop"); // Common name for xchg rax,rax. + } else { + AppendToBuffer("xchg%c rax, %s", + operand_size_code(), + NameOfCPURegister(reg)); + } + } + break; + + case 0xFE: { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + if (mod == 3 && regop == 1) { + AppendToBuffer("decb %s", NameOfCPURegister(rm)); + } else { + UnimplementedInstruction(); + } + data++; + } + break; + + case 0x68: + AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1)); + data += 5; + break; + + case 0x6A: + AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1)); + data += 2; + break; + + case 0xA8: + AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1)); + data += 2; + break; + + case 0xA9: + AppendToBuffer("test%c rax,0x%x", // CHECKME! + operand_size_code(), + *reinterpret_cast<int32_t*>(data + 1)); + data += 5; + break; + + case 0xD1: // fall through + case 0xD3: // fall through + case 0xC1: + data += D1D3C1Instruction(data); + break; + + case 0xD9: // fall through + case 0xDA: // fall through + case 0xDB: // fall through + case 0xDC: // fall through + case 0xDD: // fall through + case 0xDE: // fall through + case 0xDF: + data += FPUInstruction(data); + break; + + case 0xEB: + data += JumpShort(data); + break; + + case 0xF2: + if (*(data + 1) == 0x0F) { + byte b2 = *(data + 2); + if (b2 == 0x11) { + AppendToBuffer("movsd "); + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + data += PrintRightOperand(data); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); + } else if (b2 == 0x10) { + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + AppendToBuffer("movsd %s,", NameOfXMMRegister(regop)); + data += PrintRightOperand(data); + } else { + const char* mnem = "?"; + switch (b2) { + case 0x2A: + mnem = "cvtsi2sd"; + break; + case 0x58: + mnem = "addsd"; + break; + case 0x59: + mnem = "mulsd"; + break; + case 0x5C: + mnem = "subsd"; + break; + case 0x5E: + mnem = "divsd"; + break; + } + data += 3; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + if (b2 == 0x2A) { + AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop)); + data += PrintRightOperand(data); + } else { + AppendToBuffer("%s %s,%s", mnem, NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; + } + } + } else { + UnimplementedInstruction(); + } + break; + + case 0xF3: + if (*(data + 1) == 0x0F && *(data + 2) == 0x2C) { + data += 3; + data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data); + } else { + UnimplementedInstruction(); + } + break; + + case 0xF7: + data += F7Instruction(data); + break; + + default: + UnimplementedInstruction(); + } + } // !processed + + if (tmp_buffer_pos_ < sizeof tmp_buffer_) { + tmp_buffer_[tmp_buffer_pos_] = '\0'; + } + + int instr_len = data - instr; + ASSERT(instr_len > 0); // Ensure progress. + + int outp = 0; + // Instruction bytes.
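Nearly every special case above starts by tearing a ModR/M byte apart with get_modrm (a helper defined elsewhere in this file); the decomposition itself is just the fixed x86 field layout, sketched stand-alone here for reference:

    // x86 ModR/M layout: mod in bits 7..6, reg/opcode in bits 5..3,
    // r/m in bits 2..0. REX.R/REX.B extend regop/rm to 4 bits elsewhere.
    static void SplitModRM(unsigned char modrm, int* mod, int* regop, int* rm) {
      *mod = modrm >> 6;
      *regop = (modrm >> 3) & 7;
      *rm = modrm & 7;
    }
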
+ for (byte* bp = instr; bp < data; bp++) { + outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp); + } + for (int i = 6 - instr_len; i >= 0; i--) { + outp += v8::internal::OS::SNPrintF(out_buffer + outp, " "); + } + + outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s", + tmp_buffer_.start()); + return instr_len; +} + +//------------------------------------------------------------------------------ + + +static const char* cpu_regs[16] = { + "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" +}; + + +static const char* byte_cpu_regs[16] = { + "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil", + "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l" +}; + + +static const char* xmm_regs[16] = { + "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", + "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15" +}; + + +const char* NameConverter::NameOfAddress(byte* addr) const { + static v8::internal::EmbeddedVector<char, 32> tmp_buffer; + v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr); + return tmp_buffer.start(); +} + + +const char* NameConverter::NameOfConstant(byte* addr) const { + return NameOfAddress(addr); } const char* NameConverter::NameOfCPURegister(int reg) const { - UNIMPLEMENTED(); - return NULL; + if (0 <= reg && reg < 16) + return cpu_regs[reg]; + return "noreg"; } -int Disassembler::ConstantPoolSizeAt(unsigned char* addr) { - UNIMPLEMENTED(); - return 0; +const char* NameConverter::NameOfByteCPURegister(int reg) const { + if (0 <= reg && reg < 16) + return byte_cpu_regs[reg]; + return "noreg"; } -int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer, - unsigned char* instruction) { - UNIMPLEMENTED(); - return 0; +const char* NameConverter::NameOfXMMRegister(int reg) const { + if (0 <= reg && reg < 16) + return xmm_regs[reg]; + return "noxmmreg"; } -const char* NameConverter::NameOfByteCPURegister(int a) const { - UNIMPLEMENTED(); - return NULL; + +const char* NameConverter::NameInCode(byte* addr) const { + // X64 does not embed debug strings at the moment. + UNREACHABLE(); + return ""; } -const char* NameConverter::NameOfXMMRegister(int a) const { - UNIMPLEMENTED(); - return NULL; +//------------------------------------------------------------------------------ + +Disassembler::Disassembler(const NameConverter& converter) + : converter_(converter) { } + +Disassembler::~Disassembler() { } + + +int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer, + byte* instruction) { + DisassemblerX64 d(converter_, CONTINUE_ON_UNIMPLEMENTED_OPCODE); + return d.InstructionDecode(buffer, instruction); } -const char* NameConverter::NameOfConstant(unsigned char* a) const { - UNIMPLEMENTED(); - return NULL; + +// The X64 assembler does not use constant pools. 
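The byte-dump loop at the start of this hunk pads every instruction out to seven byte columns (hence the 6 - instr_len bound) so the mnemonic column lines up across rows. The same idea in isolation, with the Vector/SNPrintF machinery swapped for plain stdio — an illustrative sketch rather than the V8 API:

    #include <cstdio>

    // Print an instruction's raw bytes, padded to a fixed column width.
    static void PrintBytesPadded(const unsigned char* begin,
                                 const unsigned char* end) {
      for (const unsigned char* p = begin; p < end; p++) printf("%02x", *p);
      for (int i = 6 - static_cast<int>(end - begin); i >= 0; i--) {
        printf("  ");  // two spaces stand in for each missing byte
      }
    }
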
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { + return -1; } -const char* NameConverter::NameInCode(unsigned char* a) const { - UNIMPLEMENTED(); - return NULL; + +void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) { + NameConverter converter; + Disassembler d(converter); + for (byte* pc = begin; pc < end;) { + v8::internal::EmbeddedVector<char, 128> buffer; + buffer[0] = '\0'; + byte* prev_pc = pc; + pc += d.InstructionDecode(buffer, pc); + fprintf(f, "%p", prev_pc); + fprintf(f, " "); + + for (byte* bp = prev_pc; bp < pc; bp++) { + fprintf(f, "%02x", *bp); + } + for (int i = 6 - (pc - prev_pc); i >= 0; i--) { + fprintf(f, " "); + } + fprintf(f, " %s\n", buffer.start()); + } } } // namespace disasm diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h index d4ab2c62e..24c78da99 100644 --- a/deps/v8/src/x64/frames-x64.h +++ b/deps/v8/src/x64/frames-x64.h @@ -59,12 +59,7 @@ class StackHandlerConstants : public AllStatic { class EntryFrameConstants : public AllStatic { public: - static const int kCallerFPOffset = -6 * kPointerSize; - - static const int kFunctionArgOffset = +3 * kPointerSize; - static const int kReceiverArgOffset = +4 * kPointerSize; - static const int kArgcOffset = +5 * kPointerSize; - static const int kArgvOffset = +6 * kPointerSize; + static const int kCallerFPOffset = -10 * kPointerSize; }; diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index abaffb338..7b8699f6d 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -212,11 +212,9 @@ void CallIC::Generate(MacroAssembler* masm, __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver __ testl(rdx, Immediate(kSmiTagMask)); __ j(zero, &invoke); - __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset)); - __ movzxbq(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset)); - __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_GLOBAL_OBJECT_TYPE))); + __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx); __ j(equal, &global); - __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_BUILTINS_OBJECT_TYPE))); + __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE); __ j(not_equal, &invoke); // Patch the receiver on the stack. diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index f6a5ffecc..099a46103 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -79,51 +79,6 @@ void MacroAssembler::NegativeZeroTest(Register result, } -void MacroAssembler::ConstructAndTestJSFunction() { - const int initial_buffer_size = 4 * KB; - char* buffer = new char[initial_buffer_size]; - MacroAssembler masm(buffer, initial_buffer_size); - - const uint64_t secret = V8_INT64_C(0xdeadbeefcafebabe); - Handle<String> constant = - Factory::NewStringFromAscii(Vector<const char>("451", 3), TENURED); -#define __ ACCESS_MASM((&masm)) - // Construct a simple JSfunction here, using Assembler and MacroAssembler - // commands. 
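The Disassemble entry point near the top of this hunk is the whole pipeline's driver: hand it a byte range and a FILE* and it streams one formatted line per decoded instruction. A minimal usage sketch (assuming the buffer really contains x64 code; DumpCode is a made-up caller, not a V8 function):

    #include <cstdio>

    void DumpCode(unsigned char* begin, unsigned char* end) {
      // One line per instruction: address, raw bytes, then the mnemonic.
      disasm::Disassembler::Disassemble(stdout, begin, end);
    }
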
- __ movq(rax, constant, RelocInfo::EMBEDDED_OBJECT); - __ push(rax); - __ CallRuntime(Runtime::kStringParseFloat, 1); - __ movq(kScratchRegister, secret, RelocInfo::NONE); - __ addq(rax, kScratchRegister); - __ ret(0); -#undef __ - CodeDesc desc; - masm.GetCode(&desc); - Code::Flags flags = Code::ComputeFlags(Code::FUNCTION); - Object* code = Heap::CreateCode(desc, NULL, flags, Handle<Object>::null()); - if (!code->IsFailure()) { - Handle<Code> code_handle(Code::cast(code)); - Handle<String> name = - Factory::NewStringFromAscii(Vector<const char>("foo", 3), NOT_TENURED); - Handle<JSFunction> function = - Factory::NewFunction(name, - JS_FUNCTION_TYPE, - JSObject::kHeaderSize, - code_handle, - true); - bool pending_exceptions; - Handle<Object> result = - Execution::Call(function, - Handle<Object>::cast(function), - 0, - NULL, - &pending_exceptions); - CHECK(result->IsSmi()); - CHECK(secret + (451 << kSmiTagSize) == reinterpret_cast<uint64_t>(*result)); - } -} - - void MacroAssembler::Abort(const char* msg) { // We want to pass the msg string like a smi to avoid GC // problems, however msg is not guaranteed to be aligned @@ -221,7 +176,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { const char* name = Builtins::GetName(id); int argc = Builtins::GetArgumentsCount(id); - movq(target, code, RelocInfo::EXTERNAL_REFERENCE); // Is external reference? + movq(target, code, RelocInfo::EMBEDDED_OBJECT); if (!resolved) { uint32_t flags = Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | @@ -253,7 +208,9 @@ Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id, void MacroAssembler::Set(Register dst, int64_t x) { - if (is_int32(x)) { + if (x == 0) { + xor_(dst, dst); + } else if (is_int32(x)) { movq(dst, Immediate(x)); } else if (is_uint32(x)) { movl(dst, Immediate(x)); @@ -264,14 +221,17 @@ void MacroAssembler::Set(Register dst, int64_t x) { void MacroAssembler::Set(const Operand& dst, int64_t x) { - if (is_int32(x)) { - movq(kScratchRegister, Immediate(x)); + if (x == 0) { + xor_(kScratchRegister, kScratchRegister); + movq(dst, kScratchRegister); + } else if (is_int32(x)) { + movq(dst, Immediate(x)); } else if (is_uint32(x)) { - movl(kScratchRegister, Immediate(x)); + movl(dst, Immediate(x)); } else { movq(kScratchRegister, x, RelocInfo::NONE); + movq(dst, kScratchRegister); } - movq(dst, kScratchRegister); } @@ -285,11 +245,13 @@ void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) { void MacroAssembler::Move(Register dst, Handle<Object> source) { + ASSERT(!source->IsFailure()); if (source->IsSmi()) { if (IsUnsafeSmi(source)) { LoadUnsafeSmi(dst, source); } else { - movq(dst, source, RelocInfo::NONE); + int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); + movq(dst, Immediate(smi)); } } else { movq(dst, source, RelocInfo::EMBEDDED_OBJECT); @@ -298,8 +260,13 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) { void MacroAssembler::Move(const Operand& dst, Handle<Object> source) { - Move(kScratchRegister, source); - movq(dst, kScratchRegister); + if (source->IsSmi()) { + int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); + movq(dst, Immediate(smi)); + } else { + movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); + movq(dst, kScratchRegister); + } } @@ -310,14 +277,37 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) { void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) { - Move(kScratchRegister, source); - cmpq(dst, kScratchRegister); + if 
(source->IsSmi()) { + if (IsUnsafeSmi(source)) { + LoadUnsafeSmi(kScratchRegister, source); + cmpl(dst, kScratchRegister); + } else { + // For smi-comparison, it suffices to compare the low 32 bits. + int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); + cmpl(dst, Immediate(smi)); + } + } else { + ASSERT(source->IsHeapObject()); + movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); + cmpq(dst, kScratchRegister); + } } void MacroAssembler::Push(Handle<Object> source) { - Move(kScratchRegister, source); - push(kScratchRegister); + if (source->IsSmi()) { + if (IsUnsafeSmi(source)) { + LoadUnsafeSmi(kScratchRegister, source); + push(kScratchRegister); + } else { + int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source)); + push(Immediate(smi)); + } + } else { + ASSERT(source->IsHeapObject()); + movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); + push(kScratchRegister); + } } @@ -444,6 +434,51 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { } +void MacroAssembler::TryGetFunctionPrototype(Register function, + Register result, + Label* miss) { + // Check that the receiver isn't a smi. + testl(function, Immediate(kSmiTagMask)); + j(zero, miss); + + // Check that the function really is a function. + CmpObjectType(function, JS_FUNCTION_TYPE, result); + j(not_equal, miss); + + // Make sure that the function has an instance prototype. + Label non_instance; + testb(FieldOperand(result, Map::kBitFieldOffset), + Immediate(1 << Map::kHasNonInstancePrototype)); + j(not_zero, &non_instance); + + // Get the prototype or initial map from the function. + movq(result, + FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); + + // If the prototype or initial map is the hole, don't return it and + // simply miss the cache instead. This will allow us to allocate a + // prototype object on-demand in the runtime system. + Cmp(result, Factory::the_hole_value()); + j(equal, miss); + + // If the function does not have an initial map, we're done. + Label done; + CmpObjectType(result, MAP_TYPE, kScratchRegister); + j(not_equal, &done); + + // Get the prototype from the initial map. + movq(result, FieldOperand(result, Map::kPrototypeOffset)); + jmp(&done); + + // Non-instance prototype: Fetch prototype from constructor field + // in initial map. + bind(&non_instance); + movq(result, FieldOperand(result, Map::kConstructorOffset)); + + // All done. + bind(&done); +} + void MacroAssembler::SetCounter(StatsCounter* counter, int value) { if (FLAG_native_code_counters && counter->Enabled()) { @@ -589,7 +624,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) { if (!resolved) { uint32_t flags = Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | - Bootstrapper::FixupFlagsIsPCRelative::encode(true) | + Bootstrapper::FixupFlagsIsPCRelative::encode(false) | Bootstrapper::FixupFlagsUseCodeObject::encode(false); Unresolved entry = { pc_offset() - kTargetAddrToReturnAddrDist, flags, name }; @@ -749,6 +784,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG); // Setup the frame structure on the stack. + // All constants are relative to the frame pointer of the exit frame. 
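The smi fast paths in Cmp and Push above lean on the pointer-tagging scheme: a smi is the integer shifted left past a zero tag bit, so the whole payload lives in (and sign-extends from) the low 32 bits of the word, which is what makes a 32-bit cmpl or push immediate sufficient. A sketch of the encoding being assumed (the 31-bit smi representation of this era; helper names are illustrative):

    #include <cstdint>

    const intptr_t kSmiTag = 0;
    const int kSmiTagSize = 1;

    static intptr_t EncodeSmi(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiTagSize;  // tag bit ends up 0
    }
    static bool HasSmiTag(intptr_t word) {
      return (word & ((1 << kSmiTagSize) - 1)) == kSmiTag;
    }
    static int32_t DecodeSmi(intptr_t word) {
      return static_cast<int32_t>(word) >> kSmiTagSize;  // low 32 bits suffice
    }
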
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); @@ -763,7 +799,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { // Save the frame pointer and the context in top. ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address); ExternalReference context_address(Top::k_context_address); - movq(rdi, rax); // Backup rax before we use it. + movq(r14, rax); // Backup rax before we use it. movq(rax, rbp); store_rax(c_entry_fp_address); @@ -773,7 +809,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame, // so it must be retained across the C-call. int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; - lea(r15, Operand(rbp, rdi, times_pointer_size, offset)); + lea(r15, Operand(rbp, r14, times_pointer_size, offset)); #ifdef ENABLE_DEBUGGER_SUPPORT // Save the state of all registers to the stack from the memory diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 3ae78ba63..f13a7adae 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -67,16 +67,6 @@ class MacroAssembler: public Assembler { MacroAssembler(void* buffer, int size); // --------------------------------------------------------------------------- - // x64 Implementation Support - - // Test the MacroAssembler by constructing and calling a simple JSFunction. - // Cannot be done using API because this must be done in the middle of the - // bootstrapping process. - // TODO(X64): Remove once we can get through the bootstrapping process. - - static void ConstructAndTestJSFunction(); - - // --------------------------------------------------------------------------- // GC Support // Set the remembered set bit for [object+offset]. @@ -186,6 +176,7 @@ class MacroAssembler: public Assembler { // Compare object type for heap object. // Incoming register is heap_object and outgoing register is map. + // They may be the same register, and may be kScratchRegister. void CmpObjectType(Register heap_object, InstanceType type, Register map); // Compare instance type for map. @@ -247,11 +238,10 @@ class MacroAssembler: public Assembler { // Try to get function prototype of a function and puts the value in // the result register. Checks that the function really is a // function and jumps to the miss label if the fast checks fail. The - // function register will be untouched; the other registers may be + // function register will be untouched; the other register may be // clobbered. 
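The rdi-to-r14 switch in EnterExitFrame above is an ABI fix rather than a cleanup: rdi is a caller-saved argument register in the System V AMD64 convention, so its contents would not survive the C call the exit frame is being built for, while r12-r15 are callee-saved (which is also why argv already lives in r15). For reference, the register split this code assumes:

    // System V AMD64 ABI, as assumed by the exit-frame code:
    //   integer args (caller-saved): rdi, rsi, rdx, rcx, r8, r9
    //   return value:                rax
    static const char* kCalleeSaved[] =
        {"rbx", "rbp", "r12", "r13", "r14", "r15"};  // preserved across calls
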
void TryGetFunctionPrototype(Register function, Register result, - Register scratch, Label* miss); // Generates code for reporting that an illegal operation has @@ -394,12 +384,12 @@ extern void LogGeneratedCodeCoverage(const char* file_line); #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) #define ACCESS_MASM(masm) { \ - byte* x64_coverage_function = \ + byte* x64_coverage_function = \ reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \ masm->pushfd(); \ masm->pushad(); \ masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \ - masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \ + masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \ masm->pop(rax); \ masm->popad(); \ masm->popfd(); \ diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index fdfa67fcc..c5776159d 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -42,8 +42,8 @@ namespace internal { Object* CallStubCompiler::CompileCallConstant(Object* a, JSObject* b, JSFunction* c, - StubCompiler::CheckType d, - Code::Flags flags) { + String* d, + StubCompiler::CheckType e) { UNIMPLEMENTED(); return NULL; } @@ -51,8 +51,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* a, Object* CallStubCompiler::CompileCallField(Object* a, JSObject* b, int c, - String* d, - Code::Flags flags) { + String* d) { UNIMPLEMENTED(); return NULL; } @@ -67,6 +66,16 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* a, +Object* CallStubCompiler::CompileCallGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + JSFunction* function, + String* name) { + UNIMPLEMENTED(); + return NULL; +} + + Object* LoadStubCompiler::CompileLoadCallback(JSObject* a, JSObject* b, AccessorInfo* c, @@ -102,6 +111,16 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a, } +Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, + GlobalObject* holder, + JSGlobalPropertyCell* cell, + String* name, + bool is_dont_delete) { + UNIMPLEMENTED(); + return NULL; +} + + Object* StoreStubCompiler::CompileStoreCallback(JSObject* a, AccessorInfo* b, String* c) { @@ -125,6 +144,14 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) { } +Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, + JSGlobalPropertyCell* cell, + String* name) { + UNIMPLEMENTED(); + return NULL; +} + + // TODO(1241006): Avoid having lazy compile stubs specialized by the // number of arguments. It is not needed anymore. Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index 68aabb516..bb82fc83c 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -60,3 +60,65 @@ test-log/ProfLazyMode: SKIP # the JavaScript stacks are separate. 
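On the ACCESS_MASM coverage macro touched in macro-assembler-x64.h above: when generated-code coverage logging is compiled in, every '__'-prefixed emit is bracketed by code that records the current FILE:LINE before forwarding to the assembler, so coverage of generated code can be attributed back to C++ source lines. Code generators reach it through the conventional shorthand; a hedged sketch of that pattern (the emitted body is a placeholder):

    #define __ ACCESS_MASM(masm)

    static void EmitReturnFirstArg(MacroAssembler* masm) {
      __ movq(rax, rdi);  // every '__' routes through ACCESS_MASM
      __ ret(0);
    }

    #undef __
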
test-api/ExceptionOrder: FAIL test-api/TryCatchInTryFinally: FAIL + + +[ $arch == x64 ] +test-regexp/Graph: CRASH || FAIL +test-decls/Present: CRASH || FAIL +test-decls/Unknown: CRASH || FAIL +test-decls/Appearing: CRASH || FAIL +test-decls/Absent: CRASH || FAIL +test-debug/DebugStub: CRASH || FAIL +test-decls/AbsentInPrototype: CRASH || FAIL +test-decls/Reappearing: CRASH || FAIL +test-debug/DebugInfo: CRASH || FAIL +test-decls/ExistsInPrototype: CRASH || FAIL +test-debug/BreakPointICStore: CRASH || FAIL +test-debug/BreakPointICLoad: CRASH || FAIL +test-debug/BreakPointICCall: CRASH || FAIL +test-debug/BreakPointReturn: CRASH || FAIL +test-debug/GCDuringBreakPointProcessing: CRASH || FAIL +test-debug/BreakPointSurviveGC: CRASH || FAIL +test-debug/BreakPointThroughJavaScript: CRASH || FAIL +test-debug/ScriptBreakPointByNameThroughJavaScript: CRASH || FAIL +test-debug/ScriptBreakPointByIdThroughJavaScript: CRASH || FAIL +test-debug/EnableDisableScriptBreakPoint: CRASH || FAIL +test-debug/ConditionalScriptBreakPoint: CRASH || FAIL +test-debug/ScriptBreakPointIgnoreCount: CRASH || FAIL +test-debug/ScriptBreakPointReload: CRASH || FAIL +test-debug/ScriptBreakPointMultiple: CRASH || FAIL +test-debug/RemoveBreakPointInBreak: CRASH || FAIL +test-debug/DebugEvaluate: CRASH || FAIL +test-debug/ScriptBreakPointLine: CRASH || FAIL +test-debug/ScriptBreakPointLineOffset: CRASH || FAIL +test-debug/DebugStepLinear: CRASH || FAIL +test-debug/DebugStepKeyedLoadLoop: CRASH || FAIL +test-debug/DebugStepKeyedStoreLoop: CRASH || FAIL +test-debug/DebugStepLinearMixedICs: CRASH || FAIL +test-debug/DebugStepFor: CRASH || FAIL +test-debug/DebugStepIf: CRASH || FAIL +test-debug/DebugStepSwitch: CRASH || FAIL +test-debug/StepInOutSimple: CRASH || FAIL +test-debug/StepInOutBranch: CRASH || FAIL +test-debug/StepInOutTree: CRASH || FAIL +test-debug/DebugStepNatives: CRASH || FAIL +test-debug/DebugStepFunctionApply: CRASH || FAIL +test-debug/DebugStepFunctionCall: CRASH || FAIL +test-debug/StepWithException: CRASH || FAIL +test-debug/DebugBreak: CRASH || FAIL +test-debug/DisableBreak: CRASH || FAIL +test-debug/MessageQueues: CRASH || FAIL +test-debug/CallFunctionInDebugger: CRASH || FAIL +test-debug/RecursiveBreakpoints: CRASH || FAIL +test-debug/DebuggerUnload: CRASH || FAIL +test-debug/DebuggerClearMessageHandler: CRASH || FAIL +test-debug/DebuggerClearMessageHandlerWhileActive: CRASH || FAIL +test-debug/DebuggerHostDispatch: CRASH || FAIL +test-debug/DebugBreakInMessageHandler: CRASH || FAIL +test-api/HugeConsStringOutOfMemory: CRASH || FAIL +test-api/OutOfMemory: CRASH || FAIL +test-api/OutOfMemoryNested: CRASH || FAIL +test-api/Threading: CRASH || FAIL +test-api/TryCatchSourceInfo: CRASH || FAIL +test-api/RegExpInterruption: CRASH || FAIL +test-api/RegExpStringModification: CRASH || FAIL diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index df289e580..5b04b2cd8 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -2796,17 +2796,14 @@ static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message, CHECK_EQ(v8::Undefined(), message->GetScriptResourceName()); message->GetLineNumber(); message->GetSourceLine(); - message_received = true; } THREADED_TEST(ErrorWithMissingScriptInfo) { - message_received = false; v8::HandleScope scope; LocalContext context; v8::V8::AddMessageListener(MissingScriptInfoMessageListener); Script::Compile(v8_str("throw Error()"))->Run(); - CHECK(message_received); 
v8::V8::RemoveMessageListeners(MissingScriptInfoMessageListener); } @@ -6043,6 +6040,7 @@ THREADED_TEST(DisableAccessChecksWhileConfiguring) { CHECK(value->BooleanValue()); } + static bool NamedGetAccessBlocker(Local<v8::Object> obj, Local<Value> name, v8::AccessType type, @@ -6096,6 +6094,7 @@ THREADED_TEST(AccessChecksReenabledCorrectly) { CHECK(value_2->IsUndefined()); } + // This tests that access check information remains on the global // object template when creating contexts. THREADED_TEST(AccessControlRepeatedContextCreation) { @@ -6114,6 +6113,71 @@ THREADED_TEST(AccessControlRepeatedContextCreation) { } +THREADED_TEST(TurnOnAccessCheck) { + v8::HandleScope handle_scope; + + // Create an environment with access check to the global object disabled by + // default. + v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(); + global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker, + IndexedGetAccessBlocker, + v8::Handle<v8::Value>(), + false); + v8::Persistent<Context> context = Context::New(NULL, global_template); + Context::Scope context_scope(context); + + // Set up a property and a number of functions. + context->Global()->Set(v8_str("a"), v8_num(1)); + CompileRun("function f1() {return a;}" + "function f2() {return a;}" + "function g1() {return h();}" + "function g2() {return h();}" + "function h() {return 1;}"); + Local<Function> f1 = + Local<Function>::Cast(context->Global()->Get(v8_str("f1"))); + Local<Function> f2 = + Local<Function>::Cast(context->Global()->Get(v8_str("f2"))); + Local<Function> g1 = + Local<Function>::Cast(context->Global()->Get(v8_str("g1"))); + Local<Function> g2 = + Local<Function>::Cast(context->Global()->Get(v8_str("g2"))); + Local<Function> h = + Local<Function>::Cast(context->Global()->Get(v8_str("h"))); + + // Get the global object. + v8::Handle<v8::Object> global = context->Global(); + + // Call f1 one time and f2 a number of times. This will ensure that f1 still + // uses the runtime system to retreive property a whereas f2 uses global load + // inline cache. + CHECK(f1->Call(global, 0, NULL)->Equals(v8_num(1))); + for (int i = 0; i < 4; i++) { + CHECK(f2->Call(global, 0, NULL)->Equals(v8_num(1))); + } + + // Same for g1 and g2. + CHECK(g1->Call(global, 0, NULL)->Equals(v8_num(1))); + for (int i = 0; i < 4; i++) { + CHECK(g2->Call(global, 0, NULL)->Equals(v8_num(1))); + } + + // Detach the global and turn on access check. + context->DetachGlobal(); + context->Global()->TurnOnAccessCheck(); + + // Failing access check to property get results in undefined. + CHECK(f1->Call(global, 0, NULL)->IsUndefined()); + CHECK(f2->Call(global, 0, NULL)->IsUndefined()); + + // Failing access check to function call results in exception. + CHECK(g1->Call(global, 0, NULL).IsEmpty()); + CHECK(g2->Call(global, 0, NULL).IsEmpty()); + + // No failing access check when just returning a constant. + CHECK(h->Call(global, 0, NULL)->Equals(v8_num(1))); +} + + // This test verifies that pre-compilation (aka preparsing) can be called // without initializing the whole VM. Thus we cannot run this test in a // multi-threaded setup. @@ -6898,6 +6962,28 @@ THREADED_TEST(ForceDeleteWithInterceptor) { } +// Make sure that forcing a delete invalidates any IC stubs, so we +// don't read the hole value. +THREADED_TEST(ForceDeleteIC) { + v8::HandleScope scope; + LocalContext context; + // Create a DontDelete variable on the global object. 
+ CompileRun("this.__proto__ = { foo: 'horse' };" + "var foo = 'fish';" + "function f() { return foo.length; }"); + // Initialize the IC for foo in f. + CompileRun("for (var i = 0; i < 4; i++) f();"); + // Make sure the value of foo is correct before the deletion. + CHECK_EQ(4, CompileRun("f()")->Int32Value()); + // Force the deletion of foo. + CHECK(context->Global()->ForceDelete(v8_str("foo"))); + // Make sure the value for foo is read from the prototype, and that + // we don't get in trouble with reading the deleted cell value + // sentinel. + CHECK_EQ(5, CompileRun("f()")->Int32Value()); +} + + v8::Persistent<Context> calling_context0; v8::Persistent<Context> calling_context1; v8::Persistent<Context> calling_context2; @@ -6960,3 +7046,38 @@ THREADED_TEST(GetCallingContext) { calling_context1.Clear(); calling_context2.Clear(); } + + +// Check that a variable declaration with no explicit initialization +// value does not shadow an existing property in the prototype chain. +// +// This is consistent with Firefox and Safari. +// +// See http://crbug.com/12548. +THREADED_TEST(InitGlobalVarInProtoChain) { + v8::HandleScope scope; + LocalContext context; + // Introduce a variable in the prototype chain. + CompileRun("__proto__.x = 42"); + v8::Handle<v8::Value> result = CompileRun("var x; x"); + CHECK(!result->IsUndefined()); + CHECK_EQ(42, result->Int32Value()); +} + + +// Regression test for issue 398. +// If a function is added to an object, creating a constant function +// field, and the result is cloned, replacing the constant function on the +// original should not affect the clone. +// See http://code.google.com/p/v8/issues/detail?id=398 +THREADED_TEST(ReplaceConstantFunction) { + v8::HandleScope scope; + LocalContext context; + v8::Handle<v8::Object> obj = v8::Object::New(); + v8::Handle<v8::FunctionTemplate> func_templ = v8::FunctionTemplate::New(); + v8::Handle<v8::String> foo_string = v8::String::New("foo"); + obj->Set(foo_string, func_templ->GetFunction()); + v8::Handle<v8::Object> obj_clone = obj->Clone(); + obj_clone->Set(foo_string, v8::String::New("Hello")); + CHECK(!obj->Get(foo_string)->IsUndefined()); +} diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc index 9e95f328d..cd750c5df 100644 --- a/deps/v8/test/cctest/test-assembler-x64.cc +++ b/deps/v8/test/cctest/test-assembler-x64.cc @@ -44,6 +44,7 @@ using v8::internal::Label; using v8::internal::rax; using v8::internal::rsi; using v8::internal::rdi; +using v8::internal::rdx; using v8::internal::rbp; using v8::internal::rsp; using v8::internal::FUNCTION_CAST; @@ -63,8 +64,8 @@ using v8::internal::greater; // with GCC. A different convention is used on 64-bit windows. typedef int (*F0)(); -typedef int (*F1)(int x); -typedef int (*F2)(int x, int y); +typedef int (*F1)(int64_t x); +typedef int (*F2)(int64_t x, int64_t y); #define __ assm. @@ -130,7 +131,7 @@ TEST(AssemblerX64ArithmeticOperations) { CHECK(buffer); Assembler assm(buffer, actual_size); - // Assemble a simple function that copies argument 2 and returns it. + // Assemble a simple function that adds arguments returning the sum. __ movq(rax, rsi); __ addq(rax, rdi); __ ret(0); @@ -142,6 +143,33 @@ TEST(AssemblerX64ArithmeticOperations) { CHECK_EQ(5, result); } +TEST(AssemblerX64ImulOperation) { + // Allocate an executable page of memory. 
+ size_t actual_size; + byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize, + &actual_size, + true)); + CHECK(buffer); + Assembler assm(buffer, actual_size); + + // Assemble a simple function that multiplies arguments returning the high + // word. + __ movq(rax, rsi); + __ imul(rdi); + __ movq(rax, rdx); + __ ret(0); + + CodeDesc desc; + assm.GetCode(&desc); + // Call the function from C++. + int result = FUNCTION_CAST<F2>(buffer)(3, 2); + CHECK_EQ(0, result); + result = FUNCTION_CAST<F2>(buffer)(0x100000000l, 0x100000000l); + CHECK_EQ(1, result); + result = FUNCTION_CAST<F2>(buffer)(-0x100000000l, 0x100000000l); + CHECK_EQ(-1, result); +} + TEST(AssemblerX64MemoryOperands) { // Allocate an executable page of memory. size_t actual_size; diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc index a884d773b..fddd00028 100644 --- a/deps/v8/test/cctest/test-debug.cc +++ b/deps/v8/test/cctest/test-debug.cc @@ -4892,6 +4892,7 @@ TEST(DebugBreakInMessageHandler) { } +#ifdef V8_NATIVE_REGEXP // Debug event handler which gets the function on the top frame and schedules a // break a number of times. static void DebugEventDebugBreak( @@ -4928,11 +4929,10 @@ static void DebugEventDebugBreak( TEST(RegExpDebugBreak) { + // This test only applies to native regexps. v8::HandleScope scope; DebugLocalContext env; - i::FLAG_regexp_native = true; - // Create a function for checking the function when hitting a break point. frame_function_name = CompileFunction(&env, frame_function_name_source, @@ -4957,6 +4957,7 @@ TEST(RegExpDebugBreak) { CHECK_EQ(20, break_point_hit_count); CHECK_EQ("exec", last_function_hit); } +#endif // V8_NATIVE_REGEXP // Common part of EvalContextData and NestedBreakEventContextData tests. diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc index ecdad2e7d..6c48f64f7 100644 --- a/deps/v8/test/cctest/test-decls.cc +++ b/deps/v8/test/cctest/test-decls.cc @@ -534,10 +534,10 @@ TEST(ExistsInPrototype) { { ExistsInPrototypeContext context; context.Check("var x; x", - 0, + 1, // get 0, 1, // declaration - EXPECT_RESULT, Undefined()); + EXPECT_EXCEPTION); } { ExistsInPrototypeContext context; diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc index 8761cf51b..33a83c779 100644 --- a/deps/v8/test/cctest/test-regexp.cc +++ b/deps/v8/test/cctest/test-regexp.cc @@ -204,8 +204,8 @@ TEST(Parser) { CHECK_PARSE_EQ("(?=a){9,10}a", "(: (-> + 'a') 'a')"); CHECK_PARSE_EQ("(?!a)?a", "'a'"); CHECK_PARSE_EQ("\\1(a)", "(^ 'a')"); - CHECK_PARSE_EQ("(?!(a))\\1", "(-> - (^ 'a'))"); - CHECK_PARSE_EQ("(?!\\1(a\\1)\\1)\\1", "(-> - (: (^ 'a') (<- 1)))"); + CHECK_PARSE_EQ("(?!(a))\\1", "(: (-> - (^ 'a')) (<- 1))"); + CHECK_PARSE_EQ("(?!\\1(a\\1)\\1)\\1", "(: (-> - (: (^ 'a') (<- 1))) (<- 1))"); CHECK_PARSE_EQ("[\\0]", "[\\x00]"); CHECK_PARSE_EQ("[\\11]", "[\\x09]"); CHECK_PARSE_EQ("[\\11a]", "[\\x09 a]"); @@ -597,6 +597,8 @@ TEST(DispatchTableConstruction) { } +// Tests of interpreter. + TEST(MacroAssembler) { V8::Initialize(NULL); byte codes[1024]; @@ -660,8 +662,8 @@ TEST(MacroAssembler) { CHECK_EQ(42, captures[0]); } - -#ifdef V8_TARGET_ARCH_IA32 // IA32 only tests. +#ifdef V8_TARGET_ARCH_IA32 // IA32 Native Regexp only tests. 
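Two details in the assembler tests above are easy to miss: the F1/F2 typedefs were widened to int64_t so the arguments arrive as full 64-bit values in rdi/rsi under the System V ABI, and TEST(AssemblerX64ImulOperation) relies on one-operand imul leaving a 128-bit product in rdx:rax — returning rdx hands back the high 64 bits, which is 0, 1 and -1 for the three asserted calls. The same arithmetic in plain C++ (uses the __int128 extension available in GCC/Clang):

    #include <cstdint>

    // High 64 bits of a signed 64x64 -> 128-bit multiply, i.e. what
    // 'imul rdi' leaves in rdx when the other factor is in rax.
    static int64_t MulHigh(int64_t a, int64_t b) {
      return static_cast<int64_t>((static_cast<__int128>(a) * b) >> 64);
    }

    // MulHigh(3, 2) == 0
    // MulHigh(1LL << 32, 1LL << 32) == 1
    // MulHigh(-(1LL << 32), 1LL << 32) == -1
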
+#ifdef V8_NATIVE_REGEXP class ContextInitializer { public: @@ -1284,10 +1286,10 @@ TEST(MacroAssemblerIA32LotsOfRegisters) { Top::clear_pending_exception(); } +#endif // V8_REGEXP_NATIVE +#endif // V8_TARGET_ARCH_IA32 -#endif // !defined ARM - TEST(AddInverseToTable) { static const int kLimit = 1000; static const int kRangeCount = 16; diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status index fc2896b1c..d40151e8d 100644 --- a/deps/v8/test/message/message.status +++ b/deps/v8/test/message/message.status @@ -29,3 +29,16 @@ prefix message # All tests in the bug directory are expected to fail. bugs: FAIL + +[ $arch == x64 ] + +simple-throw.js: FAIL +try-catch-finally-throw-in-catch-and-finally.js: FAIL +try-catch-finally-throw-in-catch.js: FAIL +try-catch-finally-throw-in-finally.js: FAIL +try-finally-throw-in-finally.js: FAIL +try-finally-throw-in-try-and-finally.js: FAIL +try-finally-throw-in-try.js: FAIL +overwritten-builtins.js: FAIL +regress-73.js: FAIL +regress-75.js: FAIL diff --git a/deps/v8/test/mjsunit/apply.js b/deps/v8/test/mjsunit/apply.js index 1d9dde9b1..a4b0fd7f0 100644 --- a/deps/v8/test/mjsunit/apply.js +++ b/deps/v8/test/mjsunit/apply.js @@ -38,12 +38,12 @@ assertTrue(this === f0.apply(), "1-0"); assertTrue(this === f0.apply(this), "2a"); assertTrue(this === f0.apply(this, new Array(1)), "2b"); assertTrue(this === f0.apply(this, new Array(2)), "2c"); -assertTrue(this === f0.apply(this, new Array(4242)), "2c"); +assertTrue(this === f0.apply(this, new Array(4242)), "2d"); assertTrue(this === f0.apply(null), "3a"); assertTrue(this === f0.apply(null, new Array(1)), "3b"); assertTrue(this === f0.apply(null, new Array(2)), "3c"); -assertTrue(this === f0.apply(this, new Array(4242)), "2c"); +assertTrue(this === f0.apply(this, new Array(4242)), "3d"); assertTrue(this === f0.apply(void 0), "4a"); assertTrue(this === f0.apply(void 0, new Array(1)), "4b"); @@ -51,26 +51,26 @@ assertTrue(this === f0.apply(void 0, new Array(2)), "4c"); assertTrue(void 0 === f1.apply(), "1-1"); -assertTrue(void 0 === f1.apply(this), "2a"); -assertTrue(void 0 === f1.apply(this, new Array(1)), "2b"); -assertTrue(void 0 === f1.apply(this, new Array(2)), "2c"); -assertTrue(void 0 === f1.apply(this, new Array(4242)), "2c"); -assertTrue(42 === f1.apply(this, new Array(42, 43)), "2c"); -assertEquals("foo", f1.apply(this, new Array("foo", "bar", "baz", "boo")), "2c"); - -assertTrue(void 0 === f1.apply(null), "3a"); -assertTrue(void 0 === f1.apply(null, new Array(1)), "3b"); -assertTrue(void 0 === f1.apply(null, new Array(2)), "3c"); -assertTrue(void 0 === f1.apply(null, new Array(4242)), "2c"); -assertTrue(42 === f1.apply(null, new Array(42, 43)), "2c"); -assertEquals("foo", f1.apply(null, new Array("foo", "bar", "baz", "boo")), "2c"); - -assertTrue(void 0 === f1.apply(void 0), "4a"); -assertTrue(void 0 === f1.apply(void 0, new Array(1)), "4b"); -assertTrue(void 0 === f1.apply(void 0, new Array(2)), "4c"); -assertTrue(void 0 === f1.apply(void 0, new Array(4242)), "4c"); -assertTrue(42 === f1.apply(void 0, new Array(42, 43)), "2c"); -assertEquals("foo", f1.apply(void 0, new Array("foo", "bar", "baz", "boo")), "2c"); +assertTrue(void 0 === f1.apply(this), "5a"); +assertTrue(void 0 === f1.apply(this, new Array(1)), "5b"); +assertTrue(void 0 === f1.apply(this, new Array(2)), "5c"); +assertTrue(void 0 === f1.apply(this, new Array(4242)), "5d"); +assertTrue(42 === f1.apply(this, new Array(42, 43)), "5e"); +assertEquals("foo", f1.apply(this, new Array("foo", "bar", "baz", "bo")), 
"5f"); + +assertTrue(void 0 === f1.apply(null), "6a"); +assertTrue(void 0 === f1.apply(null, new Array(1)), "6b"); +assertTrue(void 0 === f1.apply(null, new Array(2)), "6c"); +assertTrue(void 0 === f1.apply(null, new Array(4242)), "6d"); +assertTrue(42 === f1.apply(null, new Array(42, 43)), "6e"); +assertEquals("foo", f1.apply(null, new Array("foo", "bar", "baz", "bo")), "6f"); + +assertTrue(void 0 === f1.apply(void 0), "7a"); +assertTrue(void 0 === f1.apply(void 0, new Array(1)), "7b"); +assertTrue(void 0 === f1.apply(void 0, new Array(2)), "7c"); +assertTrue(void 0 === f1.apply(void 0, new Array(4242)), "7d"); +assertTrue(42 === f1.apply(void 0, new Array(42, 43)), "7e"); +assertEquals("foo", f1.apply(void 0, new Array("foo", "bar", "ba", "b")), "7f"); var arr = new Array(42, "foo", "fish", "horse"); function j(a, b, c, d, e, f, g, h, i, j, k, l) { @@ -81,7 +81,7 @@ function j(a, b, c, d, e, f, g, h, i, j, k, l) { var expect = "42foofishhorse"; for (var i = 0; i < 8; i++) expect += "undefined"; -assertEquals(expect, j.apply(undefined, arr)); +assertEquals(expect, j.apply(undefined, arr), "apply to undefined"); assertThrows("f0.apply(this, 1);"); assertThrows("f0.apply(this, 1, 2);"); @@ -95,7 +95,7 @@ function f() { return doo; } -assertEquals("42foofishhorse", f.apply(this, arr)); +assertEquals("42foofishhorse", f.apply(this, arr), "apply to this"); function s() { var doo = this; @@ -105,7 +105,7 @@ function s() { return doo; } -assertEquals("bar42foofishhorse", s.apply("bar", arr)); +assertEquals("bar42foofishhorse", s.apply("bar", arr), "apply to string"); function al() { assertEquals(345, this); @@ -118,19 +118,24 @@ for (var j = 1; j < 0x40000000; j <<= 1) { a[j - 1] = 42; assertEquals(42 + j, al.apply(345, a)); } catch (e) { - assertTrue(e.toString().indexOf("Function.prototype.apply") != -1); + assertTrue(e.toString().indexOf("Function.prototype.apply") != -1, + "exception does not contain Function.prototype.apply: " + + e.toString()); for (; j < 0x40000000; j <<= 1) { var caught = false; try { a = new Array(j); a[j - 1] = 42; al.apply(345, a); - assertEquals("Shouldn't get", "here"); + assertUnreachable("Apply of arrray with length " + a.length + + " should have thrown"); } catch (e) { - assertTrue(e.toString().indexOf("Function.prototype.apply") != -1); + assertTrue(e.toString().indexOf("Function.prototype.apply") != -1, + "exception does not contain Function.prototype.apply [" + + "length = " + j + "]: " + e.toString()); caught = true; } - assertTrue(caught); + assertTrue(caught, "exception not caught"); } break; } @@ -160,8 +165,8 @@ assertEquals(1229, primes.length); var same_primes = Array.prototype.constructor.apply(Array, primes); for (var i = 0; i < primes.length; i++) - assertEquals(primes[i], same_primes[i]); -assertEquals(primes.length, same_primes.length); + assertEquals(primes[i], same_primes[i], "prime" + primes[i]); +assertEquals(primes.length, same_primes.length, "prime-length"); Array.prototype["1"] = "sep"; @@ -170,15 +175,22 @@ var holey = new Array(3); holey[0] = "mor"; holey[2] = "er"; -assertEquals("morseper", String.prototype.concat.apply("", holey)); -assertEquals("morseper", String.prototype.concat.apply("", holey, 1)); -assertEquals("morseper", String.prototype.concat.apply("", holey, 1, 2)); -assertEquals("morseper", String.prototype.concat.apply("", holey, 1, 2, 3)); -assertEquals("morseper", String.prototype.concat.apply("", holey, 1, 2, 3, 4)); +assertEquals("morseper", String.prototype.concat.apply("", holey), + "moreseper0"); 
+assertEquals("morseper", String.prototype.concat.apply("", holey, 1), + "moreseper1"); +assertEquals("morseper", String.prototype.concat.apply("", holey, 1, 2), + "moreseper2"); +assertEquals("morseper", String.prototype.concat.apply("", holey, 1, 2, 3), + "morseper3"); +assertEquals("morseper", String.prototype.concat.apply("", holey, 1, 2, 3, 4), + "morseper4"); primes[0] = ""; primes[1] = holey; assertThrows("String.prototype.concat.apply.apply('foo', primes)"); -assertEquals("morseper", String.prototype.concat.apply.apply(String.prototype.concat, primes)); +assertEquals("morseper", + String.prototype.concat.apply.apply(String.prototype.concat, primes), + "moreseper-prime"); delete(Array.prototype["1"]); diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js index e476e1cb0..83d9023a4 100644 --- a/deps/v8/test/mjsunit/array-reduce.js +++ b/deps/v8/test/mjsunit/array-reduce.js @@ -413,7 +413,7 @@ testReduce("reduceRight", "ArrayWithNonElementPropertiesReduceRight", 6, try { [1].reduce("not a function"); - fail("Reduce callback not a function not throwing"); + assertUnreachable("Reduce callback not a function not throwing"); } catch (e) { assertTrue(e instanceof TypeError, "reduce callback not a function not throwing TypeError"); @@ -423,7 +423,7 @@ try { try { [1].reduceRight("not a function"); - fail("ReduceRight callback not a function not throwing"); + assertUnreachable("ReduceRight callback not a function not throwing"); } catch (e) { assertTrue(e instanceof TypeError, "reduceRight callback not a function not throwing TypeError"); @@ -434,7 +434,7 @@ try { try { [].reduce(sum); - fail("Reduce no initial value not throwing"); + assertUnreachable("Reduce no initial value not throwing"); } catch (e) { assertTrue(e instanceof TypeError, "reduce no initial value not throwing TypeError"); @@ -444,7 +444,7 @@ try { try { [].reduceRight(sum); - fail("ReduceRight no initial value not throwing"); + assertUnreachable("ReduceRight no initial value not throwing"); } catch (e) { assertTrue(e instanceof TypeError, "reduceRight no initial value not throwing TypeError"); @@ -455,7 +455,7 @@ try { try { [,,,].reduce(sum); - fail("Reduce sparse no initial value not throwing"); + assertUnreachable("Reduce sparse no initial value not throwing"); } catch (e) { assertTrue(e instanceof TypeError, "reduce sparse no initial value not throwing TypeError"); @@ -465,7 +465,7 @@ try { try { [,,,].reduceRight(sum); - fail("ReduceRight sparse no initial value not throwing"); + assertUnreachable("ReduceRight sparse no initial value not throwing"); } catch (e) { assertTrue(e instanceof TypeError, "reduceRight sparse no initial value not throwing TypeError"); diff --git a/deps/v8/test/mjsunit/big-array-literal.js b/deps/v8/test/mjsunit/big-array-literal.js index d64c96868..a0fad7c2e 100644 --- a/deps/v8/test/mjsunit/big-array-literal.js +++ b/deps/v8/test/mjsunit/big-array-literal.js @@ -81,7 +81,7 @@ function testLiteral(size, array_in_middle) { } // The sizes to test. -var sizes = [1, 2, 100, 200, 400]; +var sizes = [1, 2, 100, 200, 300]; // Run the test. for (var i = 0; i < sizes.length; i++) { diff --git a/deps/v8/test/mjsunit/big-object-literal.js b/deps/v8/test/mjsunit/big-object-literal.js index 8417951af..c937f54de 100644 --- a/deps/v8/test/mjsunit/big-object-literal.js +++ b/deps/v8/test/mjsunit/big-object-literal.js @@ -84,7 +84,7 @@ function testLiteral(size, array_in_middle) { } // The sizes to test. 
-var sizes = [1, 2, 100, 200, 350]; +var sizes = [1, 2, 100, 200]; // Run the test. for (var i = 0; i < sizes.length; i++) { diff --git a/deps/v8/test/mjsunit/call-non-function.js b/deps/v8/test/mjsunit/call-non-function.js index 8ed5ccb20..9fe3b0fba 100644 --- a/deps/v8/test/mjsunit/call-non-function.js +++ b/deps/v8/test/mjsunit/call-non-function.js @@ -51,4 +51,13 @@ TryCall(1234); TryCall("hest"); - +// Make sure that calling a non-function global doesn't crash the +// system while building the IC for it. +var NonFunction = 42; +function WillThrow() { + NonFunction(); +} +assertThrows(WillThrow); +assertThrows(WillThrow); +assertThrows(WillThrow); +assertThrows(WillThrow); diff --git a/deps/v8/test/mjsunit/compare-nan.js b/deps/v8/test/mjsunit/compare-nan.js index 29818c83c..fc40acc04 100644 --- a/deps/v8/test/mjsunit/compare-nan.js +++ b/deps/v8/test/mjsunit/compare-nan.js @@ -28,17 +28,17 @@ var a = [NaN, -1, 0, 1, 1.2, -7.9, true, false, 'foo', '0', 'NaN' ]; for (var i in a) { var x = a[i]; - assertFalse(NaN == x); - assertFalse(NaN === x); - assertFalse(NaN < x); - assertFalse(NaN > x); - assertFalse(NaN <= x); - assertFalse(NaN >= x); + assertFalse(NaN == x, "NaN == " + x); + assertFalse(NaN === x, "NaN === " + x); + assertFalse(NaN < x, "NaN < " + x); + assertFalse(NaN > x, "NaN > " + x); + assertFalse(NaN <= x, "NaN <= " + x); + assertFalse(NaN >= x, "NaN >= " + x); - assertFalse(x == NaN); - assertFalse(x === NaN); - assertFalse(x < NaN); - assertFalse(x > NaN); - assertFalse(x <= NaN); - assertFalse(x >= NaN); + assertFalse(x == NaN, "" + x + " == NaN"); + assertFalse(x === NaN, "" + x + " === NaN"); + assertFalse(x < NaN, "" + x + " < NaN"); + assertFalse(x > NaN, "" + x + " > NaN"); + assertFalse(x <= NaN, "" + x + " <= NaN"); + assertFalse(x >= NaN, "" + x + " >= NaN"); } diff --git a/deps/v8/test/mjsunit/date-parse.js b/deps/v8/test/mjsunit/date-parse.js index 56ceba384..bb7ecd27c 100644 --- a/deps/v8/test/mjsunit/date-parse.js +++ b/deps/v8/test/mjsunit/date-parse.js @@ -33,16 +33,16 @@ function testDateParse(string) { var d = Date.parse(string); - assertEquals(946713600000, d, string); + assertEquals(946713600000, d, "parse: " + string); }; // For local time we just test that parsing returns non-NaN positive // number of milliseconds to make it timezone independent. function testDateParseLocalTime(string) { - var d = Date.parse(string); - assertTrue(!isNaN(d), string + " is NaN."); - assertTrue(d > 0, string + " <= 0."); + var d = Date.parse("parse-local-time:" + string); + assertTrue(!isNaN(d), "parse-local-time: " + string + " is NaN."); + assertTrue(d > 0, "parse-local-time: " + string + " <= 0."); }; @@ -51,7 +51,7 @@ function testDateParseMisc(array) { var string = array[0]; var expected = array[1]; var d = Date.parse(string); - assertEquals(expected, d, string); + assertEquals(expected, d, "parse-misc: " + string); } diff --git a/deps/v8/test/mjsunit/debug-backtrace.js b/deps/v8/test/mjsunit/debug-backtrace.js index 1d2bb9af9..0c200aee3 100644 --- a/deps/v8/test/mjsunit/debug-backtrace.js +++ b/deps/v8/test/mjsunit/debug-backtrace.js @@ -76,167 +76,166 @@ ParsedResponse.prototype.lookup = function(handle) { function listener(event, exec_state, event_data, data) { try { - if (event == Debug.DebugEvent.Break) - { - // The expected backtrace is - // 0: f - // 1: m - // 2: g - // 3: [anonymous] - - var response; - var backtrace; - var frame; - var source; - - // Get the debug command processor. - var dcp = exec_state.debugCommandProcessor(); - - // Get the backtrace. 
- var json; - json = '{"seq":0,"type":"request","command":"backtrace"}' - var resp = dcp.processDebugJSONRequest(json); - response = new ParsedResponse(resp); - backtrace = response.body(); - assertEquals(0, backtrace.fromFrame); - assertEquals(4, backtrace.toFrame); - assertEquals(4, backtrace.totalFrames); - var frames = backtrace.frames; - assertEquals(4, frames.length); - for (var i = 0; i < frames.length; i++) { - assertEquals('frame', frames[i].type); + if (event == Debug.DebugEvent.Break) { + // The expected backtrace is + // 0: f + // 1: m + // 2: g + // 3: [anonymous] + + var response; + var backtrace; + var frame; + var source; + + // Get the debug command processor. + var dcp = exec_state.debugCommandProcessor(); + + // Get the backtrace. + var json; + json = '{"seq":0,"type":"request","command":"backtrace"}' + var resp = dcp.processDebugJSONRequest(json); + response = new ParsedResponse(resp); + backtrace = response.body(); + assertEquals(0, backtrace.fromFrame); + assertEquals(4, backtrace.toFrame); + assertEquals(4, backtrace.totalFrames); + var frames = backtrace.frames; + assertEquals(4, frames.length); + for (var i = 0; i < frames.length; i++) { + assertEquals('frame', frames[i].type); + } + assertEquals(0, frames[0].index); + assertEquals("f", response.lookup(frames[0].func.ref).name); + assertEquals(1, frames[1].index); + assertEquals("", response.lookup(frames[1].func.ref).name); + assertEquals("m", response.lookup(frames[1].func.ref).inferredName); + assertEquals(2, frames[2].index); + assertEquals("g", response.lookup(frames[2].func.ref).name); + assertEquals(3, frames[3].index); + assertEquals("", response.lookup(frames[3].func.ref).name); + + // Get backtrace with two frames. + json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":1,"toFrame":3}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + backtrace = response.body(); + assertEquals(1, backtrace.fromFrame); + assertEquals(3, backtrace.toFrame); + assertEquals(4, backtrace.totalFrames); + var frames = backtrace.frames; + assertEquals(2, frames.length); + for (var i = 0; i < frames.length; i++) { + assertEquals('frame', frames[i].type); + } + assertEquals(1, frames[0].index); + assertEquals("", response.lookup(frames[0].func.ref).name); + assertEquals("m", response.lookup(frames[0].func.ref).inferredName); + assertEquals(2, frames[1].index); + assertEquals("g", response.lookup(frames[1].func.ref).name); + + // Get backtrace with bottom two frames. + json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":0,"toFrame":2, "bottom":true}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + backtrace = response.body(); + assertEquals(2, backtrace.fromFrame); + assertEquals(4, backtrace.toFrame); + assertEquals(4, backtrace.totalFrames); + var frames = backtrace.frames; + assertEquals(2, frames.length); + for (var i = 0; i < frames.length; i++) { + assertEquals('frame', frames[i].type); + } + assertEquals(2, frames[0].index); + assertEquals("g", response.lookup(frames[0].func.ref).name); + assertEquals(3, frames[1].index); + assertEquals("", response.lookup(frames[1].func.ref).name); + + // Get the individual frames. 
+ json = '{"seq":0,"type":"request","command":"frame"}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + frame = response.body(); + assertEquals(0, frame.index); + assertEquals("f", response.lookup(frame.func.ref).name); + assertTrue(frame.constructCall); + assertEquals(31, frame.line); + assertEquals(3, frame.column); + assertEquals(2, frame.arguments.length); + assertEquals('x', frame.arguments[0].name); + assertEquals('number', response.lookup(frame.arguments[0].value.ref).type); + assertEquals(1, response.lookup(frame.arguments[0].value.ref).value); + assertEquals('y', frame.arguments[1].name); + assertEquals('undefined', response.lookup(frame.arguments[1].value.ref).type); + + json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":0}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + frame = response.body(); + assertEquals(0, frame.index); + assertEquals("f", response.lookup(frame.func.ref).name); + assertEquals(31, frame.line); + assertEquals(3, frame.column); + assertEquals(2, frame.arguments.length); + assertEquals('x', frame.arguments[0].name); + assertEquals('number', response.lookup(frame.arguments[0].value.ref).type); + assertEquals(1, response.lookup(frame.arguments[0].value.ref).value); + assertEquals('y', frame.arguments[1].name); + assertEquals('undefined', response.lookup(frame.arguments[1].value.ref).type); + + json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":1}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + frame = response.body(); + assertEquals(1, frame.index); + assertEquals("", response.lookup(frame.func.ref).name); + assertEquals("m", response.lookup(frame.func.ref).inferredName); + assertFalse(frame.constructCall); + assertEquals(35, frame.line); + assertEquals(2, frame.column); + assertEquals(0, frame.arguments.length); + + json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":3}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + frame = response.body(); + assertEquals(3, frame.index); + assertEquals("", response.lookup(frame.func.ref).name); + + // Source slices for the individual frames (they all refer to this script). + json = '{"seq":0,"type":"request","command":"source",' + + '"arguments":{"frame":0,"fromLine":30,"toLine":32}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + source = response.body(); + assertEquals("function f(x, y) {", source.source.substring(0, 18)); + assertEquals(30, source.fromLine); + assertEquals(32, source.toLine); + + json = '{"seq":0,"type":"request","command":"source",' + + '"arguments":{"frame":1,"fromLine":31,"toLine":32}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + source = response.body(); + assertEquals(" a=1;", source.source.substring(0, 6)); + assertEquals(31, source.fromLine); + assertEquals(32, source.toLine); + + json = '{"seq":0,"type":"request","command":"source",' + + '"arguments":{"frame":2,"fromLine":35,"toLine":36}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + source = response.body(); + assertEquals(" new f(1);", source.source.substring(0, 11)); + assertEquals(35, source.fromLine); + assertEquals(36, source.toLine); + + // Test line interval way beyond this script will result in an error. 
+ json = '{"seq":0,"type":"request","command":"source",' + + '"arguments":{"frame":0,"fromLine":10000,"toLine":20000}}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + assertFalse(response.response().success); + + // Test without arguments. + json = '{"seq":0,"type":"request","command":"source"}' + response = new ParsedResponse(dcp.processDebugJSONRequest(json)); + source = response.body(); + assertEquals(Debug.findScript(f).source, source.source); + + listenerCalled = true; } - assertEquals(0, frames[0].index); - assertEquals("f", response.lookup(frames[0].func.ref).name); - assertEquals(1, frames[1].index); - assertEquals("", response.lookup(frames[1].func.ref).name); - assertEquals("m", response.lookup(frames[1].func.ref).inferredName); - assertEquals(2, frames[2].index); - assertEquals("g", response.lookup(frames[2].func.ref).name); - assertEquals(3, frames[3].index); - assertEquals("", response.lookup(frames[3].func.ref).name); - - // Get backtrace with two frames. - json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":1,"toFrame":3}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - backtrace = response.body(); - assertEquals(1, backtrace.fromFrame); - assertEquals(3, backtrace.toFrame); - assertEquals(4, backtrace.totalFrames); - var frames = backtrace.frames; - assertEquals(2, frames.length); - for (var i = 0; i < frames.length; i++) { - assertEquals('frame', frames[i].type); - } - assertEquals(1, frames[0].index); - assertEquals("", response.lookup(frames[0].func.ref).name); - assertEquals("m", response.lookup(frames[0].func.ref).inferredName); - assertEquals(2, frames[1].index); - assertEquals("g", response.lookup(frames[1].func.ref).name); - - // Get backtrace with bottom two frames. - json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":0,"toFrame":2, "bottom":true}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - backtrace = response.body(); - assertEquals(2, backtrace.fromFrame); - assertEquals(4, backtrace.toFrame); - assertEquals(4, backtrace.totalFrames); - var frames = backtrace.frames; - assertEquals(2, frames.length); - for (var i = 0; i < frames.length; i++) { - assertEquals('frame', frames[i].type); - } - assertEquals(2, frames[0].index); - assertEquals("g", response.lookup(frames[0].func.ref).name); - assertEquals(3, frames[1].index); - assertEquals("", response.lookup(frames[1].func.ref).name); - - // Get the individual frames. 
- json = '{"seq":0,"type":"request","command":"frame"}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - frame = response.body(); - assertEquals(0, frame.index); - assertEquals("f", response.lookup(frame.func.ref).name); - assertTrue(frame.constructCall); - assertEquals(31, frame.line); - assertEquals(3, frame.column); - assertEquals(2, frame.arguments.length); - assertEquals('x', frame.arguments[0].name); - assertEquals('number', response.lookup(frame.arguments[0].value.ref).type); - assertEquals(1, response.lookup(frame.arguments[0].value.ref).value); - assertEquals('y', frame.arguments[1].name); - assertEquals('undefined', response.lookup(frame.arguments[1].value.ref).type); - - json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":0}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - frame = response.body(); - assertEquals(0, frame.index); - assertEquals("f", response.lookup(frame.func.ref).name); - assertEquals(31, frame.line); - assertEquals(3, frame.column); - assertEquals(2, frame.arguments.length); - assertEquals('x', frame.arguments[0].name); - assertEquals('number', response.lookup(frame.arguments[0].value.ref).type); - assertEquals(1, response.lookup(frame.arguments[0].value.ref).value); - assertEquals('y', frame.arguments[1].name); - assertEquals('undefined', response.lookup(frame.arguments[1].value.ref).type); - - json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":1}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - frame = response.body(); - assertEquals(1, frame.index); - assertEquals("", response.lookup(frame.func.ref).name); - assertEquals("m", response.lookup(frame.func.ref).inferredName); - assertFalse(frame.constructCall); - assertEquals(35, frame.line); - assertEquals(2, frame.column); - assertEquals(0, frame.arguments.length); - - json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":3}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - frame = response.body(); - assertEquals(3, frame.index); - assertEquals("", response.lookup(frame.func.ref).name); - - // Source slices for the individual frames (they all refer to this script). - json = '{"seq":0,"type":"request","command":"source",' + - '"arguments":{"frame":0,"fromLine":30,"toLine":32}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - source = response.body(); - assertEquals("function f(x, y) {", source.source.substring(0, 18)); - assertEquals(30, source.fromLine); - assertEquals(32, source.toLine); - - json = '{"seq":0,"type":"request","command":"source",' + - '"arguments":{"frame":1,"fromLine":31,"toLine":32}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - source = response.body(); - assertEquals(" a=1;", source.source.substring(0, 6)); - assertEquals(31, source.fromLine); - assertEquals(32, source.toLine); - - json = '{"seq":0,"type":"request","command":"source",' + - '"arguments":{"frame":2,"fromLine":35,"toLine":36}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - source = response.body(); - assertEquals(" new f(1);", source.source.substring(0, 11)); - assertEquals(35, source.fromLine); - assertEquals(36, source.toLine); - - // Test line interval way beyond this script will result in an error. 
- json = '{"seq":0,"type":"request","command":"source",' + - '"arguments":{"frame":0,"fromLine":10000,"toLine":20000}}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - assertFalse(response.response().success); - - // Test without arguments. - json = '{"seq":0,"type":"request","command":"source"}' - response = new ParsedResponse(dcp.processDebugJSONRequest(json)); - source = response.body(); - assertEquals(Debug.findScript(f).source, source.source); - - listenerCalled = true; - } } catch (e) { exception = e }; diff --git a/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js b/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js new file mode 100644 index 000000000..eca937875 --- /dev/null +++ b/deps/v8/test/mjsunit/debug-clearbreakpointgroup.js @@ -0,0 +1,117 @@ +// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+var Debug = debug.Debug
+
+// State used to verify that the debug event listener was invoked and to record any exception it threw.
+var listenerComplete = false;
+var exception = false;
+
+var base_request = '"seq":0,"type":"request","command":"clearbreakpointgroup"';
+var scriptId = null;
+
+function safeEval(code) {
+ try {
+ return eval('(' + code + ')');
+ } catch (e) {
+ assertEquals(void 0, e);
+ return undefined;
+ }
+}
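// A minimal usage sketch (reply shape illustrative, not taken from this
// test): the added parentheses make eval parse the JSON reply as an object
// literal rather than as a block statement:
//   safeEval('{"seq":0,"type":"response","success":true}').success  // true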
+
+function testArguments(dcp, arguments, success) {
+ var request = '{' + base_request + ',"arguments":' + arguments + '}'
+ var json_response = dcp.processDebugJSONRequest(request);
+ var response = safeEval(json_response);
+ if (success) {
+ assertTrue(response.success, json_response);
+ } else {
+ assertFalse(response.success, json_response);
+ }
+}
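// Reconstructed from base_request above, the request assembled for the
// successful case exercised below is:
//   {"seq":0,"type":"request","command":"clearbreakpointgroup",
//    "arguments":{"groupId":1}}
// Only the success flag of the parsed response is inspected.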
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor();
+
+ // Clear breakpoint group 1.
+ testArguments(dcp, '{"groupId":1}', true);
+
+ // Indicate that all was processed.
+ listenerComplete = true;
+ } else if (event == Debug.DebugEvent.AfterCompile) {
+ scriptId = event_data.script().id();
+ assertEquals(source, event_data.script().source());
+ }
+ } catch (e) {
+ exception = e;
+ }
+};
+
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var source = 'function f(n) {\nreturn n+1;\n}\nfunction g() {return f(10);}' +
+ '\nvar r = g(); g;';
+eval(source);
+
+assertNotNull(scriptId);
+
+var groupId1 = 1;
+var groupId2 = 2;
+// Set a break point and call to invoke the debug event listener.
+var bp1 = Debug.setScriptBreakPointById(scriptId, 1, null, null, groupId1);
+var bp2 = Debug.setScriptBreakPointById(scriptId, 1, null, null, groupId2);
+var bp3 = Debug.setScriptBreakPointById(scriptId, 1, null, null, null);
+var bp4 = Debug.setScriptBreakPointById(scriptId, 3, null, null, groupId1);
+var bp5 = Debug.setScriptBreakPointById(scriptId, 4, null, null, groupId2);
+
+assertEquals(5, Debug.scriptBreakPoints().length);
+
+// Call function 'g' from the compiled script to trigger breakpoint.
+g();
+
+// Make sure that the debug event listener was invoked.
+assertTrue(listenerComplete,
+ "listener did not run to completion: " + exception);
+
+var breakpoints = Debug.scriptBreakPoints();
+assertEquals(3, breakpoints.length);
+var breakpointNumbers = breakpoints.map(
+ function(scriptBreakpoint) { return scriptBreakpoint.number(); });
+
+// Check that all breakpoints from group 1 were deleted and all the
+// rest are preserved.
+assertEquals([bp2, bp3, bp5].sort(), breakpointNumbers.sort());
+
+assertFalse(exception, "exception in listener");
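// In the setScriptBreakPointById calls above, the two null arguments fill
// the optional column and condition slots, with the group id last; this is
// an assumption read off these call sites rather than stated by the test:
//   Debug.setScriptBreakPointById(scriptId, line, column, condition, groupId)
// Clearing group 1 therefore removes bp1 and bp4, leaving bp2 and bp5
// (group 2) plus the ungrouped bp3, which is what the final assert checks.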
diff --git a/deps/v8/test/mjsunit/debug-stepin-function-call.js b/deps/v8/test/mjsunit/debug-stepin-function-call.js new file mode 100644 index 000000000..9f24c017c --- /dev/null +++ b/deps/v8/test/mjsunit/debug-stepin-function-call.js @@ -0,0 +1,149 @@ +// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 0;
+
+// Simple debug event handler which on the first break issues a 'step in'
+// action to get into g.call and then checks that execution is paused inside
+// function 'g'.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 0) {
+ // Step into f2.call:
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals('g', event_data.func().name());
+ assertEquals(' return t + 1; // expected line',
+ event_data.sourceLineText());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
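// A hedged reading of prepareStep(Debug.StepAction.StepIn, 2), assuming the
// second argument is a step count (the test does not spell this out): the
// first step moves from the debugger statement to the following
// g.call/g.apply line, and the second steps into g, where the state == 2
// branch then runs its checks.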
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Sample functions.
+function g(t) {
+ return t + 1; // expected line
+}
+
+// Test step into function call from a function without local variables.
+function call1() {
+ debugger;
+ g.call(null, 3);
+}
+
+
+// Test step into function call from a function with some local variables.
+function call2() {
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+ debugger;
+ g.call(null, 3);
+}
+
+// Test step into function call which is a part of an expression.
+function call3() {
+ var alias = g;
+ debugger;
+ var r = 10 + alias.call(null, 3);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
+// Test step into an aliased function call that is a plain statement.
+function call4() {
+ var alias = g;
+ debugger;
+ alias.call(null, 3);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
+// Test step into function apply from a function without local variables.
+function apply1() {
+ debugger;
+ g.apply(null, [3]);
+}
+
+
+// Test step into function apply from a function with some local variables.
+function apply2() {
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+ debugger;
+ g.apply(null, [3, 4]);
+}
+
+// Test step into function apply which is a part of an expression.
+function apply3() {
+ var alias = g;
+ debugger;
+ var r = 10 + alias.apply(null, [3, 'unused arg']);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
+
+// Test step into an aliased function apply that is a plain statement.
+function apply4() {
+ var alias = g;
+ debugger;
+ alias.apply(null, [3]);
+ var aLocalVar = 'test';
+ var anotherLocalVar = g(aLocalVar) + 's';
+ var yetAnotherLocal = 10;
+}
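// The eight variants above differ only in whether the caller has local
// variables and whether g is reached directly or through an alias,
// presumably to exercise the step-in machinery against different caller
// frame layouts on both the Function.prototype.call and apply paths.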
+
+var testFunctions =
+ [call1, call2, call3, call4, apply1, apply2, apply3, apply4];
+
+for (var i = 0; i < testFunctions.length; i++) {
+ state = 0;
+ testFunctions[i]();
+ assertNull(exception);
+ assertEquals(3, state);
+}
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
\ No newline at end of file diff --git a/deps/v8/test/mjsunit/div-mod.js b/deps/v8/test/mjsunit/div-mod.js new file mode 100644 index 000000000..39fab2782 --- /dev/null +++ b/deps/v8/test/mjsunit/div-mod.js @@ -0,0 +1,95 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test fast div and mod. + +function divmod(div_func, mod_func, x, y) { + var div_answer = (div_func)(x); + assertEquals(x / y, div_answer, x + "/" + y); + var mod_answer = (mod_func)(x); + assertEquals(x % y, mod_answer, x + "%" + y); + var minus_div_answer = (div_func)(-x); + assertEquals(-x / y, minus_div_answer, "-" + x + "/" + y); + var minus_mod_answer = (mod_func)(-x); + assertEquals(-x % y, minus_mod_answer, "-" + x + "%" + y); +} + + +function run_tests_for(divisor) { + print("(function(left) { return left / " + divisor + "; })"); + var div_func = this.eval("(function(left) { return left / " + divisor + "; })"); + var mod_func = this.eval("(function(left) { return left % " + divisor + "; })"); + var exp; + // Strange number test. + divmod(div_func, mod_func, 0, divisor); + divmod(div_func, mod_func, 1 / 0, divisor); + // Floating point number test. + for (exp = -1024; exp <= 1024; exp += 4) { + divmod(div_func, mod_func, Math.pow(2, exp), divisor); + divmod(div_func, mod_func, 0.9999999 * Math.pow(2, exp), divisor); + divmod(div_func, mod_func, 1.0000001 * Math.pow(2, exp), divisor); + } + // Integer number test. + for (exp = 0; exp <= 32; exp++) { + divmod(div_func, mod_func, 1 << exp, divisor); + divmod(div_func, mod_func, (1 << exp) + 1, divisor); + divmod(div_func, mod_func, (1 << exp) - 1, divisor); + } + divmod(div_func, mod_func, Math.floor(0x1fffffff / 3), divisor); + divmod(div_func, mod_func, Math.floor(-0x20000000 / 3), divisor); +} + + +var divisors = [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + // These ones in the middle don't add much apart from slowness to the test. 
+ 0x1000000, + 0x2000000, + 0x4000000, + 0x8000000, + 0x10000000, + 0x20000000, + 0x40000000, + 12, + 60, + 100, + 1000 * 60 * 60 * 24]; + +for (var i = 0; i < divisors.length; i++) { + run_tests_for(divisors[i]); +} + diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js index debcc9a89..c653b18c1 100644 --- a/deps/v8/test/mjsunit/fuzz-natives.js +++ b/deps/v8/test/mjsunit/fuzz-natives.js @@ -126,7 +126,9 @@ var knownProblems = { "CreateArrayLiteralBoilerplate": true, "IS_VAR": true, "ResolvePossiblyDirectEval": true, - "Log": true + "Log": true, + + "CollectStackTrace": true }; var currentlyUncallable = { diff --git a/deps/v8/test/mjsunit/global-deleted-property-ic.js b/deps/v8/test/mjsunit/global-deleted-property-ic.js new file mode 100644 index 000000000..b90fc797a --- /dev/null +++ b/deps/v8/test/mjsunit/global-deleted-property-ic.js @@ -0,0 +1,45 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +function LoadX(obj) { return obj.x; } + +// Load x from the prototype of this. Make sure to initialize the IC. +this.__proto__ = { x: 42 }; +for (var i = 0; i < 3; i++) assertEquals(42, LoadX(this)); + +// Introduce a global variable and make sure we load that from LoadX. +this.x = 87; +for (var i = 0; i < 3; i++) assertEquals(87, LoadX(this)); + +// Delete the global variable and make sure we get back to loading from +// the prototype. +delete this.x; +for (var i = 0; i < 3; i++) assertEquals(42, LoadX(this)); + +// ... and go back again to loading directly from the object. +this.x = 99; +for (var i = 0; i < 3; i++) assertEquals(99, LoadX(this)); diff --git a/deps/v8/test/mjsunit/global-deleted-property-keyed.js b/deps/v8/test/mjsunit/global-deleted-property-keyed.js new file mode 100644 index 000000000..e249fd32b --- /dev/null +++ b/deps/v8/test/mjsunit/global-deleted-property-keyed.js @@ -0,0 +1,38 @@ +// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --expose-natives_as natives
+// Test keyed access to a deleted property in a global object without access checks.
+// Regression test that exposed the_hole value from Runtime_KeyedGetProperty.
+
+var name = "fisk";
+natives[name] = name;
+function foo() { natives[name] + 12; }
+for(var i = 0; i < 3; i++) foo();
+delete natives[name];
+for(var i = 0; i < 3; i++) foo();
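// The three-iteration loops follow the usual IC warm-up pattern: run the
// access enough times to initialize the inline cache, change the object
// underneath it, then run again to check the miss path. A generic sketch,
// with a hypothetical helper name that is not part of this test:
//   function warmUp(fn) { for (var i = 0; i < 3; i++) fn(); }
//   warmUp(function() { return natives[name]; });  // fill the keyed-load IC
//   delete natives[name];   // remove the property the IC cached
//   warmUp(function() { return natives[name]; });  // must miss cleanly, not
//                                                  // leak the hole value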
diff --git a/deps/v8/test/mjsunit/global-ic.js b/deps/v8/test/mjsunit/global-ic.js new file mode 100644 index 000000000..22c49ab1f --- /dev/null +++ b/deps/v8/test/mjsunit/global-ic.js @@ -0,0 +1,48 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +function f() { + return 87; +} + +function LoadFromGlobal(global) { return global.x; } +function StoreToGlobal(global, value) { global.x = value; } +function CallOnGlobal(global) { return global.f(); } + +// Initialize the ICs in the functions. +for (var i = 0; i < 3; i++) { + StoreToGlobal(this, 42 + i); + assertEquals(42 + i, LoadFromGlobal(this)); + assertEquals(87, CallOnGlobal(this)); +} + +// Try the ICs with a smi. This should not crash. +for (var i = 0; i < 3; i++) { + StoreToGlobal(i, 42 + i); + assertTrue(typeof LoadFromGlobal(i) == "undefined"); + assertThrows("CallOnGlobal(" + i + ")"); +} diff --git a/deps/v8/test/mjsunit/html-comments.js b/deps/v8/test/mjsunit/html-comments.js index f39271aca..cc2315b0f 100644 --- a/deps/v8/test/mjsunit/html-comments.js +++ b/deps/v8/test/mjsunit/html-comments.js @@ -32,26 +32,26 @@ var x = 1; --> so must this... --> and this. x-->0; -assertEquals(0, x); +assertEquals(0, x, 'a'); var x = 0; x <!-- x -assertEquals(0, x); +assertEquals(0, x, 'b'); var x = 1; x <!--x -assertEquals(1, x); +assertEquals(1, x, 'c'); var x = 2; x <!-- x; x = 42; -assertEquals(2, x); +assertEquals(2, x, 'd'); var x = 1; x <! x--; -assertEquals(0, x); +assertEquals(0, x, 'e'); var x = 1; x <!- x--; -assertEquals(0, x); +assertEquals(0, x, 'f'); var b = true <! 
true; -assertFalse(b); +assertFalse(b, 'g'); var b = true <!- true; -assertFalse(b); +assertFalse(b, 'h'); diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status index 4bb7c1652..962e4d313 100644 --- a/deps/v8/test/mjsunit/mjsunit.status +++ b/deps/v8/test/mjsunit/mjsunit.status @@ -51,6 +51,7 @@ debug-continue: SKIP debug-evaluate-recursive: CRASH || FAIL debug-changebreakpoint: CRASH || FAIL debug-clearbreakpoint: CRASH || FAIL +debug-clearbreakpointgroup: PASS, FAIL if $mode == debug debug-conditional-breakpoints: FAIL debug-evaluate: CRASH || FAIL debug-ignore-breakpoints: CRASH || FAIL @@ -58,6 +59,7 @@ debug-multiple-breakpoints: CRASH || FAIL debug-setbreakpoint: CRASH || FAIL || PASS debug-step-stub-callfunction: SKIP debug-stepin-constructor: CRASH, FAIL +debug-stepin-function-call: CRASH || FAIL debug-step: SKIP debug-breakpoints: PASS || FAIL debug-handle: CRASH || FAIL || PASS @@ -66,3 +68,41 @@ regress/regress-269: SKIP # Bug number 130 http://code.google.com/p/v8/issues/detail?id=130 # Fails on real ARM hardware but not on the simulator. string-compare-alignment: PASS || FAIL + + +[ $arch == x64 ] + +debug-backtrace.js: CRASH || FAIL +date-parse.js: CRASH || FAIL +debug-backtrace-text.js: CRASH || FAIL +debug-multiple-breakpoints.js: CRASH || FAIL +debug-breakpoints.js: CRASH || FAIL +debug-changebreakpoint.js: CRASH || FAIL +debug-clearbreakpoint.js: CRASH || FAIL +debug-conditional-breakpoints.js: CRASH || FAIL +debug-constructor.js: CRASH || FAIL +debug-continue.js: CRASH || FAIL +debug-enable-disable-breakpoints.js: CRASH || FAIL +debug-evaluate-recursive.js: CRASH || FAIL +debug-event-listener.js: CRASH || FAIL +debug-evaluate.js: CRASH || FAIL +debug-ignore-breakpoints.js: CRASH || FAIL +debug-setbreakpoint.js: CRASH || FAIL +debug-step-stub-callfunction.js: CRASH || FAIL +debug-step.js: CRASH || FAIL +mirror-date.js: CRASH || FAIL +invalid-lhs.js: CRASH || FAIL +debug-stepin-constructor.js: CRASH || FAIL +new.js: CRASH || FAIL +fuzz-natives.js: CRASH || FAIL +greedy.js: CRASH || FAIL +debug-handle.js: CRASH || FAIL +string-indexof.js: CRASH || FAIL +debug-clearbreakpointgroup.js: CRASH || FAIL +regress/regress-269.js: CRASH || FAIL +div-mod.js: CRASH || FAIL +unicode-test.js: CRASH || FAIL +regress/regress-392.js: CRASH || FAIL +regress/regress-1200351.js: CRASH || FAIL +regress/regress-998565.js: CRASH || FAIL +tools/tickprocessor.js: CRASH || FAIL diff --git a/deps/v8/test/mjsunit/regress/regress-244.js b/deps/v8/test/mjsunit/regress/regress-244.js index ffddcf835..dc5336f84 100644 --- a/deps/v8/test/mjsunit/regress/regress-244.js +++ b/deps/v8/test/mjsunit/regress/regress-244.js @@ -57,7 +57,7 @@ function run() { var threw = false; try { decodeURI(value); - fail(value); + assertUnreachable(value); } catch (e) { assertInstanceof(e, URIError); } diff --git a/deps/v8/test/mjsunit/regress/regress-394.js b/deps/v8/test/mjsunit/regress/regress-394.js new file mode 100644 index 000000000..7b98205b3 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-394.js @@ -0,0 +1,47 @@ +// Copyright 2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// See <URL:http://code.google.com/p/v8/issues/detail?id=394> + +function setx(){ + x=1; +} + +function getx(){ + return x; +} + +setx() +setx() +__defineSetter__('x',function(){}); +__defineGetter__('x',function(){return 2;}); +setx() +assertEquals(2, x); + +assertEquals(2, getx()); +assertEquals(2, getx()); +assertEquals(2, getx()); diff --git a/deps/v8/test/mjsunit/regress/regress-396.js b/deps/v8/test/mjsunit/regress/regress-396.js new file mode 100644 index 000000000..e6f2ce3a6 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-396.js @@ -0,0 +1,39 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// http://code.google.com/p/v8/issues/detail?id=396 + +function DateYear(date) { + var string = date.getYear() + ''; + if (string.length < 4) { + string = '' + (string - 0 + 1900); + } + return string; +} + +assertEquals('1995', DateYear(new Date('Dec 25, 1995'))); +assertEquals('2005', DateYear(new Date('Dec 25, 2005'))); diff --git a/deps/v8/test/mjsunit/regress/regress-397.js b/deps/v8/test/mjsunit/regress/regress-397.js new file mode 100644 index 000000000..111f4a6e5 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-397.js @@ -0,0 +1,34 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// See http://code.google.com/p/v8/issues/detail?id=397 + +assertEquals("Infinity", String(Math.pow(Infinity, 0.5))); +assertEquals(0, Math.pow(Infinity, -0.5)); + +assertEquals("Infinity", String(Math.pow(-Infinity, 0.5))); +assertEquals(0, Math.pow(-Infinity, -0.5)); diff --git a/deps/v8/test/mjsunit/regress/regress-399.js b/deps/v8/test/mjsunit/regress/regress-399.js new file mode 100644 index 000000000..2ee998ba3 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-399.js @@ -0,0 +1,32 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// See http://code.google.com/p/v8/issues/detail?id=399 + +var date = new Date(1.009804e12); +var year = String(date).match(/.*(200\d)/)[1]; +assertEquals(year, date.getFullYear()); diff --git a/deps/v8/test/mjsunit/smi-negative-zero.js b/deps/v8/test/mjsunit/smi-negative-zero.js index 99ddc9720..afeb6de9c 100644 --- a/deps/v8/test/mjsunit/smi-negative-zero.js +++ b/deps/v8/test/mjsunit/smi-negative-zero.js @@ -37,64 +37,64 @@ var minus_four = -4; // variable op variable -assertEquals(one / (-zero), -Infinity); +assertEquals(one / (-zero), -Infinity, "one / -0 I"); -assertEquals(one / (zero * minus_one), -Infinity); -assertEquals(one / (minus_one * zero), -Infinity); -assertEquals(one / (zero * zero), Infinity); -assertEquals(one / (minus_one * minus_one), 1); +assertEquals(one / (zero * minus_one), -Infinity, "one / -1"); +assertEquals(one / (minus_one * zero), -Infinity, "one / -0 II"); +assertEquals(one / (zero * zero), Infinity, "one / 0 I"); +assertEquals(one / (minus_one * minus_one), 1, "one / 1"); -assertEquals(one / (zero / minus_one), -Infinity); -assertEquals(one / (zero / one), Infinity); +assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III"); +assertEquals(one / (zero / one), Infinity, "one / 0 II"); -assertEquals(one / (minus_four % two), -Infinity); -assertEquals(one / (minus_four % minus_two), -Infinity); -assertEquals(one / (four % two), Infinity); -assertEquals(one / (four % minus_two), Infinity); +assertEquals(one / (minus_four % two), -Infinity, "foo"); +assertEquals(one / (minus_four % minus_two), -Infinity, "foo"); +assertEquals(one / (four % two), Infinity, "foo"); +assertEquals(one / (four % minus_two), Infinity, "foo"); // literal op variable -assertEquals(one / (0 * minus_one), -Infinity); -assertEquals(one / (-1 * zero), -Infinity); -assertEquals(one / (0 * zero), Infinity); -assertEquals(one / (-1 * minus_one), 1); +assertEquals(one / (0 * minus_one), -Infinity, "bar"); +assertEquals(one / (-1 * zero), -Infinity, "bar"); +assertEquals(one / (0 * zero), Infinity, "bar"); +assertEquals(one / (-1 * minus_one), 1, "bar"); -assertEquals(one / (0 / minus_one), -Infinity); -assertEquals(one / (0 / one), Infinity); +assertEquals(one / (0 / minus_one), -Infinity, "baz"); +assertEquals(one / (0 / one), Infinity, "baz"); -assertEquals(one / (-4 % two), -Infinity); -assertEquals(one / (-4 % minus_two), -Infinity); -assertEquals(one / (4 % two), Infinity); -assertEquals(one / (4 % minus_two), Infinity); +assertEquals(one / (-4 % two), -Infinity, "baz"); +assertEquals(one / (-4 % minus_two), -Infinity, "baz"); +assertEquals(one / (4 % two), Infinity, "baz"); +assertEquals(one / (4 % minus_two), Infinity, "baz"); // variable op literal -assertEquals(one / (zero * -1), 
-Infinity); -assertEquals(one / (minus_one * 0), -Infinity); -assertEquals(one / (zero * 0), Infinity); -assertEquals(one / (minus_one * -1), 1); +assertEquals(one / (zero * -1), -Infinity, "fizz"); +assertEquals(one / (minus_one * 0), -Infinity, "fizz"); +assertEquals(one / (zero * 0), Infinity, "fizz"); +assertEquals(one / (minus_one * -1), 1, "fizz"); -assertEquals(one / (zero / -1), -Infinity); -assertEquals(one / (zero / 1), Infinity); +assertEquals(one / (zero / -1), -Infinity, "buzz"); +assertEquals(one / (zero / 1), Infinity, "buzz"); -assertEquals(one / (minus_four % 2), -Infinity); -assertEquals(one / (minus_four % -2), -Infinity); -assertEquals(one / (four % 2), Infinity); -assertEquals(one / (four % -2), Infinity); +assertEquals(one / (minus_four % 2), -Infinity, "buzz"); +assertEquals(one / (minus_four % -2), -Infinity, "buzz"); +assertEquals(one / (four % 2), Infinity, "buzz"); +assertEquals(one / (four % -2), Infinity, "buzz"); // literal op literal -assertEquals(one / (-0), -Infinity); +assertEquals(one / (-0), -Infinity, "fisk1"); -assertEquals(one / (0 * -1), -Infinity); -assertEquals(one / (-1 * 0), -Infinity); -assertEquals(one / (0 * 0), Infinity); -assertEquals(one / (-1 * -1), 1); +assertEquals(one / (0 * -1), -Infinity, "fisk2"); +assertEquals(one / (-1 * 0), -Infinity, "fisk3"); +assertEquals(one / (0 * 0), Infinity, "fisk4"); +assertEquals(one / (-1 * -1), 1, "fisk5"); -assertEquals(one / (0 / -1), -Infinity); -assertEquals(one / (0 / 1), Infinity); +assertEquals(one / (0 / -1), -Infinity, "hest"); +assertEquals(one / (0 / 1), Infinity, "hest"); -assertEquals(one / (-4 % 2), -Infinity); -assertEquals(one / (-4 % -2), -Infinity); -assertEquals(one / (4 % 2), Infinity); -assertEquals(one / (4 % -2), Infinity); +assertEquals(one / (-4 % 2), -Infinity, "fiskhest"); +assertEquals(one / (-4 % -2), -Infinity, "fiskhest"); +assertEquals(one / (4 % 2), Infinity, "fiskhest"); +assertEquals(one / (4 % -2), Infinity, "fiskhest"); diff --git a/deps/v8/test/mjsunit/smi-ops.js b/deps/v8/test/mjsunit/smi-ops.js index cdff8964e..284050d36 100644 --- a/deps/v8/test/mjsunit/smi-ops.js +++ b/deps/v8/test/mjsunit/smi-ops.js @@ -56,15 +56,15 @@ function Add100Reversed(x) { assertEquals(1, Add1(0)); // fast case assertEquals(1, Add1Reversed(0)); // fast case -assertEquals(SMI_MAX + ONE, Add1(SMI_MAX)); // overflow -assertEquals(SMI_MAX + ONE, Add1Reversed(SMI_MAX)); // overflow +assertEquals(SMI_MAX + ONE, Add1(SMI_MAX), "smimax + 1"); +assertEquals(SMI_MAX + ONE, Add1Reversed(SMI_MAX), "1 + smimax"); assertEquals(42 + ONE, Add1(OBJ_42)); // non-smi assertEquals(42 + ONE, Add1Reversed(OBJ_42)); // non-smi assertEquals(100, Add100(0)); // fast case assertEquals(100, Add100Reversed(0)); // fast case -assertEquals(SMI_MAX + ONE_HUNDRED, Add100(SMI_MAX)); // overflow -assertEquals(SMI_MAX + ONE_HUNDRED, Add100Reversed(SMI_MAX)); // overflow +assertEquals(SMI_MAX + ONE_HUNDRED, Add100(SMI_MAX), "smimax + 100"); +assertEquals(SMI_MAX + ONE_HUNDRED, Add100Reversed(SMI_MAX), " 100 + smimax"); assertEquals(42 + ONE_HUNDRED, Add100(OBJ_42)); // non-smi assertEquals(42 + ONE_HUNDRED, Add100Reversed(OBJ_42)); // non-smi @@ -148,8 +148,8 @@ assertEquals(21, Sar1(OBJ_42)); assertEquals(0, Shr1Reversed(OBJ_42)); assertEquals(0, Sar1Reversed(OBJ_42)); -assertEquals(6, Shr100(100)); -assertEquals(6, Sar100(100)); +assertEquals(6, Shr100(100), "100 >>> 100"); +assertEquals(6, Sar100(100), "100 >> 100"); assertEquals(12, Shr100Reversed(99)); assertEquals(12, Sar100Reversed(99)); assertEquals(201326592, 
Shr100(SMI_MIN)); @@ -196,6 +196,54 @@ assertEquals(78, Xor100Reversed(OBJ_42)); var x = 0x23; var y = 0x35; assertEquals(0x16, x ^ y); + +// Bitwise not. +var v = 0; +assertEquals(-1, ~v); +v = SMI_MIN; +assertEquals(0x3fffffff, ~v, "~smimin"); +v = SMI_MAX; +assertEquals(-0x40000000, ~v, "~smimax"); + +// Overflowing ++ and --. +v = SMI_MAX; +v++; +assertEquals(0x40000000, v, "smimax++"); +v = SMI_MIN; +v--; +assertEquals(-0x40000001, v, "smimin--"); + +// Not actually Smi operations. +// Check that relations on unary ops work. +var v = -1.2; +assertTrue(v == v); +assertTrue(v === v); +assertTrue(v <= v); +assertTrue(v >= v); +assertFalse(v < v); +assertFalse(v > v); +assertFalse(v != v); +assertFalse(v !== v); + +// Right hand side of unary minus is overwritable. +v = 1.5; +assertEquals(-2.25, -(v * v)); + +// Smi input to bitop gives non-smi result where the rhs is a float that +// can be overwritten. +var x1 = 0x10000000; +var x2 = 0x40000002; +var x3 = 0x40000000; +assertEquals(0x40000000, x1 << (x2 - x3), "0x10000000<<1(1)"); + +// Smi input to bitop gives non-smi result where the rhs could be overwritten +// if it were a float, but it isn't. +x1 = 0x10000000; +x2 = 4; +x3 = 2; +assertEquals(0x40000000, x1 << (x2 - x3), "0x10000000<<2(2)"); + + // Test shift operators on non-smi inputs, giving smi and non-smi results. function testShiftNonSmis() { var pos_non_smi = 2000000000; @@ -210,12 +258,12 @@ function testShiftNonSmis() { assertEquals(neg_non_smi, (neg_non_smi) >> 0); assertEquals(neg_non_smi + 0x100000000, (neg_non_smi) >>> 0); assertEquals(neg_non_smi, (neg_non_smi) << 0); - assertEquals(pos_smi, (pos_smi) >> 0); - assertEquals(pos_smi, (pos_smi) >>> 0); - assertEquals(pos_smi, (pos_smi) << 0); - assertEquals(neg_smi, (neg_smi) >> 0); - assertEquals(neg_smi + 0x100000000, (neg_smi) >>> 0); - assertEquals(neg_smi, (neg_smi) << 0); + assertEquals(pos_smi, (pos_smi) >> 0, "possmi >> 0"); + assertEquals(pos_smi, (pos_smi) >>> 0, "possmi >>>0"); + assertEquals(pos_smi, (pos_smi) << 0, "possmi << 0"); + assertEquals(neg_smi, (neg_smi) >> 0, "negsmi >> 0"); + assertEquals(neg_smi + 0x100000000, (neg_smi) >>> 0, "negsmi >>> 0"); + assertEquals(neg_smi, (neg_smi) << 0, "negsmi << 0"); assertEquals(pos_non_smi / 2, (pos_non_smi) >> 1); assertEquals(pos_non_smi / 2, (pos_non_smi) >>> 1); @@ -235,18 +283,22 @@ function testShiftNonSmis() { assertEquals(-0x46536000, (pos_non_smi + 0.5) << 3); assertEquals(0x73594000, (pos_non_smi + 0.5) << 4); - assertEquals(neg_non_smi / 2, (neg_non_smi) >> 1); - assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> 1); + assertEquals(neg_non_smi / 2, (neg_non_smi) >> 1, "negnonsmi >> 1"); + + assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi) >>> 1, + "negnonsmi >>> 1"); assertEquals(0x1194D800, (neg_non_smi) << 1); assertEquals(neg_non_smi / 8, (neg_non_smi) >> 3); assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi) >>> 3); assertEquals(0x46536000, (neg_non_smi) << 3); assertEquals(-0x73594000, (neg_non_smi) << 4); assertEquals(neg_non_smi, (neg_non_smi - 0.5) >> 0); - assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> 0); + assertEquals(neg_non_smi + 0x100000000, (neg_non_smi - 0.5) >>> 0, + "negnonsmi.5 >>> 0"); assertEquals(neg_non_smi, (neg_non_smi - 0.5) << 0); assertEquals(neg_non_smi / 2, (neg_non_smi - 0.5) >> 1); - assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> 1); + assertEquals(neg_non_smi / 2 + 0x100000000 / 2, (neg_non_smi - 0.5) >>> 1, + "negnonsmi.5 >>> 1");
assertEquals(0x1194D800, (neg_non_smi - 0.5) << 1); assertEquals(neg_non_smi / 8, (neg_non_smi - 0.5) >> 3); assertEquals(neg_non_smi / 8 + 0x100000000 / 8, (neg_non_smi - 0.5) >>> 3); @@ -260,9 +312,9 @@ function testShiftNonSmis() { assertEquals(pos_smi / 8, (pos_smi) >>> 3); assertEquals(-0x2329b000, (pos_smi) << 3); assertEquals(0x73594000, (pos_smi) << 5); - assertEquals(pos_smi, (pos_smi + 0.5) >> 0); - assertEquals(pos_smi, (pos_smi + 0.5) >>> 0); - assertEquals(pos_smi, (pos_smi + 0.5) << 0); + assertEquals(pos_smi, (pos_smi + 0.5) >> 0, "possmi.5 >> 0"); + assertEquals(pos_smi, (pos_smi + 0.5) >>> 0, "possmi.5 >>> 0"); + assertEquals(pos_smi, (pos_smi + 0.5) << 0, "possmi.5 << 0"); assertEquals(pos_smi / 2, (pos_smi + 0.5) >> 1); assertEquals(pos_smi / 2, (pos_smi + 0.5) >>> 1); assertEquals(pos_non_smi, (pos_smi + 0.5) << 1); @@ -278,9 +330,9 @@ function testShiftNonSmis() { assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> 3); assertEquals(0x46536000, (neg_smi) << 4); assertEquals(-0x73594000, (neg_smi) << 5); - assertEquals(neg_smi, (neg_smi - 0.5) >> 0); - assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> 0); - assertEquals(neg_smi, (neg_smi - 0.5) << 0); + assertEquals(neg_smi, (neg_smi - 0.5) >> 0, "negsmi.5 >> 0"); + assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> 0, "negsmi.5 >>> 0"); + assertEquals(neg_smi, (neg_smi - 0.5) << 0, "negsmi.5 << 0"); assertEquals(neg_smi / 2, (neg_smi - 0.5) >> 1); assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> 1); assertEquals(neg_non_smi, (neg_smi - 0.5) << 1); @@ -301,12 +353,12 @@ function testShiftNonSmis() { assertEquals(neg_non_smi, (neg_32 + neg_non_smi) >> 0); assertEquals(neg_non_smi + 0x100000000, (neg_32 + neg_non_smi) >>> 0); assertEquals(neg_non_smi, (neg_32 + neg_non_smi) << 0); - assertEquals(pos_smi, (two_32 + pos_smi) >> 0); - assertEquals(pos_smi, (two_32 + pos_smi) >>> 0); - assertEquals(pos_smi, (two_32 + pos_smi) << 0); - assertEquals(neg_smi, (neg_32 + neg_smi) >> 0); + assertEquals(pos_smi, (two_32 + pos_smi) >> 0, "2^32+possmi >> 0"); + assertEquals(pos_smi, (two_32 + pos_smi) >>> 0, "2^32+possmi >>> 0"); + assertEquals(pos_smi, (two_32 + pos_smi) << 0, "2^32+possmi << 0"); + assertEquals(neg_smi, (neg_32 + neg_smi) >> 0, "2^32+negsmi >> 0"); assertEquals(neg_smi + 0x100000000, (neg_32 + neg_smi) >>> 0); - assertEquals(neg_smi, (neg_32 + neg_smi) << 0); + assertEquals(neg_smi, (neg_32 + neg_smi) << 0, "2^32+negsmi << 0"); assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi) >> 1); assertEquals(pos_non_smi / 2, (two_32 + pos_non_smi) >>> 1); @@ -371,9 +423,9 @@ function testShiftNonSmis() { assertEquals((neg_smi + 0x100000000) / 8, (neg_32 + neg_smi) >>> 3); assertEquals(0x46536000, (neg_32 + neg_smi) << 4); assertEquals(-0x73594000, (neg_32 + neg_smi) << 5); - assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) >> 0); + assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) >> 0, "-2^32+negsmi.5 >> 0"); assertEquals(neg_smi + 0x100000000, (neg_32 + neg_smi - 0.5) >>> 0); - assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) << 0); + assertEquals(neg_smi, (neg_32 + neg_smi - 0.5) << 0, "-2^32+negsmi.5 << 0"); assertEquals(neg_smi / 2, (neg_32 + neg_smi - 0.5) >> 1); assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_32 + neg_smi - 0.5) >>> 1); assertEquals(neg_non_smi, (neg_32 + neg_smi - 0.5) << 1); @@ -399,9 +451,9 @@ function testShiftNonSmis() { assertEquals(pos_smi, (pos_smi) >> zero); assertEquals(pos_smi, (pos_smi) >>> zero); assertEquals(pos_smi, (pos_smi) << zero); - assertEquals(neg_smi, 
(neg_smi) >> zero); + assertEquals(neg_smi, (neg_smi) >> zero, "negsmi >> zero"); assertEquals(neg_smi + 0x100000000, (neg_smi) >>> zero); - assertEquals(neg_smi, (neg_smi) << zero); + assertEquals(neg_smi, (neg_smi) << zero, "negsmi << zero"); assertEquals(pos_non_smi / 2, (pos_non_smi) >> one); assertEquals(pos_non_smi / 2, (pos_non_smi) >>> one); @@ -495,9 +547,9 @@ function testShiftNonSmis() { assertEquals(pos_smi, (pos_smi) >> zero); assertEquals(pos_smi, (pos_smi) >>> zero); assertEquals(pos_smi, (pos_smi) << zero); - assertEquals(neg_smi, (neg_smi) >> zero); + assertEquals(neg_smi, (neg_smi) >> zero, "negsmi >> zero(2)"); assertEquals(neg_smi + 0x100000000, (neg_smi) >>> zero); - assertEquals(neg_smi, (neg_smi) << zero); + assertEquals(neg_smi, (neg_smi) << zero, "negsmi << zero(2)"); assertEquals(pos_non_smi / 2, (pos_non_smi) >> one); assertEquals(pos_non_smi / 2, (pos_non_smi) >>> one); @@ -561,9 +613,9 @@ function testShiftNonSmis() { assertEquals(neg_smi / 8 + 0x100000000 / 8, (neg_smi) >>> three); assertEquals(0x46536000, (neg_smi) << four); assertEquals(-0x73594000, (neg_smi) << five); - assertEquals(neg_smi, (neg_smi - 0.5) >> zero); + assertEquals(neg_smi, (neg_smi - 0.5) >> zero, "negsmi.5 >> zero"); assertEquals(neg_smi + 0x100000000, (neg_smi - 0.5) >>> zero); - assertEquals(neg_smi, (neg_smi - 0.5) << zero); + assertEquals(neg_smi, (neg_smi - 0.5) << zero, "negsmi.5 << zero"); assertEquals(neg_smi / 2, (neg_smi - 0.5) >> one); assertEquals(neg_smi / 2 + 0x100000000 / 2, (neg_smi - 0.5) >>> one); assertEquals(neg_non_smi, (neg_smi - 0.5) << one); diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js new file mode 100644 index 000000000..e457ece3c --- /dev/null +++ b/deps/v8/test/mjsunit/stack-traces.js @@ -0,0 +1,171 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +function testMethodNameInference() { + function Foo() { } + Foo.prototype.bar = function () { FAIL; }; + (new Foo).bar(); +} + +function testNested() { + function one() { + function two() { + function three() { + FAIL; + } + three(); + } + two(); + } + one(); +} + +function testArrayNative() { + [1, 2, 3].map(function () { FAIL; }); +} + +function testImplicitConversion() { + function Nirk() { } + Nirk.prototype.valueOf = function () { FAIL; }; + return 1 + (new Nirk); +} + +function testEval() { + eval("function Doo() { FAIL; }; Doo();"); +} + +function testNestedEval() { + var x = "FAIL"; + eval("function Outer() { eval('function Inner() { eval(x); }'); Inner(); }; Outer();"); +} + +function testValue() { + Number.prototype.causeError = function () { FAIL; }; + (1).causeError(); +} + +function testConstructor() { + function Plonk() { FAIL; } + new Plonk(); +} + +function testRenamedMethod() { + function a$b$c$d() { return FAIL; } + function Wookie() { } + Wookie.prototype.d = a$b$c$d; + (new Wookie).d(); +} + +function testAnonymousMethod() { + (function () { FAIL }).call([1, 2, 3]); +} + +// Utility function for testing that the expected strings occur +// in the stack trace produced when running the given function. +function testTrace(fun, expected) { + var threw = false; + try { + fun(); + } catch (e) { + for (var i = 0; i < expected.length; i++) { + assertTrue(e.stack.indexOf(expected[i]) != -1); + } + threw = true; + } + assertTrue(threw); +} + +// Test that the error constructor is not shown in the trace +function testCallerCensorship() { + var threw = false; + try { + FAIL; + } catch (e) { + assertEquals(-1, e.stack.indexOf('at new ReferenceError')); + threw = true; + } + assertTrue(threw); +} + +// Test that the explicit constructor call is shown in the trace +function testUnintendedCallerCensorship() { + var threw = false; + try { + new ReferenceError({ + toString: function () { + FAIL; + } + }); + } catch (e) { + assertTrue(e.stack.indexOf('at new ReferenceError') != -1); + threw = true; + } + assertTrue(threw); +} + +// If an error occurs while the stack trace is being formatted it should +// be handled gracefully. +function testErrorsDuringFormatting() { + function Nasty() { } + Nasty.prototype.foo = function () { throw new RangeError(); }; + var n = new Nasty(); + n.__defineGetter__('constructor', function () { CONS_FAIL; }); + var threw = false; + try { + n.foo(); + } catch (e) { + threw = true; + assertTrue(e.stack.indexOf('<error: ReferenceError') != -1); + } + assertTrue(threw); + threw = false; + // Now we can't even format the message saying that we couldn't format + // the stack frame. Put that in your pipe and smoke it! 
+ ReferenceError.prototype.toString = function () { NESTED_FAIL; }; + try { + n.foo(); + } catch (e) { + threw = true; + assertTrue(e.stack.indexOf('<error>') != -1); + } + assertTrue(threw); +} + +testTrace(testArrayNative, ["Array.map (native)"]); +testTrace(testNested, ["at one", "at two", "at three"]); +testTrace(testMethodNameInference, ["at Foo.bar"]); +testTrace(testImplicitConversion, ["at Nirk.valueOf"]); +testTrace(testEval, ["at Doo (eval at testEval"]); +testTrace(testNestedEval, ["eval at Inner (eval at Outer"]); +testTrace(testValue, ["at Number.causeError"]); +testTrace(testConstructor, ["new Plonk"]); +testTrace(testRenamedMethod, ["Wookie.a$b$c$d [as d]"]); +testTrace(testAnonymousMethod, ["Array.<anonymous>"]); + +testCallerCensorship(); +testUnintendedCallerCensorship(); +testErrorsDuringFormatting(); diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py index 9c7e02807..96840f5cf 100644 --- a/deps/v8/test/mjsunit/testcfg.py +++ b/deps/v8/test/mjsunit/testcfg.py @@ -29,10 +29,12 @@ import test import os from os.path import join, dirname, exists import re +import tempfile FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)") FILES_PATTERN = re.compile(r"//\s+Files:(.*)") +SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME") class MjsunitTestCase(test.TestCase): @@ -42,6 +44,7 @@ class MjsunitTestCase(test.TestCase): self.file = file self.config = config self.mode = mode + self.self_script = False def GetLabel(self): return "%s %s" % (self.mode, self.GetName()) @@ -55,19 +58,43 @@ class MjsunitTestCase(test.TestCase): flags_match = FLAGS_PATTERN.search(source) if flags_match: result += flags_match.group(1).strip().split() - files_match = FILES_PATTERN.search(source); additional_files = [] - if files_match: - additional_files += files_match.group(1).strip().split() + files_match = FILES_PATTERN.search(source); + # Accept several lines of 'Files:' + while True: + if files_match: + additional_files += files_match.group(1).strip().split() + files_match = FILES_PATTERN.search(source, files_match.end()) + else: + break for a_file in additional_files: result.append(join(dirname(self.config.root), '..', a_file)) framework = join(dirname(self.config.root), 'mjsunit', 'mjsunit.js') + if SELF_SCRIPT_PATTERN.search(source): + result.append(self.CreateSelfScript()) result += [framework, self.file] return result def GetSource(self): return open(self.file).read() + def CreateSelfScript(self): + (fd_self_script, self_script) = tempfile.mkstemp(suffix=".js") + def MakeJsConst(name, value): + return "var %(name)s=\'%(value)s\';\n" % \ + {'name': name, \ + 'value': value.replace('\\', '\\\\').replace('\'', '\\\'') } + try: + os.write(fd_self_script, MakeJsConst('TEST_FILE_NAME', self.file)) + except IOError, e: + test.PrintError("write() " + str(e)) + os.close(fd_self_script) + self.self_script = self_script + return self_script + + def Cleanup(self): + if self.self_script: + test.CheckedUnlink(self.self_script) class MjsunitTestConfiguration(test.TestConfiguration): diff --git a/deps/v8/test/mjsunit/to_number_order.js b/deps/v8/test/mjsunit/to_number_order.js new file mode 100644 index 000000000..1329bad2c --- /dev/null +++ b/deps/v8/test/mjsunit/to_number_order.js @@ -0,0 +1,129 @@ +// Copyright 2009 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +var x = ""; +var v = new Object(); +var w = new Object(); +var vv = function() { x += "hest"; return 1; } +var ww = function() { x += "fisk"; return 2; } +v.valueOf = vv; +w.valueOf = ww; +assertEquals(1, Math.min(v,w)); +assertEquals("hestfisk", x, "min"); + +x = ""; +assertEquals(2, Math.max(v,w)); +assertEquals("hestfisk", x, "max"); + +x = ""; +assertEquals(Math.atan2(1, 2), Math.atan2(v, w)); +// JSC says fiskhest. +assertEquals("hestfisk", x, "atan2"); + +x = ""; +assertEquals(1, Math.pow(v, w)); +assertEquals("hestfisk", x, "pow"); + +var year = { valueOf: function() { x += 1; return 2007; } }; +var month = { valueOf: function() { x += 2; return 2; } }; +var date = { valueOf: function() { x += 3; return 4; } }; +var hours = { valueOf: function() { x += 4; return 13; } }; +var minutes = { valueOf: function() { x += 5; return 50; } }; +var seconds = { valueOf: function() { x += 6; return 0; } }; +var ms = { valueOf: function() { x += 7; return 999; } }; + +x = ""; +new Date(year, month, date, hours, minutes, seconds, ms); +// JSC fails this one: Returns 12345671234567. +assertEquals("1234567", x, "Date"); + +x = ""; +Date(year, month, date, hours, minutes, seconds, ms); +assertEquals("", x, "Date not constructor"); + +x = ""; +Date.UTC(year, month, date, hours, minutes, seconds, ms); +// JSC fails this one: Returns 12345671234567. 
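+// Each valueOf above appends its digit to x, so the "1234567" check below
+// verifies that the arguments were converted left to right, exactly once each.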
+assertEquals("1234567", x, "Date.UTC");
+
+x = "";
+new Date().setSeconds(seconds, ms);
+assertEquals("67", x, "Date.setSeconds");
+
+x = "";
+new Date().setUTCSeconds(seconds, ms);
+assertEquals("67", x, "Date.setUTCSeconds");
+
+x = "";
+new Date().setMinutes(minutes, seconds, ms);
+assertEquals("567", x, "Date.setMinutes");
+
+x = "";
+new Date().setUTCMinutes(minutes, seconds, ms);
+assertEquals("567", x, "Date.setUTCMinutes");
+
+x = "";
+new Date().setHours(hours, minutes, seconds, ms);
+assertEquals("4567", x, "Date.setHours");
+
+x = "";
+new Date().setUTCHours(hours, minutes, seconds, ms);
+assertEquals("4567", x, "Date.setUTCHours");
+
+x = "";
+new Date().setDate(date, hours, minutes, seconds, ms);
+assertEquals("3", x, "Date.setDate");
+
+x = "";
+new Date().setUTCDate(date, hours, minutes, seconds, ms);
+assertEquals("3", x, "Date.setUTCDate");
+
+x = "";
+new Date().setMonth(month, date, hours, minutes, seconds, ms);
+assertEquals("23", x, "Date.setMonth");
+
+x = "";
+new Date().setUTCMonth(month, date, hours, minutes, seconds, ms);
+assertEquals("23", x, "Date.setUTCMonth");
+
+x = "";
+new Date().setFullYear(year, month, date, hours, minutes, seconds, ms);
+assertEquals("123", x, "Date.setFullYear");
+
+x = "";
+new Date().setUTCFullYear(year, month, date, hours, minutes, seconds, ms);
+assertEquals("123", x, "Date.setUTCFullYear");
+
+var a = { valueOf: function() { x += "hest"; return 97; } };
+var b = { valueOf: function() { x += "fisk"; return 98; } };
+assertEquals("ab", String.fromCharCode(a, b), "String.fromCharCode");
+
+print("ok");
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.default b/deps/v8/test/mjsunit/tools/tickprocessor-test.default
new file mode 100644
index 000000000..a689ea8c3
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.default
@@ -0,0 +1,60 @@
+Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
+
+ [Unknown]:
+ ticks total nonlib name
+ 2 15.4%
+
+ [Shared libraries]:
+ ticks total nonlib name
+ 2 15.4% 0.0% /lib32/libm-2.7.so
+ 1 7.7% 0.0% ffffe000-fffff000
+
+ [JavaScript]:
+ ticks total nonlib name
+ 1 7.7% 10.0% LazyCompile: exp native math.js:41
+
+ [C++]:
+ ticks total nonlib name
+ 2 15.4% 20.0% v8::internal::Runtime_Math_exp(v8::internal::Arguments)
+ 1 7.7% 10.0% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
+ 1 7.7% 10.0% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
+ 1 7.7% 10.0% fegetexcept
+ 1 7.7% 10.0% exp
+
+ [GC]:
+ ticks total nonlib name
+ 0 0.0%
+
+ [Bottom up (heavy) profile]:
+ Note: percentage shows a share of a particular caller in the total
+ amount of its parent calls.
+ Callers occupying less than 2.0% are not shown.
+ + ticks parent name + 2 15.4% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 2 100.0% LazyCompile: exp native math.js:41 + 2 100.0% Script: exp.js + + 2 15.4% /lib32/libm-2.7.so + 2 100.0% LazyCompile: exp native math.js:41 + 2 100.0% Script: exp.js + + 1 7.7% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 100.0% Script: exp.js + + 1 7.7% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 100.0% Script: exp.js + + 1 7.7% ffffe000-fffff000 + + 1 7.7% fegetexcept + 1 100.0% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + + 1 7.7% exp + 1 100.0% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + + 1 7.7% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.gc-state b/deps/v8/test/mjsunit/tools/tickprocessor-test.gc-state new file mode 100644 index 000000000..40f90db4f --- /dev/null +++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.gc-state @@ -0,0 +1,21 @@ +Statistical profiling result from v8.log, (13 ticks, 0 unaccounted, 13 excluded). + + [Shared libraries]: + ticks total nonlib name + + [JavaScript]: + ticks total nonlib name + + [C++]: + ticks total nonlib name + + [GC]: + ticks total nonlib name + 0 0.0% + + [Bottom up (heavy) profile]: + Note: percentage shows a share of a particular caller in the total + amount of its parent calls. + Callers occupying less than 2.0% are not shown. + + ticks parent name diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown b/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown new file mode 100644 index 000000000..87beb0842 --- /dev/null +++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown @@ -0,0 +1,56 @@ +Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). + + [Shared libraries]: + ticks total nonlib name + 2 18.2% 0.0% /lib32/libm-2.7.so + 1 9.1% 0.0% ffffe000-fffff000 + + [JavaScript]: + ticks total nonlib name + 1 9.1% 12.5% LazyCompile: exp native math.js:41 + + [C++]: + ticks total nonlib name + 2 18.2% 25.0% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 1 9.1% 12.5% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 9.1% 12.5% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 9.1% 12.5% fegetexcept + 1 9.1% 12.5% exp + + [GC]: + ticks total nonlib name + 0 0.0% + + [Bottom up (heavy) profile]: + Note: percentage shows a share of a particular caller in the total + amount of its parent calls. + Callers occupying less than 2.0% are not shown. 
+ + ticks parent name + 2 18.2% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 2 100.0% LazyCompile: exp native math.js:41 + 2 100.0% Script: exp.js + + 2 18.2% /lib32/libm-2.7.so + 2 100.0% LazyCompile: exp native math.js:41 + 2 100.0% Script: exp.js + + 1 9.1% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 100.0% Script: exp.js + + 1 9.1% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 100.0% Script: exp.js + + 1 9.1% ffffe000-fffff000 + + 1 9.1% fegetexcept + 1 100.0% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + + 1 9.1% exp + 1 100.0% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + + 1 9.1% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.log b/deps/v8/test/mjsunit/tools/tickprocessor-test.log new file mode 100644 index 000000000..75daad6b2 --- /dev/null +++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.log @@ -0,0 +1,24 @@ +shared-library,"shell",0x08048000,0x081ee000 +shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000 +shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000 +profiler,"begin",1 +code-creation,Stub,0xf540a100,474,"CEntryStub" +code-creation,Script,0xf541cd80,736,"exp.js" +code-creation,Stub,0xf541d0e0,47,"RuntimeStub_Math_exp" +code-creation,LazyCompile,0xf541d120,145,"exp native math.js:41" +code-creation,LoadIC,0xf541d280,117,"j" +code-creation,LoadIC,0xf541d360,63,"i" +tick,0x80f82d1,0xffdfe880,0,0xf541ce5c +tick,0x80f89a1,0xffdfecf0,0,0xf541ce5c +tick,0x8123b5c,0xffdff1a0,0,0xf541d1a1,0xf541ceea +tick,0x8123b65,0xffdff1a0,0,0xf541d1a1,0xf541ceea +tick,0xf541d2be,0xffdff1e4,0 +tick,0xf541d320,0xffdff1dc,0 +tick,0xf541d384,0xffdff1d8,0 +tick,0xf7db94da,0xffdff0ec,0,0xf541d1a1,0xf541ceea +tick,0xf7db951c,0xffdff0f0,0,0xf541d1a1,0xf541ceea +tick,0xf7dbc508,0xffdff14c,0,0xf541d1a1,0xf541ceea +tick,0xf7dbff21,0xffdff198,0,0xf541d1a1,0xf541ceea +tick,0xf7edec90,0xffdff0ec,0,0xf541d1a1,0xf541ceea +tick,0xffffe402,0xffdff488,0 +profiler,"end" diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic b/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic new file mode 100644 index 000000000..7eb3d9a7a --- /dev/null +++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic @@ -0,0 +1,66 @@ +Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). + + [Unknown]: + ticks total nonlib name + 2 15.4% + + [Shared libraries]: + ticks total nonlib name + 2 15.4% 0.0% /lib32/libm-2.7.so + 1 7.7% 0.0% ffffe000-fffff000 + + [JavaScript]: + ticks total nonlib name + 1 7.7% 10.0% LoadIC: j + 1 7.7% 10.0% LoadIC: i + 1 7.7% 10.0% LazyCompile: exp native math.js:41 + + [C++]: + ticks total nonlib name + 2 15.4% 20.0% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 1 7.7% 10.0% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 7.7% 10.0% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 7.7% 10.0% fegetexcept + 1 7.7% 10.0% exp + + [GC]: + ticks total nonlib name + 0 0.0% + + [Bottom up (heavy) profile]: + Note: percentage shows a share of a particular caller in the total + amount of its parent calls. + Callers occupying less than 2.0% are not shown. 
+ + ticks parent name + 2 15.4% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 2 100.0% LazyCompile: exp native math.js:41 + 2 100.0% Script: exp.js + + 2 15.4% /lib32/libm-2.7.so + 2 100.0% LazyCompile: exp native math.js:41 + 2 100.0% Script: exp.js + + 1 7.7% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 100.0% Script: exp.js + + 1 7.7% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 100.0% Script: exp.js + + 1 7.7% ffffe000-fffff000 + + 1 7.7% fegetexcept + 1 100.0% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + + 1 7.7% exp + 1 100.0% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + + 1 7.7% LoadIC: j + + 1 7.7% LoadIC: i + + 1 7.7% LazyCompile: exp native math.js:41 + 1 100.0% Script: exp.js + diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js new file mode 100644 index 000000000..587106ac0 --- /dev/null +++ b/deps/v8/test/mjsunit/tools/tickprocessor.js @@ -0,0 +1,268 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Load implementations from <project root>/tools. 
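+// Note: the 'Files:' and 'Env:' lines below are directives, not plain
+// comments; testcfg.py (FILES_PATTERN, SELF_SCRIPT_PATTERN) uses them to
+// load the listed scripts and to define the TEST_FILE_NAME constant.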
+// Files: tools/splaytree.js tools/codemap.js tools/csvparser.js +// Files: tools/consarray.js tools/profile.js tools/profile_view.js +// Files: tools/logreader.js tools/tickprocessor.js +// Env: TEST_FILE_NAME + +(function testArgumentsProcessor() { + var p_default = new ArgumentsProcessor([]); + assertTrue(p_default.parse()); + assertEquals(ArgumentsProcessor.DEFAULTS, p_default.result()); + + var p_logFile = new ArgumentsProcessor(['logfile.log']); + assertTrue(p_logFile.parse()); + assertEquals('logfile.log', p_logFile.result().logFileName); + + var p_platformAndLog = new ArgumentsProcessor(['--windows', 'winlog.log']); + assertTrue(p_platformAndLog.parse()); + assertEquals('windows', p_platformAndLog.result().platform); + assertEquals('winlog.log', p_platformAndLog.result().logFileName); + + var p_flags = new ArgumentsProcessor(['--gc', '--separate-ic']); + assertTrue(p_flags.parse()); + assertEquals(TickProcessor.VmStates.GC, p_flags.result().stateFilter); + assertTrue(p_flags.result().separateIc); + + var p_nmAndLog = new ArgumentsProcessor(['--nm=mn', 'nmlog.log']); + assertTrue(p_nmAndLog.parse()); + assertEquals('mn', p_nmAndLog.result().nm); + assertEquals('nmlog.log', p_nmAndLog.result().logFileName); + + var p_bad = new ArgumentsProcessor(['--unknown', 'badlog.log']); + assertFalse(p_bad.parse()); +})(); + + +(function testUnixCppEntriesProvider() { + var oldLoadSymbols = UnixCppEntriesProvider.prototype.loadSymbols; + + // shell executable + UnixCppEntriesProvider.prototype.loadSymbols = function(libName) { + this.symbols = [[ + ' U operator delete[](void*)@@GLIBCXX_3.4', + '08049790 T _init', + '08049f50 T _start', + '08139150 t v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', + '08139ca0 T v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', + '0813a0b0 t v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', + '08181d30 W v8::internal::RegExpMacroAssemblerIrregexp::stack_limit_slack()', + ' w __gmon_start__', + '081f08a0 B stdout' + ].join('\n'), '']; + }; + + var shell_prov = new UnixCppEntriesProvider(); + var shell_syms = []; + shell_prov.parseVmSymbols('shell', 0x08048000, 0x081ee000, + function (name, start, end) { + shell_syms.push(Array.prototype.slice.apply(arguments, [0])); + }); + assertEquals( + [['_init', 0x08049790, 0x08049f50], + ['_start', 0x08049f50, 0x08139150], + ['v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', 0x08139150, 0x08139ca0], + ['v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', 0x08139ca0, 0x0813a0b0], + ['v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', 0x0813a0b0, 0x08181d30], + ['v8::internal::RegExpMacroAssemblerIrregexp::stack_limit_slack()', 0x08181d30, 0x081ee000]], + shell_syms); + + // libc library + UnixCppEntriesProvider.prototype.loadSymbols = function(libName) { + this.symbols = [[ + '000162a0 T __libc_init_first', + '0002a5f0 T __isnan', + '0002a5f0 W isnan', + '0002aaa0 W scalblnf', + '0002aaa0 W scalbnf', + '0011a340 T __libc_thread_freeres', + '00128860 R _itoa_lower_digits'].join('\n'), '']; + }; + var libc_prov = new UnixCppEntriesProvider(); + var libc_syms = []; + libc_prov.parseVmSymbols('libc', 0xf7c5c000, 0xf7da5000, + function (name, start, end) { + libc_syms.push(Array.prototype.slice.apply(arguments, [0])); + }); + assertEquals( + [['__libc_init_first', 0xf7c5c000 + 0x000162a0, 0xf7c5c000 + 0x0002a5f0], + ['isnan', 
0xf7c5c000 + 0x0002a5f0, 0xf7c5c000 + 0x0002aaa0], + ['scalbnf', 0xf7c5c000 + 0x0002aaa0, 0xf7c5c000 + 0x0011a340], + ['__libc_thread_freeres', 0xf7c5c000 + 0x0011a340, 0xf7da5000]], + libc_syms); + + UnixCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols; +})(); + + +(function testWindowsCppEntriesProvider() { + var oldLoadSymbols = WindowsCppEntriesProvider.prototype.loadSymbols; + + WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) { + this.symbols = [ + ' Start Length Name Class', + ' 0001:00000000 000ac902H .text CODE', + ' 0001:000ac910 000005e2H .text$yc CODE', + ' Address Publics by Value Rva+Base Lib:Object', + ' 0000:00000000 __except_list 00000000 <absolute>', + ' 0001:00000000 ?ReadFile@@YA?AV?$Handle@VString@v8@@@v8@@PBD@Z 00401000 f shell.obj', + ' 0001:000000a0 ?Print@@YA?AV?$Handle@VValue@v8@@@v8@@ABVArguments@2@@Z 004010a0 f shell.obj', + ' 0001:00001230 ??1UTF8Buffer@internal@v8@@QAE@XZ 00402230 f v8_snapshot:scanner.obj', + ' 0001:00001230 ??1Utf8Value@String@v8@@QAE@XZ 00402230 f v8_snapshot:api.obj', + ' 0001:000954ba __fclose_nolock 004964ba f LIBCMT:fclose.obj', + ' 0002:00000000 __imp__SetThreadPriority@8 004af000 kernel32:KERNEL32.dll', + ' 0003:00000418 ?in_use_list_@PreallocatedStorage@internal@v8@@0V123@A 00544418 v8_snapshot:allocation.obj', + ' Static symbols', + ' 0001:00000b70 ?DefaultFatalErrorHandler@v8@@YAXPBD0@Z 00401b70 f v8_snapshot:api.obj', + ' 0001:000010b0 ?EnsureInitialized@v8@@YAXPBD@Z 004020b0 f v8_snapshot:api.obj', + ' 0001:000ad17b ??__Fnomem@?5???2@YAPAXI@Z@YAXXZ 004ae17b f LIBCMT:new.obj' + ].join('\r\n'); + }; + var shell_prov = new WindowsCppEntriesProvider(); + var shell_syms = []; + shell_prov.parseVmSymbols('shell.exe', 0x00400000, 0x0057c000, + function (name, start, end) { + shell_syms.push(Array.prototype.slice.apply(arguments, [0])); + }); + assertEquals( + [['ReadFile', 0x00401000, 0x004010a0], + ['Print', 0x004010a0, 0x00402230], + ['v8::String::?1Utf8Value', 0x00402230, 0x004964ba], + ['v8::DefaultFatalErrorHandler', 0x00401b70, 0x004020b0], + ['v8::EnsureInitialized', 0x004020b0, 0x0057c000]], + shell_syms); + + WindowsCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols; +})(); + + +function CppEntriesProviderMock() { +}; + + +CppEntriesProviderMock.prototype.parseVmSymbols = function( + name, startAddr, endAddr, symbolAdder) { + var symbols = { + 'shell': + [['v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)', 0x080f8800, 0x080f8d90], + ['v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)', 0x080f8210, 0x080f8800], + ['v8::internal::Runtime_Math_exp(v8::internal::Arguments)', 0x08123b20, 0x08123b80]], + '/lib32/libm-2.7.so': + [['exp', startAddr + 0x00009e80, startAddr + 0x00009f30], + ['fegetexcept', startAddr + 0x000061e0, startAddr + 0x00008b10]], + 'ffffe000-fffff000': []}; + assertTrue(name in symbols); + var syms = symbols[name]; + for (var i = 0; i < syms.length; ++i) { + symbolAdder.apply(null, syms[i]); + } +}; + + +function PrintMonitor(outputOrFileName) { + var expectedOut = typeof outputOrFileName == 'string' ? 
+      this.loadExpectedOutput(outputOrFileName) : outputOrFileName;
+  var outputPos = 0;
+  var diffs = this.diffs = [];
+  var realOut = this.realOut = [];
+
+  this.oldPrint = print;
+  // Intercept the global 'print' and compare every emitted line against
+  // the expected output.
+  print = function(str) {
+    var strSplit = str.split('\n');
+    for (var i = 0; i < strSplit.length; ++i) {
+      var s = strSplit[i];
+      realOut.push(s);
+      assertTrue(outputPos < expectedOut.length,
+          'unexpected output: "' + s + '"');
+      if (expectedOut[outputPos] != s) {
+        diffs.push('line ' + outputPos + ': expected <' +
+                   expectedOut[outputPos] + '> found <' + s + '>\n');
+      }
+      outputPos++;
+    }
+  };
+}
+
+
+PrintMonitor.prototype.loadExpectedOutput = function(fileName) {
+  var output = readFile(fileName);
+  return output.split('\n');
+};
+
+
+PrintMonitor.prototype.finish = function() {
+  print = this.oldPrint;
+  if (this.diffs.length > 0) {
+    print(this.realOut.join('\n'));
+    assertEquals([], this.diffs);
+  }
+};
+
+
+function driveTickProcessorTest(
+    separateIc, ignoreUnknown, stateFilter, logInput, refOutput) {
+  // TEST_FILE_NAME must be provided by the test runner.
+  assertEquals('string', typeof TEST_FILE_NAME);
+  var pathLen = TEST_FILE_NAME.lastIndexOf('/');
+  if (pathLen == -1) {
+    pathLen = TEST_FILE_NAME.lastIndexOf('\\');
+  }
+  assertTrue(pathLen != -1);
+  var testsPath = TEST_FILE_NAME.substr(0, pathLen + 1);
+  var tp = new TickProcessor(
+      new CppEntriesProviderMock(), separateIc, ignoreUnknown, stateFilter);
+  var pm = new PrintMonitor(testsPath + refOutput);
+  tp.processLogFile(testsPath + logInput);
+  // Hack the file name to avoid dealing with platform specifics.
+  tp.lastLogFileName_ = 'v8.log';
+  tp.printStatistics();
+  pm.finish();
+}
+
+
+(function testProcessing() {
+  var testData = {
+    'Default': [
+      false, false, null,
+      'tickprocessor-test.log', 'tickprocessor-test.default'],
+    'SeparateIc': [
+      true, false, null,
+      'tickprocessor-test.log', 'tickprocessor-test.separate-ic'],
+    'IgnoreUnknown': [
+      false, true, null,
+      'tickprocessor-test.log', 'tickprocessor-test.ignore-unknown'],
+    'GcState': [
+      false, false, TickProcessor.VmStates.GC,
+      'tickprocessor-test.log', 'tickprocessor-test.gc-state']
+  };
+  for (var testName in testData) {
+    print('=== testProcessing-' + testName + ' ===');
+    driveTickProcessorTest.apply(null, testData[testName]);
+  }
+})();
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 760ed4100..13ae29c8e 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -476,12 +476,11 @@ js1_2/Array/array_split_1: FAIL_OK
 js1_5/Array/regress-313153: FAIL_OK
 
 
-# Properties stack, fileName, and lineNumber of Error instances are
+# Properties fileName and lineNumber of Error instances are
 # not supported. Mozilla specific extension.
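+# (The 'stack' property is now exercised by the stack-traces tests added in
+# this commit, which is why it was dropped from the list above.)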
js1_5/Exceptions/errstack-001: FAIL_OK js1_5/Exceptions/regress-257751: FAIL_OK js1_5/Regress/regress-119719: FAIL_OK -js1_5/Regress/regress-139316: FAIL_OK js1_5/Regress/regress-167328: FAIL_OK js1_5/Regress/regress-243869: FAIL_OK diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp index 8815456d7..b11a7ffaa 100644 --- a/deps/v8/tools/gyp/v8.gyp +++ b/deps/v8/tools/gyp/v8.gyp @@ -30,244 +30,8 @@ 'chromium_code': 1, 'msvs_use_common_release': 0, 'gcc_version%': 'unknown', - 'base_source_files': [ - '../../src/arm/assembler-arm-inl.h', - '../../src/arm/assembler-arm.cc', - '../../src/arm/assembler-arm.h', - '../../src/arm/builtins-arm.cc', - '../../src/arm/codegen-arm.cc', - '../../src/arm/codegen-arm.h', - '../../src/arm/constants-arm.h', - '../../src/arm/cpu-arm.cc', - '../../src/arm/debug-arm.cc', - '../../src/arm/disasm-arm.cc', - '../../src/arm/frames-arm.cc', - '../../src/arm/frames-arm.h', - '../../src/arm/ic-arm.cc', - '../../src/arm/jump-target-arm.cc', - '../../src/arm/macro-assembler-arm.cc', - '../../src/arm/macro-assembler-arm.h', - '../../src/arm/regexp-macro-assembler-arm.cc', - '../../src/arm/regexp-macro-assembler-arm.h', - '../../src/arm/register-allocator-arm.cc', - '../../src/arm/simulator-arm.cc', - '../../src/arm/stub-cache-arm.cc', - '../../src/arm/virtual-frame-arm.cc', - '../../src/arm/virtual-frame-arm.h', - '../../src/ia32/assembler-ia32-inl.h', - '../../src/ia32/assembler-ia32.cc', - '../../src/ia32/assembler-ia32.h', - '../../src/ia32/builtins-ia32.cc', - '../../src/ia32/codegen-ia32.cc', - '../../src/ia32/codegen-ia32.h', - '../../src/ia32/cpu-ia32.cc', - '../../src/ia32/debug-ia32.cc', - '../../src/ia32/disasm-ia32.cc', - '../../src/ia32/frames-ia32.cc', - '../../src/ia32/frames-ia32.h', - '../../src/ia32/ic-ia32.cc', - '../../src/ia32/jump-target-ia32.cc', - '../../src/ia32/macro-assembler-ia32.cc', - '../../src/ia32/macro-assembler-ia32.h', - '../../src/ia32/regexp-macro-assembler-ia32.cc', - '../../src/ia32/regexp-macro-assembler-ia32.h', - '../../src/ia32/register-allocator-ia32.cc', - '../../src/ia32/stub-cache-ia32.cc', - '../../src/ia32/virtual-frame-ia32.cc', - '../../src/ia32/virtual-frame-ia32.h', - '../../src/third_party/dtoa/dtoa.c', - '../../src/accessors.cc', - '../../src/accessors.h', - '../../src/allocation.cc', - '../../src/allocation.h', - '../../src/api.cc', - '../../src/api.h', - '../../src/apiutils.h', - '../../src/arguments.h', - '../../src/assembler.cc', - '../../src/assembler.h', - '../../src/ast.cc', - '../../src/ast.h', - '../../src/bootstrapper.cc', - '../../src/bootstrapper.h', - '../../src/builtins.cc', - '../../src/builtins.h', - '../../src/bytecodes-irregexp.h', - '../../src/char-predicates-inl.h', - '../../src/char-predicates.h', - '../../src/checks.cc', - '../../src/checks.h', - '../../src/code-stubs.cc', - '../../src/code-stubs.h', - '../../src/code.h', - '../../src/codegen-inl.h', - '../../src/codegen.cc', - '../../src/codegen.h', - '../../src/compilation-cache.cc', - '../../src/compilation-cache.h', - '../../src/compiler.cc', - '../../src/compiler.h', - '../../src/contexts.cc', - '../../src/contexts.h', - '../../src/conversions-inl.h', - '../../src/conversions.cc', - '../../src/conversions.h', - '../../src/counters.cc', - '../../src/counters.h', - '../../src/cpu.h', - '../../src/dateparser.cc', - '../../src/dateparser.h', - '../../src/dateparser-inl.h', - '../../src/debug.cc', - '../../src/debug.h', - '../../src/debug-agent.cc', - '../../src/debug-agent.h', - '../../src/disasm.h', - '../../src/disassembler.cc', 
- '../../src/disassembler.h', - '../../src/dtoa-config.c', - '../../src/execution.cc', - '../../src/execution.h', - '../../src/factory.cc', - '../../src/factory.h', - '../../src/flag-definitions.h', - '../../src/flags.cc', - '../../src/flags.h', - '../../src/frames-inl.h', - '../../src/frames.cc', - '../../src/frames.h', - '../../src/frame-element.h', - '../../src/func-name-inferrer.cc', - '../../src/func-name-inferrer.h', - '../../src/global-handles.cc', - '../../src/global-handles.h', - '../../src/globals.h', - '../../src/handles-inl.h', - '../../src/handles.cc', - '../../src/handles.h', - '../../src/hashmap.cc', - '../../src/hashmap.h', - '../../src/heap-inl.h', - '../../src/heap.cc', - '../../src/heap.h', - '../../src/ic-inl.h', - '../../src/ic.cc', - '../../src/ic.h', - '../../src/interpreter-irregexp.cc', - '../../src/interpreter-irregexp.h', - '../../src/jump-target.cc', - '../../src/jump-target.h', - '../../src/jump-target-inl.h', - '../../src/jsregexp-inl.h', - '../../src/jsregexp.cc', - '../../src/jsregexp.h', - '../../src/list-inl.h', - '../../src/list.h', - '../../src/log.cc', - '../../src/log-inl.h', - '../../src/log.h', - '../../src/log-utils.cc', - '../../src/log-utils.h', - '../../src/macro-assembler.h', - '../../src/mark-compact.cc', - '../../src/mark-compact.h', - '../../src/memory.h', - '../../src/messages.cc', - '../../src/messages.h', - '../../src/natives.h', - '../../src/objects-debug.cc', - '../../src/objects-inl.h', - '../../src/objects.cc', - '../../src/objects.h', - '../../src/oprofile-agent.h', - '../../src/oprofile-agent.cc', - '../../src/parser.cc', - '../../src/parser.h', - '../../src/platform-freebsd.cc', - '../../src/platform-linux.cc', - '../../src/platform-macos.cc', - '../../src/platform-nullos.cc', - '../../src/platform-posix.cc', - '../../src/platform-win32.cc', - '../../src/platform.h', - '../../src/prettyprinter.cc', - '../../src/prettyprinter.h', - '../../src/property.cc', - '../../src/property.h', - '../../src/regexp-macro-assembler-irregexp-inl.h', - '../../src/regexp-macro-assembler-irregexp.cc', - '../../src/regexp-macro-assembler-irregexp.h', - '../../src/regexp-macro-assembler-tracer.cc', - '../../src/regexp-macro-assembler-tracer.h', - '../../src/regexp-macro-assembler.cc', - '../../src/regexp-macro-assembler.h', - '../../src/regexp-stack.cc', - '../../src/regexp-stack.h', - '../../src/register-allocator.h', - '../../src/register-allocator-inl.h', - '../../src/register-allocator.cc', - '../../src/rewriter.cc', - '../../src/rewriter.h', - '../../src/runtime.cc', - '../../src/runtime.h', - '../../src/scanner.cc', - '../../src/scanner.h', - '../../src/scopeinfo.cc', - '../../src/scopeinfo.h', - '../../src/scopes.cc', - '../../src/scopes.h', - '../../src/serialize.cc', - '../../src/serialize.h', - '../../src/shell.h', - '../../src/smart-pointer.h', - '../../src/snapshot-common.cc', - '../../src/snapshot.h', - '../../src/spaces-inl.h', - '../../src/spaces.cc', - '../../src/spaces.h', - '../../src/string-stream.cc', - '../../src/string-stream.h', - '../../src/stub-cache.cc', - '../../src/stub-cache.h', - '../../src/token.cc', - '../../src/token.h', - '../../src/top.cc', - '../../src/top.h', - '../../src/unicode-inl.h', - '../../src/unicode.cc', - '../../src/unicode.h', - '../../src/usage-analyzer.cc', - '../../src/usage-analyzer.h', - '../../src/utils.cc', - '../../src/utils.h', - '../../src/v8-counters.cc', - '../../src/v8-counters.h', - '../../src/v8.cc', - '../../src/v8.h', - '../../src/v8threads.cc', - '../../src/v8threads.h', - 
'../../src/variables.cc', - '../../src/variables.h', - '../../src/version.cc', - '../../src/version.h', - '../../src/virtual-frame.h', - '../../src/virtual-frame.cc', - '../../src/zone-inl.h', - '../../src/zone.cc', - '../../src/zone.h', - ], - 'not_base_source_files': [ - # These files are #included by others and are not meant to be compiled - # directly. - '../../src/third_party/dtoa/dtoa.c', - ], - 'd8_source_files': [ - '../../src/d8-debug.cc', - '../../src/d8-posix.cc', - '../../src/d8-readline.cc', - '../../src/d8-windows.cc', - '../../src/d8.cc', - ], + 'target_arch%': 'ia32', + 'v8_use_snapshot%': 'true', }, 'includes': [ '../../../build/common.gypi', @@ -276,6 +40,19 @@ 'defines': [ 'ENABLE_LOGGING_AND_PROFILING', ], + 'conditions': [ + ['target_arch=="arm"', { + 'defines': [ + 'V8_TARGET_ARCH_ARM', + ], + }], + ['target_arch=="ia32"', { + 'defines': [ + 'V8_TARGET_ARCH_IA32', + 'V8_NATIVE_REGEXP', + ], + }], + ], 'configurations': { 'Debug': { 'defines': [ @@ -306,10 +83,10 @@ 'conditions': [ [ 'gcc_version=="44"', { 'cflags': [ - # Avoid gcc 4.4 strict aliasing issues in dtoa.c - '-fno-strict-aliasing', - # Avoid crashes with gcc 4.4 in the v8 test suite. - '-fno-tree-vrp', + # Avoid gcc 4.4 strict aliasing issues in dtoa.c + '-fno-strict-aliasing', + # Avoid crashes with gcc 4.4 in the v8 test suite. + '-fno-tree-vrp', ], }], ], @@ -350,117 +127,328 @@ }, }, 'targets': [ - # Targets that apply to any architecture. { - 'target_name': 'js2c', + 'target_name': 'v8', 'type': 'none', - 'variables': { - 'library_files': [ - '../../src/runtime.js', - '../../src/v8natives.js', - '../../src/array.js', - '../../src/string.js', - '../../src/uri.js', - '../../src/math.js', - '../../src/messages.js', - '../../src/apinatives.js', - '../../src/debug-delay.js', - '../../src/mirror-delay.js', - '../../src/date-delay.js', - '../../src/json-delay.js', - '../../src/regexp-delay.js', - '../../src/macros.py', - ], - }, - 'actions': [ - { - 'action_name': 'js2c', - 'inputs': [ - '../../tools/js2c.py', - '<@(library_files)', - ], - 'outputs': [ - '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', - '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc', - ], - 'action': ['python', '../../tools/js2c.py', '<@(_outputs)', 'CORE', '<@(library_files)'], + 'conditions': [ + ['v8_use_snapshot=="true"', { + 'dependencies': ['v8_snapshot'], }, + { + 'dependencies': ['v8_nosnapshot'], + }], ], - }, - { - 'target_name': 'd8_js2c', - 'type': 'none', - 'variables': { - 'library_files': [ - '../../src/d8.js', - '../../src/macros.py', + 'direct_dependent_settings': { + 'include_dirs': [ + '../../include', ], }, + }, + { + 'target_name': 'v8_snapshot', + 'type': '<(library)', + 'dependencies': [ + 'mksnapshot', + 'js2c', + 'v8_base', + ], + 'include_dirs+': [ + '../../src', + ], + 'sources': [ + '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc', + '<(INTERMEDIATE_DIR)/snapshot.cc', + ], 'actions': [ { - 'action_name': 'js2c', + 'action_name': 'run_mksnapshot', 'inputs': [ - '../../tools/js2c.py', - '<@(library_files)', - ], - 'extra_inputs': [ + '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)', ], 'outputs': [ - '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', - '<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc', + '<(INTERMEDIATE_DIR)/snapshot.cc', ], - 'action': ['python', '../../tools/js2c.py', '<@(_outputs)', 'D8', '<@(library_files)'], + 'action': ['<@(_inputs)', '<@(_outputs)'], }, ], }, - - # Targets to build v8 for the native architecture (ia32). 
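+    # The old base_source_files/d8_source_files variables are gone; the
+    # targets below list their own sources, and per-architecture files are
+    # selected through 'target_arch' conditions.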
{ - 'target_name': 'v8_base', + 'target_name': 'v8_nosnapshot', 'type': '<(library)', - 'defines': [ - 'V8_TARGET_ARCH_IA32' + 'dependencies': [ + 'js2c', + 'v8_base', ], 'include_dirs+': [ '../../src', - '../../src/ia32', ], - 'msvs_guid': 'EC8B7909-62AF-470D-A75D-E1D89C837142', 'sources': [ - '<@(base_source_files)', + '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', + '../../src/snapshot-empty.cc', ], - 'sources!': [ - '<@(not_base_source_files)', + }, + { + 'target_name': 'v8_base', + 'type': '<(library)', + 'include_dirs+': [ + '../../src', ], - 'sources/': [ - ['exclude', '-arm\\.cc$'], - ['exclude', 'src/platform-.*\\.cc$' ], + 'sources': [ + '../../src/accessors.cc', + '../../src/accessors.h', + '../../src/allocation.cc', + '../../src/allocation.h', + '../../src/api.cc', + '../../src/api.h', + '../../src/apiutils.h', + '../../src/arguments.h', + '../../src/assembler.cc', + '../../src/assembler.h', + '../../src/ast.cc', + '../../src/ast.h', + '../../src/bootstrapper.cc', + '../../src/bootstrapper.h', + '../../src/builtins.cc', + '../../src/builtins.h', + '../../src/bytecodes-irregexp.h', + '../../src/char-predicates-inl.h', + '../../src/char-predicates.h', + '../../src/checks.cc', + '../../src/checks.h', + '../../src/code-stubs.cc', + '../../src/code-stubs.h', + '../../src/code.h', + '../../src/codegen-inl.h', + '../../src/codegen.cc', + '../../src/codegen.h', + '../../src/compilation-cache.cc', + '../../src/compilation-cache.h', + '../../src/compiler.cc', + '../../src/compiler.h', + '../../src/contexts.cc', + '../../src/contexts.h', + '../../src/conversions-inl.h', + '../../src/conversions.cc', + '../../src/conversions.h', + '../../src/counters.cc', + '../../src/counters.h', + '../../src/cpu.h', + '../../src/dateparser.cc', + '../../src/dateparser.h', + '../../src/dateparser-inl.h', + '../../src/debug.cc', + '../../src/debug.h', + '../../src/debug-agent.cc', + '../../src/debug-agent.h', + '../../src/disasm.h', + '../../src/disassembler.cc', + '../../src/disassembler.h', + '../../src/dtoa-config.c', + '../../src/execution.cc', + '../../src/execution.h', + '../../src/factory.cc', + '../../src/factory.h', + '../../src/flag-definitions.h', + '../../src/flags.cc', + '../../src/flags.h', + '../../src/frames-inl.h', + '../../src/frames.cc', + '../../src/frames.h', + '../../src/frame-element.h', + '../../src/func-name-inferrer.cc', + '../../src/func-name-inferrer.h', + '../../src/global-handles.cc', + '../../src/global-handles.h', + '../../src/globals.h', + '../../src/handles-inl.h', + '../../src/handles.cc', + '../../src/handles.h', + '../../src/hashmap.cc', + '../../src/hashmap.h', + '../../src/heap-inl.h', + '../../src/heap.cc', + '../../src/heap.h', + '../../src/ic-inl.h', + '../../src/ic.cc', + '../../src/ic.h', + '../../src/interpreter-irregexp.cc', + '../../src/interpreter-irregexp.h', + '../../src/jump-target.cc', + '../../src/jump-target.h', + '../../src/jump-target-inl.h', + '../../src/jsregexp-inl.h', + '../../src/jsregexp.cc', + '../../src/jsregexp.h', + '../../src/list-inl.h', + '../../src/list.h', + '../../src/log.cc', + '../../src/log-inl.h', + '../../src/log.h', + '../../src/log-utils.cc', + '../../src/log-utils.h', + '../../src/macro-assembler.h', + '../../src/mark-compact.cc', + '../../src/mark-compact.h', + '../../src/memory.h', + '../../src/messages.cc', + '../../src/messages.h', + '../../src/natives.h', + '../../src/objects-debug.cc', + '../../src/objects-inl.h', + '../../src/objects.cc', + '../../src/objects.h', + '../../src/oprofile-agent.h', + 
'../../src/oprofile-agent.cc', + '../../src/parser.cc', + '../../src/parser.h', + '../../src/platform.h', + '../../src/prettyprinter.cc', + '../../src/prettyprinter.h', + '../../src/property.cc', + '../../src/property.h', + '../../src/regexp-macro-assembler-irregexp-inl.h', + '../../src/regexp-macro-assembler-irregexp.cc', + '../../src/regexp-macro-assembler-irregexp.h', + '../../src/regexp-macro-assembler-tracer.cc', + '../../src/regexp-macro-assembler-tracer.h', + '../../src/regexp-macro-assembler.cc', + '../../src/regexp-macro-assembler.h', + '../../src/regexp-stack.cc', + '../../src/regexp-stack.h', + '../../src/register-allocator.h', + '../../src/register-allocator-inl.h', + '../../src/register-allocator.cc', + '../../src/rewriter.cc', + '../../src/rewriter.h', + '../../src/runtime.cc', + '../../src/runtime.h', + '../../src/scanner.cc', + '../../src/scanner.h', + '../../src/scopeinfo.cc', + '../../src/scopeinfo.h', + '../../src/scopes.cc', + '../../src/scopes.h', + '../../src/serialize.cc', + '../../src/serialize.h', + '../../src/shell.h', + '../../src/smart-pointer.h', + '../../src/snapshot-common.cc', + '../../src/snapshot.h', + '../../src/spaces-inl.h', + '../../src/spaces.cc', + '../../src/spaces.h', + '../../src/string-stream.cc', + '../../src/string-stream.h', + '../../src/stub-cache.cc', + '../../src/stub-cache.h', + '../../src/token.cc', + '../../src/token.h', + '../../src/top.cc', + '../../src/top.h', + '../../src/unicode-inl.h', + '../../src/unicode.cc', + '../../src/unicode.h', + '../../src/usage-analyzer.cc', + '../../src/usage-analyzer.h', + '../../src/utils.cc', + '../../src/utils.h', + '../../src/v8-counters.cc', + '../../src/v8-counters.h', + '../../src/v8.cc', + '../../src/v8.h', + '../../src/v8threads.cc', + '../../src/v8threads.h', + '../../src/variables.cc', + '../../src/variables.h', + '../../src/version.cc', + '../../src/version.h', + '../../src/virtual-frame.h', + '../../src/virtual-frame.cc', + '../../src/zone-inl.h', + '../../src/zone.cc', + '../../src/zone.h', ], 'conditions': [ - ['OS=="linux"', - { + ['target_arch=="arm"', { + 'include_dirs+': [ + '../../src/arm', + ], + 'sources': [ + '../../src/arm/assembler-arm-inl.h', + '../../src/arm/assembler-arm.cc', + '../../src/arm/assembler-arm.h', + '../../src/arm/builtins-arm.cc', + '../../src/arm/codegen-arm.cc', + '../../src/arm/codegen-arm.h', + '../../src/arm/constants-arm.h', + '../../src/arm/cpu-arm.cc', + '../../src/arm/debug-arm.cc', + '../../src/arm/disasm-arm.cc', + '../../src/arm/frames-arm.cc', + '../../src/arm/frames-arm.h', + '../../src/arm/ic-arm.cc', + '../../src/arm/jump-target-arm.cc', + '../../src/arm/macro-assembler-arm.cc', + '../../src/arm/macro-assembler-arm.h', + '../../src/arm/regexp-macro-assembler-arm.cc', + '../../src/arm/regexp-macro-assembler-arm.h', + '../../src/arm/register-allocator-arm.cc', + '../../src/arm/simulator-arm.cc', + '../../src/arm/stub-cache-arm.cc', + '../../src/arm/virtual-frame-arm.cc', + '../../src/arm/virtual-frame-arm.h', + ], + }], + ['target_arch=="ia32"', { + 'include_dirs+': [ + '../../src/ia32', + ], + 'sources': [ + '../../src/ia32/assembler-ia32-inl.h', + '../../src/ia32/assembler-ia32.cc', + '../../src/ia32/assembler-ia32.h', + '../../src/ia32/builtins-ia32.cc', + '../../src/ia32/codegen-ia32.cc', + '../../src/ia32/codegen-ia32.h', + '../../src/ia32/cpu-ia32.cc', + '../../src/ia32/debug-ia32.cc', + '../../src/ia32/disasm-ia32.cc', + '../../src/ia32/frames-ia32.cc', + '../../src/ia32/frames-ia32.h', + '../../src/ia32/ic-ia32.cc', + 
'../../src/ia32/jump-target-ia32.cc', + '../../src/ia32/macro-assembler-ia32.cc', + '../../src/ia32/macro-assembler-ia32.h', + '../../src/ia32/regexp-macro-assembler-ia32.cc', + '../../src/ia32/regexp-macro-assembler-ia32.h', + '../../src/ia32/register-allocator-ia32.cc', + '../../src/ia32/stub-cache-ia32.cc', + '../../src/ia32/virtual-frame-ia32.cc', + '../../src/ia32/virtual-frame-ia32.h', + ], + }], + ['OS=="linux"', { 'link_settings': { 'libraries': [ # Needed for clock_gettime() used by src/platform-linux.cc. '-lrt', - ], - }, - 'sources/': [ - ['include', 'src/platform-linux\\.cc$'], - ['include', 'src/platform-posix\\.cc$'] - ] + ]}, + 'sources': [ + '../../src/platform-linux.cc', + '../../src/platform-posix.cc' + ], } ], - ['OS=="mac"', - { - 'sources/': [ - ['include', 'src/platform-macos\\.cc$'], - ['include', 'src/platform-posix\\.cc$'] - ] - } + ['OS=="mac"', { + 'sources': [ + '../../src/platform-macos.cc', + '../../src/platform-posix.cc' + ]}, ], ['OS=="win"', { - 'sources/': [['include', 'src/platform-win32\\.cc$']], + 'sources': [ + '../../src/platform-win32.cc', + ], # 4355, 4800 came from common.vsprops # 4018, 4244 were a per file config on dtoa-config.c # TODO: It's probably possible and desirable to stop disabling the @@ -474,209 +462,65 @@ ], }, { - 'target_name': 'v8_nosnapshot', - 'type': '<(library)', - 'defines': [ - 'V8_TARGET_ARCH_IA32' - ], - 'dependencies': [ - 'js2c', - 'v8_base', - ], - 'include_dirs': [ - '../../src', - ], - 'sources': [ - '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', - '../../src/snapshot-empty.cc', - ], - 'export_dependent_settings': [ - 'v8_base', - ], - }, - { - 'target_name': 'mksnapshot', - 'type': 'executable', - 'dependencies': [ - 'v8_nosnapshot', - ], - 'msvs_guid': '865575D0-37E2-405E-8CBA-5F6C485B5A26', - 'sources': [ - '../../src/mksnapshot.cc', - ], - }, - { - 'target_name': 'v8', - 'type': '<(library)', - 'defines': [ - 'V8_TARGET_ARCH_IA32' - ], - 'dependencies': [ - 'js2c', - 'mksnapshot', - 'v8_base', - ], - 'msvs_guid': '21E22961-22BF-4493-BD3A-868F93DA5179', + 'target_name': 'js2c', + 'type': 'none', + 'variables': { + 'library_files': [ + '../../src/runtime.js', + '../../src/v8natives.js', + '../../src/array.js', + '../../src/string.js', + '../../src/uri.js', + '../../src/math.js', + '../../src/messages.js', + '../../src/apinatives.js', + '../../src/debug-delay.js', + '../../src/mirror-delay.js', + '../../src/date-delay.js', + '../../src/json-delay.js', + '../../src/regexp-delay.js', + '../../src/macros.py', + ], + }, 'actions': [ { - 'action_name': 'mksnapshot', + 'action_name': 'js2c', 'inputs': [ - '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)', + '../../tools/js2c.py', + '<@(library_files)', ], 'outputs': [ - '<(INTERMEDIATE_DIR)/snapshot.cc', + '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', + '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc', + ], + 'action': [ + 'python', + '../../tools/js2c.py', + '<@(_outputs)', + 'CORE', + '<@(library_files)' ], - 'action': ['<@(_inputs)', '<@(_outputs)'], }, ], - 'include_dirs': [ - '../../src', - ], - 'sources': [ - '<(SHARED_INTERMEDIATE_DIR)/libraries-empty.cc', - '<(INTERMEDIATE_DIR)/snapshot.cc', - ], - 'direct_dependent_settings': { - 'include_dirs': [ - '../../include', - ], - }, - 'export_dependent_settings': [ - 'v8_base', - ], }, { - 'target_name': 'v8_shell', - 'type': 'executable', - 'defines': [ - 'V8_TARGET_ARCH_IA32' - ], - 'dependencies': [ - 'v8', - ], - 'sources': [ - '../../samples/shell.cc', - ], - 'conditions': [ - [ 'OS=="win"', { - # This 
could be gotten by not setting chromium_code, if that's OK. - 'defines': ['_CRT_SECURE_NO_WARNINGS'], - }], - ], - }, - ], - - 'conditions': [ ['OS=="mac"', { 'targets': [ - # TODO(bradnelson): temporarily disable 'd8' target on Windows while - # we work fix the performance regressions. - # TODO(sgk): temporarily disable 'd8' target on Linux while - # we work out getting the readline library on all the systems. - { - 'target_name': 'd8', + 'target_name': 'mksnapshot', 'type': 'executable', 'dependencies': [ - 'd8_js2c', - 'v8', - ], - 'defines': [ - 'V8_TARGET_ARCH_IA32' - ], - 'include_dirs': [ - '../../src', - ], - 'sources': [ - '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', - '<@(d8_source_files)', - ], - 'conditions': [ - [ 'OS=="linux"', { - 'sources!': [ '../../src/d8-windows.cc' ], - 'link_settings': { 'libraries': [ '-lreadline' ] }, - }], - [ 'OS=="mac"', { - 'sources!': [ '../../src/d8-windows.cc' ], - 'link_settings': { 'libraries': [ - '$(SDKROOT)/usr/lib/libreadline.dylib' - ]}, - }], - [ 'OS=="win"', { - 'sources!': [ '../../src/d8-readline.cc', '../../src/d8-posix.cc' ], - }], - ], - }, - # TODO(sgk): temporarily disable the arm targets on Linux while - # we work out how to refactor the generator and/or add configuration - # settings to the .gyp file to handle building both variants in - # the same output directory. - # - # ARM targets, to test ARM code generation. These use an ARM simulator - # (src/simulator-arm.cc). The ARM targets are not snapshot-enabled. - { - 'target_name': 'v8_arm', - 'type': '<(library)', - 'dependencies': [ - 'js2c', - ], - 'defines': [ - 'V8_TARGET_ARCH_ARM', + 'v8_nosnapshot', ], 'include_dirs+': [ '../../src', - '../../src/arm', ], 'sources': [ - '<@(base_source_files)', - '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', - '../../src/snapshot-empty.cc', - ], - 'sources!': [ - '<@(not_base_source_files)', - ], - 'sources/': [ - ['exclude', '-ia32\\.cc$'], - ['exclude', 'src/platform-.*\\.cc$' ], - ], - 'direct_dependent_settings': { - 'include_dirs': [ - '../../include', - ], - }, - 'conditions': [ - ['OS=="linux"', - { - 'sources/': [ - ['include', 'src/platform-linux\\.cc$'], - ['include', 'src/platform-posix\\.cc$'] - ] - } - ], - ['OS=="mac"', - { - 'sources/': [ - ['include', 'src/platform-macos\\.cc$'], - ['include', 'src/platform-posix\\.cc$'] - ] - } - ], - ['OS=="win"', { - 'sources/': [['include', 'src/platform-win32\\.cc$']], - # 4355, 4800 came from common.vsprops - # 4018, 4244 were a per file config on dtoa-config.c - # TODO: It's probably possible and desirable to stop disabling the - # dtoa-specific warnings by modifying dtoa as was done in Chromium - # r9255. Refer to that revision for details. 
- 'msvs_disabled_warnings': [4355, 4800, 4018, 4244], - }], + '../../src/mksnapshot.cc', ], }, { - 'target_name': 'v8_shell_arm', + 'target_name': 'v8_shell', 'type': 'executable', 'dependencies': [ - 'v8_arm', - ], - 'defines': [ - 'V8_TARGET_ARCH_ARM', + 'v8' ], 'sources': [ '../../samples/shell.cc', @@ -688,55 +532,5 @@ }], ], }, - { - 'target_name': 'd8_arm', - 'type': 'executable', - 'dependencies': [ - 'd8_js2c', - 'v8_arm', - ], - 'defines': [ - 'V8_TARGET_ARCH_ARM', - ], - 'include_dirs': [ - '../../src', - ], - 'sources': [ - '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', - '<@(d8_source_files)', - ], - 'conditions': [ - [ 'OS=="linux"', { - 'sources!': [ '../../src/d8-windows.cc' ], - 'link_settings': { 'libraries': [ '-lreadline' ] }, - }], - [ 'OS=="mac"', { - 'sources!': [ '../../src/d8-windows.cc' ], - 'link_settings': { 'libraries': [ - '$(SDKROOT)/usr/lib/libreadline.dylib' - ]}, - }], - [ 'OS=="win"', { - 'sources!': [ '../../src/d8-readline.cc', '../../src/d8-posix.cc' ], - }], - ], - }, - ]}], # OS != "linux" (temporary, TODO(sgk)) - - - ['OS=="win"', { - 'target_defaults': { - 'defines': [ - '_USE_32BIT_TIME_T', - '_CRT_SECURE_NO_DEPRECATE', - '_CRT_NONSTDC_NO_DEPRECATE', - ], - 'msvs_settings': { - 'VCLinkerTool': { - 'AdditionalOptions': '/IGNORE:4221 /NXCOMPAT', - }, - }, - }, - }], ], } diff --git a/deps/v8/tools/linux-tick-processor b/deps/v8/tools/linux-tick-processor index c5130ff12..ca1c72126 100644 --- a/deps/v8/tools/linux-tick-processor +++ b/deps/v8/tools/linux-tick-processor @@ -20,4 +20,5 @@ fi $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \ $tools_path/csvparser.js $tools_path/consarray.js \ $tools_path/profile.js $tools_path/profile_view.js \ - $tools_path/logreader.js $tools_path/tickprocessor.js -- $@ 2>/dev/null + $tools_path/logreader.js $tools_path/tickprocessor.js \ + $tools_path/tickprocessor-driver.js -- $@ 2>/dev/null diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py index f701ceb77..05eb9fdc0 100755 --- a/deps/v8/tools/test.py +++ b/deps/v8/tools/test.py @@ -356,11 +356,15 @@ class TestCase(object): def RunCommand(self, command): full_command = self.context.processor(command) output = Execute(full_command, self.context, self.context.timeout) + self.Cleanup() return TestOutput(self, full_command, output) def Run(self): return self.RunCommand(self.GetCommand()) + def Cleanup(self): + return + class TestOutput(object): @@ -473,6 +477,13 @@ def PrintError(str): sys.stderr.write('\n') +def CheckedUnlink(name): + try: + os.unlink(name) + except OSError, e: + PrintError("os.unlink() " + str(e)) + + def Execute(args, context, timeout=None): (fd_out, outname) = tempfile.mkstemp() (fd_err, errname) = tempfile.mkstemp() @@ -487,11 +498,6 @@ def Execute(args, context, timeout=None): os.close(fd_err) output = file(outname).read() errors = file(errname).read() - def CheckedUnlink(name): - try: - os.unlink(name) - except OSError, e: - PrintError("os.unlink() " + str(e)) CheckedUnlink(outname) CheckedUnlink(errname) return CommandOutput(exit_code, timed_out, output, errors) diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js new file mode 100644 index 000000000..f7cfd13f0 --- /dev/null +++ b/deps/v8/tools/tickprocessor-driver.js @@ -0,0 +1,49 @@ +// Copyright 2009 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// Tick Processor's code flow. + +function processArguments(args) { + var processor = new ArgumentsProcessor(args); + if (processor.parse()) { + return processor.result(); + } else { + processor.printUsageAndExit(); + } +} + + +var params = processArguments(arguments); +var tickProcessor = new TickProcessor( + params.platform == 'unix' ? new UnixCppEntriesProvider(params.nm) : + new WindowsCppEntriesProvider(), + params.separateIc, + params.ignoreUnknown, + params.stateFilter); +tickProcessor.processLogFile(params.logFileName); +tickProcessor.printStatistics(); diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js index 4afc69f61..c95a4e616 100644 --- a/deps/v8/tools/tickprocessor.js +++ b/deps/v8/tools/tickprocessor.js @@ -288,7 +288,11 @@ TickProcessor.prototype.printStatistics = function() { function padLeft(s, len) { s = s.toString(); if (s.length < len) { - s = (new Array(len - s.length + 1).join(' ')) + s; + var padLength = len - s.length; + if (!(padLength in padLeft)) { + padLeft[padLength] = new Array(padLength + 1).join(' '); + } + s = padLeft[padLength] + s; } return s; }; @@ -511,25 +515,11 @@ WindowsCppEntriesProvider.prototype.unmangleName = function(name) { }; -function padRight(s, len) { - s = s.toString(); - if (s.length < len) { - s = s + (new Array(len - s.length + 1).join(' ')); - } - return s; -}; - +function ArgumentsProcessor(args) { + this.args_ = args; + this.result_ = ArgumentsProcessor.DEFAULTS; -function processArguments(args) { - var result = { - logFileName: 'v8.log', - platform: 'unix', - stateFilter: null, - ignoreUnknown: false, - separateIc: false, - nm: 'nm' - }; - var argsDispatch = { + this.argsDispatch_ = { '-j': ['stateFilter', TickProcessor.VmStates.JS, 'Show only ticks from JS VM state'], '-g': ['stateFilter', TickProcessor.VmStates.GC, @@ -551,63 +541,82 @@ function processArguments(args) { '--nm': ['nm', 'nm', 'Specify the \'nm\' executable to use (e.g. 
--nm=/my_dir/nm)'] }; - argsDispatch['--js'] = argsDispatch['-j']; - argsDispatch['--gc'] = argsDispatch['-g']; - argsDispatch['--compiler'] = argsDispatch['-c']; - argsDispatch['--other'] = argsDispatch['-o']; - argsDispatch['--external'] = argsDispatch['-e']; - - function printUsageAndExit() { - print('Cmdline args: [options] [log-file-name]\n' + - 'Default log file name is "v8.log".\n'); - print('Options:'); - for (var arg in argsDispatch) { - var synonims = [arg]; - var dispatch = argsDispatch[arg]; - for (var synArg in argsDispatch) { - if (arg !== synArg && dispatch === argsDispatch[synArg]) { - synonims.push(synArg); - delete argsDispatch[synArg]; - } - } - print(' ' + padRight(synonims.join(', '), 20) + dispatch[2]); - } - quit(2); - } + this.argsDispatch_['--js'] = this.argsDispatch_['-j']; + this.argsDispatch_['--gc'] = this.argsDispatch_['-g']; + this.argsDispatch_['--compiler'] = this.argsDispatch_['-c']; + this.argsDispatch_['--other'] = this.argsDispatch_['-o']; + this.argsDispatch_['--external'] = this.argsDispatch_['-e']; +}; + - while (args.length) { - var arg = args[0]; +ArgumentsProcessor.DEFAULTS = { + logFileName: 'v8.log', + platform: 'unix', + stateFilter: null, + ignoreUnknown: false, + separateIc: false, + nm: 'nm' +}; + + +ArgumentsProcessor.prototype.parse = function() { + while (this.args_.length) { + var arg = this.args_[0]; if (arg.charAt(0) != '-') { break; } - args.shift(); + this.args_.shift(); var userValue = null; var eqPos = arg.indexOf('='); if (eqPos != -1) { userValue = arg.substr(eqPos + 1); arg = arg.substr(0, eqPos); } - if (arg in argsDispatch) { - var dispatch = argsDispatch[arg]; - result[dispatch[0]] = userValue == null ? dispatch[1] : userValue; + if (arg in this.argsDispatch_) { + var dispatch = this.argsDispatch_[arg]; + this.result_[dispatch[0]] = userValue == null ? dispatch[1] : userValue; } else { - printUsageAndExit(); + return false; } } - if (args.length >= 1) { - result.logFileName = args.shift(); + if (this.args_.length >= 1) { + this.result_.logFileName = this.args_.shift(); } - return result; + return true; }; -var params = processArguments(arguments); -var tickProcessor = new TickProcessor( - params.platform == 'unix' ? 
new UnixCppEntriesProvider(params.nm) : - new WindowsCppEntriesProvider(), - params.separateIc, - params.ignoreUnknown, - params.stateFilter); -tickProcessor.processLogFile(params.logFileName); -tickProcessor.printStatistics(); +ArgumentsProcessor.prototype.result = function() { + return this.result_; +}; + + +ArgumentsProcessor.prototype.printUsageAndExit = function() { + + function padRight(s, len) { + s = s.toString(); + if (s.length < len) { + s = s + (new Array(len - s.length + 1).join(' ')); + } + return s; + } + + print('Cmdline args: [options] [log-file-name]\n' + + 'Default log file name is "' + + ArgumentsProcessor.DEFAULTS.logFileName + '".\n'); + print('Options:'); + for (var arg in this.argsDispatch_) { + var synonims = [arg]; + var dispatch = this.argsDispatch_[arg]; + for (var synArg in this.argsDispatch_) { + if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) { + synonims.push(synArg); + delete this.argsDispatch_[synArg]; + } + } + print(' ' + padRight(synonims.join(', '), 20) + dispatch[2]); + } + quit(2); +}; + diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj index 6e3d2765e..368ba3f3e 100755 --- a/deps/v8/tools/v8.xcodeproj/project.pbxproj +++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj @@ -1449,6 +1449,7 @@ GCC_PREPROCESSOR_DEFINITIONS = ( "$(GCC_PREPROCESSOR_DEFINITIONS)", V8_TARGET_ARCH_IA32, + V8_NATIVE_REGEXP, DEBUG, ); HEADER_SEARCH_PATHS = ../src; @@ -1462,6 +1463,7 @@ GCC_PREPROCESSOR_DEFINITIONS = ( "$(GCC_PREPROCESSOR_DEFINITIONS)", V8_TARGET_ARCH_IA32, + V8_NATIVE_REGEXP, NDEBUG, ); HEADER_SEARCH_PATHS = ../src; @@ -1477,6 +1479,7 @@ "$(GCC_PREPROCESSOR_DEFINITIONS)", ENABLE_DISASSEMBLER, V8_TARGET_ARCH_IA32, + V8_NATIVE_REGEXP, ENABLE_LOGGING_AND_PROFILING, ); HEADER_SEARCH_PATHS = ../src; @@ -1492,6 +1495,7 @@ GCC_PREPROCESSOR_DEFINITIONS = ( "$(GCC_PREPROCESSOR_DEFINITIONS)", V8_TARGET_ARCH_IA32, + V8_NATIVE_REGEXP, NDEBUG, ); HEADER_SEARCH_PATHS = ../src; diff --git a/deps/v8/tools/visual_studio/ia32.vsprops b/deps/v8/tools/visual_studio/ia32.vsprops index fda6c32a8..aff08716f 100644 --- a/deps/v8/tools/visual_studio/ia32.vsprops +++ b/deps/v8/tools/visual_studio/ia32.vsprops @@ -6,6 +6,6 @@ > <Tool Name="VCCLCompilerTool" - PreprocessorDefinitions="V8_TARGET_ARCH_IA32" + PreprocessorDefinitions="V8_TARGET_ARCH_IA32;V8_NATIVE_REGEXP" /> </VisualStudioPropertySheet> diff --git a/deps/v8/tools/windows-tick-processor.bat b/deps/v8/tools/windows-tick-processor.bat index 67cbe98d7..6743f68b3 100644 --- a/deps/v8/tools/windows-tick-processor.bat +++ b/deps/v8/tools/windows-tick-processor.bat @@ -2,4 +2,4 @@ SET tools_dir=%~dp0 -%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js -- --windows %* +%tools_dir%..\d8 %tools_dir%splaytree.js %tools_dir%codemap.js %tools_dir%csvparser.js %tools_dir%consarray.js %tools_dir%profile.js %tools_dir%profile_view.js %tools_dir%logreader.js %tools_dir%tickprocessor.js %tools_dir%tickprocessor-driver.js -- --windows %* |
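
For reference, a minimal profiling session with the updated scripts (d8 binary
location and script name assumed; the flags shown are among those defined in
ArgumentsProcessor above) might look like:

  # Generate v8.log with the sampling profiler enabled.
  ./d8 --prof my-script.js
  # Post-process the log; arguments after '--' reach tickprocessor-driver.js.
  tools/linux-tick-processor --separate-ic v8.log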