path: root/src/jit
author    Carol Eidt <carol.eidt@microsoft.com>    2017-06-07 17:20:38 -0700
committer GitHub <noreply@github.com>              2017-06-07 17:20:38 -0700
commit    1a6c1d1b4a6abfd7a37dd7fd4a6f22dfce4de17b (patch)
tree      21a867150e8bb521ff803cc027cea866314c5d27 /src/jit
parent    738945cfaf69b0f37d3020944936fe4ad0eef127 (diff)
parent    2011e0b3ee5588de94f29ac5c32bdb638ef83652 (diff)
Merge pull request #12087 from sdmaclea/PR-JIT-AcquireRelease-SimpleCases
[Arm64] Pr jit acquire release simple cases
Diffstat (limited to 'src/jit')
-rw-r--r--  src/jit/codegenarm64.cpp    45
-rw-r--r--  src/jit/codegenarmarch.cpp  54
-rw-r--r--  src/jit/emitarm64.cpp        4
-rw-r--r--  src/jit/importer.cpp         8
4 files changed, 91 insertions, 20 deletions
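
In short, this merge teaches the ARM64 backend to use the ARMv8 acquire/release instructions for simple volatile accesses: a volatile load becomes ldarb/ldarh/ldar (load-acquire) instead of a plain load followed by a barrier, and a volatile store becomes stlrb/stlrh/stlr (store-release) instead of a full barrier followed by a plain store. The C++ sketch below is not part of the change; it is only an analogue of the ordering contract, assuming the usual ARMv8 lowering in which an acquire load maps to ldar and a release store to stlr.

    #include <atomic>
    #include <cstdint>

    // Analogue only, not JIT code: the ordering the new code paths provide.
    std::atomic<uint32_t> g_flag{0};

    uint32_t volatileRead()               // volatile GT_IND on ARM64 after this change
    {
        return g_flag.load(std::memory_order_acquire);   // typically lowers to ldar
    }

    void volatileWrite(uint32_t v)        // volatile GT_STOREIND on ARM64 after this change
    {
        g_flag.store(v, std::memory_order_release);      // typically lowers to stlr
    }
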
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 9e27ec0100..ccc86faca0 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -3206,10 +3206,12 @@ void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
- GenTree* data = tree->Data();
- GenTree* addr = tree->Addr();
- var_types targetType = tree->TypeGet();
- emitter* emit = getEmitter();
+ GenTree* data = tree->Data();
+ GenTree* addr = tree->Addr();
+ var_types targetType = tree->TypeGet();
+ emitter* emit = getEmitter();
+ emitAttr attr = emitTypeSize(tree);
+ instruction ins = ins_Store(targetType);
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
@@ -3282,13 +3284,42 @@ void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
dataReg = data->gtRegNum;
}
+ assert((attr != EA_1BYTE) || !(tree->gtFlags & GTF_IND_UNALIGNED));
+
if (tree->gtFlags & GTF_IND_VOLATILE)
{
- // issue a full memory barrier a before volatile StInd
- instGen_MemoryBarrier();
+ bool useStoreRelease =
+ genIsValidIntReg(dataReg) && !addr->isContained() && !(tree->gtFlags & GTF_IND_UNALIGNED);
+
+ if (useStoreRelease)
+ {
+ switch (EA_SIZE(attr))
+ {
+ case EA_1BYTE:
+ assert(ins == INS_strb);
+ ins = INS_stlrb;
+ break;
+ case EA_2BYTE:
+ assert(ins == INS_strh);
+ ins = INS_stlrh;
+ break;
+ case EA_4BYTE:
+ case EA_8BYTE:
+ assert(ins == INS_str);
+ ins = INS_stlr;
+ break;
+ default:
+ assert(false); // We should not get here
+ }
+ }
+ else
+ {
+ // issue a full memory barrier before a volatile StInd
+ instGen_MemoryBarrier();
+ }
}
- emit->emitInsLoadStoreOp(ins_Store(targetType), emitTypeSize(tree), dataReg, tree);
+ emit->emitInsLoadStoreOp(ins, attr, dataReg, tree);
}
}
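
For a simple aligned volatile store from an integer register, the hunk above collapses the old barrier-plus-store sequence into a single store-release. A condensed stand-alone restatement follows (the helper name is hypothetical; in the JIT this logic is inline in genCodeForStoreInd, and the JIT types and enums are used as in the diff): the store-release is only chosen when the data sits in an integer register, the address is a plain non-contained base register, and the access is naturally aligned, because stlr/stlrh/stlrb have no FP/SIMD or offset-addressing forms and fault on unaligned addresses.

    // Stand-alone sketch, not JIT code; 'instruction', 'emitAttr', EA_SIZE etc. as in the diff.
    static instruction chooseVolatileStoreIns(instruction ins, emitAttr attr,
                                              bool dataInIntReg, bool addrContained,
                                              bool unaligned, bool* needBarrier)
    {
        bool useStoreRelease = dataInIntReg && !addrContained && !unaligned;
        *needBarrier         = !useStoreRelease;   // otherwise keep the barrier + plain store path
        if (!useStoreRelease)
            return ins;

        switch (EA_SIZE(attr))
        {
            case EA_1BYTE: return INS_stlrb;       // strb -> stlrb
            case EA_2BYTE: return INS_stlrh;       // strh -> stlrh
            case EA_4BYTE:
            case EA_8BYTE: return INS_stlr;        // str  -> stlr
            default:       assert(false); return ins;
        }
    }
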
diff --git a/src/jit/codegenarmarch.cpp b/src/jit/codegenarmarch.cpp
index c2bc8912c7..dc0503a34c 100644
--- a/src/jit/codegenarmarch.cpp
+++ b/src/jit/codegenarmarch.cpp
@@ -1338,24 +1338,62 @@ void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
- var_types targetType = tree->TypeGet();
- regNumber targetReg = tree->gtRegNum;
- emitter* emit = getEmitter();
+ var_types targetType = tree->TypeGet();
+ regNumber targetReg = tree->gtRegNum;
+ emitter* emit = getEmitter();
+ emitAttr attr = emitTypeSize(tree);
+ instruction ins = ins_Load(targetType);
- genConsumeAddress(tree->Addr());
- emit->emitInsLoadStoreOp(ins_Load(targetType), emitTypeSize(tree), targetReg, tree);
- genProduceReg(tree);
+ assert((attr != EA_1BYTE) || !(tree->gtFlags & GTF_IND_UNALIGNED));
+ genConsumeAddress(tree->Addr());
if (tree->gtFlags & GTF_IND_VOLATILE)
{
#ifdef _TARGET_ARM64_
- // issue a INS_BARRIER_LD after a volatile LdInd operation
- instGen_MemoryBarrier(INS_BARRIER_LD);
+ GenTree* addr = tree->Addr();
+ bool useLoadAcquire = genIsValidIntReg(targetReg) && !addr->isContained() &&
+ (varTypeIsUnsigned(targetType) || varTypeIsI(targetType)) &&
+ !(tree->gtFlags & GTF_IND_UNALIGNED);
+
+ if (useLoadAcquire)
+ {
+ switch (EA_SIZE(attr))
+ {
+ case EA_1BYTE:
+ assert(ins == INS_ldrb);
+ ins = INS_ldarb;
+ break;
+ case EA_2BYTE:
+ assert(ins == INS_ldrh);
+ ins = INS_ldarh;
+ break;
+ case EA_4BYTE:
+ case EA_8BYTE:
+ assert(ins == INS_ldr);
+ ins = INS_ldar;
+ break;
+ default:
+ assert(false); // We should not get here
+ }
+ }
+
+ emit->emitInsLoadStoreOp(ins, attr, targetReg, tree);
+
+ if (!useLoadAcquire) // issue a INS_BARRIER_OSHLD after a volatile LdInd operation
+ instGen_MemoryBarrier(INS_BARRIER_OSHLD);
#else
+ emit->emitInsLoadStoreOp(ins, attr, targetReg, tree);
+
// issue a full memory barrier after a volatile LdInd operation
instGen_MemoryBarrier();
#endif // _TARGET_ARM64_
}
+ else
+ {
+ emit->emitInsLoadStoreOp(ins, attr, targetReg, tree);
+ }
+
+ genProduceReg(tree);
}
// Generate code for a CpBlk node by the means of the VM memcpy helper call
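
The load side mirrors the store side, with one extra condition: ldar/ldarh/ldarb zero-extend and the base ISA has no sign-extending acquire loads, so the acquire form is only used for unsigned or pointer-sized types; signed small types keep ldrsb/ldrsh followed by the load barrier. A stand-alone restatement of the useLoadAcquire test (helper name hypothetical, JIT predicates as in the diff):

    // Sketch only, not JIT code.
    static bool canUseLoadAcquire(bool targetIsIntReg, bool addrContained,
                                  var_types type, bool unaligned)
    {
        return targetIsIntReg                                 // ldar has no FP/SIMD register form
            && !addrContained                                 // ldar takes a bare base register, no offset
            && (varTypeIsUnsigned(type) || varTypeIsI(type))  // ldar zero-extends; no ldarsb/ldarsh
            && !unaligned;                                    // ldar faults on unaligned addresses
    }
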
diff --git a/src/jit/emitarm64.cpp b/src/jit/emitarm64.cpp
index e2dfbe6981..9943dc293e 100644
--- a/src/jit/emitarm64.cpp
+++ b/src/jit/emitarm64.cpp
@@ -7391,7 +7391,9 @@ void emitter::emitIns_Call(EmitCallType callType,
/*static*/ emitter::code_t emitter::insEncodeDatasizeLS(emitter::code_t code, emitAttr size)
{
- if (code & 0x00800000) // Is this a sign-extending opcode? (i.e. ldrsw, ldrsh, ldrsb)
+ bool exclusive = ((code & 0x35000000) == 0);
+
+ if ((code & 0x00800000) && !exclusive) // Is this a sign-extending opcode? (i.e. ldrsw, ldrsh, ldrsb)
{
assert((size == EA_4BYTE) || (size == EA_8BYTE));
if ((code & 0x80000000) == 0) // Is it a ldrsh or ldrsb and not ldrsw ?
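
The emitter tweak keeps the new opcodes out of the sign-extend handling in insEncodeDatasizeLS. Per the A64 encoding tables (stated here as background, not copied from the emitter), ldar/stlr and their byte/halfword forms sit in the load/store-exclusive class, where bits 29:28, 26 and 24 are all zero and bit 23 is an ordering bit, whereas in the ordinary ldr/str register forms bits 29:28 are 11 and a set bit 23 marks ldrsb/ldrsh/ldrsw. A hedged restatement of the new test:

    // Sketch only; helper names hypothetical.
    static bool isExclusiveClassEncoding(unsigned code)
    {
        // zero in bits 29:28, 26 and 24: ldar/ldarh/ldarb, stlr/stlrh/stlrb
        // and the ldxr/stxr family
        return (code & 0x35000000) == 0;
    }

    static bool isSignExtendingLoad(unsigned code)
    {
        // bit 23 means ldrsb/ldrsh/ldrsw only outside the exclusive class;
        // inside it, bit 23 is set for the acquire/release (non-exclusive) forms
        return ((code & 0x00800000) != 0) && !isExclusiveClassEncoding(code);
    }
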
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 4f5aec0ff4..356480850f 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -12261,7 +12261,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op1->gtFlags |= GTF_IND_VOLATILE;
}
- if (prefixFlags & PREFIX_UNALIGNED)
+ if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
@@ -12358,7 +12358,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op1->gtFlags |= GTF_IND_VOLATILE;
}
- if (prefixFlags & PREFIX_UNALIGNED)
+ if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;
@@ -13388,7 +13388,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
}
}
- if (prefixFlags & PREFIX_UNALIGNED)
+ if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
if (!usesHelper)
{
@@ -13631,7 +13631,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;
}
- if (prefixFlags & PREFIX_UNALIGNED)
+ if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
{
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
op1->gtFlags |= GTF_IND_UNALIGNED;
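
The importer half supports the codegen paths above: a 1-byte access is always naturally aligned, so the IL 'unaligned.' prefix carries no information for byte-sized accesses and GTF_IND_UNALIGNED is no longer set for them, which keeps the ldarb/stlrb paths available. A stand-alone restatement of the guard the four hunks now share (helper name hypothetical):

    // Sketch only, not JIT code.
    static bool needsUnalignedFlag(unsigned prefixFlags, var_types lclTyp)
    {
        // a byte access cannot be misaligned, so the flag would only pessimize it
        return ((prefixFlags & PREFIX_UNALIGNED) != 0) && !varTypeIsByte(lclTyp);
    }
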