Diffstat (limited to 'core/arch/arm/kernel/spin_lock_a64.S')
-rw-r--r--  core/arch/arm/kernel/spin_lock_a64.S | 89
1 file changed, 89 insertions, 0 deletions
diff --git a/core/arch/arm/kernel/spin_lock_a64.S b/core/arch/arm/kernel/spin_lock_a64.S
new file mode 100644
index 0000000..97fce42
--- /dev/null
+++ b/core/arch/arm/kernel/spin_lock_a64.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of ARM nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm.S>
+#include <kernel/spinlock.h>
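+
+/*
+ * Spinlock primitives for AArch64, built on the exclusive-access
+ * instructions (ldaxr/stxr) and the event mechanism (sevl/wfe).
+ * They implement the lock API declared in <kernel/spinlock.h>.
+ */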
+
+/* void __cpu_spin_lock(unsigned int *lock); */
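+/*
+ * Spin until the lock word reads as zero, then claim it with an
+ * exclusive store. While the lock is held the loop parks in wfe:
+ * the unlocking store to the monitored lock word generates the
+ * wakeup event, so waiting burns little power.
+ */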
+FUNC __cpu_spin_lock , :
+ mov w2, #SPINLOCK_LOCK
+ sevl                           /* set local event so the first wfe falls through */
+l1: wfe                         /* wait for an event while the lock is held */
+l2: ldaxr w1, [x0]              /* load-acquire; marks the lock word exclusive */
+ cbnz w1, l1                    /* non-zero: still locked, back to wfe */
+ stxr w1, w2, [x0]              /* try to claim the lock; w1 = 0 on success */
+ cbnz w1, l2                    /* exclusive reservation lost: re-read and retry */
+ ret
+END_FUNC __cpu_spin_lock
+
+/* unsigned int __cpu_spin_trylock(unsigned int *lock); */
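+/*
+ * Make a single attempt to take the lock. Returns 0 if the lock
+ * was acquired, or the non-zero value read from the lock word if
+ * it was already held. Only a lost exclusive reservation is
+ * retried, never a held lock.
+ */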
+FUNC __cpu_spin_trylock , :
+ mov x1, x0                     /* free w0 for the return value */
+ mov w2, #SPINLOCK_LOCK
+.loop: ldaxr w0, [x1]           /* load-acquire the current lock value */
+ cbnz w0, .cpu_spin_trylock_out /* already held: return the non-zero value */
+ stxr w0, w2, [x1]              /* w0 = 0 on success, 1 on failure */
+ cbnz w0, .loop                 /* exclusive reservation lost: retry */
+.cpu_spin_trylock_out:
+ ret
+END_FUNC __cpu_spin_trylock
+
+/* void __cpu_spin_unlock(unsigned int *lock); */
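+/*
+ * stlr has release semantics: all writes made inside the critical
+ * section are ordered before the store that clears the lock word,
+ * and the store generates the event that wakes cores waiting in wfe.
+ */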
+FUNC __cpu_spin_unlock , :
+ stlr wzr, [x0]                 /* store-release zero: unlock and signal waiters */
+ ret
+END_FUNC __cpu_spin_unlock