path: root/v_windows/v/old/vlib/sync/atomic2
author    Indrajith K L 2022-12-03 17:00:20 +0530
committer Indrajith K L 2022-12-03 17:00:20 +0530
commit    f5c4671bfbad96bf346bd7e9a21fc4317b4959df (patch)
tree      2764fc62da58f2ba8da7ed341643fc359873142f /v_windows/v/old/vlib/sync/atomic2
Adds most of the tools (HEAD, master)
Diffstat (limited to 'v_windows/v/old/vlib/sync/atomic2')
-rw-r--r--  v_windows/v/old/vlib/sync/atomic2/atomic.v        88
-rw-r--r--  v_windows/v/old/vlib/sync/atomic2/atomic_test.v  105
2 files changed, 193 insertions, 0 deletions
diff --git a/v_windows/v/old/vlib/sync/atomic2/atomic.v b/v_windows/v/old/vlib/sync/atomic2/atomic.v
new file mode 100644
index 0000000..2ff64f2
--- /dev/null
+++ b/v_windows/v/old/vlib/sync/atomic2/atomic.v
@@ -0,0 +1,88 @@
+module atomic2
+
+/*
+Implements atomic operations. For now, TCC does not support the
+atomic versions on *nix, so locks are used to simulate the same behavior.
+On Windows, TCC can simulate them with other atomic operations.
+
+@VEXEROOT/thirdparty/stdatomic contains compatibility header files
+for stdatomic that support *nix, Windows and C++.
+
+This implementation should be regarded as alpha stage and needs
+further testing.
+*/
+
+#flag windows -I @VEXEROOT/thirdparty/stdatomic/win
+#flag linux -I @VEXEROOT/thirdparty/stdatomic/nix
+#flag darwin -I @VEXEROOT/thirdparty/stdatomic/nix
+#flag freebsd -I @VEXEROOT/thirdparty/stdatomic/nix
+#flag solaris -I @VEXEROOT/thirdparty/stdatomic/nix
+
+$if linux {
+ $if tinyc {
+ $if amd64 {
+ // most Linux distributions have /usr/lib/libatomic.so, but Ubuntu uses gcc version specific dir
+ #flag -L/usr/lib/gcc/x86_64-linux-gnu/6
+ #flag -L/usr/lib/gcc/x86_64-linux-gnu/7
+ #flag -L/usr/lib/gcc/x86_64-linux-gnu/8
+ #flag -L/usr/lib/gcc/x86_64-linux-gnu/9
+ #flag -L/usr/lib/gcc/x86_64-linux-gnu/10
+ #flag -L/usr/lib/gcc/x86_64-linux-gnu/11
+ #flag -L/usr/lib/gcc/x86_64-linux-gnu/12
+ } $else $if arm64 {
+ #flag -L/usr/lib/gcc/aarch64-linux-gnu/6
+ #flag -L/usr/lib/gcc/aarch64-linux-gnu/7
+ #flag -L/usr/lib/gcc/aarch64-linux-gnu/8
+ #flag -L/usr/lib/gcc/aarch64-linux-gnu/9
+ #flag -L/usr/lib/gcc/aarch64-linux-gnu/10
+ #flag -L/usr/lib/gcc/aarch64-linux-gnu/11
+ #flag -L/usr/lib/gcc/aarch64-linux-gnu/12
+ }
+ #flag -latomic
+ }
+}
+#include <atomic.h>
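+
+// The wrappers below assume the compat header exposes C11-style fetch/store/load
+// helpers whose fetch variants return the PREVIOUS value. Roughly (a sketch of
+// the assumed signatures, not the header's actual text):
+//   fn C.atomic_fetch_add_u64(ptr voidptr, delta u64) u64
+//   fn C.atomic_fetch_sub_u64(ptr voidptr, delta u64) u64
+//   fn C.atomic_store_u64(ptr voidptr, val u64)
+//   fn C.atomic_load_u64(ptr voidptr) u64
+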
+// add_u64 atomically adds the provided delta; it returns true
+// if the value before the addition was 0
+pub fn add_u64(ptr &u64, delta int) bool {
+ res := C.atomic_fetch_add_u64(voidptr(ptr), delta)
+ return res == 0
+}
+
+// sub_u64 atomically subtracts the provided delta; it returns true
+// if the value before the subtraction was 0
+pub fn sub_u64(ptr &u64, delta int) bool {
+ res := C.atomic_fetch_sub_u64(voidptr(ptr), delta)
+ return res == 0
+}
+
+// add_i64 atomically adds the provided delta; it returns true
+// if the value before the addition was 0
+pub fn add_i64(ptr &i64, delta int) bool {
+ res := C.atomic_fetch_add_u64(voidptr(ptr), delta)
+ return res == 0
+}
+
+// sub_i64 atomically subtracts the provided delta; it returns true
+// if the value before the subtraction was 0
+pub fn sub_i64(ptr &i64, delta int) bool {
+ res := C.atomic_fetch_sub_u64(voidptr(ptr), delta)
+ return res == 0
+}
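+
+// Note: the i64 wrappers above reuse the u64 fetch ops. On two's-complement
+// hardware this is sound, because signed and unsigned addition/subtraction
+// produce the same bit pattern. Worked example: subtracting 1 from u64(0)
+// wraps to 0xFFFF_FFFF_FFFF_FFFF, which reinterprets as i64(-1), exactly
+// what the sub_i64 test below relies on.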
+
+// atomic store/load operations have to be used when there might be another
+// concurrent access; a cross-thread usage sketch follows this file's diff
+
+// store_u64 atomically sets a value
+pub fn store_u64(ptr &u64, val u64) {
+ C.atomic_store_u64(voidptr(ptr), val)
+}
+
+// load_u64 atomically gets a value
+pub fn load_u64(ptr &u64) u64 {
+ return C.atomic_load_u64(voidptr(ptr))
+}
+
+// store_i64 atomically sets a value
+pub fn store_i64(ptr &i64, val i64) {
+ C.atomic_store_u64(voidptr(ptr), val)
+}
+
+// load_i64 atomically gets a value
+pub fn load_i64(ptr &i64) i64 {
+ return i64(C.atomic_load_u64(voidptr(ptr)))
+}
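
For context, here is a minimal cross-thread store/load sketch, a hypothetical
usage example (not part of this commit), assuming the module is importable as
sync.atomic2 just like in the test file below:

import sync
import sync.atomic2

fn writer(ptr &u64, mut wg sync.WaitGroup) {
	// publish the value with an atomic store; a plain assignment
	// here would be a data race with the reader below
	atomic2.store_u64(ptr, 42)
	wg.done()
}

fn main() {
	mut value := u64(0)
	mut wg := sync.new_waitgroup()
	wg.add(1)
	go writer(&value, mut wg)
	wg.wait()
	// pair the store with an atomic load on the reading side
	println(atomic2.load_u64(&value)) // prints 42
}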
diff --git a/v_windows/v/old/vlib/sync/atomic2/atomic_test.v b/v_windows/v/old/vlib/sync/atomic2/atomic_test.v
new file mode 100644
index 0000000..7a5ffd8
--- /dev/null
+++ b/v_windows/v/old/vlib/sync/atomic2/atomic_test.v
@@ -0,0 +1,105 @@
+import sync.atomic2
+import sync
+
+const (
+ iterations_per_cycle = 100_000
+)
+
+struct Counter {
+mut:
+ counter u64
+}
+
+// without proper synchronization this would fail
+fn test_count_10_times_1_cycle_should_result_10_cycles_with_sync() {
+ desired_iterations := 10 * iterations_per_cycle
+ mut wg := sync.new_waitgroup()
+ mut counter := &Counter{}
+ wg.add(10)
+ for i := 0; i < 10; i++ {
+ go count_one_cycle(mut counter, mut wg)
+ }
+ wg.wait()
+ assert counter.counter == desired_iterations
+ eprintln(' with synchronization the counter is: ${counter.counter:10} , expectedly == ${desired_iterations:10}')
+}
+
+// This test is just an anti-test, to prove that the synchronized version above works
+fn test_count_10_times_1_cycle_should_not_be_10_cycles_without_sync() {
+ desired_iterations := 10 * iterations_per_cycle
+ mut wg := sync.new_waitgroup()
+ mut counter := &Counter{}
+ wg.add(10)
+ for i := 0; i < 10; i++ {
+ go count_one_cycle_without_sync(mut counter, mut wg)
+ }
+ wg.wait()
+ // NB: we do not assert here, just print, because sometimes by chance counter.counter may be == desired_iterations
+ eprintln('without synchronization the counter is: ${counter.counter:10} , expectedly != ${desired_iterations:10}')
+}
+
+fn test_count_plus_one_u64() {
+ mut c := u64(0)
+ atomic2.add_u64(&c, 1)
+ assert atomic2.load_u64(&c) == 1
+}
+
+fn test_count_plus_one_i64() {
+ mut c := i64(0)
+ atomic2.add_i64(&c, 1)
+ assert atomic2.load_i64(&c) == 1
+}
+
+fn test_count_plus_greater_than_one_u64() {
+ mut c := u64(0)
+ atomic2.add_u64(&c, 10)
+ assert atomic2.load_u64(&c) == 10
+}
+
+fn test_count_plus_greater_than_one_i64() {
+ mut c := i64(0)
+ atomic2.add_i64(&c, 10)
+ assert atomic2.load_i64(&c) == 10
+}
+
+fn test_count_minus_one_u64() {
+ mut c := u64(1)
+ atomic2.sub_u64(&c, 1)
+ assert atomic2.load_u64(&c) == 0
+}
+
+fn test_count_minus_one_i64() {
+ mut c := i64(0)
+ atomic2.sub_i64(&c, 1)
+ assert atomic2.load_i64(&c) == -1
+}
+
+fn test_count_minus_greater_than_one_u64() {
+ mut c := u64(0)
+ atomic2.store_u64(&c, 10)
+ atomic2.sub_u64(&c, 10)
+ assert atomic2.load_u64(&c) == 0
+}
+
+fn test_count_minus_greater_than_one_i64() {
+ mut c := i64(0)
+ atomic2.store_i64(&c, 10)
+ atomic2.sub_i64(&c, 20)
+ assert atomic2.load_i64(&c) == -10
+}
+
+// count_one_cycle counts the common counter iterations_per_cycle times in a thread-safe way
+fn count_one_cycle(mut counter Counter, mut group sync.WaitGroup) {
+ for i := 0; i < iterations_per_cycle; i++ {
+ atomic2.add_u64(&counter.counter, 1)
+ }
+ group.done()
+}
+
+// count_one_cycle_without_sync counts the common counter iterations_per_cycle times in a non-thread-safe way
+fn count_one_cycle_without_sync(mut counter Counter, mut group sync.WaitGroup) {
+ for i := 0; i < iterations_per_cycle; i++ {
+ counter.counter++
+ }
+ group.done()
+}
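
Assuming a standard V checkout, the tests above can be run with V's built-in
test runner, e.g.:

v test vlib/sync/atomic2/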