From 8f0dc0f00e99cf83c341256411c46a5718d65842 Mon Sep 17 00:00:00 2001
From: skeris
Date: Mon, 26 Aug 2024 22:46:07 +0300
Subject: [PATCH 1/5] feat: create experimental Go-like benchmark module

---
 vlib/x/benchmark/benchmark.v      | 249 ++++++++++++++++++++++++++++++
 vlib/x/benchmark/benchmark_test.v | 170 ++++++++++++++++++++
 2 files changed, 419 insertions(+)
 create mode 100644 vlib/x/benchmark/benchmark.v
 create mode 100644 vlib/x/benchmark/benchmark_test.v

diff --git a/vlib/x/benchmark/benchmark.v b/vlib/x/benchmark/benchmark.v
new file mode 100644
index 00000000000000..63610ec0c73e80
--- /dev/null
+++ b/vlib/x/benchmark/benchmark.v
@@ -0,0 +1,249 @@
+module benchmark
+
+import time
+import math
+import sync
+
+const default_duration = time.second
+
+// Represents all significant data for benchmarking. Provides a clear way of getting results via exported methods
+@[noinit]
+pub struct Benchmark {
+	pub mut:
+	n i64 // Number of iterations. Set explicitly or computed from the expected time of benchmarking
+	bench_func fn()! @[required] // function for benchmarking
+	bench_time time.Duration // benchmark duration
+	is_parallel bool // if true, every bench_func runs in a separate coroutine
+	benchmark_result BenchmarkResult // accumulator of benchmark metrics
+	timer_on bool // internal flag of time recording
+	start_time time.Time // start timestamp of the timer
+	duration time.Duration // accumulated elapsed time of all runs
+	failed bool // flag of bench_func failure. true if any bench_func run failed
+	start_memory usize // memory usage at benchmark start
+	start_allocs usize // heap bytes allocated at benchmark start
+}
+
+// constructor of benchmark
+// arguments:
+// - bench_func - function to benchmark. required; if you have no function, you don't need a benchmark
+// - n - number of iterations. set if you know how many runs of function you need. if you don't know how many you need, set 0
+// - duration - 1s by default. expected duration of all benchmark runs. cannot be combined with is_parallel == true
+// - is_parallel - if true, every bench_func runs in a separate coroutine
+pub fn Benchmark.new(bench_func fn()!, n i64, duration time.Duration, is_parallel bool) !Benchmark{
+	if bench_func == voidptr(0) {
+		return error('Benchmark function cannot be empty')
+	}
+
+	if duration > 0 && is_parallel {
+		return error('cannot predict number of parallel iterations')
+	}
+
+	return Benchmark{
+		n:n
+		bench_func: bench_func
+		bench_time: if duration > 0 {duration} else {default_duration}
+		is_parallel: is_parallel
+	}
+}
+
+// function to start benchmarking
+// run the benchmark n times, or for the configured duration
+pub fn (mut b Benchmark) run_benchmark(){
+	// run bench_func once to warm up the processor cache and get an elapsed time for predicting n
+	b.run_n(1)
+
+	// if one iteration failed, there is no need to do more
+	if b.failed {
+		b.n = 1
+		// show the failed result. a bad result is still a result
+		b.benchmark_result.print()
+	}
+
+	// if n is provided, we should run exactly n times, but we have already run once
+	if b.n > 1 {
+		b.run_n(b.n - 1)
+	}
+
+	// if n is zero, we should run bench_func enough times to fill the expected duration
+	if b.n == 0 {
+		b.n = 1
+		// if one of the runs failed, bench_func is not valid,
+		// but 1e9 evaluations would be too much,
+		// so we repeat the prediction-execution process while the elapsed time is less than the expected time
+		for !b.failed && b.duration < b.bench_time && b.n < 1000000000 {
+			// predict a new number of executions to approach the expected time
+			n := b.predict_n()
+
+			// run the predicted number of iterations; the next pass predicts again
+			b.run_n(n)
+			b.n += n
+		}
+	}
+
+	// if n was provided, the duration has been measured; otherwise n has been computed
+	b.benchmark_result.n = b.n
+	b.benchmark_result.t = b.duration
+
+	// whatever the benchmark result is used for (sending via API, sending to chat, processing, logging, etc.), we print it
+	b.benchmark_result.print()
+}
+
+// run bench_func n times
+fn (mut b Benchmark) run_n(n i64) {
+	// collect garbage to avoid GC influence
+	gc_collect()
+
+	// reset and start the timer to measure elapsed time
+	b.reset_timer()
+	b.start_timer()
+
+	// unwrap function from struct field
+	mut f := b.bench_func
+
+	if !b.is_parallel {
+		// run n times sequentially
+		for i := i64(0); i < n; i++ {
+			f() or {
+				// if one execution failed, print the error, set the failed flag and stop execution
+				b.failed = true
+				// workaround to account for unsuccessful runs
+				b.n -= n-i
+				eprintln('Error: $err')
+				return
+			}
+		}
+	}
+
+	// spawn n coroutines, wait for spawning to end, then unpause all coroutines
+	if b.is_parallel {
+		// WaitGroup to spawn and pause enough coroutines
+		mut spawnwg := sync.new_waitgroup()
+		spawnwg.add(int(n))
+		// WaitGroup to wait for the end of execution
+		mut workwg := sync.new_waitgroup()
+		workwg.add(int(n))
+
+		for i := i64(0); i < n; i++ {
+			spawn run_in_one_time(mut workwg, mut spawnwg, f)
+			spawnwg.done()
+		}
+		workwg.wait()
+
+	}
+
+	// stop timer and collect data
+	b.stop_timer()
+}
+
+fn run_in_one_time(mut workwg &sync.WaitGroup, mut spawnwg &sync.WaitGroup, f fn()!){
+	defer {
+		workwg.done()
+	}
+	spawnwg.wait()
+	f() or {
+		return
+	}// TODO: add error handling
+}
+
+// predict number of executions to estimate duration
+// based on previous values
+fn (mut b Benchmark) predict_n() i64 {
+	// goal duration in nanoseconds
+	mut goal_ns := b.bench_time.nanoseconds()
+	// get number of previous iterations
+	prev_iters := b.n
+	// get elapsed time in nanoseconds
+	mut prev_ns := b.duration.nanoseconds()
+
+	// to avoid division by zero
+	if prev_ns <= 0 {
+		prev_ns = 1
+	}
+
+	// multiply first to avoid a division result that rounds down to 0
+	mut n := goal_ns * prev_iters
+	n = n / prev_ns
+	// grow by at least a factor of 1.2
+	n += n / 5
+
+	// do not grow too fast
+	n = math.min(n, 100 * b.n)
+	// grow by at least 1
+	n = math.max(n, b.n + 1)
+	// avoid running more than 1e9 times
+	n = math.min(n, 1000000000)
+
+	return n
+}
+
+// clear timer and reset memory start data
+fn (mut b Benchmark) reset_timer() {
+	// if timer_on we should restart it
+	if b.timer_on {
+		b.start_time = time.now()
+		b.start_memory = gc_memory_use()
+		b.start_allocs = gc_heap_usage().bytes_since_gc
+	}
+}
+
+// start timer and register start measures of memory
+fn (mut b Benchmark) start_timer() {
+	// there is no need to start a timer that is already running
+	if !b.timer_on{
+		b.start_time = time.now()
+		b.start_memory = gc_memory_use()
+		b.start_allocs = gc_heap_usage().bytes_since_gc
+		b.timer_on = true
+	}
+}
+
+// stop timer and accumulate benchmark data
+fn (mut b Benchmark) stop_timer() {
+	if b.timer_on{
+		// accumulate the elapsed execution time
+		b.duration += time.since(b.start_time)
+		// accumulate memory growth
+		b.benchmark_result.mem += gc_memory_use() - b.start_memory
+		// accumulate heap usage
+		b.benchmark_result.allocs += gc_heap_usage().bytes_since_gc - b.start_allocs
+		b.timer_on = false
+	}
+}
+
+// struct representing the result of a benchmark
+struct BenchmarkResult {
+pub mut:
+	n i64 // iterations count
+	t time.Duration // elapsed time
+	mem usize // all allocated memory
+	allocs usize // heap allocated memory
+}
+
+// elapsed time in nanoseconds per iteration
+fn (r BenchmarkResult) ns_per_op() i64 {
+	if r.n <= 0 {
+		return 0
+	}
+	return r.t.nanoseconds() / i64(r.n)
+}
+
+// heap usage per iteration
+fn (r BenchmarkResult) allocs_per_op() i64 {
+	if r.n <= 0 {
+		return 0
+	}
+	return i64(r.allocs) / i64(r.n)
+}
+
+// memory usage per iteration
+fn (r BenchmarkResult) alloced_bytes_per_op() i64 {
+	if r.n <= 0 {
+		return 0
+	}
+	return i64(r.mem) / i64(r.n)
+}
+
+// print all measurements
+fn (r BenchmarkResult) print() {
+	println('Iterations: ${r.n}\t\tTotal Duration: ${r.t}\tns/op: ${r.ns_per_op()}\tB/op: ${r.alloced_bytes_per_op()}\tallocs/op: ${r.allocs_per_op()}')
+}
diff --git a/vlib/x/benchmark/benchmark_test.v b/vlib/x/benchmark/benchmark_test.v
new file mode 100644
index 00000000000000..642c214c5121a9
--- /dev/null
+++ b/vlib/x/benchmark/benchmark_test.v
@@ -0,0 +1,170 @@
+module benchmark
+
+import time
+
+// if n == 0, the predicted n == 1
+fn test_predict_n_zero() {
+	mut b := Benchmark{
+		n: 0,
+		duration: 0,
+		bench_time: time.second,
+		bench_func: fn()!{},
+	}
+	expected := 1
+	println(b.predict_n())
+	assert b.predict_n() == expected
+}
+
+// n can't be more than 1000000000
+fn test_predict_n_limit() {
+	mut b := Benchmark{
+		n: 10000000000,
+		duration: 0,
+		bench_time: time.second,
+		bench_func: fn()!{},
+	}
+	expected := 1000000000
+	assert b.predict_n() == expected
+}
+
+// test prediction for slow bench function
+fn test_slow_fn() {
+	mut b := Benchmark{
+		duration: time.second,
+		bench_func: fn()!{},
+	}
+	assert b.predict_n() == 1
+}
+
+// if bench_func causes an error, set failed to true and n = 1
+fn test_fn_with_error() {
+	f := fn() ! {
+		return error('error')
+	}
+	mut bench := Benchmark.new(f, 0, 0, false) or {
+		eprintln('Error creating benchmark: $err')
+		return
+	}
+
+	bench.run_benchmark()
+
+	assert bench.failed == true
+	assert bench.benchmark_result.n == 1
+}
+
+fn test_n_must_be_over_1(){
+	f := fn() ! {
+		mut i := 0
+		i++
+	}
+	mut bench := Benchmark.new(f, 0, 0, false) or {
+		eprintln('Error creating benchmark: $err')
+		return
+	}
+
+	bench.run_benchmark()
+
+	assert bench.benchmark_result.n > 1
+}
+
+fn test_n(){
+	f := fn() ! {
+		mut i := 0
+		i++
+	}
+	mut bench := Benchmark.new(f, 1000, 0, false) or {
+		eprintln('Error creating benchmark: $err')
+		return
+	}
+
+	bench.run_benchmark()
+
+	assert bench.benchmark_result.n == 1000
+}
+
+fn test_max_bench_time(){
+	f := fn() ! {
+		time.sleep(500* time.millisecond)
+	}
+	mut bench := Benchmark.new(f, 0, 0, false) or {
+		eprintln('Error creating benchmark: $err')
+		return
+	}
+
+	bench.run_benchmark()
+
+	assert bench.benchmark_result.n == 3
+	assert bench.benchmark_result.t >= time.second
+}
+
+fn test_performance() {
+	scheduler := [func_1,func_2,func_3]
+	expected := [false,false,false]
+	mut actual := []bool{}
+
+	for i in scheduler{
+		mut bench := Benchmark.new(i, 0, 0, false) or {
+			eprintln('Error creating benchmark: $err')
+			return
+		}
+
+		bench.run_benchmark()
+		actual << bench.failed
+	}
+
+	assert expected.len == actual.len
+	for i:=0;i<expected.len;i++{
+		assert expected[i] == actual[i]
+	}
+}
+
+
+fn func_1() !{
+	arr := [10,2,13,4,5,16,7,1,9,20]
+	target := 16
+	mut found := false
+	for i :=0; i<arr.len; i++{
+		if arr[i] == target{
+			found = true
+		}
+	}
+	if !found {
+		return error('not found')
+	}
+}
+
+fn func_2() !{
+	arr := [1,2,4,5,7,9,10,13,16,20]
+	target := 16
+	mut left := 0
+	mut right := arr.len - 1
+
+	for left <= right {
+		mid := (left + right) / 2
+		if arr[mid] == target{
+			return
+		}
+		if arr[mid] < target{
+			left = mid + 1
+		}
+		if arr[mid]>target{
+			right = mid - 1
+		}
+	}
+	return
+}
+
+
+fn func_3() !{
+	mut arr := [10,2,13,4,5,16,7,1,9,20]
+
+	for i :=0; i<arr.len-1; i++{
+		for j:=0; j<arr.len-i-1; j++{
+			if arr[j]>arr[j+1]{
+				arr[j],arr[j+1]=arr[j+1],arr[j]
+			}
+		}
+	}
+}
+

From 9f0b196e6751de677b98319610451835f246e02e Mon Sep 17 00:00:00 2001
From: skeris
Date: Wed, 28 Aug 2024 22:26:21 +0300
Subject: [PATCH 2/5] fix v vet warnings

---
 vlib/x/benchmark/benchmark.v | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/vlib/x/benchmark/benchmark.v b/vlib/x/benchmark/benchmark.v
index 63610ec0c73e80..c62168c6b9ed29 100644
--- a/vlib/x/benchmark/benchmark.v
+++ b/vlib/x/benchmark/benchmark.v
@@ -6,7 +6,7 @@ import sync
 
 const default_duration = time.second
 
-// Represents all significant data for benchmarking. Provides a clear way of getting results via exported methods
+// Benchmark represents all significant data for benchmarking. Provides a clear way of getting results via exported methods
 @[noinit]
 pub struct Benchmark {
 	pub mut:
@@ -23,7 +23,7 @@ pub struct Benchmark {
 	start_allocs usize // heap bytes allocated at benchmark start
 }
 
-// constructor of benchmark
+// Benchmark.new - constructor of benchmark
 // arguments:
 // - bench_func - function to benchmark. required; if you have no function, you don't need a benchmark
 // - n - number of iterations. set if you know how many runs of function you need. if you don't know how many you need, set 0
@@ -46,7 +46,7 @@ pub fn Benchmark.new(bench_func fn()!, n i64, duration time.Duration, is_paralle
 	}
 }
 
-// function to start benchmarking
+// run_benchmark - function to start benchmarking
 // run the benchmark n times, or for the configured duration
 pub fn (mut b Benchmark) run_benchmark(){
 	// run bench_func once to warm up the processor cache and get an elapsed time for predicting n
@@ -88,7 +88,7 @@ pub fn (mut b Benchmark) run_benchmark(){
 	b.benchmark_result.print()
 }
 
-// run bench_func n times
+// run_n - run bench_func n times
 fn (mut b Benchmark) run_n(n i64) {
 	// collect garbage to avoid GC influence
 	gc_collect()
@@ -145,7 +145,7 @@ fn run_in_one_time(mut workwg &sync.WaitGroup, mut spawnwg &sync.WaitGroup, f fn
 	}// TODO: add error handling
 }
 
-// predict number of executions to estimate duration
+// predict_n - predict number of executions to estimate duration
 // based on previous values
 fn (mut b Benchmark) predict_n() i64 {
 	// goal duration in nanoseconds
 	mut goal_ns := b.bench_time.nanoseconds()
@@ -176,7 +176,7 @@ fn (mut b Benchmark) predict_n() i64 {
 	return n
 }
 
-// clear timer and reset memory start data
+// reset_timer - clear timer and reset memory start data
 fn (mut b Benchmark) reset_timer() {
 	// if timer_on we should restart it
 	if b.timer_on {
 		b.start_time = time.now()
@@ -186,7 +186,7 @@ fn (mut b Benchmark) reset_timer() {
 	}
 }
 
-// start timer and register start measures of memory
+// start_timer - start timer and register start measures of memory
 fn (mut b Benchmark) start_timer() {
 	// there is no need to start a timer that is already running
 	if !b.timer_on{
@@ -197,7 +197,7 @@ fn (mut b Benchmark) start_timer() {
 	}
 }
 
-// stop timer and accumulate benchmark data
+// stop_timer - stop timer and accumulate benchmark data
 fn (mut b Benchmark) stop_timer() {
 	if b.timer_on{
 		// accumulate the elapsed execution time
@@ -210,7 +210,7 @@ fn (mut b Benchmark) stop_timer() {
 	}
 }
 
-// struct representing the result of a benchmark
+// BenchmarkResult - struct representing the result of a benchmark
 struct BenchmarkResult {
 pub mut:
 	n i64 // iterations count
@@ -219,7 +219,7 @@ pub mut:
 	allocs usize // heap allocated memory
 }
 
-// elapsed time in nanoseconds per iteration
+// ns_per_op - elapsed time in nanoseconds per iteration
 fn (r BenchmarkResult) ns_per_op() i64 {
 	if r.n <= 0 {
 		return 0
@@ -227,7 +227,7 @@ fn (r BenchmarkResult) ns_per_op() i64 {
 	return r.t.nanoseconds() / i64(r.n)
 }
 
-// heap usage per iteration
+// allocs_per_op - heap usage per iteration
 fn (r BenchmarkResult) allocs_per_op() i64 {
 	if r.n <= 0 {
 		return 0
@@ -235,7 +235,7 @@ fn (r BenchmarkResult) allocs_per_op() i64 {
 	return i64(r.allocs) / i64(r.n)
 }
 
-// memory usage per iteration
+// alloced_bytes_per_op - memory usage per iteration
 fn (r BenchmarkResult) alloced_bytes_per_op() i64 {
 	if r.n <= 0 {
 		return 0
@@ -243,7 +243,7 @@ fn (r BenchmarkResult) alloced_bytes_per_op() i64 {
 	return i64(r.mem) / i64(r.n)
 }
 
-// print all measurements
+// print - print all measurements
 fn (r BenchmarkResult) print() {
 	println('Iterations: ${r.n}\t\tTotal Duration: ${r.t}\tns/op: ${r.ns_per_op()}\tB/op: ${r.alloced_bytes_per_op()}\tallocs/op: ${r.allocs_per_op()}')
 }

From 96ff251191c8076e98c3e0d1880c8b49667ee940 Mon Sep 17 00:00:00 2001
From: skeris
Date: Wed, 28 Aug 2024 22:33:07 +0300
Subject: [PATCH 3/5] vfmted

---
 vlib/x/benchmark/benchmark.v      | 59 ++++++++++----------
 vlib/x/benchmark/benchmark_test.v | 91 +++++++++++++++----------------
 2 files changed, 72 insertions(+), 78 deletions(-)

diff --git a/vlib/x/benchmark/benchmark.v b/vlib/x/benchmark/benchmark.v
index c62168c6b9ed29..8be74f2f3e8409 100644
--- a/vlib/x/benchmark/benchmark.v
+++ b/vlib/x/benchmark/benchmark.v
@@ -9,18 +9,18 @@ const default_duration = time.second
 // Benchmark represents all significant data for benchmarking. Provides a clear way of getting results via exported methods
 @[noinit]
 pub struct Benchmark {
-	pub mut:
-	n i64 // Number of iterations. Set explicitly or computed from the expected time of benchmarking
-	bench_func fn()! @[required] // function for benchmarking
-	bench_time time.Duration // benchmark duration
-	is_parallel bool // if true, every bench_func runs in a separate coroutine
-	benchmark_result BenchmarkResult // accumulator of benchmark metrics
-	timer_on bool // internal flag of time recording
-	start_time time.Time // start timestamp of the timer
-	duration time.Duration // accumulated elapsed time of all runs
-	failed bool // flag of bench_func failure. true if any bench_func run failed
-	start_memory usize // memory usage at benchmark start
-	start_allocs usize // heap bytes allocated at benchmark start
+pub mut:
+	n                i64             // Number of iterations. Set explicitly or computed from the expected time of benchmarking
+	bench_func       fn () !         @[required] // function for benchmarking
+	bench_time       time.Duration   // benchmark duration
+	is_parallel      bool            // if true, every bench_func runs in a separate coroutine
+	benchmark_result BenchmarkResult // accumulator of benchmark metrics
+	timer_on         bool            // internal flag of time recording
+	start_time       time.Time       // start timestamp of the timer
+	duration         time.Duration   // accumulated elapsed time of all runs
+	failed           bool            // flag of bench_func failure. true if any bench_func run failed
+	start_memory     usize           // memory usage at benchmark start
+	start_allocs     usize           // heap bytes allocated at benchmark start
 }
 
 // Benchmark.new - constructor of benchmark
@@ -29,8 +29,8 @@ pub struct Benchmark {
 // - n - number of iterations. set if you know how many runs of function you need. if you don't know how many you need, set 0
 // - duration - 1s by default. expected duration of all benchmark runs. cannot be combined with is_parallel == true
 // - is_parallel - if true, every bench_func runs in a separate coroutine
-pub fn Benchmark.new(bench_func fn()!, n i64, duration time.Duration, is_parallel bool) !Benchmark{
-	if bench_func == voidptr(0) {
+pub fn Benchmark.new(bench_func fn () !, n i64, duration time.Duration, is_parallel bool) !Benchmark {
+	if bench_func == unsafe { nil } {
 		return error('Benchmark function cannot be empty')
 	}
 
@@ -39,16 +39,16 @@ pub fn Benchmark.new(bench_func fn()!, n i64, duration time.Duration, is_paralle
 	}
 
 	return Benchmark{
-		n:n
-		bench_func: bench_func
-		bench_time: if duration > 0 {duration} else {default_duration}
+		n: n
+		bench_func: bench_func
+		bench_time: if duration > 0 { duration } else { benchmark.default_duration }
 		is_parallel: is_parallel
 	}
 }
 
 // run_benchmark - function to start benchmarking
 // run the benchmark n times, or for the configured duration
-pub fn (mut b Benchmark) run_benchmark(){
+pub fn (mut b Benchmark) run_benchmark() {
 	// run bench_func once to warm up the processor cache and get an elapsed time for predicting n
 	b.run_n(1)
 
@@ -107,8 +107,8 @@ fn (mut b Benchmark) run_n(n i64) {
 				// if one execution failed, print the error, set the failed flag and stop execution
 				b.failed = true
 				// workaround to account for unsuccessful runs
-				b.n -= n-i
-				eprintln('Error: $err')
+				b.n -= n - i
+				eprintln('Error: ${err}')
 				return
 			}
 		}
@@ -128,21 +128,18 @@ fn (mut b Benchmark) run_n(n i64) {
 			spawnwg.done()
 		}
 		workwg.wait()
-
 	}
 
 	// stop timer and collect data
 	b.stop_timer()
 }
 
-fn run_in_one_time(mut workwg &sync.WaitGroup, mut spawnwg &sync.WaitGroup, f fn()!){
+fn run_in_one_time(mut workwg sync.WaitGroup, mut spawnwg sync.WaitGroup, f fn () !) {
 	defer {
 		workwg.done()
 	}
 	spawnwg.wait()
-	f() or {
-		return
-	}// TODO: add error handling
+	f() or { return } // TODO: add error handling
 }
 
 // predict_n - predict number of executions to estimate duration
@@ -189,7 +186,7 @@ fn (mut b Benchmark) reset_timer() {
 // start_timer - start timer and register start measures of memory
 fn (mut b Benchmark) start_timer() {
 	// there is no need to start a timer that is already running
-	if !b.timer_on{
+	if !b.timer_on {
 		b.start_time = time.now()
 		b.start_memory = gc_memory_use()
 		b.start_allocs = gc_heap_usage().bytes_since_gc
@@ -199,7 +196,7 @@ fn (mut b Benchmark) start_timer() {
 // stop_timer - stop timer and accumulate benchmark data
 fn (mut b Benchmark) stop_timer() {
-	if b.timer_on{
+	if b.timer_on {
 		// accumulate the elapsed execution time
 		b.duration += time.since(b.start_time)
 		// accumulate memory growth
@@ -213,10 +210,10 @@ fn (mut b Benchmark) stop_timer() {
 // BenchmarkResult - struct representing the result of a benchmark
 struct BenchmarkResult {
 pub mut:
-	n i64 // iterations count
-	t time.Duration // elapsed time
-	mem usize // all allocated memory
-	allocs usize // heap allocated memory
+	n      i64           // iterations count
+	t      time.Duration // elapsed time
+	mem    usize         // all allocated memory
+	allocs usize         // heap allocated memory
 }
 
 // ns_per_op - elapsed time in nanoseconds per iteration
diff --git a/vlib/x/benchmark/benchmark_test.v b/vlib/x/benchmark/benchmark_test.v
index 642c214c5121a9..22020906964959 100644
--- a/vlib/x/benchmark/benchmark_test.v
+++ b/vlib/x/benchmark/benchmark_test.v
@@ -5,10 +5,10 @@ import time
 // if n == 0, the predicted n == 1
 fn test_predict_n_zero() {
 	mut b := Benchmark{
-		n: 0,
-		duration: 0,
-		bench_time: time.second,
-		bench_func: fn()!{},
+		n: 0
+		duration: 0
+		bench_time: time.second
+		bench_func: fn () ! {}
 	}
 	expected := 1
 	println(b.predict_n())
@@ -18,10 +18,10 @@ fn test_predict_n_zero() {
 // n can't be more than 1000000000
 fn test_predict_n_limit() {
 	mut b := Benchmark{
-		n: 10000000000,
-		duration: 0,
-		bench_time: time.second,
-		bench_func: fn()!{},
+		n: 10000000000
+		duration: 0
+		bench_time: time.second
+		bench_func: fn () ! {}
 	}
 	expected := 1000000000
 	assert b.predict_n() == expected
@@ -30,19 +30,19 @@ fn test_predict_n_limit() {
 // test prediction for slow bench function
 fn test_slow_fn() {
 	mut b := Benchmark{
-		duration: time.second,
-		bench_func: fn()!{},
+		duration: time.second
+		bench_func: fn () ! {}
 	}
 	assert b.predict_n() == 1
 }
 
 // if bench_func causes an error, set failed to true and n = 1
 fn test_fn_with_error() {
-	f := fn() ! {
+	f := fn () ! {
 		return error('error')
 	}
 	mut bench := Benchmark.new(f, 0, 0, false) or {
-		eprintln('Error creating benchmark: $err')
+		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
@@ -52,13 +52,13 @@ fn test_fn_with_error() {
 	assert bench.benchmark_result.n == 1
 }
 
-fn test_n_must_be_over_1(){
-	f := fn() ! {
+fn test_n_must_be_over_1() {
+	f := fn () ! {
 		mut i := 0
 		i++
 	}
 	mut bench := Benchmark.new(f, 0, 0, false) or {
-		eprintln('Error creating benchmark: $err')
+		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
@@ -67,13 +67,13 @@ fn test_n_must_be_over_1() {
 	assert bench.benchmark_result.n > 1
 }
 
-fn test_n(){
-	f := fn() ! {
+fn test_n() {
+	f := fn () ! {
 		mut i := 0
 		i++
 	}
 	mut bench := Benchmark.new(f, 1000, 0, false) or {
-		eprintln('Error creating benchmark: $err')
+		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
@@ -82,12 +82,12 @@ fn test_n() {
 	assert bench.benchmark_result.n == 1000
 }
 
-fn test_max_bench_time(){
-	f := fn() ! {
-		time.sleep(500* time.millisecond)
+fn test_max_bench_time() {
+	f := fn () ! {
+		time.sleep(500 * time.millisecond)
 	}
 	mut bench := Benchmark.new(f, 0, 0, false) or {
-		eprintln('Error creating benchmark: $err')
+		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
@@ -98,13 +98,13 @@ fn test_max_bench_time() {
 }
 
 fn test_performance() {
-	scheduler := [func_1,func_2,func_3]
-	expected := [false,false,false]
+	scheduler := [func_1, func_2, func_3]
+	expected := [false, false, false]
 	mut actual := []bool{}
 
-	for i in scheduler{
+	for i in scheduler {
 		mut bench := Benchmark.new(i, 0, 0, false) or {
-			eprintln('Error creating benchmark: $err')
+			eprintln('Error creating benchmark: ${err}')
 			return
 		}
 
@@ -113,58 +113,55 @@ fn test_performance() {
 		actual << bench.failed
 	}
 
 	assert expected.len == actual.len
-	for i:=0;i<expected.len;i++{
+	for i := 0; i < expected.len; i++ {
 		assert expected[i] == actual[i]
 	}
 }
 
-
-fn func_1() !{
-	arr := [10,2,13,4,5,16,7,1,9,20]
+fn func_1() ! {
+	arr := [10, 2, 13, 4, 5, 16, 7, 1, 9, 20]
 	target := 16
 	mut found := false
-	for i :=0; i<arr.len; i++{
-		if arr[i] == target{
+	for i := 0; i < arr.len; i++ {
+		if arr[i] == target {
 			found = true
 		}
 	}
 	if !found {
 		return error('not found')
 	}
 }
 
-fn func_2() !{
-	arr := [1,2,4,5,7,9,10,13,16,20]
+fn func_2() ! {
+	arr := [1, 2, 4, 5, 7, 9, 10, 13, 16, 20]
 	target := 16
 	mut left := 0
 	mut right := arr.len - 1
 
 	for left <= right {
 		mid := (left + right) / 2
-		if arr[mid] == target{
+		if arr[mid] == target {
 			return
 		}
-		if arr[mid] < target{
+		if arr[mid] < target {
 			left = mid + 1
 		}
-		if arr[mid]>target{
+		if arr[mid] > target {
 			right = mid - 1
 		}
 	}
 	return
 }
 
-
-fn func_3() !{
-	mut arr := [10,2,13,4,5,16,7,1,9,20]
+fn func_3() ! {
+	mut arr := [10, 2, 13, 4, 5, 16, 7, 1, 9, 20]
 
-	for i :=0; i<arr.len-1; i++{
-		for j:=0; j<arr.len-i-1; j++{
-			if arr[j]>arr[j+1]{
-				arr[j],arr[j+1]=arr[j+1],arr[j]
+	for i := 0; i < arr.len - 1; i++ {
+		for j := 0; j < arr.len - i - 1; j++ {
+			if arr[j] > arr[j + 1] {
+				arr[j], arr[j + 1] = arr[j + 1], arr[j]
 			}
 		}
 	}
 }
-

From fa9ca619a3eb2fab981b518e18ade5fc76620981 Mon Sep 17 00:00:00 2001
From: Delyan Angelov
Date: Sat, 14 Sep 2024 20:43:07 +0300
Subject: [PATCH 4/5] rebase over current master, and run `v fmt -w .` again

---
 vlib/x/benchmark/benchmark.v | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vlib/x/benchmark/benchmark.v b/vlib/x/benchmark/benchmark.v
index 8be74f2f3e8409..8f3b6fc021b532 100644
--- a/vlib/x/benchmark/benchmark.v
+++ b/vlib/x/benchmark/benchmark.v
@@ -41,7 +41,7 @@ pub fn Benchmark.new(bench_func fn () !, n i64, duration time.Duration, is_paral
 	return Benchmark{
 		n: n
 		bench_func: bench_func
-		bench_time: if duration > 0 { duration } else { benchmark.default_duration }
+		bench_time: if duration > 0 { duration } else { default_duration }
 		is_parallel: is_parallel
 	}
 }

From 869b974f3ddb03fe065bf5aa7d705bfbfe8e6154 Mon Sep 17 00:00:00 2001
From: skeris
Date: Mon, 23 Sep 2024 00:45:47 +0300
Subject: [PATCH 5/5] - change static factory method to factory function
 `setup`
 - create a params struct for a more convenient way to create a benchmark
 - make error messages clearer

---
 vlib/x/benchmark/benchmark.v      | 32 +++++++++++++++++++------------
 vlib/x/benchmark/benchmark_test.v | 22 +++++++++++-----------
 2 files changed, 32 insertions(+), 22 deletions(-)

diff --git a/vlib/x/benchmark/benchmark.v b/vlib/x/benchmark/benchmark.v
index 8f3b6fc021b532..2d334b6c04bc54 100644
--- a/vlib/x/benchmark/benchmark.v
+++ b/vlib/x/benchmark/benchmark.v
@@ -4,8 +4,6 @@ import time
 import math
 import sync
 
-const default_duration = time.second
-
 // Benchmark represents all significant data for benchmarking. Provides a clear way of getting results via exported methods
 @[noinit]
 pub struct Benchmark {
@@ -23,32 +21,42 @@ pub mut:
 	start_allocs     usize           // heap bytes allocated at benchmark start
 }
 
+// BenchmarkDefaults is a params struct providing benchmarking parameters to the setup function
+// - n - number of iterations. set if you know how many runs of function you need. if you don't know how many you need, set 0
+// - duration - 1s by default. expected duration of all benchmark runs. cannot be combined with is_parallel == true
+// - is_parallel - if true, every bench_func runs in a separate coroutine
+@[params]
+pub struct BenchmarkDefaults {
+pub:
+	duration    time.Duration = time.second
+	is_parallel bool
+	n           i64
+}
+
 // Benchmark.new - constructor of benchmark
 // arguments:
 // - bench_func - function to benchmark. required; if you have no function, you don't need a benchmark
-// - n - number of iterations. set if you know how many runs of function you need. if you don't know how many you need, set 0
-// - duration - 1s by default. expected duration of all benchmark runs. cannot be combined with is_parallel == true
-// - is_parallel - if true, every bench_func runs in a separate coroutine
-pub fn Benchmark.new(bench_func fn () !, n i64, duration time.Duration, is_parallel bool) !Benchmark {
+// - params - structure of benchmark parameters
+pub fn setup(bench_func fn () !, params BenchmarkDefaults) !Benchmark {
 	if bench_func == unsafe { nil } {
-		return error('Benchmark function cannot be empty')
+		return error('Benchmark function cannot be `nil`')
 	}
 
-	if duration > 0 && is_parallel {
+	if params.duration > 0 && params.is_parallel {
 		return error('cannot predict number of parallel iterations')
 	}
 
 	return Benchmark{
-		n: n
+		n: params.n
 		bench_func: bench_func
-		bench_time: if duration > 0 { duration } else { default_duration }
-		is_parallel: is_parallel
+		bench_time: params.duration
+		is_parallel: params.is_parallel
 	}
 }
 
 // run_benchmark - function to start benchmarking
 // run the benchmark n times, or for the configured duration
-pub fn (mut b Benchmark) run_benchmark() {
+pub fn (mut b Benchmark) run() {
 	// run bench_func once to warm up the processor cache and get an elapsed time for predicting n
 	b.run_n(1)
diff --git a/vlib/x/benchmark/benchmark_test.v b/vlib/x/benchmark/benchmark_test.v
index 22020906964959..5393898051b034 100644
--- a/vlib/x/benchmark/benchmark_test.v
+++ b/vlib/x/benchmark/benchmark_test.v
@@ -41,12 +41,12 @@ fn test_fn_with_error() {
 	f := fn () ! {
 		return error('error')
 	}
-	mut bench := Benchmark.new(f, 0, 0, false) or {
+	mut bench := setup(f) or {
 		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
-	bench.run_benchmark()
+	bench.run()
 
 	assert bench.failed == true
 	assert bench.benchmark_result.n == 1
@@ -57,12 +57,12 @@ fn test_n_must_be_over_1() {
 		mut i := 0
 		i++
 	}
-	mut bench := Benchmark.new(f, 0, 0, false) or {
+	mut bench := setup(f) or {
 		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
-	bench.run_benchmark()
+	bench.run()
 
 	assert bench.benchmark_result.n > 1
 }
@@ -72,12 +72,14 @@ fn test_n() {
 		mut i := 0
 		i++
 	}
-	mut bench := Benchmark.new(f, 1000, 0, false) or {
+	mut bench := setup(f, BenchmarkDefaults{
+		n: 1000
+	}) or {
 		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
-	bench.run_benchmark()
+	bench.run()
 
 	assert bench.benchmark_result.n == 1000
 }
@@ -86,12 +88,12 @@ fn test_max_bench_time() {
 	f := fn () ! {
 		time.sleep(500 * time.millisecond)
 	}
-	mut bench := Benchmark.new(f, 0, 0, false) or {
+	mut bench := setup(f) or {
 		eprintln('Error creating benchmark: ${err}')
 		return
 	}
 
-	bench.run_benchmark()
+	bench.run()
 
 	assert bench.benchmark_result.n == 3
 	assert bench.benchmark_result.t >= time.second
@@ -103,12 +105,12 @@ fn test_performance() {
 	mut actual := []bool{}
 
 	for i in scheduler {
-		mut bench := Benchmark.new(i, 0, 0, false) or {
+		mut bench := setup(i) or {
 			eprintln('Error creating benchmark: ${err}')
 			return
 		}
 
-		bench.run_benchmark()
+		bench.run()
 		actual << bench.failed
 	}
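
Example usage of the module in its final shape after this series (`setup` + `run`). This is a minimal sketch, not part of the patches themselves: the `x.benchmark` import path and the workload body are illustrative assumptions.

import x.benchmark

fn main() {
	// any `fn () !` can be measured; a returned error marks the benchmark as failed
	workload := fn () ! {
		mut sum := 0
		for i in 0 .. 1000 {
			sum += i
		}
	}
	// pass an explicit iteration count; omit `n` to size the run count by duration instead
	mut b := benchmark.setup(workload, n: 10000) or {
		eprintln('setup failed: ${err}')
		return
	}
	// runs the workload and prints iterations, total duration, ns/op, B/op and allocs/op
	b.run()
}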