aboutsummaryrefslogtreecommitdiff
path: root/v_windows/v/old/cmd/tools
diff options
context:
space:
mode:
Diffstat (limited to 'v_windows/v/old/cmd/tools')
-rw-r--r--v_windows/v/old/cmd/tools/bench/wyhash.v56
-rw-r--r--v_windows/v/old/cmd/tools/check_os_api_parity.v130
-rw-r--r--v_windows/v/old/cmd/tools/fast/.gitignore5
-rw-r--r--v_windows/v/old/cmd/tools/fast/fast.v178
-rw-r--r--v_windows/v/old/cmd/tools/fast/fast_job.v43
-rw-r--r--v_windows/v/old/cmd/tools/fast/fast_main.js67
-rw-r--r--v_windows/v/old/cmd/tools/fast/footer.html4
-rw-r--r--v_windows/v/old/cmd/tools/fast/header.html65
-rw-r--r--v_windows/v/old/cmd/tools/fuzz/fuzz.sh18
-rw-r--r--v_windows/v/old/cmd/tools/fuzz/map_fuzz.v144
-rw-r--r--v_windows/v/old/cmd/tools/gen1m.v16
-rw-r--r--v_windows/v/old/cmd/tools/gen_vc.v370
-rw-r--r--v_windows/v/old/cmd/tools/missdoc.v141
-rw-r--r--v_windows/v/old/cmd/tools/modules/scripting/scripting.v180
-rw-r--r--v_windows/v/old/cmd/tools/modules/testing/common.v488
-rw-r--r--v_windows/v/old/cmd/tools/modules/vgit/vgit.v197
-rw-r--r--v_windows/v/old/cmd/tools/modules/vhelp/vhelp.v14
-rw-r--r--v_windows/v/old/cmd/tools/oldv.v176
-rw-r--r--v_windows/v/old/cmd/tools/performance_compare.v215
-rw-r--r--v_windows/v/old/cmd/tools/repeat.v374
-rw-r--r--v_windows/v/old/cmd/tools/test_if_v_test_system_works.v74
-rw-r--r--v_windows/v/old/cmd/tools/test_os_process.v82
-rw-r--r--v_windows/v/old/cmd/tools/vast/cjson.v114
-rw-r--r--v_windows/v/old/cmd/tools/vast/test/.gitignore1
-rw-r--r--v_windows/v/old/cmd/tools/vast/test/demo.v121
-rw-r--r--v_windows/v/old/cmd/tools/vast/vast.v2246
-rw-r--r--v_windows/v/old/cmd/tools/vbin2v.v146
-rw-r--r--v_windows/v/old/cmd/tools/vbug.v208
-rw-r--r--v_windows/v/old/cmd/tools/vbuild-examples.v15
-rw-r--r--v_windows/v/old/cmd/tools/vbuild-tools.v71
-rw-r--r--v_windows/v/old/cmd/tools/vbuild-vbinaries.v9
-rw-r--r--v_windows/v/old/cmd/tools/vcheck-md.v540
-rw-r--r--v_windows/v/old/cmd/tools/vcomplete.v451
-rw-r--r--v_windows/v/old/cmd/tools/vcreate.v186
-rw-r--r--v_windows/v/old/cmd/tools/vcreate_test.v79
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/html.v553
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/html_tag_escape_test.v6
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/markdown.v55
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/arrow.svg1
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/dark-mode.js6
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/dark.svg1
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/doc.css725
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/doc.js235
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-192x192.pngbin0 -> 6083 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-512x512.pngbin0 -> 18209 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/apple-touch-icon.pngbin0 -> 5707 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/browserconfig.xml9
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-16x16.pngbin0 -> 853 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-32x32.pngbin0 -> 1305 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon.icobin0 -> 15086 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-144x144.pngbin0 -> 4512 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-150x150.pngbin0 -> 4360 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x150.pngbin0 -> 4927 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x310.pngbin0 -> 10195 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-70x70.pngbin0 -> 3093 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/safari-pinned-tab.svg39
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/favicons/site.webmanifest19
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/light.svg1
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/link.svg1
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/menu.svg1
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/resources/normalize.css171
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.out1
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.v8
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/tests/vdoc_file_test.v72
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/utils.v275
-rw-r--r--v_windows/v/old/cmd/tools/vdoc/vdoc.v511
-rw-r--r--v_windows/v/old/cmd/tools/vdoctor.exebin0 -> 880128 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vdoctor.v264
-rw-r--r--v_windows/v/old/cmd/tools/vfmt.v334
-rw-r--r--v_windows/v/old/cmd/tools/vpm.exebin0 -> 962048 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vpm.v601
-rw-r--r--v_windows/v/old/cmd/tools/vrepl.exebin0 -> 498688 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vrepl.v390
-rw-r--r--v_windows/v/old/cmd/tools/vself.exebin0 -> 432128 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vself.v89
-rw-r--r--v_windows/v/old/cmd/tools/vsetup-freetype.v28
-rw-r--r--v_windows/v/old/cmd/tools/vsymlink.v182
-rw-r--r--v_windows/v/old/cmd/tools/vtest-all.v187
-rw-r--r--v_windows/v/old/cmd/tools/vtest-cleancode.v102
-rw-r--r--v_windows/v/old/cmd/tools/vtest-fmt.v43
-rw-r--r--v_windows/v/old/cmd/tools/vtest-parser.v289
-rw-r--r--v_windows/v/old/cmd/tools/vtest-self.v220
-rw-r--r--v_windows/v/old/cmd/tools/vtest.v135
-rw-r--r--v_windows/v/old/cmd/tools/vtracev.v17
-rw-r--r--v_windows/v/old/cmd/tools/vup.exebin0 -> 1010176 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vup.v164
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.out2
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.vv5
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.out6
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.vv24
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/module_file_test.out5
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/module_file_test.vv55
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.out2
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.vv4
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.out2
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.vv4
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/trailing_space.out7
-rw-r--r--v_windows/v/old/cmd/tools/vvet/tests/trailing_space.vv16
-rw-r--r--v_windows/v/old/cmd/tools/vvet/vet_test.v72
-rw-r--r--v_windows/v/old/cmd/tools/vvet/vvet.v256
-rw-r--r--v_windows/v/old/cmd/tools/vwatch.exebin0 -> 560640 bytes
-rw-r--r--v_windows/v/old/cmd/tools/vwatch.v381
-rw-r--r--v_windows/v/old/cmd/tools/vwipe-cache.v13
103 files changed, 13813 insertions, 0 deletions
diff --git a/v_windows/v/old/cmd/tools/bench/wyhash.v b/v_windows/v/old/cmd/tools/bench/wyhash.v
new file mode 100644
index 0000000..b760ad6
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/bench/wyhash.v
@@ -0,0 +1,56 @@
+module main
+
+import hash.fnv1a
+import hash as wyhash
+import rand
+import benchmark
+
+fn main() {
+ rand.seed([u32(42), 0])
+ sample_size := 10000000
+ min_str_len := 20
+ max_str_len := 40
+ println('Generating $sample_size strings between $min_str_len - $max_str_len chars long...')
+ mut checksum := u64(0)
+ mut start_pos := 0
+ mut bgenerating := benchmark.start()
+ mut bytepile := []byte{}
+ for _ in 0 .. sample_size * max_str_len {
+ bytepile << byte(rand.int_in_range(40, 125))
+ }
+ mut str_lens := []int{}
+ for _ in 0 .. sample_size {
+ str_lens << rand.int_in_range(min_str_len, max_str_len)
+ }
+ bgenerating.measure('generating strings')
+ println('Hashing each of the generated strings...')
+ //
+ mut bhashing_1 := benchmark.start()
+ start_pos = 0
+ checksum = 0
+ for len in str_lens {
+ end_pos := start_pos + len
+ checksum ^= wyhash.wyhash_c(unsafe { &byte(bytepile.data) + start_pos }, u64(len),
+ 1)
+ start_pos = end_pos
+ }
+ bhashing_1.measure('wyhash.wyhash_c | checksum: ${checksum:22}')
+ mut bhashing_2 := benchmark.start()
+ start_pos = 0
+ checksum = 0
+ for len in str_lens {
+ end_pos := start_pos + len
+ checksum ^= wyhash.sum64(bytepile[start_pos..end_pos], 1)
+ start_pos = end_pos
+ }
+ bhashing_2.measure('wyhash.sum64 | checksum: ${checksum:22}')
+ mut bhashing_3 := benchmark.start()
+ start_pos = 0
+ checksum = 0
+ for len in str_lens {
+ end_pos := start_pos + len
+ checksum ^= fnv1a.sum64(bytepile[start_pos..end_pos])
+ start_pos = end_pos
+ }
+ bhashing_3.measure('fnv1a.sum64 | checksum: ${checksum:22}')
+}
diff --git a/v_windows/v/old/cmd/tools/check_os_api_parity.v b/v_windows/v/old/cmd/tools/check_os_api_parity.v
new file mode 100644
index 0000000..d4b925c
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/check_os_api_parity.v
@@ -0,0 +1,130 @@
+module main
+
+import os
+import v.util
+import v.util.diff
+import v.pref
+import v.builder
+import v.ast
+import rand
+import term
+
+const (
+ base_os = 'linux'
+ os_names = ['linux', 'macos', 'windows']
+ skip_modules = [
+ 'builtin.bare',
+ 'builtin.linux_bare.old',
+ 'builtin.js',
+ 'strconv',
+ 'strconv.ftoa',
+ 'hash',
+ 'strings',
+ 'crypto.rand',
+ 'os.bare',
+ 'os2',
+ 'picohttpparser',
+ 'picoev',
+ 'szip',
+ 'v.eval',
+ ]
+)
+
+struct App {
+ diff_cmd string
+ is_verbose bool
+ modules []string
+mut:
+ api_differences map[string]int
+}
+
+fn main() {
+ vexe := pref.vexe_path()
+ vroot := os.dir(vexe)
+ util.set_vroot_folder(vroot)
+ os.chdir(vroot)
+ cmd := diff.find_working_diff_command() or { '' }
+ mut app := App{
+ diff_cmd: cmd
+ is_verbose: os.getenv('VERBOSE').len > 0
+ modules: if os.args.len > 1 { os.args[1..] } else { all_vlib_modules() }
+ }
+ for mname in app.modules {
+ if !app.is_verbose {
+ eprintln('Checking module: $mname ...')
+ }
+ api_base := app.gen_api_for_module_in_os(mname, base_os)
+ for oname in os_names {
+ if oname == base_os {
+ continue
+ }
+ api_os := app.gen_api_for_module_in_os(mname, oname)
+ app.compare_api(api_base, api_os, mname, base_os, oname)
+ }
+ }
+ howmany := app.api_differences.len
+ if howmany > 0 {
+ eprintln(term.header('Found $howmany modules with different APIs', '='))
+ for m in app.api_differences.keys() {
+ eprintln('Module: $m')
+ }
+ exit(1)
+ }
+}
+
+fn all_vlib_modules() []string {
+ mut vlib_v_files := os.walk_ext('vlib', '.v')
+ mut vmodulesmap := map[string]int{}
+ for f in vlib_v_files {
+ if f.contains('/tests/') || f.ends_with('_test.v') {
+ continue
+ }
+ vmodulename := os.dir(f).replace('/', '.').replace('vlib.', '')
+ if vmodulename in skip_modules {
+ continue
+ }
+ vmodulesmap[vmodulename] = vmodulesmap[vmodulename] + 1
+ }
+ mut modules := vmodulesmap.keys()
+ modules.sort()
+ return modules
+}
+
+fn (app App) gen_api_for_module_in_os(mod_name string, os_name string) string {
+ if app.is_verbose {
+ eprintln('Checking module: ${mod_name:-30} for OS: ${os_name:-10} ...')
+ }
+ mpath := os.join_path('vlib', mod_name.replace('.', '/'))
+ tmpname := '/tmp/${mod_name}_${os_name}.c'
+ prefs, _ := pref.parse_args([], ['-os', os_name, '-o', tmpname, '-shared', mpath])
+ mut b := builder.new_builder(prefs)
+ b.compile_c()
+ mut res := []string{}
+ for f in b.parsed_files {
+ for s in f.stmts {
+ if s is ast.FnDecl {
+ if s.is_pub {
+ fn_signature := s.stringify(b.table, mod_name, map[string]string{})
+ fn_mod := s.modname()
+ if fn_mod == mod_name {
+ fline := '$fn_mod: $fn_signature'
+ res << fline
+ }
+ }
+ }
+ }
+ }
+ res.sort()
+ return res.join('\n')
+}
+
+fn (mut app App) compare_api(api_base string, api_os string, mod_name string, os_base string, os_target string) {
+ res := diff.color_compare_strings(app.diff_cmd, rand.ulid(), api_base, api_os)
+ if res.len > 0 {
+ summary := 'Different APIs found for module: `$mod_name`, between OS base: `$os_base` and OS: `$os_target`'
+ eprintln(term.header(summary, '-'))
+ eprintln(res)
+ eprintln(term.h_divider('-'))
+ app.api_differences[mod_name] = 1
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/fast/.gitignore b/v_windows/v/old/cmd/tools/fast/.gitignore
new file mode 100644
index 0000000..1efae28
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fast/.gitignore
@@ -0,0 +1,5 @@
+fast
+index.html
+table.html
+v.c
+v2
diff --git a/v_windows/v/old/cmd/tools/fast/fast.v b/v_windows/v/old/cmd/tools/fast/fast.v
new file mode 100644
index 0000000..0b9f05a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fast/fast.v
@@ -0,0 +1,178 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+import os
+import time
+
+// TODO -usecache
+const voptions = ' -skip-unused -show-timings -stats '
+
+const exe = os.executable()
+
+const fast_dir = os.dir(exe)
+
+const vdir = @VEXEROOT
+
+fn main() {
+ dump(fast_dir)
+ dump(vdir)
+ os.chdir(fast_dir)
+ if !os.exists('$vdir/v') && !os.is_dir('$vdir/vlib') {
+ println('fast.html generator needs to be located in `v/cmd/tools/fast`')
+ }
+ println('fast.html generator\n')
+ println('Fetching updates...')
+ ret := os.system('$vdir/v up')
+ if ret != 0 {
+ println('failed to update V')
+ return
+ }
+ // Fetch the last commit's hash
+ commit := exec('git rev-parse HEAD')[..8]
+ if !os.exists('table.html') {
+ os.create('table.html') ?
+ }
+ mut table := os.read_file('table.html') ?
+ if table.contains('>$commit<') {
+ println('nothing to benchmark')
+ exit(1)
+ return
+ }
+ // for i, commit in commits {
+ message := exec('git log --pretty=format:"%s" -n1 $commit')
+ // println('\n${i + 1}/$commits.len Benchmarking commit $commit "$message"')
+ println('\nBenchmarking commit $commit "$message"')
+ // Build an optimized V
+ // println('Checking out ${commit}...')
+ // exec('git checkout $commit')
+ println(' Building vprod...')
+ os.chdir(vdir)
+ exec('./v -o vprod -prod -prealloc cmd/v')
+ // println('cur vdir="$vdir"')
+ // exec('v -o vprod cmd/v') // for faster debugging
+ // cache vlib modules
+ exec('$vdir/v wipe-cache')
+ exec('$vdir/v -o v2 -prod cmd/v')
+ // measure
+ diff1 := measure('$vdir/vprod $voptions -o v.c cmd/v', 'v.c')
+ mut tcc_path := 'tcc'
+ $if freebsd {
+ tcc_path = '/usr/local/bin/tcc'
+ }
+ diff2 := measure('$vdir/vprod $voptions -cc $tcc_path -o v2 cmd/v', 'v2')
+ diff3 := 0 // measure('$vdir/vprod -native $vdir/cmd/tools/1mil.v', 'native 1mil')
+ diff4 := measure('$vdir/vprod -usecache $voptions -cc clang examples/hello_world.v',
+ 'hello.v')
+ vc_size := os.file_size('v.c') / 1000
+ // scan/parse/check/cgen
+ scan, parse, check, cgen, vlines := measure_steps(vdir)
+ // println('Building V took ${diff}ms')
+ commit_date := exec('git log -n1 --pretty="format:%at" $commit')
+ date := time.unix(commit_date.int())
+ //
+ os.chdir(fast_dir)
+ mut out := os.create('table.html') ?
+ // Place the new row on top
+ html_message := message.replace_each(['<', '&lt;', '>', '&gt;'])
+ table =
+ '<tr>
+ <td>$date.format()</td>
+ <td><a target=_blank href="https://github.com/vlang/v/commit/$commit">$commit</a></td>
+ <td>$html_message</td>
+ <td>${diff1}ms</td>
+ <td>${diff2}ms</td>
+ <td>${diff3}ms</td>
+ <td>${diff4}ms</td>
+ <td>$vc_size KB</td>
+ <td>${parse}ms</td>
+ <td>${check}ms</td>
+ <td>${cgen}ms</td>
+ <td>${scan}ms</td>
+ <td>$vlines</td>
+ <td>${int(f64(vlines) / f64(diff1) * 1000.0)}</td>
+ </tr>\n' +
+ table.trim_space()
+ out.writeln(table) ?
+ out.close()
+ // Regenerate index.html
+ header := os.read_file('header.html') ?
+ footer := os.read_file('footer.html') ?
+ mut res := os.create('index.html') ?
+ res.writeln(header) ?
+ res.writeln(table) ?
+ res.writeln(footer) ?
+ res.close()
+ //}
+ // exec('git checkout master')
+ // os.write_file('last_commit.txt', commits[commits.len - 1]) ?
+}
+
+fn exec(s string) string {
+ e := os.execute_or_exit(s)
+ return e.output.trim_right('\r\n')
+}
+
+// returns milliseconds
+fn measure(cmd string, description string) int {
+ println(' Measuring $description')
+ println(' Warming up...')
+ println(cmd)
+ for _ in 0 .. 3 {
+ exec(cmd)
+ }
+ println(' Building...')
+ mut runs := []int{}
+ for r in 0 .. 5 {
+ println(' Sample ${r + 1}/5')
+ sw := time.new_stopwatch()
+ exec(cmd)
+ runs << int(sw.elapsed().milliseconds())
+ }
+ // discard lowest and highest time
+ runs.sort()
+ runs = runs[1..4]
+ mut sum := 0
+ for run in runs {
+ sum += run
+ }
+ return int(sum / 3)
+}
+
+fn measure_steps(vdir string) (int, int, int, int, int) {
+ resp := os.execute_or_exit('$vdir/vprod $voptions -o v.c cmd/v')
+
+ mut scan, mut parse, mut check, mut cgen, mut vlines := 0, 0, 0, 0, 0
+ lines := resp.output.split_into_lines()
+ if lines.len == 3 {
+ parse = lines[0].before('.').int()
+ check = lines[1].before('.').int()
+ cgen = lines[2].before('.').int()
+ } else {
+ ms_lines := lines.map(it.split(' ms '))
+ for line in ms_lines {
+ if line.len == 2 {
+ if line[1] == 'SCAN' {
+ scan = line[0].int()
+ }
+ if line[1] == 'PARSE' {
+ parse = line[0].int()
+ }
+ if line[1] == 'CHECK' {
+ check = line[0].int()
+ }
+ if line[1] == 'C GEN' {
+ cgen = line[0].int()
+ }
+ } else {
+ // Fetch number of V lines
+ if line[0].contains('V') && line[0].contains('source') && line[0].contains('size') {
+ start := line[0].index(':') or { 0 }
+ end := line[0].index('lines,') or { 0 }
+ s := line[0][start + 1..end]
+ vlines = s.trim_space().int()
+ }
+ }
+ }
+ }
+ return scan, parse, check, cgen, vlines
+}
diff --git a/v_windows/v/old/cmd/tools/fast/fast_job.v b/v_windows/v/old/cmd/tools/fast/fast_job.v
new file mode 100644
index 0000000..d4d8fc3
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fast/fast_job.v
@@ -0,0 +1,43 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+import os
+import time
+
+// A job that runs in the background, checks for repo updates,
+// runs fast.v, pushes the HTML result to the fast.vlang.io GH pages repo.
+fn main() {
+ println(time.now())
+ if !os.exists('website') {
+ println('cloning the website repo...')
+ os.system('git clone git@github.com:/vlang/website.git')
+ }
+ if !os.exists('fast') {
+ println('"fast" binary (built with `v fast.v`) was not found')
+ return
+ }
+ for {
+ res_pull := os.execute('git pull --rebase')
+ if res_pull.exit_code != 0 {
+ println('failed to git pull. uncommitted changes?')
+ return
+ }
+ // println('running ./fast')
+ resp := os.execute('./fast')
+ if resp.exit_code < 0 {
+ println(resp.output)
+ return
+ }
+ if resp.exit_code != 0 {
+ println('resp != 0, skipping')
+ } else {
+ os.chdir('website')
+ os.execute_or_exit('git checkout gh-pages')
+ os.cp('../index.html', 'index.html') ?
+ os.system('git commit -am "update benchmark"')
+ os.system('git push origin gh-pages')
+ os.chdir('..')
+ }
+ time.sleep(180 * time.second)
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/fast/fast_main.js b/v_windows/v/old/cmd/tools/fast/fast_main.js
new file mode 100644
index 0000000..d6b2d19
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fast/fast_main.js
@@ -0,0 +1,67 @@
+const delta = 18;
+
+(function () {
+ var table = document.querySelector("table");
+ var isTbody = table.children[0].nodeName == "TBODY";
+ var trs = isTbody
+ ? table.children[0].querySelectorAll("tr")
+ : table.querySelectorAll("tr");
+ trs.forEach(function (tr, idx) {
+ if (idx != 0 && idx + 1 < trs.length) {
+ var vc = 3, vv = 4, vf = 5, vh = 6;
+ var textContent = {
+ vc: tr.children[vc].textContent,
+ vv: tr.children[vv].textContent,
+ vf: tr.children[vf].textContent,
+ vh: tr.children[vh].textContent
+ };
+ var currentData = {
+ vc: int(textContent.vc.slice(0, -2)),
+ vv: int(textContent.vv.slice(0, -2)),
+ vf: int(textContent.vf.slice(0, -2)),
+ vh: int(textContent.vh.slice(0, -2))
+ };
+ var prevData = {
+ vc: int(trs[idx + 1].children[vc].textContent.slice(0, -2)),
+ vv: int(trs[idx + 1].children[vv].textContent.slice(0, -2)),
+ vf: int(trs[idx + 1].children[vf].textContent.slice(0, -2)),
+ vh: int(trs[idx + 1].children[vh].textContent.slice(0, -2))
+ };
+ var result = {
+ vc: currentData.vc - prevData.vc,
+ vv: currentData.vv - prevData.vv,
+ vf: currentData.vf - prevData.vf,
+ vh: currentData.vh - prevData.vh
+ };
+ if (Math.abs(result.vc) > delta)
+ tr.children[vc].appendChild(createElement(result.vc));
+ if (Math.abs(result.vv) > delta * 2)
+ tr.children[vv].appendChild(createElement(result.vv));
+ if (Math.abs(result.vf) > delta * 2)
+ tr.children[vf].appendChild(createElement(result.vf));
+ if (Math.abs(result.vh) > delta * 2)
+ tr.children[vh].appendChild(createElement(result.vh));
+ }
+ });
+ function int(src) {
+ return src - 0;
+ }
+ function getClassName(x) {
+ if (x == 0)
+ return "equal";
+ return x < 0 ? "plus" : "minus";
+ }
+ function createElement(result) {
+ var el = document.createElement("span");
+ var parsedResult = parseResult(result);
+ el.classList.add("diff");
+ el.classList.add(getClassName(result));
+ el.textContent = parsedResult;
+ return el;
+ }
+ function parseResult(x) {
+ if (x == 0)
+ return "0";
+ return x > 0 ? "+" + x : x;
+ }
+})();
diff --git a/v_windows/v/old/cmd/tools/fast/footer.html b/v_windows/v/old/cmd/tools/fast/footer.html
new file mode 100644
index 0000000..37f5c0f
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fast/footer.html
@@ -0,0 +1,4 @@
+</table>
+<script src="main.js"></script>
+</body>
+</html>
diff --git a/v_windows/v/old/cmd/tools/fast/header.html b/v_windows/v/old/cmd/tools/fast/header.html
new file mode 100644
index 0000000..8f4ee5c
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fast/header.html
@@ -0,0 +1,65 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="UTF-8">
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<title>Is V still fast?</title>
+<style>
+*, body {
+ font-family: Menlo, Monospace, 'Courier New';
+}
+table {
+ width: 1800px;
+}
+table, td {
+ border-collapse: collapse;
+ border: 1px solid #dfdfdf;
+}
+td {
+ padding: 5px;
+ position: relative;
+}
+.diff {
+ border-radius: 2.5px;
+ color: #ffffff;
+ padding: 0 5px 0 5px;
+ position: absolute;
+ right: 5px;
+}
+.minus {
+ background-color: rgb(195, 74, 104);
+}
+.plus {
+ background-color: #8BC34A;
+}
+.equal {
+ background-color: rgb(113, 68, 172);
+}
+</style>
+</head>
+<body>
+<h2>Is V still fast?</h2>
+
+Monitoring compilation speed for each commit. <br><br>
+Running on a free tier AWS t2.micro instance (1 vCPU). Typical desktop hardware is 2-3 times faster. <br><br>
+Source code: <a target=blank href='https://github.com/vlang/v/blob/master/cmd/tools/fast/fast.v'>fast.v</a> <br><br>
+
+
+
+<table>
+ <tr>
+ <td style='width:180px'>timestamp</td>
+ <td style='width:85px'>commit</td>
+ <td>commit message</td>
+ <td style='width:120px'>v -o v.c</td>
+ <td style='width:120px'>v -o v</td>
+ <td style='width:130px'>v -native 1mil.v</td>
+ <td style='width:120px'>v hello.v</td>
+ <td style='width:85px'>v.c size</td>
+ <td style='width:55px'>parse</td>
+ <td style='width:55px'>check</td>
+ <td style='width:55px'>cgen</td>
+ <td style='width:55px'>scan</td>
+ <td style='width:80px'>V lines</td>
+ <td style='width:95px'>V lines/s</td>
+ </tr>
diff --git a/v_windows/v/old/cmd/tools/fuzz/fuzz.sh b/v_windows/v/old/cmd/tools/fuzz/fuzz.sh
new file mode 100644
index 0000000..cbba7d7
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fuzz/fuzz.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+cores=$(nproc --all)
+
+echo Number of cores: $cores
+echo Compiling...
+./v -cc clang -o cmd/tools/fuzz/map_fuzz cmd/tools/fuzz/map_fuzz.v -prod -cflags "-fsanitize=memory"
+
+echo Fuzzing:
+while true
+do
+ for ((i=1;i<=cores;++i))
+ do
+ sleep 0.001
+ ./cmd/tools/fuzz/map_fuzz &
+ done
+ wait
+done
diff --git a/v_windows/v/old/cmd/tools/fuzz/map_fuzz.v b/v_windows/v/old/cmd/tools/fuzz/map_fuzz.v
new file mode 100644
index 0000000..f1c62f2
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/fuzz/map_fuzz.v
@@ -0,0 +1,144 @@
+import rand
+import time
+
+fn generate_strings(str_len int, arr_len int) []string {
+ mut arr := []string{len: arr_len}
+ for i in 0 .. arr_len {
+ arr[i] = rand.string(str_len)
+ }
+ return arr
+}
+
+fn fuzz1() {
+ amount := 200000 - rand.intn(100000)
+ amount2 := 200000 - rand.intn(100000)
+ len := 25 - rand.intn(10)
+ arr := generate_strings(len, amount)
+ arr2 := generate_strings(len, amount2)
+ mut m := map[string]int{}
+ for i in 0 .. amount {
+ m[arr[i]] = i
+ assert i == m[arr[i]]
+ }
+ for i in 0 .. amount {
+ assert i == m[arr[i]]
+ }
+ for i in 0 .. amount2 {
+ assert 0 == m[arr2[i]]
+ }
+ unsafe {
+ m.free()
+ arr.free()
+ }
+}
+
+fn fuzz2() {
+ mut m := map[string]int{}
+ amount := rand.intn(500000) + 1
+ len := 25 - rand.intn(10)
+ arr := generate_strings(len, amount)
+ for i, x in arr {
+ m[x] = i
+ }
+ mut i := 0
+ for key, val in m {
+ assert key == arr[i]
+ assert val == i
+ i++
+ }
+ unsafe {
+ m.free()
+ arr.free()
+ }
+}
+
+fn fuzz3() {
+ mut m := map[string]int{}
+ amount := rand.intn(500000) + 1
+ len := 25 - rand.intn(10)
+ arr := generate_strings(len, amount)
+ for i, x in arr {
+ if (i % 10000) == 0 {
+ keys := m.keys()
+ assert keys.len == i
+ assert keys == arr[0..i]
+ }
+ m[x] = i
+ }
+ assert m.keys() == arr
+ assert m.keys().len == amount
+ unsafe {
+ m.free()
+ arr.free()
+ }
+}
+
+fn fuzz4() {
+ amount := rand.intn(500000)
+ len := 25 - rand.intn(10)
+ arr := generate_strings(len, amount)
+ mut m := map[string]int{}
+ for i in 0 .. amount {
+ m[arr[i]] = i
+ }
+ for i in 0 .. amount {
+ m.delete(arr[i])
+ assert m[arr[i]] == 0
+ }
+ assert m.len == 0
+ unsafe {
+ m.free()
+ arr.free()
+ }
+}
+
+fn fuzz5() {
+ amount := rand.intn(500000) + 1
+ arr := generate_strings(20, amount)
+ mut m := map[string]int{}
+ for i in 0 .. amount {
+ m[arr[i]] = i
+ assert (arr[i] in m) == true
+ }
+ for i in 0 .. amount {
+ m.delete(arr[i])
+ assert (arr[i] !in m) == true
+ assert m.len == amount - i - 1
+ }
+ assert m.len == 0
+ unsafe {
+ m.free()
+ arr.free()
+ }
+}
+
+fn fuzz6() {
+ mut m := map[string]int{}
+ amount := rand.intn(500000) + 1
+ len := 25 - rand.intn(10)
+ arr := generate_strings(len, amount)
+ for i, x in arr {
+ m[x]++
+ m[x] += i
+ assert m[x] == i + 1
+ }
+ for i, x in arr {
+ assert m[x] == i + 1
+ }
+ unsafe {
+ m.free()
+ arr.free()
+ }
+}
+
+fn main() {
+ seed := u32(time.ticks())
+ println('seed: $seed.hex()')
+ rand.seed([seed, seed])
+ fuzz1()
+ fuzz2()
+ fuzz3()
+ fuzz4()
+ fuzz5()
+ fuzz6()
+}
diff --git a/v_windows/v/old/cmd/tools/gen1m.v b/v_windows/v/old/cmd/tools/gen1m.v
new file mode 100644
index 0000000..2352c9f
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/gen1m.v
@@ -0,0 +1,16 @@
+fn main() {
+ for i in 0 .. 100000 {
+ println('
+fn foo${i}() {
+ x := $i
+ mut a := 1
+ a += 2
+ print(a)
+ a = 0
+ a = 1
+}
+')
+ }
+ // println('fn main() {foo1()} ')
+ println('fn main() { println("1m DONE") } ')
+}
diff --git a/v_windows/v/old/cmd/tools/gen_vc.v b/v_windows/v/old/cmd/tools/gen_vc.v
new file mode 100644
index 0000000..c2e8746
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/gen_vc.v
@@ -0,0 +1,370 @@
+module main
+
+import os
+import log
+import flag
+import time
+import vweb
+import net.urllib
+
+// This tool regenerates V's bootstrap .c files
+// every time the V master branch is updated.
+// if run with the --serve flag it will run in webhook
+// server mode awaiting a request to http://host:port/genhook
+// available command line flags:
+// --work-dir gen_vc's working directory
+// --purge force purge the local repositories
+// --serve run in webhook server mode
+// --port port for http server to listen on
+// --log-to either 'file' or 'terminal'
+// --log-file path to log file used when --log-to is 'file'
+// --dry-run don't push anything to remote repo
+// --force force update even if already up to date
+
+// git credentials
+const (
+ git_username = os.getenv('GITUSER')
+ git_password = os.getenv('GITPASS')
+)
+
+// repository
+const (
+ // git repo
+ git_repo_v = 'github.com/vlang/v'
+ git_repo_vc = 'github.com/vlang/vc'
+ // local repo directories
+ git_repo_dir_v = 'v'
+ git_repo_dir_vc = 'vc'
+)
+
+// gen_vc
+const (
+ // name
+ app_name = 'gen_vc'
+ // version
+ app_version = '0.1.2'
+ // description
+ app_description = "This tool regenerates V's bootstrap .c files every time the V master branch is updated."
+ // assume something went wrong if file size less than this
+ too_short_file_limit = 5000
+ // create a .c file for these os's
+ vc_build_oses = [
+ 'nix',
+ // all nix based os
+ 'windows',
+ ]
+)
+
+// default options (overridden by flags)
+const (
+ // gen_vc working directory
+ work_dir = '/tmp/gen_vc'
+ // don't push anything to remote repo
+ dry_run = false
+ // server port
+ server_port = 7171
+ // log file
+ log_file = '$work_dir/log.txt'
+ // log_to is either 'file' or 'terminal'
+ log_to = 'terminal'
+)
+
+// errors
+const (
+ err_msg_build = 'error building'
+ err_msg_make = 'make failed'
+ err_msg_gen_c = 'failed to generate .c file'
+ err_msg_cmd_x = 'error running cmd'
+)
+
+struct GenVC {
+ // logger
+ // flag options
+ options FlagOptions
+mut:
+ logger &log.Log
+ // true if error was experienced running generate
+ gen_error bool
+}
+
+// webhook server
+struct WebhookServer {
+ vweb.Context
+mut:
+ gen_vc &GenVC = 0 // initialized in init_server
+}
+
+// storage for flag options
+struct FlagOptions {
+ work_dir string
+ purge bool
+ serve bool
+ port int
+ log_to string
+ log_file string
+ dry_run bool
+ force bool
+}
+
+fn main() {
+ mut fp := flag.new_flag_parser(os.args.clone())
+ fp.application(app_name)
+ fp.version(app_version)
+ fp.description(app_description)
+ fp.skip_executable()
+ show_help := fp.bool('help', 0, false, 'Show this help screen\n')
+ flag_options := parse_flags(mut fp)
+ if show_help {
+ println(fp.usage())
+ exit(0)
+ }
+ fp.finalize() or {
+ eprintln(err)
+ println(fp.usage())
+ return
+ }
+ // webhook server mode
+ if flag_options.serve {
+ vweb.run<WebhookServer>(&WebhookServer{}, flag_options.port)
+ } else {
+ // cmd mode
+ mut gen_vc := new_gen_vc(flag_options)
+ gen_vc.init()
+ gen_vc.generate()
+ }
+}
+
+// new GenVC
+fn new_gen_vc(flag_options FlagOptions) &GenVC {
+ mut logger := &log.Log{}
+ logger.set_level(.debug)
+ if flag_options.log_to == 'file' {
+ logger.set_full_logpath(flag_options.log_file)
+ }
+ return &GenVC{
+ options: flag_options
+ logger: logger
+ }
+}
+
+// WebhookServer init
+pub fn (mut ws WebhookServer) init_server() {
+ mut fp := flag.new_flag_parser(os.args.clone())
+ flag_options := parse_flags(mut fp)
+ ws.gen_vc = new_gen_vc(flag_options)
+ ws.gen_vc.init()
+ // ws.gen_vc = new_gen_vc(flag_options)
+}
+
+pub fn (mut ws WebhookServer) index() {
+ eprintln('WebhookServer.index() called')
+}
+
+// gen webhook
+pub fn (mut ws WebhookServer) genhook() {
+ // request data
+ // println(ws.vweb.req.data)
+ // TODO: parse request. json or urlencoded
+ // json.decode or net.urllib.parse
+ ws.gen_vc.generate()
+ // error in generate
+ if ws.gen_vc.gen_error {
+ ws.json('{status: "failed"}')
+ return
+ }
+ ws.json('{status: "ok"}')
+}
+
+pub fn (ws &WebhookServer) reset() {
+}
+
+// parse flags to FlagOptions struct
+fn parse_flags(mut fp flag.FlagParser) FlagOptions {
+ return FlagOptions{
+ serve: fp.bool('serve', 0, false, 'run in webhook server mode')
+ work_dir: fp.string('work-dir', 0, work_dir, 'gen_vc working directory')
+ purge: fp.bool('purge', 0, false, 'force purge the local repositories')
+ port: fp.int('port', 0, server_port, 'port for web server to listen on')
+ log_to: fp.string('log-to', 0, log_to, "log to is 'file' or 'terminal'")
+ log_file: fp.string('log-file', 0, log_file, "log file to use when log-to is 'file'")
+ dry_run: fp.bool('dry-run', 0, dry_run, 'when specified dont push anything to remote repo')
+ force: fp.bool('force', 0, false, 'force update even if already up to date')
+ }
+}
+
+fn (mut gen_vc GenVC) init() {
+ // purge repos if flag is passed
+ if gen_vc.options.purge {
+ gen_vc.purge_repos()
+ }
+}
+
+// regenerate
+fn (mut gen_vc GenVC) generate() {
+ // set errors to false
+ gen_vc.gen_error = false
+ // check if gen_vc dir exists
+ if !os.is_dir(gen_vc.options.work_dir) {
+ // try create
+ os.mkdir(gen_vc.options.work_dir) or { panic(err) }
+ // still doesn't exist... we have a problem
+ if !os.is_dir(gen_vc.options.work_dir) {
+ gen_vc.logger.error('error creating directory: $gen_vc.options.work_dir')
+ gen_vc.gen_error = true
+ return
+ }
+ }
+ // cd to gen_vc dir
+ os.chdir(gen_vc.options.work_dir)
+ // if we are not running with the --serve flag (webhook server)
+ // rather than deleting and re-downloading the repo each time
+ // first check to see if the local v repo is behind master
+ // if it isn't behind theres no point continuing further
+ if !gen_vc.options.serve && os.is_dir(git_repo_dir_v) {
+ gen_vc.cmd_exec('git -C $git_repo_dir_v checkout master')
+ // fetch the remote repo just in case there are newer commits there
+ gen_vc.cmd_exec('git -C $git_repo_dir_v fetch')
+ git_status := gen_vc.cmd_exec('git -C $git_repo_dir_v status')
+ if !git_status.contains('behind') && !gen_vc.options.force {
+ gen_vc.logger.warn('v repository is already up to date.')
+ return
+ }
+ }
+ // delete repos
+ gen_vc.purge_repos()
+ // clone repos
+ gen_vc.cmd_exec('git clone --depth 1 https://$git_repo_v $git_repo_dir_v')
+ gen_vc.cmd_exec('git clone --depth 1 https://$git_repo_vc $git_repo_dir_vc')
+ // get output of git log -1 (last commit)
+ git_log_v := gen_vc.cmd_exec('git -C $git_repo_dir_v log -1 --format="commit %H%nDate: %ci%nDate Unix: %ct%nSubject: %s"')
+ git_log_vc := gen_vc.cmd_exec('git -C $git_repo_dir_vc log -1 --format="Commit %H%nDate: %ci%nDate Unix: %ct%nSubject: %s"')
+ // date of last commit in each repo
+ ts_v := git_log_v.find_between('Date:', '\n').trim_space()
+ ts_vc := git_log_vc.find_between('Date:', '\n').trim_space()
+ // parse time as string to time.Time
+ last_commit_time_v := time.parse(ts_v) or { panic(err) }
+ last_commit_time_vc := time.parse(ts_vc) or { panic(err) }
+ // git dates are in users local timezone and v time.parse does not parse
+ // timezones at the moment, so for now get unix timestamp from output also
+ t_unix_v := git_log_v.find_between('Date Unix:', '\n').trim_space().int()
+ t_unix_vc := git_log_vc.find_between('Date Unix:', '\n').trim_space().int()
+ // last commit hash in v repo
+ last_commit_hash_v := git_log_v.find_between('commit', '\n').trim_space()
+ last_commit_hash_v_short := last_commit_hash_v[..7]
+ // subject
+ last_commit_subject := git_log_v.find_between('Subject:', '\n').trim_space().replace("'",
+ '"')
+ // log some info
+ gen_vc.logger.debug('last commit time ($git_repo_v): ' + last_commit_time_v.format_ss())
+ gen_vc.logger.debug('last commit time ($git_repo_vc): ' + last_commit_time_vc.format_ss())
+ gen_vc.logger.debug('last commit hash ($git_repo_v): $last_commit_hash_v')
+ gen_vc.logger.debug('last commit subject ($git_repo_v): $last_commit_subject')
+ // if vc repo already has a newer commit than the v repo, assume it's up to date
+ if t_unix_vc >= t_unix_v && !gen_vc.options.force {
+ gen_vc.logger.warn('vc repository is already up to date.')
+ return
+ }
+ // try build v for current os (linux in this case)
+ gen_vc.cmd_exec('make -C $git_repo_dir_v')
+ v_exec := '$git_repo_dir_v/v'
+ // check if make was successful
+ gen_vc.assert_file_exists_and_is_not_too_short(v_exec, err_msg_make)
+ // build v.c for each os
+ for os_name in vc_build_oses {
+ c_file := if os_name == 'nix' { 'v.c' } else { 'v_win.c' }
+ v_flags := if os_name == 'nix' { '-os cross' } else { '-os $os_name' }
+ // try generate .c file
+ gen_vc.cmd_exec('$v_exec $v_flags -o $c_file $git_repo_dir_v/cmd/v')
+ // check if the c file seems ok
+ gen_vc.assert_file_exists_and_is_not_too_short(c_file, err_msg_gen_c)
+ // embed the latest v commit hash into the c file
+ gen_vc.cmd_exec('sed -i \'1s/^/#define V_COMMIT_HASH "$last_commit_hash_v_short"\\n/\' $c_file')
+ // move to vc repo
+ gen_vc.cmd_exec('mv $c_file $git_repo_dir_vc/$c_file')
+ // add new .c file to local vc repo
+ gen_vc.cmd_exec('git -C $git_repo_dir_vc add $c_file')
+ }
+ // check if the vc repo actually changed
+ git_status := gen_vc.cmd_exec('git -C $git_repo_dir_vc status')
+ if git_status.contains('nothing to commit') {
+ gen_vc.logger.error('no changes to vc repo: something went wrong.')
+ gen_vc.gen_error = true
+ }
+ // commit changes to local vc repo
+ gen_vc.cmd_exec_safe("git -C $git_repo_dir_vc commit -m '[v:master] $last_commit_hash_v_short - $last_commit_subject'")
+ // push changes to remote vc repo
+ gen_vc.cmd_exec_safe('git -C $git_repo_dir_vc push https://${urllib.query_escape(git_username)}:${urllib.query_escape(git_password)}@$git_repo_vc master')
+}
+
+// only execute when dry_run option is false, otherwise just log
+// cmd_exec_safe is for destructive/remote commands (commit, push).
+fn (mut gen_vc GenVC) cmd_exec_safe(cmd string) string {
+	return gen_vc.command_execute(cmd, gen_vc.options.dry_run)
+}
+
+// always execute command
+// cmd_exec is for local commands that are safe to run even in dry-run mode.
+fn (mut gen_vc GenVC) cmd_exec(cmd string) string {
+	return gen_vc.command_execute(cmd, false)
+}
+
+// execute command
+// command_execute runs cmd (unless dry is true, in which case it is only
+// logged) and returns its output. On any failure it logs the problem,
+// purges the local repositories so the next run starts fresh, sets
+// gen_error, and returns an empty string.
+fn (mut gen_vc GenVC) command_execute(cmd string, dry bool) string {
+	// if dry is true then dont execute, just log
+	if dry {
+		return gen_vc.command_execute_dry(cmd)
+	}
+	gen_vc.logger.info('cmd: $cmd')
+	r := os.execute(cmd)
+	if r.exit_code == 0 {
+		return r.output
+	}
+	// a negative exit code means the command could not be spawned at all
+	if r.exit_code < 0 {
+		gen_vc.logger.error('$err_msg_cmd_x: "$cmd" could not start.')
+	} else {
+		gen_vc.logger.error('$err_msg_cmd_x: "$cmd" failed.')
+	}
+	gen_vc.logger.error(r.output)
+	// something went wrong, better start fresh next time
+	gen_vc.purge_repos()
+	gen_vc.gen_error = true
+	return ''
+}
+
+// just log cmd, dont execute
+// command_execute_dry always returns an empty output string.
+fn (mut gen_vc GenVC) command_execute_dry(cmd string) string {
+	gen_vc.logger.info('cmd (dry): "$cmd"')
+	return ''
+}
+
+// delete repo directories
+// purge_repos removes the local clones of both the v and the vc
+// repositories, so that the next run starts from a fresh checkout.
+fn (mut gen_vc GenVC) purge_repos() {
+	// these are destructive operations, so stay fully explicit about the targets:
+	// first the v repo clone, then the vc repo clone
+	for repo in [git_repo_dir_v, git_repo_dir_vc] {
+		repo_dir := '$gen_vc.options.work_dir/$repo'
+		if os.is_dir(repo_dir) {
+			gen_vc.logger.info('purging local repo: "$repo_dir"')
+			gen_vc.cmd_exec('rm -rf $repo_dir')
+		}
+	}
+}
+
+// check if file size is too short
+// assert_file_exists_and_is_not_too_short flags a build step as failed
+// (sets gen_error and logs emsg) when f is missing or smaller than
+// too_short_file_limit bytes.
+fn (mut gen_vc GenVC) assert_file_exists_and_is_not_too_short(f string, emsg string) {
+	if !os.exists(f) {
+		gen_vc.logger.error('$err_msg_build: $emsg .')
+		gen_vc.gen_error = true
+		return
+	}
+	fsize := os.file_size(f)
+	if fsize < too_short_file_limit {
+		gen_vc.logger.error('$err_msg_build: $f exists, but is too short: only $fsize bytes.')
+		gen_vc.gen_error = true
+		return
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/missdoc.v b/v_windows/v/old/cmd/tools/missdoc.v
new file mode 100644
index 0000000..188fca1
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/missdoc.v
@@ -0,0 +1,141 @@
+// Copyright (c) 2020 Lars Pontoppidan. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+import os
+import flag
+
+const (
+ tool_name = os.file_name(os.executable())
+ tool_version = '0.0.2'
+ tool_description = 'Prints all V functions in .v files under PATH/, that do not yet have documentation comments.'
+)
+
+// UndocumentedFN describes one function that lacks a documentation comment.
+struct UndocumentedFN {
+	line int // 1-based line number of the `fn` declaration
+	signature string // the declaration line, without the trailing ` {`
+	tags []string // attribute tags (e.g. deprecated) found above the fn
+}
+
+// Options holds the parsed command line switches for this tool.
+struct Options {
+	show_help bool
+	collect_tags bool // also print the collected tags next to each fn
+	deprecated bool // include functions tagged `deprecated` in the output
+}
+
+// collect recursively walks `path` and invokes the callback `f` for every
+// existing file entry, letting the callback decide what to append to `l`.
+// Non-directories and unreadable directories are silently ignored.
+fn collect(path string, mut l []string, f fn (string, mut []string)) {
+	if !os.is_dir(path) {
+		return
+	}
+	// `files` was previously declared `mut` and the function ended with a
+	// redundant bare `return`; both were unnecessary and are removed here.
+	files := os.ls(path) or { return }
+	for file in files {
+		p := path + os.path_separator + file
+		if os.is_dir(p) && !os.is_link(p) {
+			// descend into real subdirectories, but do not follow symlinks
+			collect(p, mut l, f)
+		} else if os.exists(p) {
+			f(p, mut l)
+		}
+	}
+}
+
+// report_undocumented_functions_in_path gathers every .v file under `path`
+// (recursively), skips `_test.v` files, and reports the undocumented
+// functions in each remaining file.
+fn report_undocumented_functions_in_path(opt Options, path string) {
+	mut files := []string{}
+	collect_fn := fn (path string, mut l []string) {
+		if os.file_ext(path) == '.v' {
+			l << os.real_path(path)
+		}
+	}
+	collect(path, mut files, collect_fn)
+	for file in files {
+		if file.ends_with('_test.v') {
+			continue
+		}
+		report_undocumented_functions_in_file(opt, file)
+	}
+}
+
+// report_undocumented_functions_in_file scans `file` line by line for
+// `pub fn`/`fn` declarations (excluding `fn C.` and `fn main`) that are not
+// preceded by a `//` comment, and prints each match as `file:line:0:signature`.
+// Attribute lines (`[...]`) directly above a fn are collected as tags.
+fn report_undocumented_functions_in_file(opt Options, file string) {
+	contents := os.read_file(file) or { panic(err) }
+	lines := contents.split('\n')
+	mut info := []UndocumentedFN{}
+	for i, line in lines {
+		if line.starts_with('pub fn') || (line.starts_with('fn ') && !(line.starts_with('fn C.')
+			|| line.starts_with('fn main'))) {
+			// println('Match: $line')
+			if i > 0 && lines.len > 0 {
+				mut line_above := lines[i - 1]
+				if !line_above.starts_with('//') {
+					mut tags := []string{}
+					mut grab := true
+					// scan upwards past attribute lines, until the previous
+					// scope (`}`) or a `//` comment is found
+					for j := i - 1; j >= 0; j-- {
+						prev_line := lines[j]
+						if prev_line.contains('}') { // We've looked back to the above scope, stop here
+							break
+						} else if prev_line.starts_with('[') {
+							tags << collect_tags(prev_line)
+							continue
+						} else if prev_line.starts_with('//') { // Single-line comment
+							grab = false
+							break
+						}
+					}
+					if grab {
+						clean_line := line.all_before_last(' {')
+						info << UndocumentedFN{i + 1, clean_line, tags}
+					}
+				}
+			}
+		}
+	}
+	if info.len > 0 {
+		for undocumented_fn in info {
+			tags_str := if opt.collect_tags && undocumented_fn.tags.len > 0 {
+				'$undocumented_fn.tags'
+			} else {
+				''
+			}
+			if opt.deprecated {
+				println('$file:$undocumented_fn.line:0:$undocumented_fn.signature $tags_str')
+			} else {
+				// by default, deprecated functions are not reported
+				if 'deprecated' !in undocumented_fn.tags {
+					println('$file:$undocumented_fn.line:0:$undocumented_fn.signature $tags_str')
+				}
+			}
+		}
+	}
+}
+
+// collect_tags extracts the attribute names from an attribute line such as
+// `[unsafe, deprecated]`, dropping any trailing `//` comment first.
+fn collect_tags(line string) []string {
+	before_comment := line.all_before('/')
+	stripped := before_comment.replace_each(['[', '', ']', '', ' ', ''])
+	return stripped.split(',')
+}
+
+// main parses the tool flags and reports undocumented functions for every
+// PATH argument (a single .v file, or a directory walked recursively).
+fn main() {
+	if os.args.len == 1 {
+		println('Usage: $tool_name PATH \n$tool_description\n$tool_name -h for more help...')
+		exit(1)
+	}
+	mut fp := flag.new_flag_parser(os.args[1..])
+	fp.application(tool_name)
+	fp.version(tool_version)
+	fp.description(tool_description)
+	fp.arguments_description('PATH [PATH]...')
+	// Collect tool options
+	opt := Options{
+		show_help: fp.bool('help', `h`, false, 'Show this help text.')
+		deprecated: fp.bool('deprecated', `d`, false, 'Include deprecated functions in output.')
+		collect_tags: fp.bool('tags', `t`, false, 'Also print function tags if any is found.')
+	}
+	if opt.show_help {
+		println(fp.usage())
+		exit(0)
+	}
+	// NOTE(review): this iterates the raw os.args[1..], which still contains
+	// the flag tokens themselves (e.g. `-t`); they are harmless in practice
+	// because is_file/is_dir are false for them, but using fp.finalize()
+	// for the remaining args would be cleaner — TODO confirm/clean up.
+	for path in os.args[1..] {
+		if os.is_file(path) {
+			report_undocumented_functions_in_file(opt, path)
+		} else {
+			report_undocumented_functions_in_path(opt, path)
+		}
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/modules/scripting/scripting.v b/v_windows/v/old/cmd/tools/modules/scripting/scripting.v
new file mode 100644
index 0000000..edf6a0a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/modules/scripting/scripting.v
@@ -0,0 +1,180 @@
+module scripting
+
+import os
+import term
+import time
+
+const (
+ term_colors = term.can_show_color_on_stdout()
+)
+
+// set_verbose toggles the module's verbose tracing, which is communicated
+// to all scripting functions via the VERBOSE environment variable.
+pub fn set_verbose(on bool) {
+	// setting a global here would be the obvious solution,
+	// but V does not have globals normally.
+	if on {
+		os.setenv('VERBOSE', '1', true)
+	} else {
+		os.unsetenv('VERBOSE')
+	}
+}
+
+// cprint prints omessage to stdout, colored cyan when the terminal supports it.
+pub fn cprint(omessage string) {
+	mut message := omessage
+	if scripting.term_colors {
+		message = term.cyan(message)
+	}
+	print(message)
+}
+
+// cprint_strong prints omessage, colored bright green when supported.
+pub fn cprint_strong(omessage string) {
+	mut message := omessage
+	if scripting.term_colors {
+		message = term.bright_green(message)
+	}
+	print(message)
+}
+
+// cprintln is cprint plus a trailing newline.
+pub fn cprintln(omessage string) {
+	cprint(omessage)
+	println('')
+}
+
+// cprintln_strong is cprint_strong plus a trailing newline.
+pub fn cprintln_strong(omessage string) {
+	cprint_strong(omessage)
+	println('')
+}
+
+// verbose_trace prints a timestamped `# label : message` line,
+// but only when VERBOSE is set (see set_verbose).
+pub fn verbose_trace(label string, message string) {
+	if os.getenv('VERBOSE').len > 0 {
+		slabel := '$time.now().format_ss_milli() $label'
+		cprintln('# ${slabel:-43s} : $message')
+	}
+}
+
+// verbose_trace_strong is verbose_trace with the message highlighted
+// (bright green) when the terminal supports colors.
+pub fn verbose_trace_strong(label string, omessage string) {
+	if os.getenv('VERBOSE').len > 0 {
+		slabel := '$time.now().format_ss_milli() $label'
+		mut message := omessage
+		if scripting.term_colors {
+			message = term.bright_green(message)
+		}
+		cprintln('# ${slabel:-43s} : $message')
+	}
+}
+
+// verbose_trace_exec_result prints the exit code and the numbered output
+// lines of an executed command, but only when VERBOSE is set.
+pub fn verbose_trace_exec_result(x os.Result) {
+	if os.getenv('VERBOSE').len == 0 {
+		return
+	}
+	cprintln('# cmd.exit_code : ${x.exit_code.str():-4s} cmd.output:')
+	mut lnum := 0
+	for output_line in x.output.split_into_lines() {
+		lnum++
+		mut line := output_line
+		if scripting.term_colors {
+			line = term.bright_green(line)
+		}
+		cprintln('# ${lnum:3d}: $line')
+	}
+	cprintln('# ----------------------------------------------------------------------')
+}
+
+// modfn joins a module name and a function name into the `mod.fn` form
+// used for the trace labels.
+fn modfn(mname string, fname string) string {
+	return mname + '.' + fname
+}
+
+// chdir changes the current directory, tracing the action when VERBOSE is set.
+pub fn chdir(path string) {
+	verbose_trace_strong(modfn(@MOD, @FN), 'cd $path')
+	os.chdir(path)
+}
+
+// mkdir creates `path`, propagating the error (after tracing) on failure.
+pub fn mkdir(path string) ? {
+	verbose_trace_strong(modfn(@MOD, @FN), 'mkdir $path')
+	os.mkdir(path) or {
+		verbose_trace(modfn(@MOD, @FN), '## failed.')
+		return err
+	}
+}
+
+// mkdir_all creates `path` and any missing parents (like `mkdir -p`).
+pub fn mkdir_all(path string) ? {
+	verbose_trace_strong(modfn(@MOD, @FN), 'mkdir -p $path')
+	os.mkdir_all(path) or {
+		verbose_trace(modfn(@MOD, @FN), '## failed.')
+		return err
+	}
+}
+
+// rmrf removes `path` recursively if it exists (like `rm -rf`);
+// it panics on filesystem errors during removal.
+pub fn rmrf(path string) {
+	verbose_trace_strong(modfn(@MOD, @FN), 'rm -rf $path')
+	if os.exists(path) {
+		if os.is_dir(path) {
+			os.rmdir_all(path) or { panic(err) }
+		} else {
+			os.rm(path) or { panic(err) }
+		}
+	}
+}
+
+// execute a command, and return a result, or an error, if it failed in any way.
+// The error payload is the command's combined output.
+pub fn exec(cmd string) ?os.Result {
+	verbose_trace_strong(modfn(@MOD, @FN), cmd)
+	x := os.execute(cmd)
+	if x.exit_code != 0 {
+		verbose_trace(modfn(@MOD, @FN), '## failed.')
+		return error(x.output)
+	}
+	verbose_trace_exec_result(x)
+	return x
+}
+
+// run a command, tracing its results, and returning ONLY its output
+// (with trailing CR/LF trimmed); any failure yields an empty string.
+pub fn run(cmd string) string {
+	verbose_trace_strong(modfn(@MOD, @FN), cmd)
+	res := os.execute(cmd)
+	if res.exit_code < 0 {
+		// the command could not even be started
+		verbose_trace(modfn(@MOD, @FN), '## failed.')
+		return ''
+	}
+	verbose_trace_exec_result(res)
+	if res.exit_code != 0 {
+		return ''
+	}
+	return res.output.trim_right('\r\n')
+}
+
+// exit_0_status runs cmd and reports whether it exited with status 0.
+pub fn exit_0_status(cmd string) bool {
+	verbose_trace_strong(modfn(@MOD, @FN), cmd)
+	res := os.execute(cmd)
+	if res.exit_code < 0 {
+		// the command could not even be started
+		verbose_trace(modfn(@MOD, @FN), '## failed.')
+		return false
+	}
+	verbose_trace_exec_result(res)
+	return res.exit_code == 0
+}
+
+// tool_must_exist aborts the whole process (exit 1) when `toolcmd` is not
+// available on the PATH (checked via the shell builtin `type`).
+pub fn tool_must_exist(toolcmd string) {
+	verbose_trace(modfn(@MOD, @FN), toolcmd)
+	if exit_0_status('type $toolcmd') {
+		return
+	}
+	eprintln('Missing tool: $toolcmd')
+	eprintln('Please try again after you install it.')
+	exit(1)
+}
+
+// used_tools_must_exist checks every tool in `tools`; the first missing
+// one aborts the process.
+pub fn used_tools_must_exist(tools []string) {
+	for t in tools {
+		tool_must_exist(t)
+	}
+}
+
+// show_sizes_of_files prints `size path` for each given file.
+pub fn show_sizes_of_files(files []string) {
+	for f in files {
+		size := os.file_size(f)
+		println('$size $f') // println('${size:10d} $f')
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/modules/testing/common.v b/v_windows/v/old/cmd/tools/modules/testing/common.v
new file mode 100644
index 0000000..f37a22a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/modules/testing/common.v
@@ -0,0 +1,488 @@
+module testing
+
+import os
+import time
+import term
+import benchmark
+import sync.pool
+import v.pref
+import v.util.vtest
+
+const github_job = os.getenv('GITHUB_JOB')
+
+const show_start = os.getenv('VTEST_SHOW_START') == '1'
+
+const hide_skips = os.getenv('VTEST_HIDE_SKIP') == '1'
+
+const hide_oks = os.getenv('VTEST_HIDE_OK') == '1'
+
+// TestSession drives compiling/running a set of test files in parallel,
+// collecting benchmark results and printing progress messages from a
+// single printer thread fed through the nmessages channel.
+pub struct TestSession {
+pub mut:
+	files []string // the test files to process
+	skip_files []string // files to mark as skipped instead of running
+	vexe string // path to the v executable
+	vroot string // directory containing vexe
+	vtmp_dir string // temp folder for the generated binaries
+	vargs string // extra arguments passed to v for each file
+	failed bool // set when at least one test failed
+	benchmark benchmark.Benchmark
+	rm_binaries bool = true // remove generated binaries after each test
+	silent_mode bool
+	progress_mode bool
+	root_relative bool // used by CI runs, so that the output is stable everywhere
+	nmessages chan LogMessage // many publishers, single consumer/printer
+	nmessage_idx int // currently printed message index
+	nprint_ended chan int // read to block till printing ends, 1:1
+	failed_cmds shared []string
+}
+
+// MessageKind classifies a LogMessage for the printer thread.
+enum MessageKind {
+	ok
+	fail
+	skip
+	info
+	sentinel // tells the printer thread to stop
+}
+
+// LogMessage is one unit of output sent over the nmessages channel.
+struct LogMessage {
+	message string
+	kind MessageKind
+}
+
+// add_failed_cmd records a failed command; the shared array is locked
+// because worker threads append concurrently.
+pub fn (mut ts TestSession) add_failed_cmd(cmd string) {
+	lock ts.failed_cmds {
+		ts.failed_cmds << cmd
+	}
+}
+
+// show_list_of_failed_tests prints every recorded failed command to stderr.
+pub fn (mut ts TestSession) show_list_of_failed_tests() {
+	for i, cmd in ts.failed_cmds {
+		eprintln(term.failed('Failed command ${i + 1}:') + ' $cmd')
+	}
+}
+
+// append_message sends one message to the printer thread.
+pub fn (mut ts TestSession) append_message(kind MessageKind, msg string) {
+	ts.nmessages <- LogMessage{
+		message: msg
+		kind: kind
+	}
+}
+
+// print_messages is the single consumer of ts.nmessages: it numbers the
+// non-info messages (rewriting the TMP1..TMP4 placeholders), and prints
+// them according to silent/progress mode, until a .sentinel arrives.
+// It runs in its own thread (started with `go` in TestSession.test).
+pub fn (mut ts TestSession) print_messages() {
+	empty := term.header(' ', ' ')
+	mut print_msg_time := time.new_stopwatch()
+	for {
+		// get a message from the channel of messages to be printed:
+		mut rmessage := <-ts.nmessages
+		if rmessage.kind == .sentinel {
+			// a sentinel for stopping the printing thread
+			if !ts.silent_mode && ts.progress_mode {
+				eprintln('')
+			}
+			ts.nprint_ended <- 0
+			return
+		}
+		if rmessage.kind != .info {
+			ts.nmessage_idx++
+		}
+		// replace the TMP1..TMP4 placeholders with the current message
+		// index, formatted to 1..4 digit widths
+		msg := rmessage.message.replace_each([
+			'TMP1',
+			'${ts.nmessage_idx:1d}',
+			'TMP2',
+			'${ts.nmessage_idx:2d}',
+			'TMP3',
+			'${ts.nmessage_idx:3d}',
+			'TMP4',
+			'${ts.nmessage_idx:4d}',
+		])
+		is_ok := rmessage.kind == .ok
+		//
+		time_passed := print_msg_time.elapsed().seconds()
+		if time_passed > 10 && ts.silent_mode && is_ok {
+			// Even if OK tests are suppressed,
+			// show *at least* 1 result every 10 seconds,
+			// otherwise the CI can seem stuck ...
+			eprintln(msg)
+			print_msg_time.restart()
+			continue
+		}
+		if ts.progress_mode {
+			// progress mode, the last line is rewritten many times:
+			if is_ok && !ts.silent_mode {
+				print('\r$empty\r$msg')
+			} else {
+				// the last \n is needed, so SKIP/FAIL messages
+				// will not get overwritten by the OK ones
+				eprint('\r$empty\r$msg\n')
+			}
+			continue
+		}
+		if !ts.silent_mode || !is_ok {
+			// normal expanded mode, or failures in -silent mode
+			eprintln(msg)
+			continue
+		}
+	}
+}
+
+// new_test_session builds a TestSession from the given v arguments.
+// When will_compile is true, a platform/CI-specific list of files to skip
+// is prepared. `-progress` and `-silent` are stripped from the args passed
+// to v, but still control the session's output modes.
+pub fn new_test_session(_vargs string, will_compile bool) TestSession {
+	mut skip_files := []string{}
+	if will_compile {
+		$if msvc {
+			skip_files << 'vlib/v/tests/const_comptime_eval_before_vinit_test.v' // _constructor used
+		}
+		$if solaris {
+			skip_files << 'examples/gg/gg2.v'
+			skip_files << 'examples/pico/pico.v'
+			skip_files << 'examples/sokol/fonts.v'
+			skip_files << 'examples/sokol/drawing.v'
+		}
+		$if macos {
+			skip_files << 'examples/database/mysql.v'
+			skip_files << 'examples/database/orm.v'
+			skip_files << 'examples/database/psql/customer.v'
+		}
+		$if windows {
+			skip_files << 'examples/database/mysql.v'
+			skip_files << 'examples/database/orm.v'
+			skip_files << 'examples/websocket/ping.v' // requires OpenSSL
+			skip_files << 'examples/websocket/client-server/client.v' // requires OpenSSL
+			skip_files << 'examples/websocket/client-server/server.v' // requires OpenSSL
+			$if tinyc {
+				skip_files << 'examples/database/orm.v' // try fix it
+			}
+		}
+		if testing.github_job != 'sokol-shaders-can-be-compiled' {
+			// These examples need .h files that are produced from the supplied .glsl files,
+			// using by the shader compiler tools in https://github.com/floooh/sokol-tools-bin/archive/pre-feb2021-api-changes.tar.gz
+			skip_files << 'examples/sokol/02_cubes_glsl/cube_glsl.v'
+			skip_files << 'examples/sokol/03_march_tracing_glsl/rt_glsl.v'
+			skip_files << 'examples/sokol/04_multi_shader_glsl/rt_glsl.v'
+			skip_files << 'examples/sokol/05_instancing_glsl/rt_glsl.v'
+			// Skip obj_viewer code in the CI
+			skip_files << 'examples/sokol/06_obj_viewer/show_obj.v'
+		}
+		if testing.github_job != 'ubuntu-tcc' {
+			skip_files << 'examples/c_interop_wkhtmltopdf.v' // needs installation of wkhtmltopdf from https://github.com/wkhtmltopdf/packaging/releases
+			// the ttf_test.v is not interactive, but needs X11 headers to be installed, which is done only on ubuntu-tcc for now
+			skip_files << 'vlib/x/ttf/ttf_test.v'
+			skip_files << 'vlib/vweb/vweb_app_test.v' // imports the `sqlite` module, which in turn includes sqlite3.h
+		}
+		if testing.github_job != 'audio-examples' {
+			skip_files << 'examples/sokol/sounds/melody.v'
+			skip_files << 'examples/sokol/sounds/wav_player.v'
+			skip_files << 'examples/sokol/sounds/simple_sin_tones.v'
+		}
+	}
+	// NB: string.replace replaces *all* occurrences, so the previously
+	// duplicated .replace('-progress', '') call was redundant.
+	vargs := _vargs.replace('-progress', '')
+	vexe := pref.vexe_path()
+	vroot := os.dir(vexe)
+	new_vtmp_dir := setup_new_vtmp_folder()
+	if term.can_show_color_on_stderr() {
+		os.setenv('VCOLORS', 'always', true)
+	}
+	return TestSession{
+		vexe: vexe
+		vroot: vroot
+		skip_files: skip_files
+		vargs: vargs
+		vtmp_dir: new_vtmp_dir
+		silent_mode: _vargs.contains('-silent')
+		progress_mode: _vargs.contains('-progress')
+	}
+}
+
+// init sorts the collected files and prepares the benchmark counters.
+pub fn (mut ts TestSession) init() {
+	ts.files.sort()
+	ts.benchmark = benchmark.new_benchmark_no_cstep()
+}
+
+// add queues one more file for this session.
+pub fn (mut ts TestSession) add(file string) {
+	ts.files << file
+}
+
+// test runs the whole session: it filters out platform-inappropriate
+// files, fans the rest out to a worker pool (worker_trunner), streams
+// progress through the printer thread, and cleans up the temp folder
+// when nothing failed.
+pub fn (mut ts TestSession) test() {
+	// Ensure that .tmp.c files generated from compiling _test.v files,
+	// are easy to delete at the end, *without* affecting the existing ones.
+	current_wd := os.getwd()
+	if current_wd == os.wd_at_startup && current_wd == ts.vroot {
+		ts.root_relative = true
+	}
+	//
+	ts.init()
+	mut remaining_files := []string{}
+	for dot_relative_file in ts.files {
+		relative_file := dot_relative_file.replace('./', '')
+		file := os.real_path(relative_file)
+		$if windows {
+			if file.contains('sqlite') || file.contains('httpbin') {
+				continue
+			}
+		}
+		$if !macos {
+			if file.contains('customer') {
+				continue
+			}
+		}
+		$if msvc {
+			if file.contains('asm') {
+				continue
+			}
+		}
+		remaining_files << dot_relative_file
+	}
+	remaining_files = vtest.filter_vtest_only(remaining_files, fix_slashes: false)
+	ts.files = remaining_files
+	ts.benchmark.set_total_expected_steps(remaining_files.len)
+	mut pool_of_test_runners := pool.new_pool_processor(callback: worker_trunner)
+	// for handling messages across threads
+	ts.nmessages = chan LogMessage{cap: 10000}
+	ts.nprint_ended = chan int{cap: 0}
+	ts.nmessage_idx = 0
+	go ts.print_messages()
+	pool_of_test_runners.set_shared_context(ts)
+	pool_of_test_runners.work_on_pointers(unsafe { remaining_files.pointers() })
+	ts.benchmark.stop()
+	ts.append_message(.sentinel, '') // send the sentinel
+	_ := <-ts.nprint_ended // wait for the stop of the printing thread
+	eprintln(term.h_divider('-'))
+	// cleanup generated .tmp.c files after successfull tests:
+	if ts.benchmark.nfail == 0 {
+		if ts.rm_binaries {
+			os.rmdir_all(ts.vtmp_dir) or { panic(err) }
+		}
+	}
+	ts.show_list_of_failed_tests()
+}
+
+// worker_trunner is the pool callback that processes one test file: it
+// builds the v command line for the file, runs it (via os.system when
+// `-stats` is used, so the output streams through; otherwise via
+// os.execute), and records ok/fail/skip in both the shared session
+// benchmark and the per-thread benchmark used for the step messages.
+fn worker_trunner(mut p pool.PoolProcessor, idx int, thread_id int) voidptr {
+	mut ts := &TestSession(p.get_shared_context())
+	tmpd := ts.vtmp_dir
+	show_stats := '-stats' in ts.vargs.split(' ')
+	// tls_bench is used to format the step messages/timings
+	mut tls_bench := &benchmark.Benchmark(p.get_thread_context(idx))
+	if isnil(tls_bench) {
+		// first file processed by this thread: create its benchmark lazily
+		tls_bench = benchmark.new_benchmark_pointer()
+		tls_bench.set_total_expected_steps(ts.benchmark.nexpected_steps)
+		p.set_thread_context(idx, tls_bench)
+	}
+	tls_bench.no_cstep = true
+	dot_relative_file := p.get_item<string>(idx)
+	mut relative_file := dot_relative_file.replace('./', '')
+	mut cmd_options := [ts.vargs]
+	if relative_file.contains('global') && !ts.vargs.contains('fmt') {
+		cmd_options << ' -enable-globals'
+	}
+	if ts.root_relative {
+		relative_file = relative_file.replace(ts.vroot + os.path_separator, '')
+	}
+	file := os.real_path(relative_file)
+	normalised_relative_file := relative_file.replace('\\', '/')
+	// Ensure that the generated binaries will be stored in the temporary folder.
+	// Remove them after a test passes/fails.
+	fname := os.file_name(file)
+	generated_binary_fname := if os.user_os() == 'windows' {
+		fname.replace('.v', '.exe')
+	} else {
+		fname.replace('.v', '')
+	}
+	generated_binary_fpath := os.join_path(tmpd, generated_binary_fname)
+	if os.exists(generated_binary_fpath) {
+		if ts.rm_binaries {
+			os.rm(generated_binary_fpath) or { panic(err) }
+		}
+	}
+	if !ts.vargs.contains('fmt') {
+		cmd_options << ' -o "$generated_binary_fpath"'
+	}
+	cmd := '"$ts.vexe" ' + cmd_options.join(' ') + ' "$file"'
+	ts.benchmark.step()
+	tls_bench.step()
+	if relative_file.replace('\\', '/') in ts.skip_files {
+		ts.benchmark.skip()
+		tls_bench.skip()
+		if !testing.hide_skips {
+			ts.append_message(.skip, tls_bench.step_message_skip(normalised_relative_file))
+		}
+		return pool.no_result
+	}
+	if show_stats {
+		// in -stats mode, let the output stream directly to the console
+		ts.append_message(.ok, term.h_divider('-'))
+		status := os.system(cmd)
+		if status == 0 {
+			ts.benchmark.ok()
+			tls_bench.ok()
+		} else {
+			ts.failed = true
+			ts.benchmark.fail()
+			tls_bench.fail()
+			ts.add_failed_cmd(cmd)
+			return pool.no_result
+		}
+	} else {
+		if testing.show_start {
+			ts.append_message(.info, ' starting $relative_file ...')
+		}
+		r := os.execute(cmd)
+		if r.exit_code < 0 {
+			// the compiler process could not even be started
+			ts.failed = true
+			ts.benchmark.fail()
+			tls_bench.fail()
+			ts.append_message(.fail, tls_bench.step_message_fail(normalised_relative_file))
+			ts.add_failed_cmd(cmd)
+			return pool.no_result
+		}
+		if r.exit_code != 0 {
+			ts.failed = true
+			ts.benchmark.fail()
+			tls_bench.fail()
+			ending_newline := if r.output.ends_with('\n') { '\n' } else { '' }
+			ts.append_message(.fail, tls_bench.step_message_fail('$normalised_relative_file\n$r.output.trim_space()$ending_newline'))
+			ts.add_failed_cmd(cmd)
+		} else {
+			ts.benchmark.ok()
+			tls_bench.ok()
+			if !testing.hide_oks {
+				ts.append_message(.ok, tls_bench.step_message_ok(normalised_relative_file))
+			}
+		}
+	}
+	if os.exists(generated_binary_fpath) {
+		if ts.rm_binaries {
+			os.rm(generated_binary_fpath) or { panic(err) }
+		}
+	}
+	return pool.no_result
+}
+
+// vlib_should_be_present aborts the process when `parent_dir` does not
+// contain a vlib/ folder (i.e. the v executable is not in a full checkout).
+pub fn vlib_should_be_present(parent_dir string) {
+	vlib_dir := os.join_path(parent_dir, 'vlib')
+	if !os.is_dir(vlib_dir) {
+		eprintln('$vlib_dir is missing, it must be next to the V executable')
+		exit(1)
+	}
+}
+
+// v_build_failing is v_build_failing_skipped without extra skips.
+pub fn v_build_failing(zargs string, folder string) bool {
+	return v_build_failing_skipped(zargs, folder, [])
+}
+
+// prepare_test_session collects every .v file under `folder`, filters out
+// testdata/modules/preludes paths, marks non-main-module files and
+// oskipped-prefixed files as skipped, and returns a ready TestSession.
+pub fn prepare_test_session(zargs string, folder string, oskipped []string, main_label string) TestSession {
+	vexe := pref.vexe_path()
+	parent_dir := os.dir(vexe)
+	vlib_should_be_present(parent_dir)
+	vargs := zargs.replace(vexe, '')
+	eheader(main_label)
+	if vargs.len > 0 {
+		eprintln('v compiler args: "$vargs"')
+	}
+	mut session := new_test_session(vargs, true)
+	files := os.walk_ext(os.join_path(parent_dir, folder), '.v')
+	mut mains := []string{}
+	mut skipped := oskipped.clone()
+	next_file: for f in files {
+		fnormalised := f.replace('\\', '/')
+		// NB: a `testdata` folder, is the preferred name of a folder, containing V code,
+		// that you *do not want* the test framework to find incidentally for various reasons,
+		// for example module import tests, or subtests, that are compiled/run by other parent tests
+		// in specific configurations, etc.
+		if fnormalised.contains('testdata/') || fnormalised.contains('modules/')
+			|| f.contains('preludes/') {
+			continue
+		}
+		$if windows {
+			// skip pico and process/command examples on windows
+			if fnormalised.ends_with('examples/pico/pico.v')
+				|| fnormalised.ends_with('examples/process/command.v') {
+				continue
+			}
+		}
+		c := os.read_file(f) or { panic(err) }
+		// only inspect the first 300 bytes for the module clause
+		maxc := if c.len > 300 { 300 } else { c.len }
+		start := c[0..maxc]
+		if start.contains('module ') && !start.contains('module main') {
+			skipped_f := f.replace(os.join_path(parent_dir, ''), '')
+			skipped << skipped_f
+		}
+		for skip_prefix in oskipped {
+			if f.starts_with(skip_prefix) {
+				continue next_file
+			}
+		}
+		mains << f
+	}
+	session.files << mains
+	session.skip_files << skipped
+	return session
+}
+
+// v_build_failing_skipped builds everything under `folder` (minus the
+// oskipped prefixes) and reports whether anything failed.
+pub fn v_build_failing_skipped(zargs string, folder string, oskipped []string) bool {
+	main_label := 'Building $folder ...'
+	finish_label := 'building $folder'
+	mut session := prepare_test_session(zargs, folder, oskipped, main_label)
+	session.test()
+	eprintln(session.benchmark.total_message(finish_label))
+	return session.failed
+}
+
+// build_v_cmd_failed runs one V build command and reports whether it
+// failed; when the command ran but exited non-zero, its output is echoed.
+pub fn build_v_cmd_failed(cmd string) bool {
+	res := os.execute(cmd)
+	if res.exit_code == 0 {
+		return false
+	}
+	if res.exit_code >= 0 {
+		// the command started, but finished with a non-zero exit code
+		eprintln('')
+		eprintln(res.output)
+	}
+	return true
+}
+
+// building_any_v_binaries_failed rebuilds v itself in several
+// configurations (-g, -prod -g, -cg, -prod -cg, -prod) and returns true
+// when at least one build failed; each step is benchmarked and reported.
+pub fn building_any_v_binaries_failed() bool {
+	eheader('Building V binaries...')
+	eprintln('VFLAGS is: "' + os.getenv('VFLAGS') + '"')
+	vexe := pref.vexe_path()
+	parent_dir := os.dir(vexe)
+	vlib_should_be_present(parent_dir)
+	os.chdir(parent_dir)
+	mut failed := false
+	v_build_commands := ['$vexe -o v_g -g cmd/v', '$vexe -o v_prod_g -prod -g cmd/v',
+		'$vexe -o v_cg -cg cmd/v', '$vexe -o v_prod_cg -prod -cg cmd/v',
+		'$vexe -o v_prod -prod cmd/v',
+	]
+	mut bmark := benchmark.new_benchmark()
+	for cmd in v_build_commands {
+		bmark.step()
+		if build_v_cmd_failed(cmd) {
+			bmark.fail()
+			failed = true
+			eprintln(bmark.step_message_fail('command: $cmd . See details above ^^^^^^^'))
+			eprintln('')
+			continue
+		}
+		bmark.ok()
+		if !testing.hide_oks {
+			eprintln(bmark.step_message_ok('command: $cmd'))
+		}
+	}
+	bmark.stop()
+	eprintln(term.h_divider('-'))
+	eprintln(bmark.total_message('building v binaries'))
+	return failed
+}
+
+// eheader prints a dashed header line to stderr.
+pub fn eheader(msg string) {
+	eprintln(term.header_left(msg, '-'))
+}
+
+// header prints a dashed header line to stdout.
+pub fn header(msg string) {
+	println(term.header_left(msg, '-'))
+}
+
+// setup_new_vtmp_folder creates a unique per-session temp folder, exports
+// it through the VTMP environment variable, and returns its path.
+pub fn setup_new_vtmp_folder() string {
+	now := time.sys_mono_now()
+	new_vtmp_dir := os.join_path(os.temp_dir(), 'v', 'test_session_$now')
+	os.mkdir_all(new_vtmp_dir) or { panic(err) }
+	os.setenv('VTMP', new_vtmp_dir, true)
+	return new_vtmp_dir
+}
diff --git a/v_windows/v/old/cmd/tools/modules/vgit/vgit.v b/v_windows/v/old/cmd/tools/modules/vgit/vgit.v
new file mode 100644
index 0000000..efa2e8a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/modules/vgit/vgit.v
@@ -0,0 +1,197 @@
+module vgit
+
+import os
+import flag
+import scripting
+
+// check_v_commit_timestamp_before_self_rebuilding warns (on stderr) when the
+// given commit timestamp is older than 2019-06-29 12:21 (commit 5b7a1e8),
+// since such old V versions could only rebuild themselves from inside the
+// toplevel compiler/ folder, which this tool does not support.
+pub fn check_v_commit_timestamp_before_self_rebuilding(v_timestamp int) {
+	if v_timestamp >= 1561805697 {
+		return
+	}
+	eprintln('##################################################################')
+	eprintln('# WARNING: v self rebuilding, before 5b7a1e8 (2019-06-29 12:21)  #')
+	eprintln('# required the v executable to be built *inside*                 #')
+	eprintln('# the toplevel compiler/ folder.                                 #')
+	eprintln('#                                                                #')
+	eprintln('# That is not supported by this tool.                            #')
+	eprintln('# You will have to build it manually there.                      #')
+	eprintln('##################################################################')
+}
+
+// validate_commit_exists exits the process with code 3, when the given
+// commit-ish can not be resolved in the current git repository.
+// An empty commit string is accepted as-is (nothing to validate).
+pub fn validate_commit_exists(commit string) {
+	if commit.len == 0 {
+		return
+	}
+	// `git cat-file -t` exits with 0 only for objects that exist
+	cmd := "git cat-file -t '$commit' "
+	if !scripting.exit_0_status(cmd) {
+		eprintln('Commit: "$commit" does not exist in the current repository.')
+		exit(3)
+	}
+}
+
+// line_to_timestamp_and_commit parses a `git rev-list --timestamp` output
+// line of the form "<unix_timestamp> <commit_hash>" into its two parts.
+// NOTE(review): a malformed/empty line (fewer than 2 space separated parts)
+// would make the parts[1] access panic — callers pass rev-list output only.
+pub fn line_to_timestamp_and_commit(line string) (int, string) {
+	parts := line.split(' ')
+	return parts[0].int(), parts[1]
+}
+
+// normalized_workpath_for_commit returns an absolute path inside `workdir`,
+// named 'v_at_<commit>', where the characters '^', '-' and '/' in the
+// commit-ish are replaced by '_', so the result is a safe folder name.
+pub fn normalized_workpath_for_commit(workdir string, commit string) string {
+	nc := 'v_at_' + commit.replace('^', '_').replace('-', '_').replace('/', '_')
+	return os.real_path(workdir + os.path_separator + nc)
+}
+
+// get_current_folder_commit_hash returns the full commit hash of HEAD
+// in the git repository of the current working folder.
+fn get_current_folder_commit_hash() string {
+	vline := scripting.run('git rev-list -n1 --timestamp HEAD')
+	_, v_commithash := line_to_timestamp_and_commit(vline)
+	return v_commithash
+}
+
+// prepare_vc_source checks out, inside the vc repo in `vcdir`, the generated
+// C source commit that corresponds to the V commit-ish `commit` in the V repo
+// in `cdir`. It returns (v_commit_hash, vc_commit_hash).
+// Matching is first attempted by grepping the vc log for the short v hash;
+// if that fails, the newest vc commit *before* the v commit's timestamp is used.
+pub fn prepare_vc_source(vcdir string, cdir string, commit string) (string, string) {
+	scripting.chdir(cdir)
+	// Building a historic v with the latest vc is not always possible ...
+	// It is more likely, that the vc *at the time of the v commit*,
+	// or slightly before that time will be able to build the historic v:
+	vline := scripting.run('git rev-list -n1 --timestamp "$commit" ')
+	v_timestamp, v_commithash := line_to_timestamp_and_commit(vline)
+	scripting.verbose_trace(@FN, 'v_timestamp: $v_timestamp | v_commithash: $v_commithash')
+	check_v_commit_timestamp_before_self_rebuilding(v_timestamp)
+	scripting.chdir(vcdir)
+	scripting.run('git checkout --quiet master')
+	//
+	mut vccommit := ''
+	// try to find a vc commit, whose subject mentions the short v commit hash:
+	vcbefore_subject_match := scripting.run('git rev-list HEAD -n1 --timestamp --grep=${v_commithash[0..7]} ')
+	scripting.verbose_trace(@FN, 'vcbefore_subject_match: $vcbefore_subject_match')
+	if vcbefore_subject_match.len > 3 {
+		_, vccommit = line_to_timestamp_and_commit(vcbefore_subject_match)
+	} else {
+		scripting.verbose_trace(@FN, 'the v commit did not match anything in the vc log; try --timestamp instead.')
+		vcbefore := scripting.run('git rev-list HEAD -n1 --timestamp --before=$v_timestamp ')
+		_, vccommit = line_to_timestamp_and_commit(vcbefore)
+	}
+	scripting.verbose_trace(@FN, 'vccommit: $vccommit')
+	scripting.run('git checkout --quiet "$vccommit" ')
+	// show the size of the checked out C sources (diagnostics only):
+	scripting.run('wc *.c')
+	scripting.chdir(cdir)
+	return v_commithash, vccommit
+}
+
+// clone_or_pull makes sure that `local_worktree_path` contains an up to date
+// clone of `remote_git_url` — pulling when the folder is already a git repo,
+// cloning fresh otherwise.
+pub fn clone_or_pull(remote_git_url string, local_worktree_path string) {
+	// NB: after clone_or_pull, the current repo branch is === HEAD === master
+	if os.is_dir(local_worktree_path) && os.is_dir(os.join_path(local_worktree_path, '.git')) {
+		// Already existing ... Just pulling in this case is faster usually.
+		scripting.run('git -C "$local_worktree_path" checkout --quiet master')
+		scripting.run('git -C "$local_worktree_path" pull --quiet ')
+	} else {
+		// Clone a fresh
+		scripting.run('git clone --quiet "$remote_git_url" "$local_worktree_path" ')
+	}
+}
+
+// VGitContext holds all the input parameters (pub) and the results (pub mut)
+// needed to prepare (clone/checkout/bootstrap-compile) a historic V compiler
+// at a given commit. See compile_oldv_if_needed for the main entry point.
+pub struct VGitContext {
+pub:
+	cc          string = 'cc'     // what compiler to use
+	workdir     string = '/tmp'   // the base working folder
+	commit_v    string = 'master' // the commit-ish that needs to be prepared
+	path_v      string // where is the local working copy v repo
+	path_vc     string // where is the local working copy vc repo
+	v_repo_url  string // the remote v repo URL
+	vc_repo_url string // the remote vc repo URL
+pub mut:
+	// these will be filled by vgitcontext.compile_oldv_if_needed()
+	commit_v__hash string // the git commit of the v repo that should be prepared
+	commit_vc_hash string // the git commit of the vc repo, corresponding to commit_v__hash
+	vexename       string // v or v.exe
+	vexepath       string // the full absolute path to the prepared v/v.exe
+	vvlocation     string // v.v or compiler/ or cmd/v, depending on v version
+	make_fresh_tcc bool   // whether to do 'make fresh_tcc' before compiling an old V.
+}
+
+// compile_oldv_if_needed makes sure that a compiled V executable exists at
+// vgit_context.vexepath for the commit vgit_context.commit_v. It:
+//  1) clones/pulls the v and vc repos, and checks out the wanted v commit,
+//  2) returns early, if the v executable was already compiled,
+//  3) otherwise bootstraps: compiles the matching vc C source with `cc`,
+//     then uses the resulting ./cv to self-compile V from its V source.
+// On return, commit_v__hash/commit_vc_hash/vexename/vexepath/vvlocation
+// are filled in.
+pub fn (mut vgit_context VGitContext) compile_oldv_if_needed() {
+	vgit_context.vexename = if os.user_os() == 'windows' { 'v.exe' } else { 'v' }
+	vgit_context.vexepath = os.real_path(os.join_path(vgit_context.path_v, vgit_context.vexename))
+	mut command_for_building_v_from_c_source := ''
+	mut command_for_selfbuilding := ''
+	if 'windows' == os.user_os() {
+		command_for_building_v_from_c_source = '$vgit_context.cc -std=c99 -municode -w -o cv.exe "$vgit_context.path_vc/v_win.c" '
+		command_for_selfbuilding = './cv.exe -o $vgit_context.vexename {SOURCE}'
+	} else {
+		command_for_building_v_from_c_source = '$vgit_context.cc -std=gnu11 -w -o cv "$vgit_context.path_vc/v.c" -lm -lpthread'
+		command_for_selfbuilding = './cv -o $vgit_context.vexename {SOURCE}'
+	}
+	scripting.chdir(vgit_context.workdir)
+	clone_or_pull(vgit_context.v_repo_url, vgit_context.path_v)
+	clone_or_pull(vgit_context.vc_repo_url, vgit_context.path_vc)
+	scripting.chdir(vgit_context.path_v)
+	scripting.run('git checkout --quiet $vgit_context.commit_v')
+	if os.is_dir(vgit_context.path_v) && os.exists(vgit_context.vexepath) {
+		// already compiled, so no need to compile v again
+		vgit_context.commit_v__hash = get_current_folder_commit_hash()
+		return
+	}
+	// find the matching vc commit and check it out; 'HEAD' here is the
+	// already checked out vgit_context.commit_v in the v repo:
+	v_commithash, vccommit_before := prepare_vc_source(vgit_context.path_vc, vgit_context.path_v,
+		'HEAD')
+	vgit_context.commit_v__hash = v_commithash
+	vgit_context.commit_vc_hash = vccommit_before
+	// the location of V's own source changed over time: cmd/v, v.v, compiler/
+	if os.exists('cmd/v') {
+		vgit_context.vvlocation = 'cmd/v'
+	} else {
+		vgit_context.vvlocation = if os.exists('v.v') { 'v.v' } else { 'compiler' }
+	}
+	if os.is_dir(vgit_context.path_v) && os.exists(vgit_context.vexepath) {
+		// already compiled, so no need to compile v again
+		return
+	}
+	// Recompilation is needed. Just to be sure, clean up everything first.
+	scripting.run('git clean -xf')
+	if vgit_context.make_fresh_tcc {
+		scripting.run('make fresh_tcc')
+	}
+	scripting.run(command_for_building_v_from_c_source)
+	build_cmd := command_for_selfbuilding.replace('{SOURCE}', vgit_context.vvlocation)
+	scripting.run(build_cmd)
+	// At this point, there exists a file vgit_context.vexepath
+	// which should be a valid working V executable.
+}
+
+// VGitOptions holds the common command line options, shared by the tools
+// that use vgit (oldv, performance_compare). See add_common_tool_options.
+pub struct VGitOptions {
+pub mut:
+	workdir     string // the working folder (typically /tmp), where the tool will write
+	v_repo_url  string // the url of the V repository. It can be a local folder path, if you want to eliminate network operations...
+	vc_repo_url string // the url of the vc repository. It can be a local folder path, if you want to eliminate network operations...
+	show_help   bool   // whether to show the usage screen
+	verbose     bool   // should the tool be much more verbose
+}
+
+// add_common_tool_options registers the common flags (--workdir, --vrepo,
+// --vcrepo, --help, --verbose) on the given flag parser, fills `context`
+// from the parsed values, validates each remaining free argument as an
+// existing git commit, and returns those free arguments (the commits).
+// It exits the process on --help, on parse errors, or on unknown commits.
+pub fn add_common_tool_options(mut context VGitOptions, mut fp flag.FlagParser) []string {
+	tdir := os.temp_dir()
+	context.workdir = os.real_path(fp.string('workdir', `w`, context.workdir, 'A writable base folder. Default: $tdir'))
+	context.v_repo_url = fp.string('vrepo', 0, context.v_repo_url, 'The url of the V repository. You can clone it locally too. See also --vcrepo below.')
+	context.vc_repo_url = fp.string('vcrepo', 0, context.vc_repo_url, 'The url of the vc repository. You can clone it
+${flag.space}beforehand, and then just give the local folder
+${flag.space}path here. That will eliminate the network ops
+${flag.space}done by this tool, which is useful, if you want
+${flag.space}to script it/run it in a restrictive vps/docker.
+')
+	context.show_help = fp.bool('help', `h`, false, 'Show this help screen.')
+	context.verbose = fp.bool('verbose', `v`, false, 'Be more verbose.')
+	if context.show_help {
+		println(fp.usage())
+		exit(0)
+	}
+	if context.verbose {
+		scripting.set_verbose(true)
+	}
+	// normalize local folder paths for the repo urls, so git can use them directly:
+	if os.is_dir(context.v_repo_url) {
+		context.v_repo_url = os.real_path(context.v_repo_url)
+	}
+	if os.is_dir(context.vc_repo_url) {
+		context.vc_repo_url = os.real_path(context.vc_repo_url)
+	}
+	commits := fp.finalize() or {
+		eprintln('Error: $err')
+		exit(1)
+	}
+	for commit in commits {
+		validate_commit_exists(commit)
+	}
+	return commits
+}
diff --git a/v_windows/v/old/cmd/tools/modules/vhelp/vhelp.v b/v_windows/v/old/cmd/tools/modules/vhelp/vhelp.v
new file mode 100644
index 0000000..347ba75
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/modules/vhelp/vhelp.v
@@ -0,0 +1,14 @@
+module vhelp
+
+import os
+
+pub fn show_topic(topic string) {
+ vexe := os.real_path(os.getenv('VEXE'))
+ vroot := os.dir(vexe)
+ target_topic := os.join_path(vroot, 'cmd', 'v', 'help', '${topic}.txt')
+ content := os.read_file(target_topic) or {
+ eprintln('Unknown topic: $topic')
+ exit(1)
+ }
+ println(content)
+}
diff --git a/v_windows/v/old/cmd/tools/oldv.v b/v_windows/v/old/cmd/tools/oldv.v
new file mode 100644
index 0000000..03eaf83
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/oldv.v
@@ -0,0 +1,176 @@
+import os
+import flag
+import scripting
+import vgit
+
+// Version and the full --help description (including a git bisect recipe)
+// for the oldv tool. strip_margin removes the leading '| ' from each line.
+const (
+	tool_version = '0.0.3'
+	tool_description = '  Checkout an old V and compile it as it was on specific commit.
+|     This tool is useful, when you want to discover when something broke.
+|     It is also useful, when you just want to experiment with an older historic V.
+|
+|     The VCOMMIT argument can be a git commitish like HEAD or master and so on.
+|     When oldv is used with git bisect, you probably want to give HEAD. For example:
+|          git bisect start
+|          git bisect bad
+|          git checkout known_good_commit
+|          git bisect good
+|              ## Now git will automatically checkout a middle commit between the bad and the good
+|          cmd/tools/oldv HEAD --command="run commands in oldv folder, to verify if the commit is good or bad"
+|              ## See what the result is, and either do: ...
+|          git bisect good
+|              ## ... or do:
+|          git bisect bad
+|              ## Now you just repeat the above steps, each time running oldv with the same command, then mark the result as good or bad,
+|              ## until you find the commit, where the problem first occurred.
+|              ## When you finish, do not forget to do:
+|          git bisect reset'.strip_margin()
+)
+
+// Context holds the full state of one oldv invocation: the common vgit
+// options, the prepared VGitContext, and the oldv specific settings.
+struct Context {
+mut:
+	vgo           vgit.VGitOptions
+	vgcontext     vgit.VGitContext
+	commit_v      string = 'master' // the commit from which you want to produce a working v compiler (this may be a commit-ish too)
+	commit_v_hash string // this will be filled from the commit-ish commit_v using rev-list. It IS a commit hash.
+	path_v        string // the full path to the v folder inside workdir.
+	path_vc       string // the full path to the vc folder inside workdir.
+	cmd_to_run    string // the command that you want to run *in* the oldv repo
+	cc            string = 'cc' // the C compiler to use for bootstrapping.
+	cleanup       bool   // should the tool run a cleanup first
+	use_cache     bool   // use local cached copies for --vrepo and --vcrepo in
+	fresh_tcc     bool   // do use `make fresh_tcc`
+}
+
+// compile_oldv_if_needed prepares (compiles if missing) the historic V
+// executable for c.commit_v, by delegating to vgit.VGitContext.
+// When the build failed AND a --command was given, it exits with 125,
+// so that `git bisect run` will skip the current commit.
+fn (mut c Context) compile_oldv_if_needed() {
+	c.vgcontext = vgit.VGitContext{
+		workdir: c.vgo.workdir
+		v_repo_url: c.vgo.v_repo_url
+		vc_repo_url: c.vgo.vc_repo_url
+		cc: c.cc
+		commit_v: c.commit_v
+		path_v: c.path_v
+		path_vc: c.path_vc
+		make_fresh_tcc: c.fresh_tcc
+	}
+	c.vgcontext.compile_oldv_if_needed()
+	c.commit_v_hash = c.vgcontext.commit_v__hash
+	if !os.exists(c.vgcontext.vexepath) && c.cmd_to_run.len > 0 {
+		// NB: 125 is a special code, that git bisect understands as 'skip this commit'.
+		// it is used to inform git bisect that the current commit leads to a build failure.
+		exit(125)
+	}
+}
+
+// Locations of the local repo cache used by --cache (see sync_cache below):
+const cache_oldv_folder = os.join_path(os.cache_dir(), 'oldv')
+
+const cache_oldv_folder_v = os.join_path(cache_oldv_folder, 'v')
+
+const cache_oldv_folder_vc = os.join_path(cache_oldv_folder, 'vc')
+
+// sync_cache makes sure that the local cache folder (~/.cache/oldv/ by
+// default) contains up to date clones of both the vlang/v and vlang/vc
+// repositories, cloning them if missing and pulling otherwise.
+// It exits with code 1 on any mkdir/clone/pull failure.
+fn sync_cache() {
+	scripting.verbose_trace(@FN, 'start')
+	if !os.exists(cache_oldv_folder) {
+		scripting.verbose_trace(@FN, 'creating $cache_oldv_folder')
+		scripting.mkdir_all(cache_oldv_folder) or {
+			scripting.verbose_trace(@FN, '## failed.')
+			exit(1)
+		}
+	}
+	scripting.chdir(cache_oldv_folder)
+	for reponame in ['v', 'vc'] {
+		repofolder := os.join_path(cache_oldv_folder, reponame)
+		if !os.exists(repofolder) {
+			scripting.verbose_trace(@FN, 'cloning to $repofolder')
+			scripting.exec('git clone --quiet https://github.com/vlang/$reponame $repofolder') or {
+				scripting.verbose_trace(@FN, '## error during clone: $err')
+				exit(1)
+			}
+		}
+		scripting.chdir(repofolder)
+		scripting.exec('git pull --quiet') or {
+			scripting.verbose_trace(@FN, 'pulling to $repofolder')
+			scripting.verbose_trace(@FN, '## error during pull: $err')
+			exit(1)
+		}
+	}
+	scripting.verbose_trace(@FN, 'done')
+}
+
+// main parses the oldv command line options, prepares/compiles the V
+// compiler for the requested commit (using the local cache by default),
+// and optionally runs --command inside the prepared historic V folder,
+// propagating that command's exit code.
+fn main() {
+	scripting.used_tools_must_exist(['git', 'cc'])
+	//
+	// Resetting VEXE here allows for `v run cmd/tools/oldv.v'.
+	// the parent V would have set VEXE, which later will
+	// affect the V's run from the tool itself.
+	os.setenv('VEXE', '', true)
+	//
+	mut context := Context{}
+	context.vgo.workdir = cache_oldv_folder
+	mut fp := flag.new_flag_parser(os.args)
+	fp.application(os.file_name(os.executable()))
+	fp.version(tool_version)
+	fp.description(tool_description)
+	fp.arguments_description('VCOMMIT')
+	fp.skip_executable()
+	context.use_cache = fp.bool('cache', `u`, true, 'Use a cache of local repositories for --vrepo and --vcrepo in \$HOME/.cache/oldv/')
+	if context.use_cache {
+		context.vgo.v_repo_url = cache_oldv_folder_v
+		context.vgo.vc_repo_url = cache_oldv_folder_vc
+	} else {
+		context.vgo.v_repo_url = 'https://github.com/vlang/v'
+		context.vgo.vc_repo_url = 'https://github.com/vlang/vc'
+	}
+	should_sync := fp.bool('cache-sync', `s`, false, 'Update the local cache')
+	if !should_sync {
+		// a VCOMMIT argument is mandatory, unless we are only syncing the cache
+		fp.limit_free_args(1, 1)
+	}
+	////
+	context.cleanup = fp.bool('clean', 0, false, 'Clean before running (slower).')
+	context.fresh_tcc = fp.bool('fresh_tcc', 0, true, 'Do `make fresh_tcc` when preparing a V compiler.')
+	context.cmd_to_run = fp.string('command', `c`, '', 'Command to run in the old V repo.\n')
+	commits := vgit.add_common_tool_options(mut context.vgo, mut fp)
+	if should_sync {
+		sync_cache()
+		exit(0)
+	}
+	if context.use_cache {
+		if !os.is_dir(cache_oldv_folder_v) || !os.is_dir(cache_oldv_folder_vc) {
+			sync_cache()
+		}
+	}
+	if commits.len > 0 {
+		context.commit_v = commits[0]
+	} else {
+		// no explicit commit given => use the HEAD of the current folder's repo
+		context.commit_v = scripting.run('git rev-list -n1 HEAD')
+	}
+	scripting.cprintln('################# context.commit_v: $context.commit_v #####################')
+	context.path_v = vgit.normalized_workpath_for_commit(context.vgo.workdir, context.commit_v)
+	context.path_vc = vgit.normalized_workpath_for_commit(context.vgo.workdir, 'vc')
+	if !os.is_dir(context.vgo.workdir) {
+		eprintln('Work folder: $context.vgo.workdir , does not exist.')
+		exit(2)
+	}
+	// the CC environment variable overrides the default C compiler:
+	ecc := os.getenv('CC')
+	if ecc != '' {
+		context.cc = ecc
+	}
+	if context.cleanup {
+		scripting.rmrf(context.path_v)
+		scripting.rmrf(context.path_vc)
+	}
+	context.compile_oldv_if_needed()
+	scripting.chdir(context.path_v)
+	shorter_hash := context.commit_v_hash[0..10]
+	scripting.cprintln('#     v commit hash: $shorter_hash | folder: $context.path_v')
+	if context.cmd_to_run.len > 0 {
+		scripting.cprintln_strong('#           command: ${context.cmd_to_run:-34s}')
+		cmdres := os.execute_or_exit(context.cmd_to_run)
+		if cmdres.exit_code != 0 {
+			scripting.cprintln_strong('#         exit code: ${cmdres.exit_code:-4d}')
+		}
+		scripting.cprint_strong('#            result: ')
+		print(cmdres.output)
+		// make oldv exit with the same code, so it is usable with `git bisect run`:
+		exit(cmdres.exit_code)
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/performance_compare.v b/v_windows/v/old/cmd/tools/performance_compare.v
new file mode 100644
index 0000000..6723231
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/performance_compare.v
@@ -0,0 +1,215 @@
+import os
+import flag
+import scripting
+import vgit
+
+// Version and --help description of the performance_compare tool:
+const (
+	tool_version = '0.0.5'
+	tool_description = "  Compares V executable size and performance,
+|                 between 2 commits from V's local git history.
+|                 When only one commit is given, it is compared to master.
+|                 ".strip_margin()
+)
+
+// Context holds the state of one performance comparison run: the two
+// commits being compared, their prepared work folders, and the options
+// passed through to v/hyperfine.
+struct Context {
+	cwd string // current working folder
+mut:
+	vgo           vgit.VGitOptions
+	a             string // the full path to the 'after' folder inside workdir
+	b             string // the full path to the 'before' folder inside workdir
+	vc            string // the full path to the vc folder inside workdir. It is used during bootstrapping v from the C source.
+	commit_before string // the git commit for the 'before' state
+	commit_after  string // the git commit for the 'after' state
+	warmups       int    // how many times to execute a command before gathering stats
+	hyperfineopts string // use for additional CLI options that will be given to the hyperfine command
+	vflags        string // other v options to pass to compared v commands
+}
+
+// new_context returns a Context with the defaults: compare against 'master',
+// with 4 hyperfine warmup runs, starting from the current working folder.
+fn new_context() Context {
+	return Context{
+		cwd: os.getwd()
+		commit_after: 'master'
+		warmups: 4
+	}
+}
+
+// compare_versions prepares both the 'before' and 'after' V compilers,
+// then runs 4 hyperfine benchmark groups over them (compiling hello_world
+// and the compiler itself, to C and to binary), printing the paths of the
+// produced hyperfine JSON stats files at the end.
+fn (c Context) compare_versions() {
+	// Input is validated at this point...
+	// Cleanup artifacts from previous runs of this tool:
+	scripting.chdir(c.vgo.workdir)
+	scripting.run('rm -rf "$c.a" "$c.b" "$c.vc" ')
+	// clone the VC source *just once per comparison*, and reuse it:
+	scripting.run('git clone --quiet "$c.vgo.vc_repo_url" "$c.vc" ')
+	println('Comparing V performance of commit $c.commit_before (before) vs commit $c.commit_after (after) ...')
+	c.prepare_v(c.b, c.commit_before)
+	c.prepare_v(c.a, c.commit_after)
+	scripting.chdir(c.vgo.workdir)
+	if c.vflags.len > 0 {
+		os.setenv('VFLAGS', c.vflags, true)
+	}
+	// The first is the baseline, against which all the others will be compared.
+	// It is the fastest, since hello_world.v has only a single println in it,
+	mut perf_files := []string{}
+	perf_files << c.compare_v_performance('source_hello', [
+		'vprod @DEBUG@ -o source.c examples/hello_world.v',
+		'vprod         -o source.c examples/hello_world.v',
+		'v     @DEBUG@ -o source.c examples/hello_world.v',
+		'v             -o source.c examples/hello_world.v',
+	])
+	perf_files << c.compare_v_performance('source_v', ['vprod @DEBUG@ -o source.c @COMPILER@',
+		'vprod         -o source.c @COMPILER@', 'v     @DEBUG@ -o source.c @COMPILER@',
+		'v             -o source.c @COMPILER@',
+	])
+	perf_files << c.compare_v_performance('binary_hello', [
+		'vprod -o hello examples/hello_world.v',
+		'v     -o hello examples/hello_world.v',
+	])
+	perf_files << c.compare_v_performance('binary_v', ['vprod -o binary @COMPILER@',
+		'v     -o binary @COMPILER@',
+	])
+	println('All performance files:')
+	for f in perf_files {
+		println('   $f')
+	}
+}
+
+// prepare_v bootstraps a historic V for `commit` into `cdir` (via vgit),
+// then builds both a `v` and a `vprod` compiler there, produces stripped
+// and upx-compressed copies for size comparison, prints the binary sizes,
+// the V version, and the compiler's source line counts.
+fn (c &Context) prepare_v(cdir string, commit string) {
+	// CC environment variable overrides the default C compiler:
+	mut cc := os.getenv('CC')
+	if cc == '' {
+		cc = 'cc'
+	}
+	mut vgit_context := vgit.VGitContext{
+		cc: cc
+		commit_v: commit
+		path_v: cdir
+		path_vc: c.vc
+		workdir: c.vgo.workdir
+		v_repo_url: c.vgo.v_repo_url
+		vc_repo_url: c.vgo.vc_repo_url
+	}
+	vgit_context.compile_oldv_if_needed()
+	scripting.chdir(cdir)
+	println('Making a v compiler in $cdir')
+	scripting.run('./v -cc $cc -o v $vgit_context.vvlocation')
+	println('Making a vprod compiler in $cdir')
+	scripting.run('./v -cc $cc -prod -o vprod $vgit_context.vvlocation')
+	println('Stripping and compressing cv v and vprod binaries in $cdir')
+	scripting.run('cp cv     cv_stripped')
+	scripting.run('cp v      v_stripped')
+	scripting.run('cp vprod  vprod_stripped')
+	scripting.run('strip *_stripped')
+	scripting.run('cp cv_stripped     cv_stripped_upxed')
+	scripting.run('cp v_stripped      v_stripped_upxed')
+	scripting.run('cp vprod_stripped  vprod_stripped_upxed')
+	scripting.run('upx -qqq --lzma cv_stripped_upxed')
+	scripting.run('upx -qqq --lzma v_stripped_upxed')
+	scripting.run('upx -qqq --lzma vprod_stripped_upxed')
+	scripting.show_sizes_of_files(['$cdir/cv', '$cdir/cv_stripped', '$cdir/cv_stripped_upxed'])
+	scripting.show_sizes_of_files(['$cdir/v', '$cdir/v_stripped', '$cdir/v_stripped_upxed'])
+	scripting.show_sizes_of_files(['$cdir/vprod', '$cdir/vprod_stripped',
+		'$cdir/vprod_stripped_upxed',
+	])
+	vversion := scripting.run('$cdir/v -version')
+	vcommit := scripting.run('git rev-parse --short --verify HEAD')
+	println('V version is: $vversion , local source commit: $vcommit')
+	// the source layout of the compiler changed over time; count lines accordingly:
+	if vgit_context.vvlocation == 'cmd/v' {
+		if os.exists('vlib/v/ast/ast.v') {
+			println('Source lines of the compiler: ' +
+				scripting.run('find cmd/v/ vlib/v/ -name "*.v" | grep -v /tests/ | xargs wc | tail -n -1'))
+		} else {
+			println('Source lines of the compiler: ' +
+				scripting.run('wc cmd/v/*.v vlib/compiler/*.v | tail -n -1'))
+		}
+	} else if vgit_context.vvlocation == 'v.v' {
+		println('Source lines of the compiler: ' +
+			scripting.run('wc v.v vlib/compiler/*.v | tail -n -1'))
+	} else {
+		println('Source lines of the compiler: ' + scripting.run('wc compiler/*.v | tail -n -1'))
+	}
+}
+
+// compare_v_performance benchmarks the given command templates with
+// hyperfine, once in the 'before' folder (c.b) and once in the 'after'
+// folder (c.a). The placeholders @COMPILER@ and @DEBUG@ are substituted
+// per folder (source location and -cg/-debug differ by V version/age).
+// It returns the path of the hyperfine JSON stats file for this `label`.
+fn (c Context) compare_v_performance(label string, commands []string) string {
+	println('---------------------------------------------------------------------------------')
+	println('Compare v performance when doing the following commands ($label):')
+	mut source_location_a := ''
+	mut source_location_b := ''
+	if os.exists('$c.a/cmd/v') {
+		source_location_a = 'cmd/v'
+	} else {
+		source_location_a = if os.exists('$c.a/v.v') { 'v.v  ' } else { 'compiler/ ' }
+	}
+	if os.exists('$c.b/cmd/v') {
+		source_location_b = 'cmd/v'
+	} else {
+		source_location_b = if os.exists('$c.b/v.v') { 'v.v  ' } else { 'compiler/ ' }
+	}
+	// older V versions (before 2019-10-12, timestamp 1570877641) used -debug instead of -cg:
+	timestamp_a, _ := vgit.line_to_timestamp_and_commit(scripting.run('cd $c.a/ ; git rev-list -n1 --timestamp HEAD'))
+	timestamp_b, _ := vgit.line_to_timestamp_and_commit(scripting.run('cd $c.b/ ; git rev-list -n1 --timestamp HEAD'))
+	debug_option_a := if timestamp_a > 1570877641 { '-cg ' } else { '-debug ' }
+	debug_option_b := if timestamp_b > 1570877641 { '-cg ' } else { '-debug ' }
+	mut hyperfine_commands_arguments := []string{}
+	for cmd in commands {
+		println(cmd)
+	}
+	for cmd in commands {
+		hyperfine_commands_arguments << ' \'cd ${c.b:-34s} ; ./$cmd \' '.replace_each([
+			'@COMPILER@',
+			source_location_b,
+			'@DEBUG@',
+			debug_option_b,
+		])
+	}
+	for cmd in commands {
+		hyperfine_commands_arguments << ' \'cd ${c.a:-34s} ; ./$cmd \' '.replace_each([
+			'@COMPILER@',
+			source_location_a,
+			'@DEBUG@',
+			debug_option_a,
+		])
+	}
+	// /////////////////////////////////////////////////////////////////////////////
+	cmd_stats_file := os.real_path([c.vgo.workdir, 'v_performance_stats_${label}.json'].join(os.path_separator))
+	comparison_cmd := 'hyperfine $c.hyperfineopts ' + '--export-json $cmd_stats_file ' +
+		'--time-unit millisecond ' + '--style full --warmup $c.warmups ' +
+		hyperfine_commands_arguments.join(' ')
+	// /////////////////////////////////////////////////////////////////////////////
+	if c.vgo.verbose {
+		println(comparison_cmd)
+	}
+	os.system(comparison_cmd)
+	println('The detailed performance comparison report was saved to: $cmd_stats_file .')
+	println('')
+	return cmd_stats_file
+}
+
+// main parses the performance_compare options, resolves the two commits
+// (COMMIT_BEFORE, and optionally COMMIT_AFTER, defaulting to master),
+// computes the per-commit work folders, and runs the full comparison.
+fn main() {
+	scripting.used_tools_must_exist(['cp', 'rm', 'strip', 'make', 'git', 'upx', 'cc', 'wc', 'tail',
+		'find', 'xargs', 'hyperfine'])
+	mut context := new_context()
+	mut fp := flag.new_flag_parser(os.args)
+	fp.application(os.file_name(os.executable()))
+	fp.version(tool_version)
+	fp.description(tool_description)
+	fp.arguments_description('COMMIT_BEFORE [COMMIT_AFTER]')
+	fp.skip_executable()
+	// 1 or 2 free arguments are required, so commits[0] below is always safe:
+	fp.limit_free_args(1, 2)
+	context.vflags = fp.string('vflags', 0, '', 'Additional options to pass to the v commands, for example "-cc tcc"')
+	context.hyperfineopts = fp.string('hyperfine_options', 0, '', 'Additional options passed to hyperfine.
+${flag.space}For example on linux, you may want to pass:
+$flag.space--hyperfine_options "--prepare \'sync; echo 3 | sudo tee /proc/sys/vm/drop_caches\'"
+')
+	commits := vgit.add_common_tool_options(mut context.vgo, mut fp)
+	context.commit_before = commits[0]
+	if commits.len > 1 {
+		context.commit_after = commits[1]
+	}
+	context.b = vgit.normalized_workpath_for_commit(context.vgo.workdir, context.commit_before)
+	context.a = vgit.normalized_workpath_for_commit(context.vgo.workdir, context.commit_after)
+	context.vc = vgit.normalized_workpath_for_commit(context.vgo.workdir, 'vc')
+	if !os.is_dir(context.vgo.workdir) {
+		msg := 'Work folder: ' + context.vgo.workdir + ' , does not exist.'
+		eprintln(msg)
+		exit(2)
+	}
+	context.compare_versions()
+}
diff --git a/v_windows/v/old/cmd/tools/repeat.v b/v_windows/v/old/cmd/tools/repeat.v
new file mode 100644
index 0000000..e69cfdf
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/repeat.v
@@ -0,0 +1,374 @@
+module main
+
+import os
+import flag
+import time
+import term
+import math
+import scripting
+
+// CmdResult accumulates the results of repeating one command
+// (filled in by Context.run).
+struct CmdResult {
+mut:
+	runs    int      // how many successful repetitions were done
+	cmd     string   // the exact command that was repeated
+	icmd    int      // the index of the command in context.commands
+	outputs []string // the collected output lines of all runs
+	oms     map[string][]int // merged 'key: int_value' pairs, parsed from the output lines
+	summary map[string]Aints // final per-key statistics over oms
+	timings []int    // the durations of each repetition, in ms
+	atiming Aints    // timing statistics over timings
+}
+
+// Context holds the parsed options and the accumulated results
+// of the `repeat` benchmarking tool.
+struct Context {
+mut:
+	count                   int  // number of repetitions per series (-c)
+	series                  int  // number of series (-s)
+	warmup                  int  // number of ignored warmup runs, done only at the start (-w)
+	show_help               bool
+	show_output             bool // show each command's output in the progress line (-O)
+	use_newline             bool // use \n instead of \r, so the last line is not overwritten
+	fail_on_regress_percent int  // exit(3) when the first command is this % slower than the rest
+	fail_on_maxtime         int  // in ms; exit(2) when the first command's average exceeds it
+	verbose                 bool
+	commands                []string // the expanded commands to benchmark
+	results                 []CmdResult // one accumulated result per command
+	cmd_template            string // {T} will be substituted with the current command
+	cmd_params              map[string][]string // parameter substitutions for the template
+	cline                   string // a terminal clearing line
+	cgoback                 string // \r or \n, used to return to the line start
+	nmins                   int    // number of minimums to discard
+	nmaxs                   int    // number of maximums to discard
+}
+
+// free manually releases all heap allocations owned by the CmdResult.
+[unsafe]
+fn (mut result CmdResult) free() {
+	unsafe {
+		result.cmd.free()
+		result.outputs.free()
+		result.oms.free()
+		result.summary.free()
+		result.timings.free()
+		result.atiming.free()
+	}
+}
+
+// free manually releases all heap allocations owned by the Context.
+[unsafe]
+fn (mut context Context) free() {
+	unsafe {
+		context.commands.free()
+		context.results.free()
+		context.cmd_template.free()
+		context.cmd_params.free()
+		context.cline.free()
+		context.cgoback.free()
+	}
+}
+
+// Aints holds simple statistics (min/max/mean/stddev) over a list of ints,
+// computed after discarding a number of extreme values. See new_aints.
struct Aints {
+	values []int // the original, unfiltered values
+mut:
+	imin    int // minimum of the remaining values
+	imax    int // maximum of the remaining values
+	average f64 // arithmetic mean of the remaining values
+	stddev  f64 // population standard deviation of the remaining values
+	nmins   int // number of discarded fastest results
+	nmaxs   int // number of discarded slowest results
+}
+
+// free manually releases the values array owned by the Aints.
+[unsafe]
+fn (mut a Aints) free() {
+	unsafe { a.values.free() }
+}
+
+// new_aints computes statistics over `ovals`, after sorting them and
+// discarding the `extreme_mins` smallest and `extreme_maxs` largest values
+// (making the statistics robust to performance flukes). If there are not
+// enough values to discard, ALL values are dropped and the stats stay zero.
+// NOTE(review): with an empty filtered list, stddev becomes sqrt(0/0), i.e.
+// NaN, and imin/imax keep their extreme sentinel values — confirm callers
+// tolerate that (they only print the result).
+fn new_aints(ovals []int, extreme_mins int, extreme_maxs int) Aints {
+	mut res := Aints{
+		values: ovals // remember the original values
+		nmins: extreme_mins
+		nmaxs: extreme_maxs
+	}
+	mut sum := i64(0)
+	mut imin := math.max_i32
+	mut imax := -math.max_i32
+	// discard the extremes:
+	mut vals := []int{}
+	for x in ovals {
+		vals << x
+	}
+	vals.sort()
+	if vals.len > extreme_mins + extreme_maxs {
+		vals = vals[extreme_mins..vals.len - extreme_maxs].clone()
+	} else {
+		vals = []
+	}
+	// statistical processing of the remaining values:
+	for i in vals {
+		sum += i
+		if i < imin {
+			imin = i
+		}
+		if i > imax {
+			imax = i
+		}
+	}
+	res.imin = imin
+	res.imax = imax
+	if vals.len > 0 {
+		res.average = sum / f64(vals.len)
+	}
+	//
+	mut devsum := f64(0.0)
+	for i in vals {
+		x := f64(i) - res.average
+		devsum += (x * x)
+	}
+	res.stddev = math.sqrt(devsum / f64(vals.len))
+	// eprintln('\novals: $ovals\n vals: $vals\n vals.len: $vals.len |  res.imin: $res.imin | res.imax: $res.imax | res.average: $res.average | res.stddev: $res.stddev')
+	return res
+}
+
+// bold returns `s` wrapped in terminal bold escape codes (when supported).
+fn bold(s string) string {
+	return term.colorize(term.bold, s)
+}
+
+// str formats the statistics as a single human readable line,
+// with the average in bold.
+fn (a Aints) str() string {
+	return bold('${a.average:6.2f}') +
+		'ms ± σ: ${a.stddev:4.1f}ms, min: ${a.imin:4}ms, max: ${a.imax:4}ms, runs:${a.values.len:3}, nmins:${a.nmins:2}, nmaxs:${a.nmaxs:2}'
+}
+
+// Defaults/sentinels for the regression thresholds (see parse_options):
+const (
+	max_fail_percent             = 100 * 1000 // sentinel meaning 'never fail on % regression'
+	max_time                     = 60 * 1000 // ms
+	performance_regression_label = 'Performance regression detected, failing since '
+)
+
+// main: parse the options, run all command series, then print the summary
+// (possibly exiting with 2/3 on detected performance regressions).
+fn main() {
+	mut context := Context{}
+	context.parse_options()
+	context.run()
+	context.show_diff_summary()
+}
+
+// parse_options fills the Context from the command line flags, expands the
+// command templates/parameters into context.commands, and pre-allocates one
+// CmdResult per command. Exits on --help or on flag parse errors.
+fn (mut context Context) parse_options() {
+	mut fp := flag.new_flag_parser(os.args)
+	fp.application(os.file_name(os.executable()))
+	fp.version('0.0.1')
+	fp.description('Repeat command(s) and collect statistics. NB: you have to quote each command, if it contains spaces.')
+	fp.arguments_description('CMD1 CMD2 ...')
+	fp.skip_executable()
+	fp.limit_free_args_to_at_least(1)
+	context.count = fp.int('count', `c`, 10, 'Repetition count.')
+	context.series = fp.int('series', `s`, 2, 'Series count. `-s 2 -c 4 a b` => aaaabbbbaaaabbbb, while `-s 3 -c 2 a b` => aabbaabbaabb.')
+	context.warmup = fp.int('warmup', `w`, 2, 'Warmup runs. These are done *only at the start*, and are ignored.')
+	context.show_help = fp.bool('help', `h`, false, 'Show this help screen.')
+	context.use_newline = fp.bool('newline', `n`, false, 'Use \\n, do not overwrite the last line. Produces more output, but easier to diagnose.')
+	context.show_output = fp.bool('output', `O`, false, 'Show command stdout/stderr in the progress indicator for each command. NB: slower, for verbose commands.')
+	context.verbose = fp.bool('verbose', `v`, false, 'Be more verbose.')
+	context.fail_on_maxtime = fp.int('max_time', `m`, max_time, 'Fail with exit code 2, when first cmd takes above M milliseconds (regression).')
+	context.fail_on_regress_percent = fp.int('fail_percent', `f`, max_fail_percent, 'Fail with exit code 3, when first cmd is X% slower than the rest (regression).')
+	context.cmd_template = fp.string('template', `t`, '{T}', 'Command template. {T} will be substituted with the current command.')
+	cmd_params := fp.string_multi('parameter', `p`, 'A parameter substitution list. `{p}=val1,val2,val2` means that {p} in the template, will be substituted with each of val1, val2, val3.')
+	context.nmins = fp.int('nmins', `i`, 0, 'Ignore the BOTTOM X results (minimum execution time). Makes the results more robust to performance flukes.')
+	context.nmaxs = fp.int('nmaxs', `a`, 1, 'Ignore the TOP X results (maximum execution time). Makes the results more robust to performance flukes.')
+	// NOTE(review): the help text above says `{p}=val1,...`, but the parameter
+	// is actually split on ':' here, i.e. `-p {p}:val1,val2` — confirm intent.
+	for p in cmd_params {
+		parts := p.split(':')
+		if parts.len > 1 {
+			context.cmd_params[parts[0]] = parts[1].split(',')
+		}
+	}
+	if context.show_help {
+		println(fp.usage())
+		exit(0)
+	}
+	if context.verbose {
+		scripting.set_verbose(true)
+	}
+	commands := fp.finalize() or {
+		eprintln('Error: $err')
+		exit(1)
+	}
+	context.commands = context.expand_all_commands(commands)
+	context.results = []CmdResult{len: context.commands.len, cap: 20, init: CmdResult{
+		outputs: []string{cap: 500}
+		timings: []int{cap: 500}
+	}}
+	if context.use_newline {
+		context.cline = '\n'
+		context.cgoback = '\n'
+	} else {
+		context.cline = '\r' + term.h_divider('')
+		context.cgoback = '\r'
+	}
+}
+
+// clear_line erases the current progress line (a \r + divider, or just \n).
+fn (mut context Context) clear_line() {
+	print(context.cline)
+}
+
+// expand_all_commands substitutes each command into the -t template ({T}),
+// then expands every -p parameter placeholder with each of its values,
+// producing the cross product of all substitutions. Commands that still
+// contain unexpanded '{...}' placeholders are dropped; the final list is
+// deduplicated (via map keys) before being returned.
+fn (mut context Context) expand_all_commands(commands []string) []string {
+	mut all_commands := []string{}
+	for cmd in commands {
+		maincmd := context.cmd_template.replace('{T}', cmd)
+		mut substituted_commands := []string{}
+		substituted_commands << maincmd
+		for paramk, paramlist in context.cmd_params {
+			for paramv in paramlist {
+				mut new_substituted_commands := []string{}
+				for cscmd in substituted_commands {
+					scmd := cscmd.replace(paramk, paramv)
+					new_substituted_commands << scmd
+				}
+				// append (not replace), so partially substituted variants remain
+				// available for the next parameter; leftovers are filtered below
+				for sc in new_substituted_commands {
+					substituted_commands << sc
+				}
+			}
+		}
+		for sc in substituted_commands {
+			all_commands << sc
+		}
+	}
+	mut unique := map[string]int{}
+	for x in all_commands {
+		if x.contains('{') && x.contains('}') {
+			// ignore commands with unexpanded parameters
+			continue
+		}
+		unique[x] = 1
+	}
+	return unique.keys()
+}
+
+// run executes every command `context.count` times per series, for
+// `context.series` series. Warmup runs happen only once per command, at the
+// start. For each run it records the duration and the output lines; output
+// lines of the form 'key: int' are parsed and merged into per-key series
+// (oms), from which the final summaries are computed at the end.
+fn (mut context Context) run() {
+	mut run_warmups := 0
+	for si in 1 .. context.series + 1 {
+		for icmd, cmd in context.commands {
+			mut runs := 0
+			mut duration := 0
+			mut sum := 0
+			mut oldres := ''
+			println('Series: ${si:4}/${context.series:-4}, command: $cmd')
+			// warmup only in the first series, once per command:
+			if context.warmup > 0 && run_warmups < context.commands.len {
+				for i in 1 .. context.warmup + 1 {
+					print('${context.cgoback}warming up run: ${i:4}/${context.warmup:-4} for ${cmd:-50s} took ${duration:6} ms ...')
+					mut sw := time.new_stopwatch()
+					res := os.execute(cmd)
+					if res.exit_code != 0 {
+						continue
+					}
+					duration = int(sw.elapsed().milliseconds())
+				}
+				run_warmups++
+			}
+			context.clear_line()
+			for i in 1 .. (context.count + 1) {
+				avg := f64(sum) / f64(i)
+				print('${context.cgoback}Average: ${avg:9.3f}ms | run: ${i:4}/${context.count:-4} | took ${duration:6} ms')
+				if context.show_output {
+					print(' | result: ${oldres:s}')
+				}
+				mut sw := time.new_stopwatch()
+				res := scripting.exec(cmd) or { continue }
+				duration = int(sw.elapsed().milliseconds())
+				if res.exit_code != 0 {
+					// failed runs are reported, but not counted in the stats
+					eprintln('${i:10} non 0 exit code for cmd: $cmd')
+					continue
+				}
+				trimed_output := res.output.trim_right('\r\n')
+				trimed_normalized := trimed_output.replace('\r\n', '\n')
+				lines := trimed_normalized.split('\n')
+				for line in lines {
+					context.results[icmd].outputs << line
+				}
+				context.results[icmd].timings << duration
+				sum += duration
+				runs++
+				oldres = res.output.replace('\n', ' ')
+			}
+			context.results[icmd].cmd = cmd
+			context.results[icmd].icmd = icmd
+			context.results[icmd].runs += runs
+			context.results[icmd].atiming = new_aints(context.results[icmd].timings, context.nmins,
+				context.nmaxs)
+			context.clear_line()
+			print(context.cgoback)
+			// parse 'key: int_value' output lines into per-key int series:
+			mut m := map[string][]int{}
+			ioutputs := context.results[icmd].outputs
+			for o in ioutputs {
+				x := o.split(':')
+				if x.len > 1 {
+					k := x[0]
+					v := x[1].trim_left(' ').int()
+					m[k] << v
+				}
+			}
+			mut summary := map[string]Aints{}
+			for k, v in m {
+				// show a temporary summary for the current series/cmd cycle
+				s := new_aints(v, context.nmins, context.nmaxs)
+				println('    $k: $s')
+				summary[k] = s
+			}
+			// merge current raw results to the previous ones
+			old_oms := context.results[icmd].oms.move()
+			mut new_oms := map[string][]int{}
+			for k, v in m {
+				if old_oms[k].len == 0 {
+					new_oms[k] = v
+				} else {
+					new_oms[k] << old_oms[k]
+					new_oms[k] << v
+				}
+			}
+			context.results[icmd].oms = new_oms.move()
+			// println('')
+		}
+	}
+	// create full summaries, taking account of all runs
+	for icmd in 0 .. context.results.len {
+		mut new_full_summary := map[string]Aints{}
+		for k, v in context.results[icmd].oms {
+			new_full_summary[k] = new_aints(v, context.nmins, context.nmaxs)
+		}
+		context.results[icmd].summary = new_full_summary.move()
+	}
+}
+
+// show_diff_summary sorts the results by ascending mean time, prints the
+// comparison table (marking the FIRST given command with '>'), then exits
+// with code 2, when the first command's average exceeds --max_time, or with
+// code 3, when it is more than --fail_percent slower than the fastest one.
+fn (mut context Context) show_diff_summary() {
+	context.results.sort_with_compare(fn (a &CmdResult, b &CmdResult) int {
+		if a.atiming.average < b.atiming.average {
+			return -1
+		}
+		if a.atiming.average > b.atiming.average {
+			return 1
+		}
+		return 0
+	})
+	println('Summary (commands are ordered by ascending mean time), after $context.series series of $context.count repetitions:')
+	base := context.results[0].atiming.average
+	mut first_cmd_percentage := f64(100.0)
+	mut first_marker := ''
+	for i, r in context.results {
+		first_marker = ' '
+		cpercent := (r.atiming.average / base) * 100 - 100
+		if r.icmd == 0 {
+			first_marker = bold('>')
+			first_cmd_percentage = cpercent
+		}
+		println(' $first_marker${(i + 1):3} | ${cpercent:5.1f}% slower | ${r.cmd:-57s} | $r.atiming')
+	}
+	$if debugcontext ? {
+		println('context: $context')
+	}
+	if int(base) > context.fail_on_maxtime {
+		print(performance_regression_label)
+		println('average time: ${base:6.1f} ms > $context.fail_on_maxtime ms threshold.')
+		exit(2)
+	}
+	if context.fail_on_regress_percent == max_fail_percent || context.results.len < 2 {
+		return
+	}
+	fail_threshold_max := f64(context.fail_on_regress_percent)
+	if first_cmd_percentage > fail_threshold_max {
+		print(performance_regression_label)
+		println('${first_cmd_percentage:5.1f}% > ${fail_threshold_max:5.1f}% threshold.')
+		exit(3)
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/test_if_v_test_system_works.v b/v_windows/v/old/cmd/tools/test_if_v_test_system_works.v
new file mode 100644
index 0000000..a86abd8
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/test_if_v_test_system_works.v
@@ -0,0 +1,74 @@
+module main
+
+// This program verifies that `v test` propagates errors,
+// and that it exits with code 1, when at least 1 FAIL happens.
+import os
+import rand
+
+const (
+ vexe = get_vexe_path()
+ vroot = os.dir(vexe)
+ tdir = new_tdir()
+)
+
+// Locate the V executable. An explicit $VEXE environment override wins;
+// otherwise the path is derived from the location of this tool's own binary.
+fn get_vexe_path() string {
+	env_vexe := os.getenv('VEXE')
+	if env_vexe.len != 0 {
+		return env_vexe
+	}
+	// assume this tool's binary lives 3 folder levels below <vroot>
+	me := os.executable()
+	eprintln('me: $me')
+	mut candidate := os.join_path(os.dir(os.dir(os.dir(me))), 'v')
+	if os.user_os() == 'windows' {
+		candidate += '.exe'
+	}
+	return candidate
+}
+
+// new_tdir creates a unique temporary directory for the generated test files,
+// and registers cleanup_tdir (via C.atexit), so it is removed on program exit.
+fn new_tdir() string {
+	// rand.ulid() produces a unique id, so parallel runs do not clash
+	tdir_ := os.join_path(os.temp_dir(), rand.ulid())
+	if os.exists(tdir_) {
+		os.rmdir(tdir_) or { panic(err) }
+	}
+	os.mkdir(tdir_) or { panic(err) }
+	C.atexit(cleanup_tdir)
+	return tdir_
+}
+
+// cleanup_tdir removes the temporary test directory. It is invoked through
+// C.atexit, so it must not panic: a panic while the process is exiting would
+// obscure the real exit code, which is exactly what this program verifies.
+fn cleanup_tdir() {
+	println('... removing tdir: $tdir')
+	os.rmdir_all(tdir) or { eprintln('could not remove $tdir: $err') }
+}
+
+// main writes a passing and a failing test file into tdir, then exercises
+// both `v file.v` and `v test ...` on them, verifying that a FAIL
+// propagates as a non-zero exit code.
+fn main() {
+	println('> vroot: $vroot | vexe: $vexe | tdir: $tdir')
+	ok_fpath := os.join_path(tdir, 'single_test.v')
+	os.write_file(ok_fpath, 'fn test_ok(){ assert true }') ?
+	check_ok('"$vexe" $ok_fpath')
+	check_ok('"$vexe" test $ok_fpath')
+	fail_fpath := os.join_path(tdir, 'failing_test.v')
+	os.write_file(fail_fpath, 'fn test_fail(){ assert 1 == 2 }') ?
+	check_fail('"$vexe" $fail_fpath')
+	check_fail('"$vexe" test $fail_fpath')
+	// a folder containing at least 1 failing test file should also fail
+	check_fail('"$vexe" test $tdir')
+}
+
+// check_ok runs cmd, and terminates the program with exit code 1 when the
+// command did not succeed. Returns the command's combined output.
+fn check_ok(cmd string) string {
+	println('> check_ok cmd: $cmd')
+	result := os.execute(cmd)
+	if result.exit_code != 0 {
+		eprintln('> check_ok failed.\n$result.output')
+		exit(1)
+	}
+	return result.output
+}
+
+// check_fail runs cmd, and terminates the program with exit code 1 when the
+// command *unexpectedly succeeded*. Returns the command's combined output.
+fn check_fail(cmd string) string {
+	println('> check_fail cmd: $cmd')
+	result := os.execute(cmd)
+	if result.exit_code == 0 {
+		eprintln('> check_fail succeeded, but it should have failed.\n$result.output')
+		exit(1)
+	}
+	return result.output
+}
diff --git a/v_windows/v/old/cmd/tools/test_os_process.v b/v_windows/v/old/cmd/tools/test_os_process.v
new file mode 100644
index 0000000..bc275dd
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/test_os_process.v
@@ -0,0 +1,82 @@
+module main
+
+import os
+import time
+import os.cmdline
+
+enum Target {
+ both
+ stderr
+ stdout
+ alternate
+}
+
+// s2target maps a -target option string to its Target value.
+// Any unrecognised string defaults to .stdout .
+fn s2target(s string) Target {
+	if s == 'both' {
+		return Target.both
+	}
+	if s == 'stderr' {
+		return Target.stderr
+	}
+	if s == 'alternate' {
+		return Target.alternate
+	}
+	return Target.stdout
+}
+
+struct Context {
+mut:
+ timeout_ms int
+ period_ms int
+ exitcode int
+ target Target
+ omode Target
+ is_verbose bool
+}
+
+// println writes s to stdout, stderr or both, depending on ctx.target.
+// In .alternate mode, ctx.omode flips between .stdout and .stderr on every
+// call, so consecutive lines go to alternating streams.
+fn (mut ctx Context) println(s string) {
+	if ctx.target == .alternate {
+		ctx.omode = if ctx.omode == .stderr { Target.stdout } else { Target.stderr }
+	}
+	// note: the `||` makes the omode checks only matter in .alternate mode,
+	// since omode stays at its zero value (.both) otherwise
+	if ctx.target in [.both, .stdout] || ctx.omode == .stdout {
+		println('stdout, $s')
+	}
+	if ctx.target in [.both, .stderr] || ctx.omode == .stderr {
+		eprintln('stderr, $s')
+	}
+}
+
+// do_timeout sleeps for ctx.timeout_ms, then terminates the whole process
+// with ctx.exitcode. It is started as a separate thread by main (see the
+// `go do_timeout(&ctx)` call there).
+fn do_timeout(c &Context) {
+	// cast away the immutability of the reference; this thread only reads it
+	mut ctx := unsafe { c }
+	time.sleep(ctx.timeout_ms * time.millisecond)
+	exit(ctx.exitcode)
+}
+
+// main parses the CLI options, then prints numbered lines periodically to
+// stdout/stderr until the do_timeout thread terminates the process with the
+// requested exit code. Fixes: 'standart' typo in the help text, and removal
+// of the unreachable sleep after the infinite loop.
+fn main() {
+	mut ctx := Context{}
+	args := os.args[1..]
+	if '-h' in args || '--help' in args {
+		println("Usage:
+  test_os_process [-v] [-h] [-target stderr/stdout/both/alternate] [-exitcode 0] [-timeout_ms 200] [-period_ms 50]
+  Prints lines periodically (-period_ms), to stdout/stderr (-target).
+  After a while (-timeout_ms), exit with (-exitcode).
+  This program is useful for platform independent testing
+  of child process/standard input/output control.
+  It is used in V's `os` module tests.
+")
+		return
+	}
+	ctx.is_verbose = '-v' in args
+	ctx.target = s2target(cmdline.option(args, '-target', 'both'))
+	ctx.exitcode = cmdline.option(args, '-exitcode', '0').int()
+	ctx.timeout_ms = cmdline.option(args, '-timeout_ms', '200').int()
+	ctx.period_ms = cmdline.option(args, '-period_ms', '50').int()
+	if ctx.target == .alternate {
+		// start on stdout; ctx.println flips the stream on every call
+		ctx.omode = .stdout
+	}
+	if ctx.is_verbose {
+		eprintln('> args: $args | context: $ctx')
+	}
+	// the timeout thread is the only way this program ends - it calls exit()
+	go do_timeout(&ctx)
+	for i := 1; true; i++ {
+		ctx.println('$i')
+		time.sleep(ctx.period_ms * time.millisecond)
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vast/cjson.v b/v_windows/v/old/cmd/tools/vast/cjson.v
new file mode 100644
index 0000000..63e9f5d
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vast/cjson.v
@@ -0,0 +1,114 @@
+module main
+
+import json
+
+struct UseJson {
+ x int
+}
+
+fn suppress_json_warning() {
+ json.encode(UseJson{})
+}
+
+// struct C.cJSON {}
+fn C.cJSON_CreateObject() &C.cJSON
+
+fn C.cJSON_CreateArray() &C.cJSON
+
+// fn C.cJSON_CreateBool(bool) &C.cJSON
+fn C.cJSON_CreateTrue() &C.cJSON
+
+fn C.cJSON_CreateFalse() &C.cJSON
+
+fn C.cJSON_CreateNull() &C.cJSON
+
+// fn C.cJSON_CreateNumber() &C.cJSON
+// fn C.cJSON_CreateString() &C.cJSON
+fn C.cJSON_CreateRaw(&byte) &C.cJSON
+
+fn C.cJSON_IsInvalid(voidptr) bool
+
+fn C.cJSON_IsFalse(voidptr) bool
+
+// fn C.cJSON_IsTrue(voidptr) bool
+fn C.cJSON_IsBool(voidptr) bool
+
+fn C.cJSON_IsNull(voidptr) bool
+
+fn C.cJSON_IsNumber(voidptr) bool
+
+fn C.cJSON_IsString(voidptr) bool
+
+fn C.cJSON_IsArray(voidptr) bool
+
+fn C.cJSON_IsObject(voidptr) bool
+
+fn C.cJSON_IsRaw(voidptr) bool
+
+fn C.cJSON_AddItemToObject(voidptr, &byte, voidptr)
+
+fn C.cJSON_AddItemToArray(voidptr, voidptr)
+
+fn C.cJSON_Delete(voidptr)
+
+fn C.cJSON_Print(voidptr) &byte
+
+[inline]
+fn create_object() &C.cJSON {
+ return C.cJSON_CreateObject()
+}
+
+[inline]
+fn create_array() &C.cJSON {
+ return C.cJSON_CreateArray()
+}
+
+[inline]
+fn create_string(val string) &C.cJSON {
+ return C.cJSON_CreateString(val.str)
+}
+
+[inline]
+fn create_number(val f64) &C.cJSON {
+ return C.cJSON_CreateNumber(val)
+}
+
+[inline]
+fn create_bool(val bool) &C.cJSON {
+ return C.cJSON_CreateBool(val)
+}
+
+[inline]
+fn create_true() &C.cJSON {
+ return C.cJSON_CreateTrue()
+}
+
+[inline]
+fn create_false() &C.cJSON {
+ return C.cJSON_CreateFalse()
+}
+
+[inline]
+fn create_null() &C.cJSON {
+ return C.cJSON_CreateNull()
+}
+
+[inline]
+fn delete(b voidptr) {
+ C.cJSON_Delete(b)
+}
+
+[inline]
+fn add_item_to_object(obj &C.cJSON, key string, item &C.cJSON) {
+ C.cJSON_AddItemToObject(obj, key.str, item)
+}
+
+[inline]
+fn add_item_to_array(obj &C.cJSON, item &C.cJSON) {
+ C.cJSON_AddItemToArray(obj, item)
+}
+
+// json_print renders the given cJSON node tree to its JSON string form.
+// NOTE(review): the buffer returned by C.cJSON_Print is wrapped by tos3
+// without copying - confirm whether it needs an explicit free to avoid
+// leaking on every call.
+fn json_print(json &C.cJSON) string {
+	s := C.cJSON_Print(json)
+	return unsafe { tos3(s) }
+}
diff --git a/v_windows/v/old/cmd/tools/vast/test/.gitignore b/v_windows/v/old/cmd/tools/vast/test/.gitignore
new file mode 100644
index 0000000..857cade
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vast/test/.gitignore
@@ -0,0 +1 @@
+demo.json
diff --git a/v_windows/v/old/cmd/tools/vast/test/demo.v b/v_windows/v/old/cmd/tools/vast/test/demo.v
new file mode 100644
index 0000000..a2176b1
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vast/test/demo.v
@@ -0,0 +1,121 @@
+// usage test: v ast path_to_v/cmd/tools/vast/test/demo.v
+// will generate demo.json
+
+// comment for module
+module main
+
+// import module
+import os
+import math
+import time { Time, now }
+
+// const decl
+const (
+ a = 1
+ b = 3
+ c = 'c'
+)
+
+// struct decl
+struct Point {
+ x int
+mut:
+ y int
+pub:
+ z int
+pub mut:
+ name string
+}
+
+// method of Point
+pub fn (p Point) get_x() int {
+ return p.x
+}
+
+// embed struct
+struct MyPoint {
+ Point
+ title string
+}
+
+// enum type
+enum Color {
+ red
+ green
+ blue
+}
+
+// type alias
+type Myint = int
+
+// sum type
+type MySumType = bool | int | string
+
+// function type
+type Myfn = fn (int) int
+
+// interface type
+interface Myinterfacer {
+ add(int, int) int
+ sub(int, int) int
+}
+
+// main function
+fn main() {
+ add(1, 3)
+ println(add(1, 2))
+ println('ok') // comment println
+ arr := [1, 3, 5, 7]
+ for a in arr {
+ println(a)
+ add(1, 3)
+ }
+ color := Color.red
+ println(color)
+ println(os.args)
+ m := math.max(1, 3)
+ println(m)
+ println(now())
+ t := Time{}
+ println(t)
+ p := Point{
+ x: 1
+ y: 2
+ z: 3
+ }
+ println(p)
+ my_point := MyPoint{
+ // x: 1
+ // y: 3
+ // z: 5
+ }
+ println(my_point.get_x())
+}
+
+// normal function
+fn add(x int, y int) int {
+ return x + y
+}
+
+// function with defer stmt
+fn defer_fn() {
+ mut x := 1
+ println('start fn')
+ defer {
+ println('in defer block')
+ println(x)
+ }
+ println('end fn')
+}
+
+// generic function
+fn g_fn<T>(p T) T {
+ return p
+}
+
+// generic struct
+struct GenericStruct<T> {
+ point Point
+mut:
+ model T
+}
diff --git a/v_windows/v/old/cmd/tools/vast/vast.v b/v_windows/v/old/cmd/tools/vast/vast.v
new file mode 100644
index 0000000..fe2d5da
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vast/vast.v
@@ -0,0 +1,2246 @@
+module main
+
+import os
+import time
+import flag
+import v.token
+import v.parser
+import v.ast
+import v.pref
+import v.errors
+
+struct Context {
+mut:
+ is_watch bool
+ is_compile bool
+ is_print bool
+}
+
+struct HideFields {
+mut:
+ names map[string]bool
+}
+
+const hide_fields = &HideFields{}
+
+fn main() {
+ if os.args.len < 2 {
+ eprintln('not enough parameters')
+ exit(1)
+ }
+ mut ctx := Context{}
+ mut fp := flag.new_flag_parser(os.args[2..])
+ fp.application('v ast')
+ fp.usage_example('demo.v generate demo.json file.')
+ fp.usage_example('-w demo.v generate demo.json file, and watch for changes.')
+ fp.usage_example('-c demo.v generate demo.json *and* a demo.c file, and watch for changes.')
+ fp.usage_example('-p demo.v print the json output to stdout.')
+ fp.description('Dump a JSON representation of the V AST for a given .v or .vsh file.')
+ fp.description('By default, `v ast` will save the JSON to a .json file, named after the .v file.')
+ fp.description('Pass -p to see it instead.')
+ ctx.is_watch = fp.bool('watch', `w`, false, 'watch a .v file for changes, rewrite the .json file, when a change is detected')
+ ctx.is_print = fp.bool('print', `p`, false, 'print the AST to stdout')
+ ctx.is_compile = fp.bool('compile', `c`, false, 'watch the .v file for changes, rewrite the .json file, *AND* generate a .c file too on any change')
+ hfields := fp.string_multi('hide', 0, 'hide the specified fields. You can give several, by separating them with `,`').join(',')
+ mut mhf := unsafe { hide_fields }
+ for hf in hfields.split(',') {
+ mhf.names[hf] = true
+ }
+ fp.limit_free_args_to_at_least(1)
+ rest_of_args := fp.remaining_parameters()
+ for vfile in rest_of_args {
+ file := get_abs_path(vfile)
+ check_file(file)
+ ctx.write_file_or_print(file)
+ if ctx.is_watch || ctx.is_compile {
+ ctx.watch_for_changes(file)
+ }
+ }
+}
+
+// write_file_or_print either dumps the AST as JSON to stdout (with -p),
+// or writes it to a .json file next to the input file, and logs its path.
+fn (ctx Context) write_file_or_print(file string) {
+	if ctx.is_print {
+		println(json(file))
+	} else {
+		println('$time.now(): AST written to: ' + json_file(file))
+	}
+}
+
+// generate ast json file and c source code file
+// watch_for_changes polls the file's modification time twice a second; on
+// each detected change it regenerates the .json dump, and (with -c) also
+// compiles the file to C. It never returns.
+fn (ctx Context) watch_for_changes(file string) {
+	println('start watching...')
+	mut timestamp := 0
+	for {
+		new_timestamp := os.file_last_mod_unix(file)
+		if timestamp != new_timestamp {
+			ctx.write_file_or_print(file)
+			if ctx.is_compile {
+				// strip the extension, producing <file>.c next to the input
+				file_name := file[0..(file.len - os.file_ext(file).len)]
+				os.system('v -o ${file_name}.c $file')
+			}
+		}
+		timestamp = new_timestamp
+		time.sleep(500 * time.millisecond)
+	}
+}
+
+// get absolute path for file
+// get_abs_path returns path unchanged when it is already absolute,
+// otherwise it is resolved against the current working directory.
+fn get_abs_path(path string) string {
+	if os.is_abs_path(path) {
+		return path
+	}
+	if path.starts_with('./') {
+		// drop the leading ./ before joining
+		return os.join_path(os.getwd(), path[2..])
+	}
+	return os.join_path(os.getwd(), path)
+}
+
+// check file is v file and exists
+// check_file validates that file exists, and has a V related extension
+// (.v/.vv/.vsh); it prints a diagnostic and exits with code 1 otherwise.
+fn check_file(file string) {
+	ext := os.file_ext(file)
+	if ext !in ['.v', '.vv', '.vsh'] {
+		eprintln('the file `$file` must be a v file or vsh file')
+		exit(1)
+	}
+	if !os.exists(file) {
+		eprintln('the v file `$file` does not exist')
+		exit(1)
+	}
+}
+
+// generate json file with the same file name
+// json_file generates the AST JSON dump for file, saves it next to the
+// input with a .json extension, and returns the written file's path.
+fn json_file(file string) string {
+	ast_json := json(file)
+	// strip the original extension, so both .v and .vsh inputs work
+	base := file[0..(file.len - os.file_ext(file).len)]
+	out_path := base + '.json'
+	os.write_file(out_path, ast_json) or { panic(err) }
+	return out_path
+}
+
+// generate json string
+// json parses the given file into a V AST, and returns the whole tree
+// rendered as a JSON string.
+fn json(file string) string {
+	// use as permissive preferences as possible, so that `v ast`
+	// can print the AST of arbitrary V files, even .vsh or ones
+	// that require globals:
+	// NOTE: the local `pref` intentionally shadows the imported v.pref module
+	mut pref := &pref.Preferences{}
+	pref.fill_with_defaults()
+	pref.enable_globals = true
+	pref.is_fmt = true
+	//
+	mut t := Tree{
+		root: new_object()
+		table: ast.new_table()
+		pref: pref
+	}
+	// parse file with comment
+	ast_file := parser.parse_file(file, t.table, .parse_comments, t.pref)
+	t.root = t.ast_file(ast_file)
+	// generate the ast string
+	s := json_print(t.root)
+	return s
+}
+
+// the ast tree
+struct Tree {
+ table &ast.Table
+ pref &pref.Preferences
+mut:
+ root Node // the root of tree
+}
+
+// tree node
+pub type Node = C.cJSON
+
+// create an object node
+[inline]
+fn new_object() &Node {
+ return C.cJSON_CreateObject()
+}
+
+// add item to object node
+[inline]
+fn (node &Node) add(key string, child &Node) {
+ if hide_fields.names.len > 0 && key in hide_fields.names {
+ return
+ }
+ add_item_to_object(node, key, child)
+}
+
+// create an array node
+[inline]
+fn new_array() &Node {
+ return C.cJSON_CreateArray()
+}
+
+// add item to array node
+[inline]
+fn (node &Node) add_item(child &Node) {
+ add_item_to_array(node, child)
+}
+
+// string type node
+fn (t Tree) string_node(val string) &Node {
+ return create_string(val)
+}
+
+// number type node
+fn (t Tree) number_node(val int) &Node {
+ return create_number(val)
+}
+
+// bool type node
+fn (t Tree) bool_node(val bool) &Node {
+ if val {
+ return create_true()
+ } else {
+ return create_false()
+ }
+}
+
+// null type node
+fn (t Tree) null_node() &Node {
+ return create_null()
+}
+
+// type node
+fn (t Tree) type_node(typ ast.Type) &Node {
+ if typ == 0 {
+ return create_null()
+ } else {
+ type_name := t.table.get_type_name(typ)
+ return create_string(type_name)
+ }
+}
+
+// token type node
+fn (t Tree) token_node(tok_kind token.Kind) &Node {
+ return t.string_node('token:${int(tok_kind)}($tok_kind.str())')
+}
+
+// enum type node
+fn (t Tree) enum_node<T>(value T) &Node {
+ return t.string_node('enum:${int(value)}($value)')
+}
+
+// for [][]comment
+fn (t Tree) two_dimension_comment(node [][]ast.Comment) &Node {
+ mut comments := new_array()
+ for n in node {
+ mut comment_array := new_array()
+ for c in n {
+ comment_array.add_item(t.comment(c))
+ }
+ comments.add_item(comment_array)
+ }
+ return comments
+}
+
+// ast file root node
+fn (t Tree) ast_file(node ast.File) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('ast.File'))
+ obj.add('path', t.string_node(node.path))
+ obj.add('path_base', t.string_node(node.path_base))
+ obj.add('nr_lines', t.number_node(node.nr_lines))
+ obj.add('nr_bytes', t.number_node(node.nr_bytes))
+ obj.add('mod', t.mod(node.mod))
+ obj.add('imports', t.imports(node.imports))
+ obj.add('global_scope', t.scope(node.global_scope))
+ obj.add('scope', t.scope(node.scope))
+ obj.add('errors', t.errors(node.errors))
+ obj.add('warnings', t.warnings(node.warnings))
+ obj.add('notices', t.notices(node.notices))
+ obj.add('auto_imports', t.array_node_string(node.auto_imports))
+ symbol_obj := new_object()
+ for key, val in node.imported_symbols {
+ symbol_obj.add(key, t.string_node(val))
+ }
+ obj.add('imported_symbols', symbol_obj)
+ obj.add('generic_fns', t.array_node_generic_fns(node.generic_fns))
+ obj.add('embedded_files', t.array_node_embed_file(node.embedded_files))
+ obj.add('global_labels', t.array_node_string(node.global_labels))
+ obj.add('is_test', t.bool_node(node.is_test))
+ obj.add('stmts', t.stmts(node.stmts))
+ return obj
+}
+
+// embed files
+fn (t Tree) embed_file(node ast.EmbeddedFile) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('EmbeddedFile'))
+ obj.add('rpath', t.string_node(node.rpath))
+ obj.add('apath', t.string_node(node.apath))
+ return obj
+}
+
+// ast module node
+fn (t Tree) mod(node ast.Module) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('Module'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('short_name', t.string_node(node.short_name))
+ obj.add('attrs', t.array_node_attr(node.attrs))
+ obj.add('pos', t.position(node.pos))
+ obj.add('name_pos', t.position(node.name_pos))
+ obj.add('is_skipped', t.bool_node(node.is_skipped))
+ return obj
+}
+
+fn (t Tree) scope(scope ast.Scope) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('Scope'))
+ obj.add('parent', t.string_node(ptr_str(scope.parent)))
+ children_arr := new_array()
+ for s in scope.children {
+ mut children_obj := new_object()
+ children_obj.add('parent', t.string_node(ptr_str(s.parent)))
+ children_obj.add('start_pos', t.number_node(s.start_pos))
+ children_obj.add('end_pos', t.number_node(s.end_pos))
+ children_arr.add_item(children_obj)
+ }
+ obj.add('children', children_arr)
+ obj.add('start_pos', t.number_node(scope.start_pos))
+ obj.add('end_pos', t.number_node(scope.end_pos))
+ obj.add('objects', t.objects(scope.objects))
+ obj.add('struct_fields', t.array_node_scope_struct_field(scope.struct_fields))
+ return obj
+}
+
+fn (t Tree) scope_struct_field(node ast.ScopeStructField) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('ScopeStructField'))
+ obj.add('struct_type', t.type_node(node.struct_type))
+ obj.add('name', t.string_node(node.name))
+ obj.add('typ', t.type_node(node.typ))
+ obj.add('orig_type', t.type_node(node.orig_type))
+ obj.add('pos', t.position(node.pos))
+ obj.add('smartcasts', t.array_node_type(node.smartcasts))
+ return obj
+}
+
+fn (t Tree) objects(so map[string]ast.ScopeObject) &Node {
+ mut obj := new_object()
+ for key, val in so {
+ obj.add(key, t.scope_object(val))
+ }
+ return obj
+}
+
+// scope_object serialises a single scope object variant.
+// Bug fix: previously the match arms' results were discarded as statements,
+// and an always-empty JSON object was returned; the serialised node is now
+// actually returned via a match *expression*.
+fn (t Tree) scope_object(node ast.ScopeObject) &Node {
+	return match node {
+		ast.ConstField { t.const_field(node) }
+		ast.GlobalField { t.global_field(node) }
+		ast.Var { t.var(node) }
+		ast.AsmRegister { t.asm_register(node) }
+	}
+}
+
+fn (t Tree) imports(nodes []ast.Import) &Node {
+ mut import_array := new_array()
+ for node in nodes {
+ import_array.add_item(t.import_module(node))
+ }
+ return import_array
+}
+
+// errors serialises the compiler errors collected for the file.
+// Consistency fix: `obj` is declared `mut` like in the sibling warnings()
+// and notices() serialisers, since it is mutated by the add() calls.
+fn (t Tree) errors(errors []errors.Error) &Node {
+	mut errs := new_array()
+	for e in errors {
+		mut obj := new_object()
+		obj.add('message', t.string_node(e.message))
+		obj.add('file_path', t.string_node(e.file_path))
+		obj.add('pos', t.position(e.pos))
+		obj.add('backtrace', t.string_node(e.backtrace))
+		obj.add('reporter', t.enum_node(e.reporter))
+		errs.add_item(obj)
+	}
+	return errs
+}
+
+// warnings serialises the compiler warnings collected for the file.
+fn (t Tree) warnings(warnings []errors.Warning) &Node {
+	mut result := new_array()
+	for warning in warnings {
+		mut item := new_object()
+		item.add('message', t.string_node(warning.message))
+		item.add('file_path', t.string_node(warning.file_path))
+		item.add('pos', t.position(warning.pos))
+		item.add('reporter', t.enum_node(warning.reporter))
+		result.add_item(item)
+	}
+	return result
+}
+
+// notices serialises the compiler notices collected for the file.
+fn (t Tree) notices(notices []errors.Notice) &Node {
+	mut result := new_array()
+	for notice in notices {
+		mut item := new_object()
+		item.add('message', t.string_node(notice.message))
+		item.add('file_path', t.string_node(notice.file_path))
+		item.add('pos', t.position(notice.pos))
+		item.add('reporter', t.enum_node(notice.reporter))
+		result.add_item(item)
+	}
+	return result
+}
+
+// stmt array node
+// stmts serialises a list of statements into a JSON array node.
+fn (t Tree) stmts(stmts []ast.Stmt) &Node {
+	mut result := new_array()
+	for node in stmts {
+		result.add_item(t.stmt(node))
+	}
+	return result
+}
+
+// stmt dispatches a statement sum type value to its specific serialiser.
+fn (t Tree) stmt(node ast.Stmt) &Node {
+	match node {
+		ast.Module { return t.mod(node) }
+		ast.Import { return t.import_module(node) }
+		ast.ConstDecl { return t.const_decl(node) }
+		ast.FnDecl { return t.fn_decl(node) }
+		ast.StructDecl { return t.struct_decl(node) }
+		ast.EnumDecl { return t.enum_decl(node) }
+		ast.InterfaceDecl { return t.interface_decl(node) }
+		ast.HashStmt { return t.hash_stmt(node) }
+		ast.CompFor { return t.comp_for(node) }
+		ast.GlobalDecl { return t.global_decl(node) }
+		ast.DeferStmt { return t.defer_stmt(node) }
+		ast.TypeDecl { return t.type_decl(node) }
+		ast.GotoLabel { return t.goto_label(node) }
+		ast.GotoStmt { return t.goto_stmt(node) }
+		ast.AssignStmt { return t.assign_stmt(node) }
+		ast.Return { return t.return_(node) }
+		ast.ForCStmt { return t.for_c_stmt(node) }
+		ast.ForStmt { return t.for_stmt(node) }
+		ast.ForInStmt { return t.for_in_stmt(node) }
+		ast.BranchStmt { return t.branch_stmt(node) }
+		ast.AssertStmt { return t.assert_stmt(node) }
+		ast.ExprStmt { return t.expr_stmt(node) }
+		ast.Block { return t.block(node) }
+		ast.SqlStmt { return t.sql_stmt(node) }
+		ast.AsmStmt { return t.asm_stmt(node) }
+		ast.NodeError { return t.node_error(node) }
+		ast.EmptyStmt { return t.empty_stmt(node) }
+	}
+	// unreachable: the match above is exhaustive and every arm returns,
+	// but a trailing return is still kept for the compiler's flow analysis
+	return t.null_node()
+}
+
+fn (t Tree) import_module(node ast.Import) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('Import'))
+ obj.add('mod', t.string_node(node.mod))
+ obj.add('alias', t.string_node(node.alias))
+ obj.add('syms', t.array_node_import_symbol(node.syms))
+ obj.add('comments', t.array_node_comment(node.comments))
+ obj.add('next_comments', t.array_node_comment(node.next_comments))
+ obj.add('pos', t.position(node.pos))
+ obj.add('mod_pos', t.position(node.mod_pos))
+ obj.add('alias_pos', t.position(node.alias_pos))
+ obj.add('syms_pos', t.position(node.syms_pos))
+ return obj
+}
+
+fn (t Tree) import_symbol(node ast.ImportSymbol) &Node {
+ mut obj := new_object()
+ obj.add('name', t.string_node(node.name))
+ obj.add('pos', t.position(node.pos))
+ return obj
+}
+
+fn (t Tree) position(p token.Position) &Node {
+ mut obj := new_object()
+ obj.add('line_nr', t.number_node(p.line_nr))
+ obj.add('last_line', t.number_node(p.last_line))
+ obj.add('pos', t.number_node(p.pos))
+ obj.add('len', t.number_node(p.len))
+ return obj
+}
+
+fn (t Tree) comment(node ast.Comment) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('Comment'))
+ obj.add('text', t.string_node(node.text))
+ obj.add('is_multi', t.bool_node(node.is_multi))
+ obj.add('is_inline', t.bool_node(node.is_inline))
+ obj.add('pos', t.position(node.pos))
+ return obj
+}
+
+fn (t Tree) const_decl(node ast.ConstDecl) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('ConstDecl'))
+ obj.add('is_pub', t.bool_node(node.is_pub))
+ obj.add('is_block', t.bool_node(node.is_block))
+ obj.add('fields', t.array_node_const_field(node.fields))
+ obj.add('end_comments', t.array_node_comment(node.end_comments))
+ obj.add('pos', t.position(node.pos))
+ return obj
+}
+
+fn (t Tree) const_field(node ast.ConstField) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('ConstField'))
+ obj.add('mod', t.string_node(node.mod))
+ obj.add('name', t.string_node(node.name))
+ obj.add('expr', t.expr(node.expr))
+ obj.add('is_pub', t.bool_node(node.is_pub))
+ obj.add('typ', t.type_node(node.typ))
+ obj.add('comments', t.array_node_comment(node.comments))
+ obj.add('pos', t.position(node.pos))
+ return obj
+}
+
+// function declaration
+fn (t Tree) fn_decl(node ast.FnDecl) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('FnDecl'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('mod', t.string_node(node.mod))
+ obj.add('is_deprecated', t.bool_node(node.is_deprecated))
+ obj.add('is_pub', t.bool_node(node.is_pub))
+ obj.add('is_variadic', t.bool_node(node.is_variadic))
+ obj.add('is_anon', t.bool_node(node.is_anon))
+ obj.add('is_noreturn', t.bool_node(node.is_noreturn))
+ obj.add('is_manualfree', t.bool_node(node.is_manualfree))
+ obj.add('is_main', t.bool_node(node.is_main))
+ obj.add('is_test', t.bool_node(node.is_test))
+ obj.add('is_conditional', t.bool_node(node.is_conditional))
+ obj.add('is_exported', t.bool_node(node.is_exported))
+ obj.add('is_keep_alive', t.bool_node(node.is_keep_alive))
+ obj.add('receiver', t.struct_field(node.receiver))
+ obj.add('receiver_pos', t.position(node.receiver_pos))
+ obj.add('is_method', t.bool_node(node.is_method))
+ obj.add('method_idx', t.number_node(node.method_idx))
+ obj.add('rec_mut', t.bool_node(node.rec_mut))
+ obj.add('rec_share', t.enum_node(node.rec_share))
+ obj.add('language', t.enum_node(node.language))
+ obj.add('no_body', t.bool_node(node.no_body))
+ obj.add('is_builtin', t.bool_node(node.is_builtin))
+ obj.add('is_direct_arr', t.bool_node(node.is_direct_arr))
+ obj.add('ctdefine_idx', t.number_node(node.ctdefine_idx))
+ obj.add('pos', t.position(node.pos))
+ obj.add('body_pos', t.position(node.body_pos))
+ obj.add('return_type_pos', t.position(node.return_type_pos))
+ obj.add('file', t.string_node(node.file))
+ obj.add('has_return', t.bool_node(node.has_return))
+ obj.add('return_type', t.type_node(node.return_type))
+ obj.add('source_file', t.number_node(int(node.source_file)))
+ obj.add('scope', t.number_node(int(node.scope)))
+ obj.add('attrs', t.array_node_attr(node.attrs))
+ obj.add('params', t.array_node_arg(node.params))
+ obj.add('generic_names', t.array_node_string(node.generic_names))
+ obj.add('stmts', t.array_node_stmt(node.stmts))
+ obj.add('comments', t.array_node_comment(node.comments))
+ obj.add('next_comments', t.array_node_comment(node.next_comments))
+ obj.add('label_names', t.array_node_string(node.label_names))
+ obj.add('defer_stmts', t.array_node_defer_stmt(node.defer_stmts))
+ return obj
+}
+
+fn (t Tree) anon_fn(node ast.AnonFn) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('AnonFn'))
+ obj.add('decl', t.fn_decl(node.decl))
+ obj.add('typ', t.type_node(node.typ))
+ obj.add('has_gen', t.bool_node(node.has_gen))
+ return obj
+}
+
+fn (t Tree) struct_decl(node ast.StructDecl) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('StructDecl'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('is_pub', t.bool_node(node.is_pub))
+ obj.add('pub_pos', t.number_node(node.pub_pos))
+ obj.add('mut_pos', t.number_node(node.mut_pos))
+ obj.add('pub_mut_pos', t.number_node(node.pub_mut_pos))
+ obj.add('global_pos', t.number_node(node.global_pos))
+ obj.add('module_pos', t.number_node(node.module_pos))
+ obj.add('language', t.enum_node(node.language))
+ obj.add('is_union', t.bool_node(node.is_union))
+ obj.add('pos', t.position(node.pos))
+ obj.add('fields', t.array_node_struct_field(node.fields))
+ obj.add('generic_types', t.array_node_type(node.generic_types))
+ obj.add('attrs', t.array_node_attr(node.attrs))
+ obj.add('end_comments', t.array_node_comment(node.end_comments))
+ obj.add('embeds', t.array_node_embed(node.embeds))
+ return obj
+}
+
+fn (t Tree) struct_field(node ast.StructField) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('StructField'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('typ', t.type_node(node.typ))
+ obj.add('type_pos', t.position(node.type_pos))
+ obj.add('has_default_expr', t.bool_node(node.has_default_expr))
+ obj.add('default_expr_typ', t.type_node(node.default_expr_typ))
+ obj.add('default_expr', t.expr(node.default_expr))
+ obj.add('is_pub', t.bool_node(node.is_pub))
+ obj.add('is_mut', t.bool_node(node.is_mut))
+ obj.add('is_global', t.bool_node(node.is_global))
+ obj.add('attrs', t.array_node_attr(node.attrs))
+ obj.add('comments', t.array_node_comment(node.comments))
+ obj.add('pos', t.position(node.pos))
+ return obj
+}
+
+fn (t Tree) embed(node ast.Embed) &Node {
+ mut obj := new_object()
+ obj.add('typ', t.type_node(node.typ))
+ obj.add('pos', t.position(node.pos))
+ obj.add('comments', t.array_node_comment(node.comments))
+ return obj
+}
+
+fn (t Tree) enum_decl(node ast.EnumDecl) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('EnumDecl'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('is_pub', t.bool_node(node.is_pub))
+ obj.add('is_flag', t.bool_node(node.is_flag))
+ obj.add('is_multi_allowed', t.bool_node(node.is_multi_allowed))
+ obj.add('pos', t.position(node.pos))
+ obj.add('fields', t.array_node_enum_field(node.fields))
+ obj.add('comments', t.array_node_comment(node.comments))
+ obj.add('attrs', t.array_node_attr(node.attrs))
+ return obj
+}
+
+fn (t Tree) enum_field(node ast.EnumField) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('EnumField'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('has_expr', t.bool_node(node.has_expr))
+ obj.add('expr', t.expr(node.expr))
+ obj.add('pos', t.position(node.pos))
+ obj.add('comments', t.array_node_comment(node.comments))
+ obj.add('next_comments', t.array_node_comment(node.next_comments))
+ return obj
+}
+
+fn (t Tree) interface_decl(node ast.InterfaceDecl) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('InterfaceDecl'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('typ', t.type_node(node.typ))
+ obj.add('is_pub', t.bool_node(node.is_pub))
+ obj.add('mut_pos', t.number_node(node.mut_pos))
+ obj.add('field_names', t.array_node_string(node.field_names))
+ obj.add('methods', t.array_node_fn_decl(node.methods))
+ obj.add('fields', t.array_node_struct_field(node.fields))
+ obj.add('pre_comments', t.array_node_comment(node.pre_comments))
+ obj.add('name_pos', t.position(node.name_pos))
+ obj.add('language', t.enum_node(node.language))
+ obj.add('pos', t.position(node.pos))
+ obj.add('are_ifaces_expanded', t.bool_node(node.are_ifaces_expanded))
+ obj.add('ifaces', t.array_node_interface_embedding(node.ifaces))
+ return obj
+}
+
+fn (t Tree) interface_embedding(node ast.InterfaceEmbedding) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('InterfaceEmbedding'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('typ', t.type_node(node.typ))
+ obj.add('pos', t.position(node.pos))
+ obj.add('comments', t.array_node_comment(node.comments))
+ return obj
+}
+
+fn (t Tree) attr(node ast.Attr) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('Attr'))
+ obj.add('name', t.string_node(node.name))
+ obj.add('has_arg', t.bool_node(node.has_arg))
+ obj.add('kind', t.enum_node(node.kind))
+ obj.add('ct_expr', t.expr(node.ct_expr))
+ obj.add('ct_opt', t.bool_node(node.ct_opt))
+ obj.add('ct_evaled', t.bool_node(node.ct_evaled))
+ obj.add('ct_skip', t.bool_node(node.ct_skip))
+ obj.add('arg', t.string_node(node.arg))
+ return obj
+}
+
+fn (t Tree) hash_stmt(node ast.HashStmt) &Node {
+ mut obj := new_object()
+ obj.add('ast_type', t.string_node('HashStmt'))
+ obj.add('mod', t.string_node(node.mod))
+ obj.add('val', t.string_node(node.val))
+ obj.add('kind', t.string_node(node.kind))
+ obj.add('main', t.string_node(node.main))
+ obj.add('msg', t.string_node(node.msg))
+ obj.add('source_file', t.string_node(node.source_file))
+ obj.add('pos', t.position(node.pos))
+ return obj
+}
+
+// comp_for serialises a compile-time $for statement.
+// Bug fix: 'typ_pos' was serialised from a copy of node.pos; it now uses
+// node.typ_pos, matching the field it claims to represent.
+fn (t Tree) comp_for(node ast.CompFor) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('CompFor'))
+	obj.add('val_var', t.string_node(node.val_var))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('kind', t.enum_node(node.kind))
+	obj.add('pos', t.position(node.pos))
+	obj.add('typ_pos', t.position(node.typ_pos))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	return obj
+}
+
+// global_decl serializes an ast.GlobalDecl (`__global` declaration block) to a JSON object node.
+fn (t Tree) global_decl(node ast.GlobalDecl) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('GlobalDecl'))
+	obj.add('pos', t.position(node.pos))
+	obj.add('is_block', t.bool_node(node.is_block))
+	obj.add('fields', t.array_node_global_field(node.fields))
+	obj.add('end_comments', t.array_node_comment(node.end_comments))
+	return obj
+}
+
+// global_field serializes one ast.GlobalField (a single global variable) to a JSON object node.
+fn (t Tree) global_field(node ast.GlobalField) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('GlobalField'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('has_expr', t.bool_node(node.has_expr))
+	obj.add('comments', t.array_node_comment(node.comments))
+	obj.add('pos', t.position(node.pos))
+	obj.add('typ_pos', t.position(node.typ_pos))
+	return obj
+}
+
+// defer_stmt serializes an ast.DeferStmt (`defer { ... }`) to a JSON object node.
+fn (t Tree) defer_stmt(node ast.DeferStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('DeferStmt'))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	obj.add('defer_vars', t.array_node_ident(node.defer_vars))
+	obj.add('ifdef', t.string_node(node.ifdef))
+	obj.add('idx_in_fn', t.number_node(node.idx_in_fn))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// type_decl dispatches on the ast.TypeDecl sum type and serializes the concrete variant.
+fn (t Tree) type_decl(node ast.TypeDecl) &Node {
+	match node {
+		ast.AliasTypeDecl { return t.alias_type_decl(node) }
+		ast.FnTypeDecl { return t.fn_type_decl(node) }
+		ast.SumTypeDecl { return t.sum_type_decl(node) }
+	}
+}
+
+// alias_type_decl serializes an ast.AliasTypeDecl (`type A = B`) to a JSON object node.
+fn (t Tree) alias_type_decl(node ast.AliasTypeDecl) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('AliasTypeDecl'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('is_pub', t.bool_node(node.is_pub))
+	obj.add('parent_type', t.type_node(node.parent_type))
+	obj.add('comments', t.array_node_comment(node.comments))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// sum_type_decl serializes an ast.SumTypeDecl (`type S = A | B`) to a JSON object node,
+// building the 'variants' array inline from each variant's type and position.
+fn (t Tree) sum_type_decl(node ast.SumTypeDecl) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('SumTypeDecl'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('is_pub', t.bool_node(node.is_pub))
+	obj.add('pos', t.position(node.pos))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('comments', t.array_node_comment(node.comments))
+	// variants
+	t_array := new_array()
+	for s in node.variants {
+		variants_obj := new_object()
+		variants_obj.add('typ', t.type_node(s.typ))
+		variants_obj.add('pos', t.position(s.pos))
+		t_array.add_item(variants_obj)
+	}
+	obj.add('variants', t_array)
+	return obj
+}
+
+// fn_type_decl serializes an ast.FnTypeDecl (`type F = fn (...)`) to a JSON object node.
+fn (t Tree) fn_type_decl(node ast.FnTypeDecl) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('FnTypeDecl'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('is_pub', t.bool_node(node.is_pub))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('pos', t.position(node.pos))
+	obj.add('comments', t.array_node_comment(node.comments))
+	return obj
+}
+
+// arg serializes an ast.Param (a function parameter) to a JSON object node.
+fn (t Tree) arg(node ast.Param) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('Param'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('is_mut', t.bool_node(node.is_mut))
+	return obj
+}
+
+// goto_label serializes an ast.GotoLabel (`label:`) to a JSON object node.
+fn (t Tree) goto_label(node ast.GotoLabel) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('GotoLabel'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// goto_stmt serializes an ast.GotoStmt (`goto label`) to a JSON object node.
+fn (t Tree) goto_stmt(node ast.GotoStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('GotoStmt'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// assign_stmt serializes an ast.AssignStmt (`:=`, `=` and compound assignments,
+// possibly with multiple left/right operands) to a JSON object node.
+fn (t Tree) assign_stmt(node ast.AssignStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('AssignStmt'))
+	obj.add('left', t.array_node_expr(node.left))
+	obj.add('left_types', t.array_node_type(node.left_types))
+	obj.add('right', t.array_node_expr(node.right))
+	// fix: 'right_types' previously serialized node.left_types (copy-paste error).
+	obj.add('right_types', t.array_node_type(node.right_types))
+	obj.add('op', t.token_node(node.op))
+	obj.add('is_static', t.bool_node(node.is_static))
+	obj.add('is_simple', t.bool_node(node.is_simple))
+	obj.add('has_cross_var', t.bool_node(node.has_cross_var))
+	obj.add('pos', t.position(node.pos))
+	obj.add('comments', t.array_node_comment(node.comments))
+	obj.add('end_comments', t.array_node_comment(node.end_comments))
+	return obj
+}
+
+// var serializes an ast.Var (a scope variable object) to a JSON object node,
+// including all checker-derived flags (usage, mutability, autofree/heap status).
+fn (t Tree) var(node ast.Var) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('Var'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('orig_type', t.type_node(node.orig_type))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('is_arg', t.bool_node(node.is_arg))
+	obj.add('is_mut', t.bool_node(node.is_mut))
+	obj.add('is_used', t.bool_node(node.is_used))
+	obj.add('is_changed', t.bool_node(node.is_changed))
+	obj.add('is_or', t.bool_node(node.is_or))
+	obj.add('is_tmp', t.bool_node(node.is_tmp))
+	obj.add('is_autofree_tmp', t.bool_node(node.is_autofree_tmp))
+	obj.add('is_auto_deref', t.bool_node(node.is_auto_deref))
+	obj.add('is_auto_heap', t.bool_node(node.is_auto_heap))
+	obj.add('is_stack_obj', t.bool_node(node.is_stack_obj))
+	obj.add('share', t.enum_node(node.share))
+	obj.add('pos', t.position(node.pos))
+	obj.add('smartcasts', t.array_node_type(node.smartcasts))
+	return obj
+}
+
+// return_ serializes an ast.Return (`return expr, ...`) to a JSON object node.
+fn (t Tree) return_(node ast.Return) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('Return'))
+	obj.add('exprs', t.array_node_expr(node.exprs))
+	obj.add('types', t.array_node_type(node.types))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// for_c_stmt serializes an ast.ForCStmt (C-style `for init; cond; inc {}`) to a JSON object node.
+fn (t Tree) for_c_stmt(node ast.ForCStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ForCStmt'))
+	obj.add('init', t.stmt(node.init))
+	obj.add('has_init', t.bool_node(node.has_init))
+	obj.add('cond', t.expr(node.cond))
+	obj.add('has_cond', t.bool_node(node.has_cond))
+	obj.add('inc', t.stmt(node.inc))
+	obj.add('has_inc', t.bool_node(node.has_inc))
+	obj.add('is_multi', t.bool_node(node.is_multi))
+	obj.add('label', t.string_node(node.label))
+	obj.add('pos', t.position(node.pos))
+	// scope is emitted as the pointer value cast to int, i.e. a scope identity, not its contents
+	obj.add('scope', t.number_node(int(node.scope)))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	return obj
+}
+
+// for_stmt serializes an ast.ForStmt (`for cond {}` / infinite `for {}`) to a JSON object node.
+fn (t Tree) for_stmt(node ast.ForStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ForStmt'))
+	obj.add('cond', t.expr(node.cond))
+	obj.add('is_inf', t.bool_node(node.is_inf))
+	obj.add('label', t.string_node(node.label))
+	obj.add('pos', t.position(node.pos))
+	obj.add('scope', t.number_node(int(node.scope)))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	return obj
+}
+
+// for_in_stmt serializes an ast.ForInStmt (`for k, v in cond {}`) to a JSON object node.
+fn (t Tree) for_in_stmt(node ast.ForInStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ForInStmt'))
+	obj.add('key_var', t.string_node(node.key_var))
+	obj.add('val_var', t.string_node(node.val_var))
+	obj.add('cond', t.expr(node.cond))
+	obj.add('is_range', t.bool_node(node.is_range))
+	obj.add('high', t.expr(node.high))
+	obj.add('key_type', t.type_node(node.key_type))
+	obj.add('val_type', t.type_node(node.val_type))
+	obj.add('cond_type', t.type_node(node.cond_type))
+	obj.add('kind', t.enum_node(node.kind))
+	obj.add('val_is_mut', t.bool_node(node.val_is_mut))
+	obj.add('label', t.string_node(node.label))
+	obj.add('pos', t.position(node.pos))
+	obj.add('scope', t.number_node(int(node.scope)))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	return obj
+}
+
+// branch_stmt serializes an ast.BranchStmt (`break`/`continue`, optionally labeled) to a JSON object node.
+fn (t Tree) branch_stmt(node ast.BranchStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('BranchStmt'))
+	obj.add('kind', t.token_node(node.kind))
+	obj.add('label', t.string_node(node.label))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// assert_stmt serializes an ast.AssertStmt (`assert expr`) to a JSON object node.
+fn (t Tree) assert_stmt(node ast.AssertStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('AssertStmt'))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('is_used', t.bool_node(node.is_used))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// block serializes an ast.Block (a `{ ... }` or `unsafe { ... }` block) to a JSON object node.
+fn (t Tree) block(node ast.Block) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('Block'))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	obj.add('is_unsafe', t.bool_node(node.is_unsafe))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// comptime_call serializes an ast.ComptimeCall (`$method(...)`, `$vweb.html()`, `$embed_file()`, `$env()`)
+// to a JSON object node; only the path of the vweb template and the name of the type symbol are emitted.
+fn (t Tree) comptime_call(node ast.ComptimeCall) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ComptimeCall'))
+	obj.add('method_name', t.string_node(node.method_name))
+	obj.add('left', t.expr(node.left))
+	obj.add('is_vweb', t.bool_node(node.is_vweb))
+	obj.add('vweb_tmpl', t.string_node(node.vweb_tmpl.path))
+	obj.add('args_var', t.string_node(node.args_var))
+	obj.add('sym', t.string_node(node.sym.name))
+	obj.add('has_parens', t.bool_node(node.has_parens))
+	obj.add('is_embed', t.bool_node(node.is_embed))
+	obj.add('embed_file', t.embed_file(node.embed_file))
+	obj.add('method_pos', t.position(node.method_pos))
+	obj.add('result_type', t.type_node(node.result_type))
+	obj.add('scope', t.scope(node.scope))
+	obj.add('env_value', t.string_node(node.env_value))
+	obj.add('pos', t.position(node.pos))
+	obj.add('args', t.array_node_call_arg(node.args))
+	return obj
+}
+
+// comptime_selector serializes an ast.ComptimeSelector (`a.$(field.name)`) to a JSON object node.
+fn (t Tree) comptime_selector(node ast.ComptimeSelector) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ComptimeSelector'))
+	obj.add('has_parens', t.bool_node(node.has_parens))
+	obj.add('left', t.expr(node.left))
+	obj.add('field_expr', t.expr(node.field_expr))
+	obj.add('left_type', t.type_node(node.left_type))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// expr_stmt serializes an ast.ExprStmt (an expression used in statement position) to a JSON object node.
+fn (t Tree) expr_stmt(node ast.ExprStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ExprStmt'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('is_expr', t.bool_node(node.is_expr))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('pos', t.position(node.pos))
+	obj.add('comments', t.array_node_comment(node.comments))
+	return obj
+}
+
+// expr dispatches on the ast.Expr sum type and serializes the concrete variant
+// via the matching helper; any variant not listed falls through to a JSON null node.
+fn (t Tree) expr(expr ast.Expr) &Node {
+	match expr {
+		ast.IntegerLiteral {
+			return t.integer_literal(expr)
+		}
+		ast.FloatLiteral {
+			return t.float_literal(expr)
+		}
+		ast.StringLiteral {
+			return t.string_literal(expr)
+		}
+		ast.CharLiteral {
+			return t.char_literal(expr)
+		}
+		ast.BoolLiteral {
+			return t.bool_literal(expr)
+		}
+		ast.StringInterLiteral {
+			return t.string_inter_literal(expr)
+		}
+		ast.EnumVal {
+			return t.enum_val(expr)
+		}
+		ast.Assoc {
+			return t.assoc(expr)
+		}
+		ast.AtExpr {
+			return t.at_expr(expr)
+		}
+		ast.CastExpr {
+			return t.cast_expr(expr)
+		}
+		ast.AsCast {
+			return t.as_cast(expr)
+		}
+		ast.TypeNode {
+			return t.type_expr(expr)
+		}
+		ast.SizeOf {
+			return t.size_of(expr)
+		}
+		ast.IsRefType {
+			return t.is_ref_type(expr)
+		}
+		ast.PrefixExpr {
+			return t.prefix_expr(expr)
+		}
+		ast.InfixExpr {
+			return t.infix_expr(expr)
+		}
+		ast.IndexExpr {
+			return t.index_expr(expr)
+		}
+		ast.PostfixExpr {
+			return t.postfix_expr(expr)
+		}
+		ast.SelectorExpr {
+			return t.selector_expr(expr)
+		}
+		ast.RangeExpr {
+			return t.range_expr(expr)
+		}
+		ast.IfExpr {
+			return t.if_expr(expr)
+		}
+		ast.Ident {
+			return t.ident(expr)
+		}
+		ast.CallExpr {
+			return t.call_expr(expr)
+		}
+		ast.OrExpr {
+			return t.or_expr(expr)
+		}
+		ast.StructInit {
+			return t.struct_init(expr)
+		}
+		ast.ArrayInit {
+			return t.array_init(expr)
+		}
+		ast.MapInit {
+			return t.map_init(expr)
+		}
+		ast.None {
+			return t.none_expr(expr)
+		}
+		ast.ParExpr {
+			return t.par_expr(expr)
+		}
+		ast.IfGuardExpr {
+			return t.if_guard_expr(expr)
+		}
+		ast.MatchExpr {
+			return t.match_expr(expr)
+		}
+		ast.ConcatExpr {
+			return t.concat_expr(expr)
+		}
+		ast.TypeOf {
+			return t.type_of(expr)
+		}
+		ast.Likely {
+			return t.likely(expr)
+		}
+		ast.SqlExpr {
+			return t.sql_expr(expr)
+		}
+		ast.ComptimeCall {
+			return t.comptime_call(expr)
+		}
+		ast.ComptimeSelector {
+			return t.comptime_selector(expr)
+		}
+		ast.LockExpr {
+			return t.lock_expr(expr)
+		}
+		ast.UnsafeExpr {
+			return t.unsafe_expr(expr)
+		}
+		ast.ChanInit {
+			return t.chan_init(expr)
+		}
+		ast.SelectExpr {
+			return t.select_expr(expr)
+		}
+		ast.Comment {
+			return t.comment(expr)
+		}
+		ast.AnonFn {
+			return t.anon_fn(expr)
+		}
+		ast.ArrayDecompose {
+			return t.array_decompose(expr)
+		}
+		ast.GoExpr {
+			return t.go_expr(expr)
+		}
+		ast.OffsetOf {
+			return t.offset_of(expr)
+		}
+		ast.DumpExpr {
+			return t.dump_expr(expr)
+		}
+		ast.NodeError {
+			return t.node_error(expr)
+		}
+		ast.EmptyExpr {
+			return t.empty_expr(expr)
+		}
+		else {
+			// unknown/unhandled expression variants are serialized as JSON null
+			return t.null_node()
+		}
+	}
+}
+
+// integer_literal serializes an ast.IntegerLiteral to a JSON object node (value kept as its source string).
+fn (t Tree) integer_literal(node ast.IntegerLiteral) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IntegerLiteral'))
+	obj.add('val', t.string_node(node.val))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// float_literal serializes an ast.FloatLiteral to a JSON object node (value kept as its source string).
+fn (t Tree) float_literal(node ast.FloatLiteral) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('FloatLiteral'))
+	obj.add('val', t.string_node(node.val))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// string_literal serializes an ast.StringLiteral to a JSON object node.
+fn (t Tree) string_literal(node ast.StringLiteral) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('StringLiteral'))
+	obj.add('val', t.string_node(node.val))
+	obj.add('is_raw', t.bool_node(node.is_raw))
+	obj.add('language', t.enum_node(node.language))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// char_literal serializes an ast.CharLiteral to a JSON object node.
+fn (t Tree) char_literal(node ast.CharLiteral) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('CharLiteral'))
+	obj.add('val', t.string_node(node.val))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// bool_literal serializes an ast.BoolLiteral to a JSON object node.
+fn (t Tree) bool_literal(node ast.BoolLiteral) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('BoolLiteral'))
+	obj.add('val', t.bool_node(node.val))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// string_inter_literal serializes an ast.StringInterLiteral (an interpolated string)
+// to a JSON object node; parallel arrays describe each interpolated segment's formatting.
+fn (t Tree) string_inter_literal(node ast.StringInterLiteral) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('StringInterLiteral'))
+	obj.add('vals', t.array_node_string(node.vals))
+	obj.add('exprs', t.array_node_expr(node.exprs))
+	obj.add('expr_types', t.array_node_type(node.expr_types))
+	obj.add('fwidths', t.array_node_int(node.fwidths))
+	obj.add('precisions', t.array_node_int(node.precisions))
+	obj.add('pluss', t.array_node_bool(node.pluss))
+	obj.add('fills', t.array_node_bool(node.fills))
+	obj.add('fmt_poss', t.array_node_position(node.fmt_poss))
+	obj.add('fmts', t.array_node_byte(node.fmts))
+	obj.add('need_fmts', t.array_node_bool(node.need_fmts))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// enum_val serializes an ast.EnumVal (`.value` / `Enum.value`) to a JSON object node.
+fn (t Tree) enum_val(node ast.EnumVal) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('EnumVal'))
+	obj.add('enum_name', t.string_node(node.enum_name))
+	obj.add('mod', t.string_node(node.mod))
+	obj.add('val', t.string_node(node.val))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// assoc serializes an ast.Assoc (`{ ...base | field: val }` association) to a JSON object node.
+fn (t Tree) assoc(node ast.Assoc) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('Assoc'))
+	obj.add('var_name', t.string_node(node.var_name))
+	obj.add('fields', t.array_node_string(node.fields))
+	obj.add('exprs', t.array_node_expr(node.exprs))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('pos', t.position(node.pos))
+	obj.add('scope', t.number_node(int(node.scope)))
+	return obj
+}
+
+// at_expr serializes an ast.AtExpr (compile-time `@FN`, `@FILE`, ... token) to a JSON object node.
+fn (t Tree) at_expr(node ast.AtExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('AtExpr'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('pos', t.position(node.pos))
+	obj.add('kind', t.enum_node(node.kind))
+	obj.add('val', t.string_node(node.val))
+	return obj
+}
+
+// cast_expr serializes an ast.CastExpr (`T(expr)`) to a JSON object node;
+// 'ityp' additionally exposes the raw integer value of the target type.
+fn (t Tree) cast_expr(node ast.CastExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('CastExpr'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('ityp', t.number_node(int(node.typ)))
+	obj.add('typname', t.string_node(node.typname))
+	obj.add('has_arg', t.bool_node(node.has_arg))
+	obj.add('arg', t.expr(node.arg))
+	obj.add('expr_type', t.type_node(node.expr_type))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// as_cast serializes an ast.AsCast (`expr as T`) to a JSON object node.
+fn (t Tree) as_cast(node ast.AsCast) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('AsCast'))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('expr_type', t.type_node(node.expr_type))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// type_expr serializes an ast.TypeNode (a type used in expression position) to a JSON object node.
+fn (t Tree) type_expr(node ast.TypeNode) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('TypeNode'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// size_of serializes an ast.SizeOf (`sizeof(T)` / `sizeof(expr)`) to a JSON object node.
+fn (t Tree) size_of(node ast.SizeOf) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('SizeOf'))
+	obj.add('is_type', t.bool_node(node.is_type))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// is_ref_type serializes an ast.IsRefType (`isreftype(T)` / `isreftype(expr)`) to a JSON object node.
+fn (t Tree) is_ref_type(node ast.IsRefType) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IsRefType'))
+	obj.add('is_type', t.bool_node(node.is_type))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// prefix_expr serializes an ast.PrefixExpr (unary `-x`, `!x`, `&x`, `*x`, `<-ch`) to a JSON object node.
+fn (t Tree) prefix_expr(node ast.PrefixExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('PrefixExpr'))
+	obj.add('op', t.token_node(node.op))
+	obj.add('right', t.expr(node.right))
+	obj.add('right_type', t.type_node(node.right_type))
+	obj.add('or_block', t.or_expr(node.or_block))
+	obj.add('is_option', t.bool_node(node.is_option))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// infix_expr serializes an ast.InfixExpr (binary `a op b`) to a JSON object node.
+fn (t Tree) infix_expr(node ast.InfixExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('InfixExpr'))
+	obj.add('op', t.token_node(node.op))
+	obj.add('left', t.expr(node.left))
+	obj.add('left_type', t.type_node(node.left_type))
+	obj.add('right', t.expr(node.right))
+	obj.add('right_type', t.type_node(node.right_type))
+	obj.add('auto_locked', t.string_node(node.auto_locked))
+	obj.add('or_block', t.or_expr(node.or_block))
+	obj.add('is_stmt', t.bool_node(node.is_stmt))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// index_expr serializes an ast.IndexExpr (`a[i]`, `a[lo..hi]`) to a JSON object node.
+fn (t Tree) index_expr(node ast.IndexExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IndexExpr'))
+	obj.add('left', t.expr(node.left))
+	obj.add('left_type', t.type_node(node.left_type))
+	obj.add('index', t.expr(node.index))
+	obj.add('is_setter', t.bool_node(node.is_setter))
+	obj.add('or_expr', t.or_expr(node.or_expr))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// postfix_expr serializes an ast.PostfixExpr (`x++`, `x--`) to a JSON object node.
+fn (t Tree) postfix_expr(node ast.PostfixExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('PostfixExpr'))
+	obj.add('op', t.token_node(node.op))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('auto_locked', t.string_node(node.auto_locked))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// selector_expr serializes an ast.SelectorExpr (`expr.field`) to a JSON object node.
+fn (t Tree) selector_expr(node ast.SelectorExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('SelectorExpr'))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('expr_type', t.type_node(node.expr_type))
+	obj.add('field_name', t.string_node(node.field_name))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('name_type', t.type_node(node.name_type))
+	obj.add('from_embed_type', t.type_node(node.from_embed_type))
+	obj.add('next_token', t.token_node(node.next_token))
+	obj.add('pos', t.position(node.pos))
+	obj.add('scope', t.number_node(int(node.scope)))
+	return obj
+}
+
+// range_expr serializes an ast.RangeExpr (`lo..hi`, either bound optional) to a JSON object node.
+fn (t Tree) range_expr(node ast.RangeExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('RangeExpr'))
+	obj.add('low', t.expr(node.low))
+	obj.add('high', t.expr(node.high))
+	obj.add('has_high', t.bool_node(node.has_high))
+	obj.add('has_low', t.bool_node(node.has_low))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// if_expr serializes an ast.IfExpr (regular or comptime `$if`, with all branches) to a JSON object node.
+fn (t Tree) if_expr(node ast.IfExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IfExpr'))
+	obj.add('is_comptime', t.bool_node(node.is_comptime))
+	obj.add('tok_kind', t.token_node(node.tok_kind))
+	obj.add('branches', t.array_node_if_branch(node.branches))
+	obj.add('left', t.expr(node.left))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('has_else', t.bool_node(node.has_else))
+	obj.add('is_expr', t.bool_node(node.is_expr))
+	obj.add('pos', t.position(node.pos))
+	obj.add('post_comments', t.array_node_comment(node.post_comments))
+	return obj
+}
+
+// if_branch serializes one ast.IfBranch (condition + body of an if/else-if/else arm) to a JSON object node.
+fn (t Tree) if_branch(node ast.IfBranch) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IfBranch'))
+	obj.add('cond', t.expr(node.cond))
+	obj.add('pos', t.position(node.pos))
+	obj.add('body_pos', t.position(node.body_pos))
+	obj.add('scope', t.number_node(int(node.scope)))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	obj.add('comments', t.array_node_comment(node.comments))
+	return obj
+}
+
+// ident serializes an ast.Ident (a name reference) to a JSON object node,
+// including its resolved kind, info payload and the scope object it binds to.
+fn (t Tree) ident(node ast.Ident) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('Ident'))
+	obj.add('mod', t.string_node(node.mod))
+	obj.add('name', t.string_node(node.name))
+	obj.add('language', t.enum_node(node.language))
+	obj.add('is_mut', t.bool_node(node.is_mut))
+	obj.add('comptime', t.bool_node(node.comptime))
+	obj.add('tok_kind', t.token_node(node.tok_kind))
+	obj.add('kind', t.enum_node(node.kind))
+	obj.add('info', t.ident_info(node.info))
+	obj.add('pos', t.position(node.pos))
+	obj.add('mut_pos', t.position(node.mut_pos))
+	obj.add('obj', t.scope_object(node.obj))
+	obj.add('scope', t.number_node(int(node.scope)))
+	return obj
+}
+
+// ident_info dispatches on the ast.IdentInfo sum type (variable vs function identifier).
+fn (t Tree) ident_info(node ast.IdentInfo) &Node {
+	match node {
+		ast.IdentVar { return t.ident_var(node) }
+		ast.IdentFn { return t.ident_fn(node) }
+	}
+}
+
+// ident_var serializes an ast.IdentVar (identifier resolved to a variable) to a JSON object node.
+fn (t Tree) ident_var(node ast.IdentVar) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IdentVar'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('is_mut', t.bool_node(node.is_mut))
+	obj.add('is_static', t.bool_node(node.is_static))
+	obj.add('is_optional', t.bool_node(node.is_optional))
+	obj.add('share', t.enum_node(node.share))
+	return obj
+}
+
+// ident_fn serializes an ast.IdentFn (identifier resolved to a function) to a JSON object node.
+fn (t Tree) ident_fn(node ast.IdentFn) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IdentFn'))
+	obj.add('typ', t.type_node(node.typ))
+	return obj
+}
+
+// call_expr serializes an ast.CallExpr (function/method call, including generics
+// and the attached `or {}` block) to a JSON object node.
+fn (t Tree) call_expr(node ast.CallExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('CallExpr'))
+	obj.add('mod', t.string_node(node.mod))
+	obj.add('name', t.string_node(node.name))
+	obj.add('language', t.enum_node(node.language))
+	obj.add('left_type', t.type_node(node.left_type))
+	obj.add('receiver_type', t.type_node(node.receiver_type))
+	obj.add('return_type', t.type_node(node.return_type))
+	obj.add('left', t.expr(node.left))
+	obj.add('is_method', t.bool_node(node.is_method))
+	obj.add('is_keep_alive', t.bool_node(node.is_keep_alive))
+	obj.add('is_noreturn', t.bool_node(node.is_noreturn))
+	obj.add('should_be_skipped', t.bool_node(node.should_be_skipped))
+	obj.add('free_receiver', t.bool_node(node.free_receiver))
+	obj.add('scope', t.number_node(int(node.scope)))
+	obj.add('args', t.array_node_call_arg(node.args))
+	obj.add('expected_arg_types', t.array_node_type(node.expected_arg_types))
+	obj.add('concrete_types', t.array_node_type(node.concrete_types))
+	obj.add('or_block', t.or_expr(node.or_block))
+	obj.add('concrete_list_pos', t.position(node.concrete_list_pos))
+	obj.add('from_embed_type', t.type_node(node.from_embed_type))
+	obj.add('comments', t.array_node_comment(node.comments))
+	obj.add('pos', t.position(node.pos))
+	obj.add('name_pos', t.position(node.name_pos))
+	return obj
+}
+
+// call_arg serializes one ast.CallArg (a call argument) to a JSON object node.
+fn (t Tree) call_arg(node ast.CallArg) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('CallArg'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('is_mut', t.bool_node(node.is_mut))
+	obj.add('share', t.enum_node(node.share))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('is_tmp_autofree', t.bool_node(node.is_tmp_autofree))
+	obj.add('pos', t.position(node.pos))
+	obj.add('comments', t.array_node_comment(node.comments))
+	return obj
+}
+
+// or_expr serializes an ast.OrExpr (the `or { ... }` error-handling block) to a JSON object node.
+fn (t Tree) or_expr(node ast.OrExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('OrExpr'))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	obj.add('kind', t.enum_node(node.kind))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// struct_init serializes an ast.StructInit (`Struct{...}` literal, including `...update` spread)
+// to a JSON object node.
+fn (t Tree) struct_init(node ast.StructInit) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('StructInit'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('is_short', t.bool_node(node.is_short))
+	obj.add('unresolved', t.bool_node(node.unresolved))
+	obj.add('has_update_expr', t.bool_node(node.has_update_expr))
+	obj.add('update_expr', t.expr(node.update_expr))
+	obj.add('update_expr_type', t.type_node(node.update_expr_type))
+	obj.add('pos', t.position(node.pos))
+	obj.add('name_pos', t.position(node.name_pos))
+	obj.add('update_expr_comments', t.array_node_comment(node.update_expr_comments))
+	obj.add('fields', t.array_node_struct_init_field(node.fields))
+	obj.add('embeds', t.array_node_struct_init_embed(node.embeds))
+	obj.add('pre_comments', t.array_node_comment(node.pre_comments))
+	return obj
+}
+
+// struct_init_field serializes one ast.StructInitField (`field: expr` in a struct literal)
+// to a JSON object node.
+fn (t Tree) struct_init_field(node ast.StructInitField) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('StructInitField'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('expected_type', t.type_node(node.expected_type))
+	obj.add('parent_type', t.type_node(node.parent_type))
+	obj.add('comments', t.array_node_comment(node.comments))
+	obj.add('next_comments', t.array_node_comment(node.next_comments))
+	obj.add('pos', t.position(node.pos))
+	obj.add('name_pos', t.position(node.name_pos))
+	return obj
+}
+
+// struct_init_embed serializes one ast.StructInitEmbed (an embedded-struct initializer)
+// to a JSON object node.
+fn (t Tree) struct_init_embed(node ast.StructInitEmbed) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('StructInitEmbed'))
+	obj.add('name', t.string_node(node.name))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('expected_type', t.type_node(node.expected_type))
+	obj.add('comments', t.array_node_comment(node.comments))
+	obj.add('next_comments', t.array_node_comment(node.next_comments))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// array_init serializes an ast.ArrayInit (`[...]` literal, with optional len/cap/init clauses)
+// to a JSON object node.
+fn (t Tree) array_init(node ast.ArrayInit) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ArrayInit'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('elem_type', t.type_node(node.elem_type))
+	obj.add('exprs', t.array_node_expr(node.exprs))
+	obj.add('ecmnts', t.two_dimension_comment(node.ecmnts))
+	obj.add('pre_cmnts', t.array_node_comment(node.pre_cmnts))
+	obj.add('elem_type_pos', t.position(node.elem_type_pos))
+	obj.add('is_fixed', t.bool_node(node.is_fixed))
+	obj.add('has_val', t.bool_node(node.has_val))
+	obj.add('mod', t.string_node(node.mod))
+	obj.add('len_expr', t.expr(node.len_expr))
+	obj.add('cap_expr', t.expr(node.cap_expr))
+	obj.add('default_expr', t.expr(node.default_expr))
+	obj.add('has_len', t.bool_node(node.has_len))
+	obj.add('has_cap', t.bool_node(node.has_cap))
+	obj.add('has_default', t.bool_node(node.has_default))
+	obj.add('expr_types', t.array_node_type(node.expr_types))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// map_init serializes an ast.MapInit (`{key: val}` literal; keys/vals are parallel arrays)
+// to a JSON object node.
+fn (t Tree) map_init(node ast.MapInit) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('MapInit'))
+	obj.add('typ', t.type_node(node.typ))
+	obj.add('key_type', t.type_node(node.key_type))
+	obj.add('value_type', t.type_node(node.value_type))
+	obj.add('keys', t.array_node_expr(node.keys))
+	obj.add('vals', t.array_node_expr(node.vals))
+	obj.add('comments', t.two_dimension_comment(node.comments))
+	obj.add('pre_cmnts', t.array_node_comment(node.pre_cmnts))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// none_expr serializes an ast.None (`none` literal) to a JSON object node.
+fn (t Tree) none_expr(node ast.None) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('None'))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// par_expr serializes an ast.ParExpr (`(expr)`) to a JSON object node.
+fn (t Tree) par_expr(node ast.ParExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ParExpr'))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// if_guard_expr serializes an ast.IfGuardExpr (`if x := opt() { ... }` guard) to a JSON object node.
+fn (t Tree) if_guard_expr(node ast.IfGuardExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('IfGuardExpr'))
+	obj.add('var_name', t.string_node(node.var_name))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('expr_type', t.type_node(node.expr_type))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// match_expr serializes an ast.MatchExpr (`match cond { ... }`) to a JSON object node.
+fn (t Tree) match_expr(node ast.MatchExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('MatchExpr'))
+	obj.add('tok_kind', t.token_node(node.tok_kind))
+	obj.add('cond', t.expr(node.cond))
+	obj.add('cond_type', t.type_node(node.cond_type))
+	obj.add('return_type', t.type_node(node.return_type))
+	obj.add('expected_type', t.type_node(node.expected_type))
+	obj.add('is_sum_type', t.bool_node(node.is_sum_type))
+	obj.add('is_expr', t.bool_node(node.is_expr))
+	obj.add('pos', t.position(node.pos))
+	obj.add('branches', t.array_node_match_branch(node.branches))
+	obj.add('comments', t.array_node_comment(node.comments))
+	return obj
+}
+
+// match_branch serializes one ast.MatchBranch (a match arm, possibly `else`) to a JSON object node.
+fn (t Tree) match_branch(node ast.MatchBranch) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('MatchBranch'))
+	obj.add('exprs', t.array_node_expr(node.exprs))
+	obj.add('ecmnts', t.two_dimension_comment(node.ecmnts))
+	obj.add('stmts', t.array_node_stmt(node.stmts))
+	obj.add('is_else', t.bool_node(node.is_else))
+	obj.add('pos', t.position(node.pos))
+	obj.add('post_comments', t.array_node_comment(node.post_comments))
+	obj.add('scope', t.number_node(int(node.scope)))
+	return obj
+}
+
+// concat_expr serializes an ast.ConcatExpr (multi-value expression, e.g. multi-return) to a JSON object node.
+fn (t Tree) concat_expr(node ast.ConcatExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('ConcatExpr'))
+	obj.add('vals', t.array_node_expr(node.vals))
+	obj.add('return_type', t.type_node(node.return_type))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// type_of serializes an ast.TypeOf (`typeof(expr)`) to a JSON object node.
+fn (t Tree) type_of(node ast.TypeOf) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('TypeOf'))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('expr_type', t.type_node(node.expr_type))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// likely serializes an ast.Likely (`_likely_(expr)` / `_unlikely_(expr)` hint) to a JSON object node.
+fn (t Tree) likely(node ast.Likely) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('Likely'))
+	obj.add('expr', t.expr(node.expr))
+	obj.add('is_likely', t.bool_node(node.is_likely))
+	obj.add('pos', t.position(node.pos))
+	return obj
+}
+
+// sql_expr serializes an ast.SqlExpr (an ORM `sql db { select ... }` expression) to a JSON
+// object node; nested sub-struct queries are serialized recursively under 'sub_structs'.
+// NOTE(review): the key here is 'type' while other nodes use 'typ' — possibly intentional;
+// verify JSON consumers before renaming.
+fn (t Tree) sql_expr(node ast.SqlExpr) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('SqlExpr'))
+	obj.add('type', t.type_node(node.typ))
+	obj.add('is_count', t.bool_node(node.is_count))
+	obj.add('db_expr', t.expr(node.db_expr))
+	obj.add('table_expr', t.type_expr(node.table_expr))
+	obj.add('has_where', t.bool_node(node.has_where))
+	obj.add('where_expr', t.expr(node.where_expr))
+	obj.add('has_order', t.bool_node(node.has_order))
+	obj.add('order_expr', t.expr(node.order_expr))
+	obj.add('has_desc', t.bool_node(node.has_desc))
+	obj.add('is_array', t.bool_node(node.is_array))
+	obj.add('pos', t.position(node.pos))
+	obj.add('has_limit', t.bool_node(node.has_limit))
+	obj.add('limit_expr', t.expr(node.limit_expr))
+	obj.add('has_offset', t.bool_node(node.has_offset))
+	obj.add('offset_expr', t.expr(node.offset_expr))
+	obj.add('fields', t.array_node_struct_field(node.fields))
+	// recursively serialize sub-queries keyed by the sub-struct's type id
+	sub_struct_map := new_object()
+	for key, val in node.sub_structs {
+		sub_struct_map.add(key.str(), t.sql_expr(val))
+	}
+	obj.add('sub_structs', sub_struct_map)
+	return obj
+}
+
+// sql_stmt serializes an ast.SqlStmt (an ORM insert/update/delete statement block)
+// to a JSON object node.
+fn (t Tree) sql_stmt(node ast.SqlStmt) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('SqlStmt'))
+	obj.add('db_expr', t.expr(node.db_expr))
+	obj.add('pos', t.position(node.pos))
+	obj.add('lines', t.array_node_sql_stmt_line(node.lines))
+	return obj
+}
+
+// sql_stmt_line serializes one ast.SqlStmtLine (a single ORM operation); nested
+// sub-struct operations are serialized recursively under 'sub_structs'.
+fn (t Tree) sql_stmt_line(node ast.SqlStmtLine) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('SqlStmtLine'))
+	obj.add('kind', t.enum_node(node.kind))
+	obj.add('table_expr', t.type_expr(node.table_expr))
+	obj.add('object_var_name', t.string_node(node.object_var_name))
+	obj.add('where_expr', t.expr(node.where_expr))
+	obj.add('fields', t.array_node_struct_field(node.fields))
+	obj.add('updated_columns', t.array_node_string(node.updated_columns))
+	obj.add('update_exprs', t.array_node_expr(node.update_exprs))
+	obj.add('pos', t.position(node.pos))
+
+	sub_struct_map := new_object()
+	for key, val in node.sub_structs {
+		sub_struct_map.add(key.str(), t.sql_stmt_line(val))
+	}
+	obj.add('sub_structs', sub_struct_map)
+	return obj
+}
+
+// lock_expr serializes an ast.LockExpr (lock/rlock blocks).
+fn (t Tree) lock_expr(expr ast.LockExpr) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('LockExpr'))
+	o.add('is_expr', t.bool_node(expr.is_expr))
+	o.add('typ', t.type_node(expr.typ))
+	o.add('pos', t.position(expr.pos))
+	o.add('stmts', t.array_node_stmt(expr.stmts))
+	o.add('lockeds', t.array_node_expr(expr.lockeds))
+	o.add('r_lock', t.array_node_bool(expr.is_rlock))
+	return o
+}
+
+// unsafe_expr serializes an `unsafe { ... }` expression wrapper.
+fn (t Tree) unsafe_expr(expr ast.UnsafeExpr) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('UnsafeExpr'))
+	o.add('expr', t.expr(expr.expr))
+	o.add('pos', t.position(expr.pos))
+	return o
+}
+
+// chan_init serializes a channel initializer (`chan T{cap: n}`).
+fn (t Tree) chan_init(expr ast.ChanInit) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('ChanInit'))
+	o.add('has_cap', t.bool_node(expr.has_cap))
+	o.add('cap_expr', t.expr(expr.cap_expr))
+	o.add('typ', t.type_node(expr.typ))
+	o.add('elem_type', t.type_node(expr.elem_type))
+	o.add('pos', t.position(expr.pos))
+	return o
+}
+
+// select_expr serializes a `select` expression and its branches.
+fn (t Tree) select_expr(expr ast.SelectExpr) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('SelectExpr'))
+	o.add('branches', t.array_node_select_branch(expr.branches))
+	o.add('is_expr', t.bool_node(expr.is_expr))
+	o.add('has_exception', t.bool_node(expr.has_exception))
+	o.add('expected_type', t.type_node(expr.expected_type))
+	o.add('pos', t.position(expr.pos))
+	return o
+}
+
+// select_branch serializes one branch of a `select` expression,
+// including its guard statement, body and surrounding comments.
+fn (t Tree) select_branch(expr ast.SelectBranch) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('SelectBranch'))
+	o.add('stmt', t.stmt(expr.stmt))
+	o.add('stmts', t.array_node_stmt(expr.stmts))
+	o.add('pos', t.position(expr.pos))
+	o.add('comment', t.comment(expr.comment))
+	o.add('is_else', t.bool_node(expr.is_else))
+	o.add('is_timeout', t.bool_node(expr.is_timeout))
+	o.add('post_comments', t.array_node_comment(expr.post_comments))
+	return o
+}
+
+// array_decompose serializes an array decomposition (`...arr`).
+fn (t Tree) array_decompose(expr ast.ArrayDecompose) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('ArrayDecompose'))
+	o.add('expr', t.expr(expr.expr))
+	o.add('expr_type', t.type_node(expr.expr_type))
+	o.add('arg_type', t.type_node(expr.arg_type))
+	o.add('pos', t.position(expr.pos))
+	return o
+}
+
+// go_expr serializes a `go fn_call()` expression.
+fn (t Tree) go_expr(expr ast.GoExpr) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('GoExpr'))
+	o.add('call_expr', t.call_expr(expr.call_expr))
+	o.add('is_expr', t.bool_node(expr.is_expr))
+	o.add('pos', t.position(expr.pos))
+	return o
+}
+
+// offset_of serializes a `__offsetof(Struct, field)` expression.
+fn (t Tree) offset_of(expr ast.OffsetOf) &Node {
+	mut obj := new_object()
+	obj.add('ast_type', t.string_node('OffsetOf'))
+	obj.add('struct_type', t.type_node(expr.struct_type))
+	// BUG FIX: previously the literal string 'field' was emitted here,
+	// dropping the actual field name from the serialized AST.
+	obj.add('field', t.string_node(expr.field))
+	obj.add('pos', t.position(expr.pos))
+	return obj
+}
+
+// dump_expr serializes a `dump(expr)` call.
+fn (t Tree) dump_expr(expr ast.DumpExpr) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('DumpExpr'))
+	o.add('expr', t.expr(expr.expr))
+	o.add('expr_type', t.type_node(expr.expr_type))
+	o.add('pos', t.position(expr.pos))
+	return o
+}
+
+// node_error serializes an ast.NodeError marker node.
+fn (t Tree) node_error(expr ast.NodeError) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('NodeError'))
+	o.add('idx', t.number_node(expr.idx))
+	o.add('pos', t.position(expr.pos))
+	return o
+}
+
+// empty_expr serializes the ast.EmptyExpr placeholder; it carries no payload.
+fn (t Tree) empty_expr(expr ast.EmptyExpr) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('EmptyExpr'))
+	return o
+}
+
+// empty_stmt serializes the ast.EmptyStmt placeholder.
+fn (t Tree) empty_stmt(node ast.EmptyStmt) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('EmptyStmt'))
+	o.add('pos', t.position(node.pos))
+	return o
+}
+
+// asm_stmt serializes an inline `asm` block with its templates,
+// inputs, outputs, clobbers and labels.
+fn (t Tree) asm_stmt(node ast.AsmStmt) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmStmt'))
+	o.add('arch', t.enum_node(node.arch))
+	o.add('is_basic', t.bool_node(node.is_basic))
+	o.add('is_volatile', t.bool_node(node.is_volatile))
+	o.add('is_goto', t.bool_node(node.is_goto))
+	o.add('scope', t.scope(node.scope))
+	o.add('pos', t.position(node.pos))
+	o.add('clobbered', t.array_node_asm_clobbered(node.clobbered))
+	o.add('templates', t.array_node_asm_template(node.templates))
+	o.add('output', t.array_node_asm_io(node.output))
+	o.add('input', t.array_node_asm_io(node.input))
+	o.add('global_labels', t.array_node_string(node.global_labels))
+	o.add('local_labels', t.array_node_string(node.local_labels))
+	return o
+}
+
+// asm_register serializes a named machine register operand.
+fn (t Tree) asm_register(node ast.AsmRegister) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmRegister'))
+	o.add('name', t.string_node(node.name))
+	o.add('typ', t.type_node(node.typ))
+	o.add('size', t.number_node(node.size))
+	return o
+}
+
+// asm_template serializes one instruction/label/directive line of an asm block.
+fn (t Tree) asm_template(node ast.AsmTemplate) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmTemplate'))
+	o.add('name', t.string_node(node.name))
+	o.add('is_label', t.bool_node(node.is_label))
+	o.add('is_directive', t.bool_node(node.is_directive))
+	o.add('args', t.array_node_asm_arg(node.args))
+	o.add('comments', t.array_node_comment(node.comments))
+	o.add('pos', t.position(node.pos))
+	return o
+}
+
+// asm_addressing serializes a memory-operand addressing expression.
+fn (t Tree) asm_addressing(node ast.AsmAddressing) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmAddressing'))
+	o.add('scale', t.number_node(node.scale))
+	o.add('mode', t.enum_node(node.mode))
+	o.add('displacement', t.asm_arg(node.displacement))
+	o.add('base', t.asm_arg(node.base))
+	o.add('index', t.asm_arg(node.index))
+	o.add('pos', t.position(node.pos))
+	return o
+}
+
+// asm_arg dispatches on the ast.AsmArg sum type and serializes the
+// active variant with its matching helper. The match is exhaustive,
+// so adding a variant to ast.AsmArg forces an update here.
+fn (t Tree) asm_arg(node ast.AsmArg) &Node {
+	match node {
+		ast.AsmAddressing {
+			return t.asm_addressing(node)
+		}
+		ast.AsmAlias {
+			return t.asm_alias(node)
+		}
+		ast.AsmDisp {
+			return t.asm_disp(node)
+		}
+		ast.AsmRegister {
+			return t.asm_register(node)
+		}
+		ast.BoolLiteral {
+			return t.bool_literal(node)
+		}
+		ast.CharLiteral {
+			return t.char_literal(node)
+		}
+		ast.FloatLiteral {
+			return t.float_literal(node)
+		}
+		ast.IntegerLiteral {
+			return t.integer_literal(node)
+		}
+		string {
+			return t.string_node(node)
+		}
+	}
+}
+
+// asm_alias serializes a named alias operand of an asm block.
+fn (t Tree) asm_alias(node ast.AsmAlias) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmAlias'))
+	o.add('name', t.string_node(node.name))
+	o.add('pos', t.position(node.pos))
+	return o
+}
+
+// asm_disp serializes a displacement operand of an asm addressing expression.
+fn (t Tree) asm_disp(node ast.AsmDisp) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmDisp'))
+	o.add('val', t.string_node(node.val))
+	o.add('pos', t.position(node.pos))
+	return o
+}
+
+// asm_clobbered serializes a clobbered-register declaration.
+fn (t Tree) asm_clobbered(node ast.AsmClobbered) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmClobbered'))
+	o.add('reg', t.asm_register(node.reg))
+	o.add('comments', t.array_node_comment(node.comments))
+	return o
+}
+
+// asm_io serializes one input/output constraint of an asm block.
+fn (t Tree) asm_io(node ast.AsmIO) &Node {
+	mut o := new_object()
+	o.add('ast_type', t.string_node('AsmIO'))
+	o.add('alias', t.string_node(node.alias))
+	o.add('constraint', t.string_node(node.constraint))
+	o.add('expr', t.expr(node.expr))
+	o.add('typ', t.type_node(node.typ))
+	o.add('comments', t.array_node_comment(node.comments))
+	o.add('pos', t.position(node.pos))
+	return o
+}
+
+// do not support yet by vlang
+// fn (t Tree) array_node1<T>(nodes []T, method_name string) &Node {
+// mut arr := new_array()
+
+// // call method dynamically, V do not support yet
+// // error: todo: not a string literal
+
+// // for node in nodes {
+// // arr.add_item(t.$method_name(node))
+// // }
+
+// // temp
+// $for method in Tree.methods {
+// if method.name == method_name {
+// for node in nodes {
+// res := t.$method(node)
+// arr.add_item(res) // TODO,waiting for bug fixed
+// }
+// }
+// }
+// return arr
+// }
+
+// do not support yet by vlang
+// fn (t Tree) array_node2<T>(nodes []T) &Node {
+// mut arr := new_array()
+
+// for node in nodes {
+// match node {
+// string {
+// arr.add_item(t.string_node(node))
+// }
+// ast.Comment {
+// arr.add_item(t.comment(node))
+// }
+// ast.ConstField {
+// arr.add_item(t.const_field(node))
+// }
+// else {
+// panic('unknown array type')
+// }
+// }
+// }
+
+// return arr
+// }
+
+// list all the different type of array node,temporarily
+// array_node_string maps each string through t.string_node into a JSON array.
+fn (t Tree) array_node_string(nodes []string) &Node {
+	mut out := new_array()
+	for s in nodes {
+		out.add_item(t.string_node(s))
+	}
+	return out
+}
+
+// array_node_position maps each position through t.position into a JSON array.
+fn (t Tree) array_node_position(nodes []token.Position) &Node {
+	mut out := new_array()
+	for p in nodes {
+		out.add_item(t.position(p))
+	}
+	return out
+}
+
+// array_node_if_branch serializes a slice of if-branches into a JSON array.
+fn (t Tree) array_node_if_branch(nodes []ast.IfBranch) &Node {
+	mut out := new_array()
+	for branch in nodes {
+		out.add_item(t.if_branch(branch))
+	}
+	return out
+}
+
+// array_node_fn_decl serializes a slice of function declarations into a JSON array.
+fn (t Tree) array_node_fn_decl(nodes []ast.FnDecl) &Node {
+	mut out := new_array()
+	for decl in nodes {
+		out.add_item(t.fn_decl(decl))
+	}
+	return out
+}
+
+// array_node_generic_fns serializes a slice of generic fn-decl references.
+fn (t Tree) array_node_generic_fns(nodes []&ast.FnDecl) &Node {
+	mut out := new_array()
+	for decl in nodes {
+		out.add_item(t.fn_decl(decl))
+	}
+	return out
+}
+
+// array_node_embed_file serializes a slice of embedded-file entries.
+fn (t Tree) array_node_embed_file(nodes []ast.EmbeddedFile) &Node {
+	mut out := new_array()
+	for f in nodes {
+		out.add_item(t.embed_file(f))
+	}
+	return out
+}
+
+// array_node_attr serializes a slice of attributes into a JSON array.
+fn (t Tree) array_node_attr(nodes []ast.Attr) &Node {
+	mut out := new_array()
+	for a in nodes {
+		out.add_item(t.attr(a))
+	}
+	return out
+}
+
+// array_node_scope_struct_field serializes the values of a scope-field map,
+// discarding the keys (iteration order follows the map).
+fn (t Tree) array_node_scope_struct_field(nodes map[string]ast.ScopeStructField) &Node {
+	mut out := new_array()
+	for _, field in nodes {
+		out.add_item(t.scope_struct_field(field))
+	}
+	return out
+}
+
+// array_node_type serializes a slice of types into a JSON array.
+fn (t Tree) array_node_type(nodes []ast.Type) &Node {
+	mut out := new_array()
+	for typ in nodes {
+		out.add_item(t.type_node(typ))
+	}
+	return out
+}
+
+// array_node_import_symbol serializes a slice of import symbols.
+fn (t Tree) array_node_import_symbol(nodes []ast.ImportSymbol) &Node {
+	mut out := new_array()
+	for sym in nodes {
+		out.add_item(t.import_symbol(sym))
+	}
+	return out
+}
+
+// array_node_comment serializes a slice of comments into a JSON array.
+fn (t Tree) array_node_comment(nodes []ast.Comment) &Node {
+	mut out := new_array()
+	for c in nodes {
+		out.add_item(t.comment(c))
+	}
+	return out
+}
+
+// array_node_const_field serializes a slice of const fields.
+fn (t Tree) array_node_const_field(nodes []ast.ConstField) &Node {
+	mut out := new_array()
+	for field in nodes {
+		out.add_item(t.const_field(field))
+	}
+	return out
+}
+
+// array_node_arg serializes a slice of fn parameters into a JSON array.
+fn (t Tree) array_node_arg(nodes []ast.Param) &Node {
+	mut out := new_array()
+	for param in nodes {
+		out.add_item(t.arg(param))
+	}
+	return out
+}
+
+// array_node_stmt serializes a slice of statements into a JSON array.
+fn (t Tree) array_node_stmt(nodes []ast.Stmt) &Node {
+	mut out := new_array()
+	for s in nodes {
+		out.add_item(t.stmt(s))
+	}
+	return out
+}
+
+// array_node_defer_stmt serializes a slice of defer statements.
+fn (t Tree) array_node_defer_stmt(nodes []ast.DeferStmt) &Node {
+	mut out := new_array()
+	for s in nodes {
+		out.add_item(t.defer_stmt(s))
+	}
+	return out
+}
+
+// array_node_struct_field serializes a slice of struct fields.
+fn (t Tree) array_node_struct_field(nodes []ast.StructField) &Node {
+	mut out := new_array()
+	for field in nodes {
+		out.add_item(t.struct_field(field))
+	}
+	return out
+}
+
+// array_node_embed serializes a slice of struct embeds.
+fn (t Tree) array_node_embed(nodes []ast.Embed) &Node {
+	mut out := new_array()
+	for e in nodes {
+		out.add_item(t.embed(e))
+	}
+	return out
+}
+
+// array_node_enum_field serializes a slice of enum fields.
+fn (t Tree) array_node_enum_field(nodes []ast.EnumField) &Node {
+	mut out := new_array()
+	for field in nodes {
+		out.add_item(t.enum_field(field))
+	}
+	return out
+}
+
+// array_node_global_field serializes a slice of global declarations.
+fn (t Tree) array_node_global_field(nodes []ast.GlobalField) &Node {
+	mut out := new_array()
+	for field in nodes {
+		out.add_item(t.global_field(field))
+	}
+	return out
+}
+
+// array_node_expr serializes a slice of expressions into a JSON array.
+fn (t Tree) array_node_expr(nodes []ast.Expr) &Node {
+	mut out := new_array()
+	for e in nodes {
+		out.add_item(t.expr(e))
+	}
+	return out
+}
+
+// array_node_call_arg serializes a slice of call arguments.
+fn (t Tree) array_node_call_arg(nodes []ast.CallArg) &Node {
+	mut out := new_array()
+	for arg in nodes {
+		out.add_item(t.call_arg(arg))
+	}
+	return out
+}
+
+// array_node_int serializes a slice of ints as JSON numbers.
+fn (t Tree) array_node_int(nodes []int) &Node {
+	mut out := new_array()
+	for n in nodes {
+		out.add_item(t.number_node(n))
+	}
+	return out
+}
+
+// array_node_byte serializes a slice of bytes as JSON numbers.
+fn (t Tree) array_node_byte(nodes []byte) &Node {
+	mut out := new_array()
+	for b in nodes {
+		out.add_item(t.number_node(b))
+	}
+	return out
+}
+
+// array_node_bool serializes a slice of bools as JSON booleans.
+fn (t Tree) array_node_bool(nodes []bool) &Node {
+	mut out := new_array()
+	for b in nodes {
+		out.add_item(t.bool_node(b))
+	}
+	return out
+}
+
+// array_node_struct_init_field serializes a slice of struct-init fields.
+fn (t Tree) array_node_struct_init_field(nodes []ast.StructInitField) &Node {
+	mut out := new_array()
+	for field in nodes {
+		out.add_item(t.struct_init_field(field))
+	}
+	return out
+}
+
+// array_node_struct_init_embed serializes a slice of struct-init embeds.
+fn (t Tree) array_node_struct_init_embed(nodes []ast.StructInitEmbed) &Node {
+	mut out := new_array()
+	for e in nodes {
+		out.add_item(t.struct_init_embed(e))
+	}
+	return out
+}
+
+// array_node_match_branch serializes a slice of match branches.
+fn (t Tree) array_node_match_branch(nodes []ast.MatchBranch) &Node {
+	mut out := new_array()
+	for branch in nodes {
+		out.add_item(t.match_branch(branch))
+	}
+	return out
+}
+
+// array_node_ident serializes a slice of identifiers.
+fn (t Tree) array_node_ident(nodes []ast.Ident) &Node {
+	mut out := new_array()
+	for id in nodes {
+		out.add_item(t.ident(id))
+	}
+	return out
+}
+
+// array_node_select_branch serializes a slice of select branches.
+fn (t Tree) array_node_select_branch(nodes []ast.SelectBranch) &Node {
+	mut out := new_array()
+	for branch in nodes {
+		out.add_item(t.select_branch(branch))
+	}
+	return out
+}
+
+// array_node_asm_clobbered serializes a slice of asm clobber declarations.
+fn (t Tree) array_node_asm_clobbered(nodes []ast.AsmClobbered) &Node {
+	mut out := new_array()
+	for c in nodes {
+		out.add_item(t.asm_clobbered(c))
+	}
+	return out
+}
+
+// array_node_asm_template serializes a slice of asm template lines.
+fn (t Tree) array_node_asm_template(nodes []ast.AsmTemplate) &Node {
+	mut out := new_array()
+	for tmpl in nodes {
+		out.add_item(t.asm_template(tmpl))
+	}
+	return out
+}
+
+// array_node_asm_io serializes a slice of asm input/output constraints.
+fn (t Tree) array_node_asm_io(nodes []ast.AsmIO) &Node {
+	mut out := new_array()
+	for io in nodes {
+		out.add_item(t.asm_io(io))
+	}
+	return out
+}
+
+// array_node_asm_arg serializes a slice of asm operands.
+fn (t Tree) array_node_asm_arg(nodes []ast.AsmArg) &Node {
+	mut out := new_array()
+	for arg in nodes {
+		out.add_item(t.asm_arg(arg))
+	}
+	return out
+}
+
+// array_node_sql_stmt_line serializes a slice of SQL statement lines.
+fn (t Tree) array_node_sql_stmt_line(nodes []ast.SqlStmtLine) &Node {
+	mut out := new_array()
+	for line in nodes {
+		out.add_item(t.sql_stmt_line(line))
+	}
+	return out
+}
+
+// array_node_interface_embedding serializes a slice of interface embeddings.
+fn (t Tree) array_node_interface_embedding(nodes []ast.InterfaceEmbedding) &Node {
+	mut out := new_array()
+	for emb in nodes {
+		out.add_item(t.interface_embedding(emb))
+	}
+	return out
+}
diff --git a/v_windows/v/old/cmd/tools/vbin2v.v b/v_windows/v/old/cmd/tools/vbin2v.v
new file mode 100644
index 0000000..c1ab6e0
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vbin2v.v
@@ -0,0 +1,146 @@
+module main
+
+import os
+import flag
+import strings
+
+const (
+ tool_version = '0.0.4'
+ tool_description = 'Converts a list of arbitrary files into a single v module file.'
+)
+
+// Context holds the parsed command-line options and the list of
+// input files for the bin2v conversion.
+struct Context {
+mut:
+	files []string // input files to embed
+	prefix string // prepended to every generated resource name
+	show_help bool
+	module_name string // module clause of the generated file
+	write_file string // when non-empty, output goes to this file instead of stdout
+}
+
+// header returns the `module` clause, a regeneration hint comment
+// (the exact `v bin2v` invocation), and the opening of the const block.
+fn (context Context) header() string {
+	mut s := ''
+	s += 'module $context.module_name\n'
+	s += '\n'
+	all_files := context.files.join(' ')
+	mut opts := []string{}
+	if context.prefix.len > 0 {
+		opts << '-p $context.prefix'
+	}
+	if context.module_name.len > 0 {
+		opts << '-m $context.module_name'
+	}
+	if context.write_file.len > 0 {
+		opts << '-w $context.write_file'
+	}
+	joined_opts := opts.join(' ')
+	s += '// File generated by:\n'
+	s += '// v bin2v $all_files $joined_opts\n'
+	s += '// Please, do not edit this file.\n'
+	s += '// Your changes may be overwritten.\n'
+	s += 'const (\n'
+	return s
+}
+
+// footer closes the const block that header() opened.
+fn (context Context) footer() string {
+	return ')\n'
+}
+
+// file2v renders one embedded file as two const declarations:
+// `<bname>_len` holding the byte count, and `<bname>` holding a
+// fixed []byte literal, wrapped so generated lines stay readable.
+// bn_max is the longest resource name (incl. `_len`), used to align `=`.
+fn (context Context) file2v(bname string, fbytes []byte, bn_max int) string {
+	mut sb := strings.new_builder(1000)
+	bn_diff_len := bn_max - bname.len
+	sb.write_string('\t${bname}_len' + ' '.repeat(bn_diff_len - 4) + ' = $fbytes.len\n')
+	if fbytes.len == 0 {
+		// previously this panicked on fbytes[0] for empty input files;
+		// emit an empty byte array instead
+		sb.write_string('\t$bname' + ' '.repeat(bn_diff_len) + ' = []byte{}\n')
+		return sb.str()
+	}
+	fbyte := fbytes[0]
+	bname_line := '\t$bname' + ' '.repeat(bn_diff_len) + ' = [byte($fbyte), '
+	sb.write_string(bname_line)
+	mut line_len := bname_line.len + 3
+	for i := 1; i < fbytes.len; i++ {
+		b := int(fbytes[i]).str()
+		if line_len > 94 {
+			// wrap: drop the trailing space, continue on a new indented line
+			sb.go_back(1)
+			sb.write_string('\n\t\t')
+			line_len = 8
+		}
+		if i == fbytes.len - 1 {
+			sb.write_string(b)
+			line_len += b.len
+		} else {
+			sb.write_string('$b, ')
+			line_len += b.len + 2
+		}
+	}
+	sb.write_string(']!\n')
+	return sb.str()
+}
+
+// bname_and_bytes derives a V-safe, lower-cased identifier from the
+// file name (with context.prefix prepended) and reads the file's bytes.
+fn (context Context) bname_and_bytes(file string) ?(string, []byte) {
+	base := os.file_name(file)
+	escaped := base.replace_each(['.', '_', '-', '_'])
+	byte_name := '$context.prefix$escaped'.to_lower()
+	content := os.read_bytes(file) or { return error('Error: $err.msg') }
+	return byte_name, content
+}
+
+// max_bname_len returns the length of the longest name in bnames,
+// plus 4 to account for the generated `_len` suffix.
+fn (context Context) max_bname_len(bnames []string) int {
+	mut longest := 0
+	for name in bnames {
+		if name.len > longest {
+			longest = name.len
+		}
+	}
+	return longest + 4
+}
+
+// main parses the CLI flags, reads every input file, and emits the
+// generated V module either to stdout or to the -w <file> target.
+fn main() {
+	mut context := Context{}
+	mut fp := flag.new_flag_parser(os.args[1..])
+	fp.application('v bin2v')
+	fp.version(tool_version)
+	fp.description(tool_description)
+	fp.arguments_description('FILE [FILE]...')
+	context.show_help = fp.bool('help', `h`, false, 'Show this help screen.')
+	context.module_name = fp.string('module', `m`, 'binary', 'Name of the generated module.')
+	context.prefix = fp.string('prefix', `p`, '', 'A prefix put before each resource name.')
+	context.write_file = fp.string('write', `w`, '', 'Write directly to a file with the given name.')
+	if context.show_help {
+		println(fp.usage())
+		exit(0)
+	}
+	files := fp.finalize() or {
+		eprintln('Error: $err.msg')
+		exit(1)
+	}
+	// the tool name itself may be among the remaining args; drop it
+	real_files := files.filter(it != 'bin2v')
+	if real_files.len == 0 {
+		println(fp.usage())
+		exit(0)
+	}
+	context.files = real_files
+	// ensure the output file has a V source extension
+	if context.write_file != '' && os.file_ext(context.write_file) !in ['.vv', '.v'] {
+		context.write_file += '.v'
+	}
+	mut file_byte_map := map[string][]byte{}
+	for file in real_files {
+		bname, fbytes := context.bname_and_bytes(file) or {
+			eprintln(err.msg)
+			exit(1)
+		}
+		file_byte_map[bname] = fbytes
+	}
+	max_bname := context.max_bname_len(file_byte_map.keys())
+	// emit header, one const pair per file, then the footer —
+	// either to the requested file or to stdout
+	if context.write_file.len > 0 {
+		mut out_file := os.create(context.write_file) ?
+		out_file.write_string(context.header()) ?
+		for bname, fbytes in file_byte_map {
+			out_file.write_string(context.file2v(bname, fbytes, max_bname)) ?
+		}
+		out_file.write_string(context.footer()) ?
+	} else {
+		print(context.header())
+		for bname, fbytes in file_byte_map {
+			print(context.file2v(bname, fbytes, max_bname))
+		}
+		print(context.footer())
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vbug.v b/v_windows/v/old/cmd/tools/vbug.v
new file mode 100644
index 0000000..6baf890
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vbug.v
@@ -0,0 +1,208 @@
+import dl
+import net.urllib
+import os
+import readline
+
+const vroot = @VMODROOT
+
+// get_vdoctor_output runs `v doctor` and returns its output, or an
+// empty string (after printing a diagnostic) when the command fails.
+fn get_vdoctor_output(is_verbose bool) string {
+	vexe := os.getenv('VEXE')
+	vflag := if is_verbose { '-v' } else { '' }
+	res := os.execute('$vexe $vflag doctor')
+	if res.exit_code != 0 {
+		eprintln('unable to get `v doctor` output: $res.output')
+		return ''
+	}
+	return res.output
+}
+
+// get output from `v -g -o vdbg cmd/v && vdbg file.v`
+// Builds a debug V compiler (for better backtraces), compiles file_path
+// with it, optionally runs the produced program, and returns the output.
+fn get_v_build_output(is_verbose bool, is_yes bool, file_path string) string {
+	mut vexe := os.getenv('VEXE')
+	// prepare a V compiler with -g to have better backtraces if possible
+	wd := os.getwd()
+	os.chdir(vroot)
+	verbose_flag := if is_verbose { '-v' } else { '' }
+	vdbg_path := $if windows { '$vroot/vdbg.exe' } $else { '$vroot/vdbg' }
+	vdbg_compilation_cmd := '"$vexe" $verbose_flag -g -o "$vdbg_path" cmd/v'
+	vdbg_result := os.execute(vdbg_compilation_cmd)
+	os.chdir(wd)
+	if vdbg_result.exit_code == 0 {
+		vexe = vdbg_path
+	} else {
+		// fall back to the regular compiler; the report is still useful
+		eprintln('unable to compile V in debug mode: $vdbg_result.output\ncommand: $vdbg_compilation_cmd\n')
+	}
+	//
+	mut result := os.execute('"$vexe" $verbose_flag "$file_path"')
+	// always remove the temporary debug compiler when this function returns
+	defer {
+		os.rm(vdbg_path) or {
+			if is_verbose {
+				eprintln('unable to delete `vdbg`: $err')
+			}
+		}
+	}
+	if result.exit_code == 0 {
+		// compilation succeeded: also remove the produced binary on exit
+		defer {
+			mut generated_file := file_path.all_before_last('.')
+			$if windows {
+				generated_file += '.exe'
+			}
+			os.rm(generated_file) or {
+				if is_verbose {
+					eprintln('unable to delete generated file: $err')
+				}
+			}
+		}
+		run := is_yes
+			|| ask('It looks like the compilation went well, do you want to run the file?')
+		if run {
+			result = os.execute('"$vexe" $verbose_flag run "$file_path"')
+			if result.exit_code == 0 && !is_yes {
+				confirm_or_exit('It looks like the file ran correctly as well, are you sure you want to continue?')
+			}
+		}
+	}
+	return result.output
+}
+
+// ShellExecuteWin mirrors the signature of Win32 ShellExecuteW,
+// which is loaded dynamically from shell32.dll in open_browser.
+type ShellExecuteWin = fn (voidptr, &u16, &u16, &u16, &u16, int)
+
+// open a uri using the default associated application
+// NOTE(review): the freebsd/openbsd branch shells out to `xdg-open` —
+// confirm it is actually available on those systems; callers already
+// print the raw link as a fallback when this returns an error.
+fn open_browser(uri string) ? {
+	$if macos {
+		result := os.execute('open "$uri"')
+		if result.exit_code != 0 {
+			return error('unable to open url: $result.output')
+		}
+	} $else $if freebsd || openbsd {
+		result := os.execute('xdg-open "$uri"')
+		if result.exit_code != 0 {
+			return error('unable to open url: $result.output')
+		}
+	} $else $if linux {
+		providers := ['xdg-open', 'x-www-browser', 'www-browser', 'wslview']
+
+		// There are multiple possible providers to open a browser on linux
+		// One of them is xdg-open, another is x-www-browser, then there's www-browser, etc.
+		// Look for one that exists and run it
+		for provider in providers {
+			if os.exists_in_system_path(provider) {
+				result := os.execute('$provider "$uri"')
+				if result.exit_code != 0 {
+					return error('unable to open url: $result.output')
+				}
+				break
+			}
+		}
+	} $else $if windows {
+		// load ShellExecuteW dynamically so shell32 is not a static link dependency
+		handle := dl.open_opt('shell32', dl.rtld_now) ?
+		// https://docs.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-shellexecutew
+		func := ShellExecuteWin(dl.sym_opt(handle, 'ShellExecuteW') ?)
+		func(C.NULL, 'open'.to_wide(), uri.to_wide(), C.NULL, C.NULL, C.SW_SHOWNORMAL)
+		dl.close(handle)
+	} $else {
+		return error('unsupported platform')
+	}
+}
+
+// ask prompts with `[Y/n]` and returns true unless the reply starts
+// with 'n'/'N'; an empty reply or read failure counts as yes.
+fn ask(msg string) bool {
+	answer := os.input_opt('$msg [Y/n] ') or { 'y' }
+	if answer == '' {
+		return true
+	}
+	return answer[0].ascii_str().to_lower() != 'n'
+}
+
+// confirm_or_exit aborts the whole program (exit 1) unless the user confirms.
+fn confirm_or_exit(msg string) {
+	if ask(msg) {
+		return
+	}
+	exit(1)
+}
+
+// main gathers `v doctor` output, the file content and a debug build/run
+// log, then opens a prefilled GitHub issue page (or prints the URL).
+fn main() {
+	mut file_path := ''
+	mut is_verbose := false
+	mut is_yes := false
+	// os.args[2..]: args[0] is the executable, args[1] the `bug` subcommand
+	for arg in os.args[2..] {
+		match arg {
+			'-v' {
+				is_verbose = true
+			}
+			'-y' {
+				is_yes = true
+			}
+			else {
+				if !arg.ends_with('.v') && !arg.ends_with('.vsh') && !arg.ends_with('.vv') {
+					eprintln('unknown argument: `$arg`')
+					exit(1)
+				}
+				if file_path != '' {
+					eprintln('only one V file can be submitted')
+					exit(1)
+				}
+				file_path = arg
+			}
+		}
+	}
+	if file_path == '' {
+		eprintln('v bug: no v file listed to report')
+		exit(1)
+	}
+	// collect error information
+	// output from `v doctor`
+	vdoctor_output := get_vdoctor_output(is_verbose)
+	// file content
+	file_content := os.read_file(file_path) or {
+		eprintln('unable to get file "$file_path" content: $err')
+		''
+	}
+	// output from `v -g -o vdbg cmd/v && vdbg file.v`
+	build_output := get_v_build_output(is_verbose, is_yes, file_path)
+	// ask the user if he wants to submit even after an error
+	if !is_yes && (vdoctor_output == '' || file_content == '' || build_output == '') {
+		confirm_or_exit('An error occured retrieving the information, do you want to continue?')
+	}
+
+	expected_result := readline.read_line('What did you expect to see? ') or {
+		// Ctrl-C was pressed
+		eprintln('\nCanceled')
+		exit(1)
+	}
+	// open prefilled issue creation page, or print link as a fallback
+
+	if !is_yes && vdoctor_output.contains('behind V master') {
+		confirm_or_exit('It looks like your installation of V is outdated, we advise you to run `v up` before submitting an issue. Are you sure you want to continue?')
+	}
+
+	// When updating this template, make sure to update `.github/ISSUE_TEMPLATE/bug_report.md` too
+	raw_body := '<!-- It is advisable to update all relevant modules using `v outdated` and `v install` -->
+**V doctor:**
+```
+$vdoctor_output```
+
+**What did you do?**
+`v -g -o vdbg cmd/v && vdbg $file_path`
+{file_content}
+
+**What did you expect to see?**
+
+$expected_result
+
+**What did you see instead?**
+```
+$build_output```'
+	// {file_content} is substituted late so it can be swapped for a
+	// "see attached file" note when the URL would exceed GitHub's limit
+	mut encoded_body := urllib.query_escape(raw_body.replace_once('{file_content}', '```v\n$file_content\n```'))
+	mut generated_uri := 'https://github.com/vlang/v/issues/new?labels=Bug&body=$encoded_body'
+	if generated_uri.len > 8192 {
+		// GitHub doesn't support URLs longer than 8192 characters
+		encoded_body = urllib.query_escape(raw_body.replace_once('{file_content}', 'See attached file `$file_path`'))
+		generated_uri = 'https://github.com/vlang/v/issues/new?labels=Bug&body=$encoded_body'
+		println('Your file is too big to be submitted. Head over to the following URL and attach your file.')
+		println(generated_uri)
+	} else {
+		open_browser(generated_uri) or {
+			if is_verbose {
+				eprintln(err)
+			}
+			println(generated_uri)
+		}
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vbuild-examples.v b/v_windows/v/old/cmd/tools/vbuild-examples.v
new file mode 100644
index 0000000..82ddc1e
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vbuild-examples.v
@@ -0,0 +1,15 @@
+module main
+
+import os
+import testing
+
+// main builds all examples (plus the hot_reload examples with -live)
+// and exits non-zero on the first failing group.
+fn main() {
+	joined_args := os.args[1..].join(' ')
+	opts := joined_args.all_before('build-examples')
+	if testing.v_build_failing(opts, 'examples') {
+		exit(1)
+	}
+	if testing.v_build_failing(opts + '-live', os.join_path('examples', 'hot_reload')) {
+		exit(1)
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vbuild-tools.v b/v_windows/v/old/cmd/tools/vbuild-tools.v
new file mode 100644
index 0000000..19905ab
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vbuild-tools.v
@@ -0,0 +1,71 @@
+module main
+
+import os
+import testing
+import v.util
+
+// NB: tools like vdoc are compiled in their own subfolder
+// => cmd/tools/vdoc/vdoc.exe
+// Usually, they have several top level .v files in the subfolder,
+// that cannot be compiled separately, but instead, the whole folder,
+// should be compiled (v folder).
+// To implement that, these folders are initially skipped, then added
+// as a whole *after the testing.prepare_test_session call*.
+const tools_in_subfolders = ['vdoc', 'vvet', 'vast']
+
+// non_packaged_tools are tools that should not be packaged with
+// prebuild versions of V, to keep the size smaller.
+// They are mainly usefull for the V project itself, not to end users.
+const non_packaged_tools = ['gen1m', 'gen_vc', 'fast', 'wyhash']
+
+// main compiles every tool under cmd/tools (subfolder tools are added
+// as whole folders), then moves the produced binaries into place,
+// skipping the non-packaged developer-only tools.
+fn main() {
+	util.ensure_modules_for_all_tools_are_installed('-v' in os.args)
+	args_string := os.args[1..].join(' ')
+	vexe := os.getenv('VEXE')
+	vroot := os.dir(vexe)
+	os.chdir(vroot)
+	folder := os.join_path('cmd', 'tools')
+	tfolder := os.join_path(vroot, 'cmd', 'tools')
+	main_label := 'Building $folder ...'
+	finish_label := 'building $folder'
+	//
+	// subfolder tools cannot be compiled file-by-file; skip them here
+	// and re-add them below as whole-folder entries
+	mut skips := []string{}
+	for stool in tools_in_subfolders {
+		skips << os.join_path(tfolder, stool)
+	}
+	buildopts := args_string.all_before('build-tools')
+	mut session := testing.prepare_test_session(buildopts, folder, skips, main_label)
+	session.rm_binaries = false
+	for stool in tools_in_subfolders {
+		session.add(os.join_path(tfolder, stool))
+	}
+	// eprintln('> session.files: $session.files')
+	// eprintln('> session.skip_files: $session.skip_files')
+	session.test()
+	eprintln(session.benchmark.total_message(finish_label))
+	if session.failed {
+		exit(1)
+	}
+	//
+	// move the compiled binaries from the temp dir into cmd/tools
+	mut executables := os.ls(session.vtmp_dir) ?
+	executables.sort()
+	for texe in executables {
+		tname := texe.replace(os.file_ext(texe), '')
+		if tname in non_packaged_tools {
+			continue
+		}
+		//
+		tpath := os.join_path(session.vtmp_dir, texe)
+		if tname in tools_in_subfolders {
+			os.mv_by_cp(tpath, os.join_path(tfolder, tname, texe)) or { panic(err) }
+			continue
+		}
+		target_path := os.join_path(tfolder, texe)
+		os.mv_by_cp(tpath, target_path) or {
+			// a concurrently running build may hold the target binary;
+			// only report unexpected move failures
+			if !err.msg.contains('vbuild-tools') && !err.msg.contains('vtest-all') {
+				eprintln('error while moving $tpath to $target_path: $err.msg')
+			}
+			continue
+		}
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vbuild-vbinaries.v b/v_windows/v/old/cmd/tools/vbuild-vbinaries.v
new file mode 100644
index 0000000..49e297e
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vbuild-vbinaries.v
@@ -0,0 +1,9 @@
+module main
+
+import testing
+
+// main exits with code 1 when any of the V binaries fails to build.
+fn main() {
+	failed := testing.building_any_v_binaries_failed()
+	if failed {
+		exit(1)
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vcheck-md.v b/v_windows/v/old/cmd/tools/vcheck-md.v
new file mode 100644
index 0000000..f5cd5ea
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vcheck-md.v
@@ -0,0 +1,540 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module main
+
+import os
+import os.cmdline
+import rand
+import term
+import vhelp
+import v.pref
+import regex
+
+const (
+ too_long_line_length = 100
+ term_colors = term.can_show_color_on_stderr()
+ hide_warnings = '-hide-warnings' in os.args || '-w' in os.args
+ show_progress = os.getenv('GITHUB_JOB') == '' && '-silent' !in os.args
+ non_option_args = cmdline.only_non_options(os.args[2..])
+)
+
+// CheckResult accumulates the outcome of checking one or more markdown files.
+struct CheckResult {
+pub mut:
+	warnings int // non-fatal findings (reported, do not fail the run)
+	errors   int // fatal findings (a non-zero count makes main exit(1))
+	oks      int // successfully verified examples
+}
+
+// + sums each counter of two CheckResult values, enabling
+// per-file results to be folded together with `res += other`.
+fn (v1 CheckResult) + (v2 CheckResult) CheckResult {
+	return CheckResult{
+		warnings: v1.warnings + v2.warnings
+		errors: v1.errors + v2.errors
+		oks: v1.oks + v2.oks
+	}
+}
+
+// main drives `v check-md`: it expands directory arguments into .md file
+// lists, checks each file, prints a summary, and exits 1 on any error.
+fn main() {
+	if non_option_args.len == 0 || '-help' in os.args {
+		vhelp.show_topic('check-md')
+		exit(0)
+	}
+	if '-all' in os.args {
+		println('´-all´ flag is deprecated. Please use ´v check-md .´ instead.')
+		exit(1)
+	}
+	if show_progress {
+		// this is intended to be replaced by the progress lines
+		println('')
+	}
+	mut files_paths := non_option_args.clone()
+	mut res := CheckResult{}
+	if term_colors {
+		os.setenv('VCOLORS', 'always', true)
+	}
+	// Index-based loop on purpose: directory entries are appended to
+	// files_paths while iterating, so the list grows during the walk.
+	for i := 0; i < files_paths.len; i++ {
+		file_path := files_paths[i]
+		if os.is_dir(file_path) {
+			files_paths << md_file_paths(file_path)
+			continue
+		}
+		real_path := os.real_path(file_path)
+		lines := os.read_lines(real_path) or {
+			// An unreadable path is only a warning, not a hard error.
+			println('"$file_path" does not exist')
+			res.warnings++
+			continue
+		}
+		mut mdfile := MDFile{
+			path: file_path
+			lines: lines
+		}
+		res += mdfile.check()
+	}
+	if res.errors == 0 && show_progress {
+		term.clear_previous_line()
+	}
+	if res.warnings > 0 || res.errors > 0 || res.oks > 0 {
+		println('\nWarnings: $res.warnings | Errors: $res.errors | OKs: $res.oks')
+	}
+	if res.errors > 0 {
+		exit(1)
+	}
+}
+
+// md_file_paths recursively collects all .md files under `dir`,
+// skipping vendored thirdparty content and CHANGELOG files.
+fn md_file_paths(dir string) []string {
+	mut files_to_check := []string{}
+	md_files := os.walk_ext(dir, '.md')
+	for file in md_files {
+		if file.contains_any_substr(['/thirdparty/', 'CHANGELOG']) {
+			continue
+		}
+		files_to_check << file
+	}
+	return files_to_check
+}
+
+// wprintln prints a warning line, unless warnings are suppressed
+// via -hide-warnings / -w (see the hide_warnings const).
+fn wprintln(s string) {
+	if !hide_warnings {
+		println(s)
+	}
+}
+
+// ftext applies the given terminal-color formatter `cb` to `s`,
+// but only when colored output is enabled; otherwise returns `s` unchanged.
+fn ftext(s string, cb fn (string) string) string {
+	if term_colors {
+		return cb(s)
+	}
+	return s
+}
+
+// btext returns `s` in bold when terminal colors are enabled.
+fn btext(s string) string {
+	return ftext(s, term.bold)
+}
+
+// mtext returns `s` in magenta when terminal colors are enabled.
+fn mtext(s string) string {
+	return ftext(s, term.magenta)
+}
+
+// rtext returns `s` in red when terminal colors are enabled.
+fn rtext(s string) string {
+	return ftext(s, term.red)
+}
+
+// wline formats a warning message with a `file:line:col:` prefix.
+// lnumber and column are 0-based and printed 1-based.
+fn wline(file_path string, lnumber int, column int, message string) string {
+	return btext('$file_path:${lnumber + 1}:${column + 1}:') + btext(mtext(' warn:')) +
+		rtext(' $message')
+}
+
+// eline formats an error message with a `file:line:col:` prefix.
+// lnumber and column are 0-based and printed 1-based.
+fn eline(file_path string, lnumber int, column int, message string) string {
+	return btext('$file_path:${lnumber + 1}:${column + 1}:') + btext(rtext(' error: $message'))
+}
+
+const (
+ default_command = 'compile'
+)
+
+// VCodeExample is one ```v fenced code block extracted from a markdown file.
+struct VCodeExample {
+mut:
+	text    []string // the example's source lines (without the fences)
+	command string   // how to verify it: compile/live/failcompile/oksyntax/... (see check_examples)
+	sline   int      // 0-based line of the opening fence
+	eline   int      // 0-based line of the closing fence
+}
+
+// MDFileParserState tracks what kind of region the line parser is inside.
+enum MDFileParserState {
+	markdown // ordinary markdown text
+	vexample // inside a ```v fenced block
+	codeblock // inside a non-V ``` fenced block
+}
+
+// MDFile holds one markdown file's content plus the parser state
+// and the V examples collected from it.
+struct MDFile {
+	path  string
+	lines []string
+mut:
+	examples []VCodeExample
+	current  VCodeExample // the example currently being accumulated
+	state    MDFileParserState = .markdown
+}
+
+// progress overwrites the previous terminal line with a status message,
+// unless progress output is disabled (e.g. on CI, see show_progress).
+fn (mut f MDFile) progress(message string) {
+	if show_progress {
+		term.clear_previous_line()
+		println('File: ${f.path:-30s}, Lines: ${f.lines.len:5}, $message')
+	}
+}
+
+// check scans every line of the file: it enforces the maximum line length
+// (warning inside code blocks/tables/links, hard error otherwise), collects
+// anchor links/targets in markdown regions, parses out V examples, and
+// finally verifies link targets and compiles the examples.
+fn (mut f MDFile) check() CheckResult {
+	mut res := CheckResult{}
+	mut anchor_data := AnchorData{}
+	for j, line in f.lines {
+		// f.progress('line: $j')
+		if line.len > too_long_line_length {
+			// Long lines are only warnings in contexts where wrapping
+			// is impractical (code, tables, URLs) — errors elsewhere.
+			if f.state == .vexample {
+				wprintln(wline(f.path, j, line.len, 'long V example line'))
+				wprintln(line)
+				res.warnings++
+			} else if f.state == .codeblock {
+				wprintln(wline(f.path, j, line.len, 'long code block line'))
+				wprintln(line)
+				res.warnings++
+			} else if line.starts_with('|') {
+				wprintln(wline(f.path, j, line.len, 'long table'))
+				wprintln(line)
+				res.warnings++
+			} else if line.contains('https') {
+				wprintln(wline(f.path, j, line.len, 'long link'))
+				wprintln(line)
+				res.warnings++
+			} else {
+				eprintln(eline(f.path, j, line.len, 'line too long'))
+				eprintln(line)
+				res.errors++
+			}
+		}
+		if f.state == .markdown {
+			anchor_data.add_links(j, line)
+			anchor_data.add_link_targets(j, line)
+		}
+
+		f.parse_line(j, line)
+	}
+	anchor_data.check_link_target_match(f.path, mut res)
+	res += f.check_examples()
+	return res
+}
+
+// parse_line advances the fence state machine for one line:
+// ```v opens a V example (its suffix selects the check command),
+// a bare ``` closes an example / toggles a plain code block,
+// and any line inside a V example is appended to it.
+fn (mut f MDFile) parse_line(lnumber int, line string) {
+	if line.starts_with('```v') {
+		if f.state == .markdown {
+			f.state = .vexample
+			mut command := line.replace('```v', '').trim_space()
+			if command == '' {
+				command = default_command
+			} else if command == 'nofmt' {
+				// bare `nofmt` still implies the default compile check
+				command += ' $default_command'
+			}
+			f.current = VCodeExample{
+				sline: lnumber
+				command: command
+			}
+		}
+		return
+	}
+	if line.starts_with('```') {
+		match f.state {
+			.vexample {
+				f.state = .markdown
+				f.current.eline = lnumber
+				f.examples << f.current
+				f.current = VCodeExample{}
+				return
+			}
+			.codeblock {
+				f.state = .markdown
+				return
+			}
+			.markdown {
+				f.state = .codeblock
+				return
+			}
+		}
+	}
+	if f.state == .vexample {
+		f.current.text << line
+	}
+}
+
+// Headline is a link target produced by a markdown `#` heading.
+// NOTE(review): field name `lable` is a misspelling of `label`,
+// kept because it is referenced throughout this file.
+struct Headline {
+	line  int
+	lable string
+	level int // heading depth, i.e. the number of leading '#'
+}
+
+// Anchor is a link target produced by an explicit <a id="..."/> tag.
+struct Anchor {
+	line int
+}
+
+type AnchorTarget = Anchor | Headline
+
+// AnchorLink is one `[label](#target)` reference found in the document.
+struct AnchorLink {
+	line  int
+	lable string // display text of the link (field name misspelling kept)
+}
+
+// AnchorData maps link names to all uses (links) and all definitions
+// (anchors) of that name, so mismatches can be reported per file.
+struct AnchorData {
+mut:
+	links   map[string][]AnchorLink
+	anchors map[string][]AnchorTarget
+}
+
+// add_links records every `[label](#link)` occurrence on the given line
+// into ad.links, keyed by the link target name.
+fn (mut ad AnchorData) add_links(line_number int, line string) {
+	query := r'\[(?P<lable>[^\]]+)\]\(\s*#(?P<link>[a-z0-9\-\_\x7f-\uffff]+)\)'
+	mut re := regex.regex_opt(query) or { panic(err) }
+	res := re.find_all_str(line)
+
+	for elem in res {
+		// match_string populates the named groups for this element
+		re.match_string(elem)
+		link := re.get_group_by_name(elem, 'link')
+		ad.links[link] << AnchorLink{
+			line: line_number
+			lable: re.get_group_by_name(elem, 'lable')
+		}
+	}
+}
+
+// add_link_targets records link targets found on the given line:
+// either a `#` heading (converted to its reference-link form) or an
+// explicit `<a id="..."/>` HTML anchor.
+fn (mut ad AnchorData) add_link_targets(line_number int, line string) {
+	if line.trim_space().starts_with('#') {
+		if headline_start_pos := line.index(' ') {
+			headline := line.substr(headline_start_pos + 1, line.len)
+			link := create_ref_link(headline)
+			ad.anchors[link] << Headline{
+				line: line_number
+				lable: headline
+				level: headline_start_pos // == number of '#' before the space
+			}
+		}
+	} else {
+		query := '<a\\s*id=["\'](?P<link>[a-z0-9\\-\\_\\x7f-\\uffff]+)["\']\\s*/>'
+		mut re := regex.regex_opt(query) or { panic(err) }
+		res := re.find_all_str(line)
+
+		for elem in res {
+			re.match_string(elem)
+			link := re.get_group_by_name(elem, 'link')
+			ad.anchors[link] << Anchor{
+				line: line_number
+			}
+		}
+	}
+}
+
+// check_link_target_match cross-checks collected links against targets:
+// a link with no target or with duplicate targets is an error; duplicate
+// targets that nothing links to are only warnings.
+fn (mut ad AnchorData) check_link_target_match(fpath string, mut res CheckResult) {
+	mut checked_headlines := []string{}
+	mut found_error_warning := false
+	for link, linkdata in ad.links {
+		if link in ad.anchors {
+			checked_headlines << link
+			if ad.anchors[link].len > 1 {
+				// ambiguous: several targets share this link name
+				found_error_warning = true
+				res.errors++
+				for anchordata in ad.anchors[link] {
+					eprintln(eline(fpath, anchordata.line, 0, 'multiple link targets of existing link (#$link)'))
+				}
+			}
+		} else {
+			// dangling: the link points to a target that does not exist
+			found_error_warning = true
+			res.errors++
+			for brokenlink in linkdata {
+				eprintln(eline(fpath, brokenlink.line, 0, 'no link target found for existing link [$brokenlink.lable](#$link)'))
+			}
+		}
+	}
+	// Unreferenced targets: duplicates are suspicious but non-fatal.
+	for link, anchor_lists in ad.anchors {
+		if !(link in checked_headlines) {
+			if anchor_lists.len > 1 {
+				for anchor in anchor_lists {
+					line := match anchor {
+						Headline {
+							anchor.line
+						}
+						Anchor {
+							anchor.line
+						}
+					}
+					wprintln(wline(fpath, line, 0, 'multiple link target for non existing link (#$link)'))
+					found_error_warning = true
+					res.warnings++
+				}
+			}
+		}
+	}
+	if found_error_warning {
+		eprintln('') // fix suppressed last error output
+	}
+}
+
+// create_ref_link converts a headline into the markdown reference-link
+// form: lowercased, spaces/hyphens become '-', underscores kept, ASCII
+// punctuation dropped, and non-ASCII bytes (> 127) passed through.
+// Based on a reference sample md doc:
+// https://github.com/aheissenberger/vlang-markdown-module/blob/master/test.md
+fn create_ref_link(s string) string {
+	mut result := ''
+	for c in s.trim_space() {
+		result += match c {
+			`a`...`z`, `0`...`9` {
+				c.ascii_str()
+			}
+			`A`...`Z` {
+				c.ascii_str().to_lower()
+			}
+			` `, `-` {
+				'-'
+			}
+			`_` {
+				'_'
+			}
+			else {
+				if c > 127 { c.ascii_str() } else { '' }
+			}
+		}
+	}
+	return result
+}
+
+// debug dumps every collected example to stderr; development aid only.
+fn (mut f MDFile) debug() {
+	for e in f.examples {
+		eprintln('f.path: $f.path | example: $e')
+	}
+}
+
+// cmdexecute runs `cmd`, echoing its output to stderr on failure.
+// Returns the command's exit code; 1 if the command could not be started.
+fn cmdexecute(cmd string) int {
+	res := os.execute(cmd)
+	if res.exit_code < 0 {
+		return 1
+	}
+	if res.exit_code != 0 {
+		eprint(res.output)
+	}
+	return res.exit_code
+}
+
+// silent_cmdexecute runs `cmd`, discarding all output.
+// Used where failure is the expected/asserted outcome.
+fn silent_cmdexecute(cmd string) int {
+	res := os.execute(cmd)
+	return res.exit_code
+}
+
+// get_fmt_exit_code returns 0 when `vfile` is already formatted
+// according to `v fmt -verify`, non-zero otherwise.
+fn get_fmt_exit_code(vfile string, vexe string) int {
+	return silent_cmdexecute('"$vexe" fmt -verify $vfile')
+}
+
+// check_examples writes each extracted V example to a unique temp file and
+// verifies it according to its fence command(s): compile / globals / live /
+// failcompile / oksyntax / badsyntax (plus the nofmt modifier, and
+// wip/ignore which skip the example entirely). Formatting is also verified
+// unless nofmt is present. Failing examples keep their temp file on disk
+// for inspection.
+fn (mut f MDFile) check_examples() CheckResult {
+	mut errors := 0
+	mut oks := 0
+	vexe := pref.vexe_path()
+	for e in f.examples {
+		if e.command == 'ignore' {
+			continue
+		}
+		if e.command == 'wip' {
+			continue
+		}
+		// Temp file name encodes source file, example location and a ulid,
+		// so parallel runs never collide.
+		fname := os.base(f.path).replace('.md', '_md')
+		uid := rand.ulid()
+		vfile := os.join_path(os.temp_dir(), 'check_${fname}_example_${e.sline}__${e.eline}__${uid}.v')
+		mut should_cleanup_vfile := true
+		// eprintln('>>> checking example $vfile ...')
+		vcontent := e.text.join('\n') + '\n'
+		os.write_file(vfile, vcontent) or { panic(err) }
+		mut acommands := e.command.split(' ')
+		nofmt := 'nofmt' in acommands
+		for command in acommands {
+			f.progress('example from $e.sline to $e.eline, command: $command')
+			fmt_res := if nofmt { 0 } else { get_fmt_exit_code(vfile, vexe) }
+			match command {
+				'compile' {
+					res := cmdexecute('"$vexe" -w -Wfatal-errors -o x.c $vfile')
+					os.rm('x.c') or {}
+					if res != 0 || fmt_res != 0 {
+						if res != 0 {
+							eprintln(eline(f.path, e.sline, 0, 'example failed to compile'))
+						}
+						if fmt_res != 0 {
+							eprintln(eline(f.path, e.sline, 0, 'example is not formatted'))
+						}
+						eprintln(vcontent)
+						should_cleanup_vfile = false
+						errors++
+						continue
+					}
+					oks++
+				}
+				'globals' {
+					res := cmdexecute('"$vexe" -w -Wfatal-errors -enable-globals -o x.c $vfile')
+					os.rm('x.c') or {}
+					if res != 0 || fmt_res != 0 {
+						if res != 0 {
+							// NOTE(review): message has a stray leading backtick
+							eprintln(eline(f.path, e.sline, 0, '`example failed to compile with -enable-globals'))
+						}
+						if fmt_res != 0 {
+							// NOTE(review): message has a stray leading backtick
+							eprintln(eline(f.path, e.sline, 0, '`example is not formatted'))
+						}
+						eprintln(vcontent)
+						should_cleanup_vfile = false
+						errors++
+						continue
+					}
+					oks++
+				}
+				'live' {
+					// NOTE(review): unlike the other compile branches, this one
+					// never removes the produced x.c file — confirm if intended.
+					res := cmdexecute('"$vexe" -w -Wfatal-errors -live -o x.c $vfile')
+					if res != 0 || fmt_res != 0 {
+						if res != 0 {
+							eprintln(eline(f.path, e.sline, 0, 'example failed to compile with -live'))
+						}
+						if fmt_res != 0 {
+							eprintln(eline(f.path, e.sline, 0, 'example is not formatted'))
+						}
+						eprintln(vcontent)
+						should_cleanup_vfile = false
+						errors++
+						continue
+					}
+					oks++
+				}
+				'failcompile' {
+					// Inverted check: compilation succeeding is the failure.
+					res := silent_cmdexecute('"$vexe" -w -Wfatal-errors -o x.c $vfile')
+					os.rm('x.c') or {}
+					if res == 0 || fmt_res != 0 {
+						if res == 0 {
+							eprintln(eline(f.path, e.sline, 0, '`failcompile` example compiled'))
+						}
+						if fmt_res != 0 {
+							eprintln(eline(f.path, e.sline, 0, 'example is not formatted'))
+						}
+						eprintln(vcontent)
+						should_cleanup_vfile = false
+						errors++
+						continue
+					}
+					oks++
+				}
+				'oksyntax' {
+					res := cmdexecute('"$vexe" -w -Wfatal-errors -check-syntax $vfile')
+					if res != 0 || fmt_res != 0 {
+						if res != 0 {
+							eprintln(eline(f.path, e.sline, 0, '`oksyntax` example with invalid syntax'))
+						}
+						if fmt_res != 0 {
+							eprintln(eline(f.path, e.sline, 0, '`oksyntax` example is not formatted'))
+						}
+						eprintln(vcontent)
+						should_cleanup_vfile = false
+						errors++
+						continue
+					}
+					oks++
+				}
+				'badsyntax' {
+					// Inverted check: parsing succeeding is the failure.
+					res := silent_cmdexecute('"$vexe" -w -Wfatal-errors -check-syntax $vfile')
+					if res == 0 {
+						eprintln(eline(f.path, e.sline, 0, '`badsyntax` example can be parsed fine'))
+						eprintln(vcontent)
+						should_cleanup_vfile = false
+						errors++
+						continue
+					}
+					oks++
+				}
+				'nofmt' {}
+				else {
+					eprintln(eline(f.path, e.sline, 0, 'unrecognized command: "$command", use one of: wip/ignore/compile/failcompile/oksyntax/badsyntax'))
+					should_cleanup_vfile = false
+					errors++
+				}
+			}
+		}
+		if should_cleanup_vfile {
+			os.rm(vfile) or { panic(err) }
+		}
+	}
+	return CheckResult{
+		errors: errors
+		oks: oks
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vcomplete.v b/v_windows/v/old/cmd/tools/vcomplete.v
new file mode 100644
index 0000000..8e61755
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vcomplete.v
@@ -0,0 +1,451 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+//
+// Utility functions helping integrate with various shell auto-completion systems.
+// The install process and communication is inspired from that of [kitty](https://sw.kovidgoyal.net/kitty/#completion-for-kitty)
+// This method avoids writing and maintaining external files on the user's file system.
+// The user will be responsible for adding a small line to their .*rc - that will ensure *live* (i.e. not-static)
+// auto-completion features.
+//
+// # bash
+// To install auto-completion for V in bash, simply add this code to your `~/.bashrc`:
+// `source /dev/stdin <<<"$(v complete setup bash)"`
+// On more recent versions of bash (>3.2) this should suffice:
+// `source <(v complete setup bash)`
+//
+// # fish
+// For versions of fish <3.0.0, add the following to your `~/.config/fish/config.fish`
+// `v complete setup fish | source`
+// Later versions of fish source completions by default.
+//
+// # zsh
+// To install auto-completion for V in zsh - please add the following to your `~/.zshrc`:
+// ```
+// autoload -Uz compinit
+// compinit
+// # Completion for v
+// v complete setup zsh | source /dev/stdin
+// ```
+// Please note that you should let v load the zsh completions after the call to compinit
+//
+// # powershell
+// To install auto-complete for V in PowerShell, simply do this
+// `v complete setup powershell >> $PROFILE`
+// and reload profile
+// `& $PROFILE`
+// If `$PROFILE` didn't exist yet, create it before
+// `New-Item -Type File -Force $PROFILE`
+//
+module main
+
+import os
+
+const (
+ auto_complete_shells = ['bash', 'fish', 'zsh', 'powershell'] // list of supported shells
+ vexe = os.getenv('VEXE')
+)
+
+// Snooped from cmd/v/v.v, vlib/v/pref/pref.v
+const (
+ auto_complete_commands = [
+ // simple_cmd
+ 'fmt',
+ 'up',
+ 'vet',
+ 'self',
+ 'tracev',
+ 'symlink',
+ 'bin2v',
+ 'test',
+ 'test-fmt',
+ 'test-self',
+ 'test-cleancode',
+ 'repl',
+ 'complete',
+ 'build-tools',
+ 'build-examples',
+ 'build-vbinaries',
+ 'setup-freetype',
+ 'doc',
+ 'doctor',
+ // commands
+ 'help',
+ 'new',
+ 'init',
+ 'complete',
+ 'translate',
+ 'self',
+ 'search',
+ 'install',
+ 'update',
+ 'upgrade',
+ 'outdated',
+ 'list',
+ 'remove',
+ 'vlib-docs',
+ 'get',
+ 'version',
+ 'run',
+ 'build',
+ 'build-module',
+ ]
+ auto_complete_flags = [
+ '-apk',
+ '-show-timings',
+ '-check-syntax',
+ '-v',
+ '-progress',
+ '-silent',
+ '-g',
+ '-cg',
+ '-repl',
+ '-live',
+ '-sharedlive',
+ '-shared',
+ '--enable-globals',
+ '-enable-globals',
+ '-autofree',
+ '-compress',
+ '-freestanding',
+ '-no-preludes',
+ '-prof',
+ '-profile',
+ '-profile-no-inline',
+ '-prod',
+ '-simulator',
+ '-stats',
+ '-obfuscate',
+ '-translated',
+ '-color',
+ '-nocolor',
+ '-showcc',
+ '-show-c-output',
+ '-experimental',
+ '-usecache',
+ '-prealloc',
+ '-parallel',
+ '-native',
+ '-W',
+ '-keepc',
+ '-w',
+ '-print-v-files',
+ '-error-limit',
+ '-os',
+ '-printfn',
+ '-cflags',
+ '-define',
+ '-d',
+ '-cc',
+ '-o',
+ '-b',
+ '-path',
+ '-custom-prelude',
+ '-name',
+ '-bundle',
+ '-V',
+ '-version',
+ '--version',
+ ]
+ auto_complete_flags_doc = [
+ '-all',
+ '-f',
+ '-h',
+ '-help',
+ '-m',
+ '-o',
+ '-readme',
+ '-v',
+ '-filename',
+ '-pos',
+ '-no-timestamp',
+ '-inline-assets',
+ '-open',
+ '-p',
+ '-s',
+ '-l',
+ ]
+ auto_complete_flags_fmt = [
+ '-c',
+ '-diff',
+ '-l',
+ '-w',
+ '-debug',
+ '-verify',
+ ]
+ auto_complete_flags_bin2v = [
+ '-h',
+ '--help',
+ '-m',
+ '--module',
+ '-p',
+ '--prefix',
+ '-w',
+ '--write',
+ ]
+ auto_complete_flags_self = [
+ '-prod',
+ ]
+ auto_complete_compilers = [
+ 'cc',
+ 'gcc',
+ 'tcc',
+ 'tinyc',
+ 'clang',
+ 'mingw',
+ 'msvc',
+ ]
+)
+
+// auto_complete prints auto completion results back to the calling shell's completion system.
+// auto_complete acts as communication bridge between the calling shell and V's completions.
+fn auto_complete(args []string) {
+ if args.len <= 1 || args[0] != 'complete' {
+ if args.len == 1 {
+ eprintln('auto completion require arguments to work.')
+ } else {
+ eprintln('auto completion failed for "$args".')
+ }
+ exit(1)
+ }
+ sub := args[1]
+ sub_args := args[1..]
+ match sub {
+ 'setup' {
+ if sub_args.len <= 1 || sub_args[1] !in auto_complete_shells {
+ eprintln('please specify a shell to setup auto completion for ($auto_complete_shells).')
+ exit(1)
+ }
+ shell := sub_args[1]
+ mut setup := ''
+ match shell {
+ 'bash' {
+ setup = '
+_v_completions() {
+ local src
+ local limit
+ # Send all words up to the word the cursor is currently on
+ let limit=1+\$COMP_CWORD
+ src=\$($vexe complete bash \$(printf "%s\\n" \${COMP_WORDS[@]: 0:\$limit}))
+ if [[ \$? == 0 ]]; then
+ eval \${src}
+ #echo \${src}
+ fi
+}
+
+complete -o nospace -F _v_completions v
+'
+ }
+ 'fish' {
+ setup = '
+function __v_completions
+ # Send all words up to the one before the cursor
+ $vexe complete fish (commandline -cop)
+end
+complete -f -c v -a "(__v_completions)"
+'
+ }
+ 'zsh' {
+ setup = '
+#compdef v
+_v() {
+ local src
+ # Send all words up to the word the cursor is currently on
+ src=\$($vexe complete zsh \$(printf "%s\\n" \${(@)words[1,\$CURRENT]}))
+ if [[ \$? == 0 ]]; then
+ eval \${src}
+ #echo \${src}
+ fi
+}
+compdef _v v
+'
+ }
+ 'powershell' {
+ setup = '
+Register-ArgumentCompleter -Native -CommandName v -ScriptBlock {
+ param(\$commandName, \$wordToComplete, \$cursorPosition)
+ $vexe complete powershell "\$wordToComplete" | ForEach-Object {
+ [System.Management.Automation.CompletionResult]::new(\$_, \$_, \'ParameterValue\', \$_)
+ }
+}
+'
+ }
+ else {}
+ }
+ println(setup)
+ }
+ 'bash' {
+ if sub_args.len <= 1 {
+ exit(0)
+ }
+ mut lines := []string{}
+ list := auto_complete_request(sub_args[1..])
+ for entry in list {
+ lines << "COMPREPLY+=('$entry')"
+ }
+ println(lines.join('\n'))
+ }
+ 'fish', 'powershell' {
+ if sub_args.len <= 1 {
+ exit(0)
+ }
+ mut lines := []string{}
+ list := auto_complete_request(sub_args[1..])
+ for entry in list {
+ lines << '$entry'
+ }
+ println(lines.join('\n'))
+ }
+ 'zsh' {
+ if sub_args.len <= 1 {
+ exit(0)
+ }
+ mut lines := []string{}
+ list := auto_complete_request(sub_args[1..])
+ for entry in list {
+ lines << 'compadd -U -S' + '""' + ' -- ' + "'$entry';"
+ }
+ println(lines.join('\n'))
+ }
+ else {}
+ }
+ exit(0)
+}
+
+// append_separator_if_dir is a utility function that returns the input `path`
+// with an OS dependent path separator appended if the `path` is a directory.
+fn append_separator_if_dir(path string) string {
+	if os.is_dir(path) && !path.ends_with(os.path_separator) {
+		return path + os.path_separator
+	}
+	return path
+}
+
+// auto_complete_request returns a list of completions resolved from a full
+// argument list. Resolution order: top-level commands, then per-command
+// flags, then command-name prefixes, and finally file/directory completion.
+fn auto_complete_request(args []string) []string {
+	// Using space will ensure a uniform input in cases where the shell
+	// returns the completion input as a string (['v','run'] vs. ['v run']).
+	split_by := ' '
+	request := args.join(split_by)
+	mut list := []string{}
+	// new_part := request.ends_with('\n\n')
+	mut parts := request.trim_right(' ').split(split_by)
+	if parts.len <= 1 { // 'v <tab>' -> top level commands.
+		for command in auto_complete_commands {
+			list << command
+		}
+	} else {
+		part := parts.last().trim(' ')
+		mut parent_command := ''
+		// The nearest non-flag word going backwards is the active command.
+		for i := parts.len - 1; i >= 0; i-- {
+			if parts[i].starts_with('-') {
+				continue
+			}
+			parent_command = parts[i]
+			break
+		}
+		// A lone '-' yields all flags; '-<chars>' filters by prefix.
+		get_flags := fn (base []string, flag string) []string {
+			if flag.len == 1 { return base
+			} else { return base.filter(it.starts_with(flag))
+			}
+		}
+		if part.starts_with('-') { // 'v -<tab>' -> flags.
+			match parent_command {
+				'bin2v' { // 'v bin2v -<tab>'
+					list = get_flags(auto_complete_flags_bin2v, part)
+				}
+				'build' { // 'v build -<tab>' -> flags.
+					list = get_flags(auto_complete_flags, part)
+				}
+				'doc' { // 'v doc -<tab>' -> flags.
+					list = get_flags(auto_complete_flags_doc, part)
+				}
+				'fmt' { // 'v fmt -<tab>' -> flags.
+					list = get_flags(auto_complete_flags_fmt, part)
+				}
+				'self' { // 'v self -<tab>' -> flags.
+					list = get_flags(auto_complete_flags_self, part)
+				}
+				else {
+					for flag in auto_complete_flags {
+						if flag == part {
+							if flag == '-cc' { // 'v -cc <tab>' -> list of available compilers.
+								for compiler in auto_complete_compilers {
+									path := os.find_abs_path_of_executable(compiler) or { '' }
+									if path != '' {
+										list << compiler
+									}
+								}
+							}
+						} else if flag.starts_with(part) { // 'v -<char(s)><tab>' -> flags matching "<char(s)>".
+							list << flag
+						}
+					}
+				}
+			}
+		} else {
+			match part {
+				'help' { // 'v help <tab>' -> top level commands except "help".
+					list = auto_complete_commands.filter(it != part && it != 'complete')
+				}
+				else {
+					// 'v <char(s)><tab>' -> commands matching "<char(s)>".
+					// Don't include if part matches a full command - instead go to path completion below.
+					for command in auto_complete_commands {
+						if part != command && command.starts_with(part) {
+							list << command
+						}
+					}
+				}
+			}
+		}
+		// Nothing of value was found.
+		// Mimic shell dir and file completion
+		if list.len == 0 {
+			mut ls_path := '.'
+			mut collect_all := part in auto_complete_commands
+			mut path_complete := false
+			if part.ends_with(os.path_separator) || part == '.' || part == '..' {
+				// 'v <command>(.*/$|.|..)<tab>' -> output full directory list
+				ls_path = '.' + os.path_separator + part
+				collect_all = true
+			} else if !collect_all && part.contains(os.path_separator) && os.is_dir(os.dir(part)) {
+				// 'v <command>(.*/.* && os.is_dir)<tab>' -> output completion friendly directory list
+				ls_path = os.dir(part)
+				path_complete = true
+			}
+			entries := os.ls(ls_path) or { return list }
+			last := part.all_after_last(os.path_separator)
+			if path_complete {
+				path := part.all_before_last(os.path_separator)
+				for entry in entries {
+					if entry.starts_with(last) {
+						list << append_separator_if_dir(os.join_path(path, entry))
+					}
+				}
+				// If only one possible file - send full path to completion system.
+				// Please note that this might be bash specific - needs more testing.
+				// NOTE(review): `list = [list[0]]` is a no-op as written — confirm intent.
+				if list.len == 1 {
+					list = [list[0]]
+				}
+			} else {
+				for entry in entries {
+					if collect_all {
+						list << append_separator_if_dir(entry)
+					} else {
+						if entry.starts_with(last) {
+							list << append_separator_if_dir(entry)
+						}
+					}
+				}
+			}
+		}
+	}
+	return list
+}
+
+// main forwards the CLI arguments (without the program name)
+// to the completion dispatcher.
+fn main() {
+	args := os.args[1..]
+	// println('"$args"')
+	auto_complete(args)
+}
diff --git a/v_windows/v/old/cmd/tools/vcreate.v b/v_windows/v/old/cmd/tools/vcreate.v
new file mode 100644
index 0000000..e147979
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vcreate.v
@@ -0,0 +1,186 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license that can be found in the LICENSE file.
+module main
+
+// This module follows a similar convention to Rust: `init` makes the
+// structure of the program in the _current_ directory, while `new`
+// makes the program structure in a _sub_ directory. Besides that, the
+// functionality is essentially the same.
+import os
+
+// Create holds the metadata collected for a new/initialized project,
+// used to generate v.mod, the main source file and .gitignore.
+struct Create {
+mut:
+	name        string
+	description string
+	version     string
+	license     string
+}
+
+// cerror prints an error message to stderr; callers decide the exit code.
+fn cerror(e string) {
+	eprintln('\nerror: $e')
+}
+
+// check_name validates and normalizes a project name: it rejects empty
+// names (exits), lowercases capitalized names, and replaces spaces with
+// underscores — warning the user about each transformation.
+fn check_name(name string) string {
+	if name.trim_space().len == 0 {
+		cerror('project name cannot be empty')
+		exit(1)
+	}
+	if name.is_title() {
+		mut cname := name.to_lower()
+		if cname.contains(' ') {
+			cname = cname.replace(' ', '_')
+		}
+		eprintln('warning: the project name cannot be capitalized, the name will be changed to `$cname`')
+		return cname
+	}
+	if name.contains(' ') {
+		cname := name.replace(' ', '_')
+		eprintln('warning: the project name cannot contain spaces, the name will be changed to `$cname`')
+		return cname
+	}
+	return name
+}
+
+// vmod_content renders the v.mod manifest for the given project metadata,
+// ending with a trailing newline.
+fn vmod_content(c Create) string {
+	return [
+		'Module {',
+		"	name: '$c.name'",
+		"	description: '$c.description'",
+		"	version: '$c.version'",
+		"	license: '$c.license'",
+		'	dependencies: []',
+		'}',
+		'',
+	].join('\n')
+}
+
+// main_content returns the source of the generated hello-world main file.
+fn main_content() string {
+	return [
+		'module main\n',
+		'fn main() {',
+		"	println('Hello World!')",
+		'}',
+		'',
+	].join('\n')
+}
+
+// gen_gitignore returns a default .gitignore that excludes the built
+// binary (named after the project) and common native library artifacts.
+fn gen_gitignore(name string) string {
+	return [
+		'# Binaries for programs and plugins',
+		'main',
+		'$name',
+		'*.exe',
+		'*.exe~',
+		'*.so',
+		'*.dylib',
+		'*.dll',
+		'',
+	].join('\n')
+}
+
+// write_vmod writes v.mod — inside the new project folder when `new`
+// is true (v new), or in the current directory otherwise (v init).
+// Exits with code 1 when the file cannot be created.
+fn (c &Create) write_vmod(new bool) {
+	vmod_path := if new { '$c.name/v.mod' } else { 'v.mod' }
+	mut vmod := os.create(vmod_path) or {
+		cerror(err.msg)
+		exit(1)
+	}
+	vmod.write_string(vmod_content(c)) or { panic(err) }
+	vmod.close()
+}
+
+// write_main writes the generated <name>.v main file. For `v init`
+// (new == false) it is a no-op when a main file already exists, so an
+// existing project is never overwritten. Exits with code 2 on create failure.
+fn (c &Create) write_main(new bool) {
+	if !new && (os.exists('${c.name}.v') || os.exists('src/${c.name}.v')) {
+		return
+	}
+	main_path := if new { '$c.name/${c.name}.v' } else { '${c.name}.v' }
+	mut mainfile := os.create(main_path) or {
+		cerror(err.msg)
+		exit(2)
+	}
+	mainfile.write_string(main_content()) or { panic(err) }
+	mainfile.close()
+}
+
+// create_git_repo initializes a git repository in `dir` (unless one exists)
+// and writes a default .gitignore (unless one exists). A failed `git init`
+// exits with code 4; a failed .gitignore write is silently ignored.
+fn (c &Create) create_git_repo(dir string) {
+	// Create Git Repo and .gitignore file
+	if !os.is_dir('$dir/.git') {
+		res := os.execute('git init $dir')
+		if res.exit_code != 0 {
+			cerror('Unable to create git repo')
+			exit(4)
+		}
+	}
+	if !os.exists('$dir/.gitignore') {
+		mut fl := os.create('$dir/.gitignore') or {
+			// We don't really need a .gitignore, it's just a nice-to-have
+			return
+		}
+		fl.write_string(gen_gitignore(c.name)) or { panic(err) }
+		fl.close()
+	}
+}
+
+// create implements `v new`: it collects the project name, description,
+// version and license (from args or interactively), validates the name,
+// then scaffolds the project in a new sub directory.
+fn create(args []string) {
+	mut c := Create{}
+	c.name = check_name(if args.len > 0 { args[0] } else { os.input('Input your project name: ') })
+	if c.name == '' {
+		cerror('project name cannot be empty')
+		exit(1)
+	}
+	if c.name.contains('-') {
+		// hyphens are not valid in V module names
+		cerror('"$c.name" should not contain hyphens')
+		exit(1)
+	}
+	if os.is_dir(c.name) {
+		cerror('$c.name folder already exists')
+		exit(3)
+	}
+	c.description = if args.len > 1 { args[1] } else { os.input('Input your project description: ') }
+	default_version := '0.0.0'
+	c.version = os.input('Input your project version: ($default_version) ')
+	if c.version == '' {
+		c.version = default_version
+	}
+	default_license := 'MIT'
+	c.license = os.input('Input your project license: ($default_license) ')
+	if c.license == '' {
+		c.license = default_license
+	}
+	println('Initialising ...')
+	os.mkdir(c.name) or { panic(err) }
+	c.write_vmod(true)
+	c.write_main(true)
+	c.create_git_repo(c.name)
+}
+
+// init_project implements `v init`: it scaffolds a module in the current
+// directory, named after the directory. Refuses to run (exit 3) when a
+// v.mod already exists.
+fn init_project() {
+	if os.exists('v.mod') {
+		cerror('`v init` cannot be run on existing v modules')
+		exit(3)
+	}
+	mut c := Create{}
+	c.name = check_name(os.file_name(os.getwd()))
+	c.description = ''
+	c.write_vmod(false)
+	c.write_main(false)
+	c.create_git_repo('.')
+
+	println('Change the description of your project in `v.mod`')
+}
+
+// main dispatches to `v new` (sub directory) or `v init` (current
+// directory); any other sub command exits with an error.
+// NOTE(review): os.args[1] will panic when no sub command is given — confirm
+// that the v wrapper always passes one.
+fn main() {
+	cmd := os.args[1]
+	match cmd {
+		'new' {
+			create(os.args[2..])
+		}
+		'init' {
+			init_project()
+		}
+		else {
+			cerror('unknown command: $cmd')
+			exit(1)
+		}
+	}
+	println('Complete!')
+}
diff --git a/v_windows/v/old/cmd/tools/vcreate_test.v b/v_windows/v/old/cmd/tools/vcreate_test.v
new file mode 100644
index 0000000..fe829e6
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vcreate_test.v
@@ -0,0 +1,79 @@
+import os
+
+const test_path = 'vcreate_test'
+
+// init_and_check runs `v init` in the current directory and asserts the
+// exact content of the generated main file, v.mod and .gitignore.
+// Assumes the cwd is a scratch directory named `vcreate_test`.
+fn init_and_check() ? {
+	vexe := @VEXE
+	os.execute_or_exit('$vexe init')
+
+	assert os.read_file('vcreate_test.v') ? == [
+		'module main\n',
+		'fn main() {',
+		"	println('Hello World!')",
+		'}',
+		'',
+	].join('\n')
+
+	assert os.read_file('v.mod') ? == [
+		'Module {',
+		"	name: 'vcreate_test'",
+		"	description: ''",
+		"	version: ''",
+		"	license: ''",
+		'	dependencies: []',
+		'}',
+		'',
+	].join('\n')
+
+	assert os.read_file('.gitignore') ? == [
+		'# Binaries for programs and plugins',
+		'main',
+		'vcreate_test',
+		'*.exe',
+		'*.exe~',
+		'*.so',
+		'*.dylib',
+		'*.dll',
+		'',
+	].join('\n')
+}
+
+// test_v_init checks `v init` in a fresh (non-git) scratch directory.
+fn test_v_init() ? {
+	dir := os.join_path(os.temp_dir(), test_path)
+	os.rmdir_all(dir) or {}
+	os.mkdir(dir) or {}
+	defer {
+		os.rmdir_all(dir) or {}
+	}
+	os.chdir(dir)
+
+	init_and_check() ?
+}
+
+// test_v_init_in_git_dir checks `v init` inside a pre-existing git repo;
+// the generated files must be identical to the non-git case.
+fn test_v_init_in_git_dir() ? {
+	dir := os.join_path(os.temp_dir(), test_path)
+	os.rmdir_all(dir) or {}
+	os.mkdir(dir) or {}
+	defer {
+		os.rmdir_all(dir) or {}
+	}
+	os.chdir(dir)
+	os.execute_or_exit('git init .')
+	init_and_check() ?
+}
+
+// test_v_init_no_overwrite_gitignore verifies that `v init` leaves an
+// existing .gitignore untouched.
+fn test_v_init_no_overwrite_gitignore() ? {
+	dir := os.join_path(os.temp_dir(), test_path)
+	os.rmdir_all(dir) or {}
+	os.mkdir(dir) or {}
+	os.write_file('$dir/.gitignore', 'blah') ?
+	defer {
+		os.rmdir_all(dir) or {}
+	}
+	os.chdir(dir)
+
+	vexe := @VEXE
+	os.execute_or_exit('$vexe init')
+
+	assert os.read_file('.gitignore') ? == 'blah'
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/html.v b/v_windows/v/old/cmd/tools/vdoc/html.v
new file mode 100644
index 0000000..e253898
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/html.v
@@ -0,0 +1,553 @@
+module main
+
+import os
+import net.urllib
+import strings
+import markdown
+import regex
+import v.scanner
+import v.ast
+import v.token
+import v.doc
+import v.pref
+
+const (
+ css_js_assets = ['doc.css', 'normalize.css', 'doc.js', 'dark-mode.js']
+ res_path = os.resource_abs_path('resources')
+ favicons_path = os.join_path(res_path, 'favicons')
+ link_svg = '<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M3.9 12c0-1.71 1.39-3.1 3.1-3.1h4V7H7c-2.76 0-5 2.24-5 5s2.24 5 5 5h4v-1.9H7c-1.71 0-3.1-1.39-3.1-3.1zM8 13h8v-2H8v2zm9-6h-4v1.9h4c1.71 0 3.1 1.39 3.1 3.1s-1.39 3.1-3.1 3.1h-4V17h4c2.76 0 5-2.24 5-5s-2.24-5-5-5z"/></svg>'
+ html_content = '<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="UTF-8">
+ <meta http-equiv="x-ua-compatible" content="IE=edge" />
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <title>{{ title }} | vdoc</title>
+ <link rel="preconnect" href="https://fonts.gstatic.com">
+ <link href="https://fonts.googleapis.com/css2?family=Roboto:wght@400;500;700&display=swap" rel="stylesheet">
+ <link href="https://fonts.googleapis.com/css2?family=Work+Sans:wght@400;500;600&display=swap" rel="stylesheet">
+ <link rel="apple-touch-icon" sizes="180x180" href="apple-touch-icon.png">
+ <link rel="icon" type="image/png" sizes="32x32" href="favicon-32x32.png">
+ <link rel="icon" type="image/png" sizes="16x16" href="favicon-16x16.png">
+ <link rel="manifest" href="site.webmanifest">
+ <link rel="mask-icon" href="safari-pinned-tab.svg" color="#5bbad5">
+ <meta name="msapplication-TileColor" content="#da532c">
+ <meta name="theme-color" content="#ffffff">
+ {{ head_assets }}
+ </head>
+ <body>
+ <div id="page">
+ <header class="doc-nav hidden">
+ <div class="heading-container">
+ <div class="heading">
+ <div class="info">
+ <div class="module">{{ head_name }}</div>
+ <div class="toggle-version-container">
+ <span>{{ version }}</span>
+ <div id="dark-mode-toggle" role="switch" aria-checked="false" aria-label="Toggle dark mode">{{ light_icon }}{{ dark_icon }}</div>
+ </div>
+ {{ menu_icon }}
+ </div>
+ <input type="text" id="search" placeholder="Search... (beta)" autocomplete="off">
+ </div>
+ </div>
+ <nav class="search hidden"></nav>
+ <nav class="content hidden">
+ <ul>
+ {{ toc_links }}
+ </ul>
+ </nav>
+ </header>
+ <div class="doc-scrollview">
+ <div class="doc-container">
+ <div class="doc-content">
+{{ contents }}
+ <div class="footer">
+ {{ footer_content }}
+ </div>
+ </div>
+ {{ right_content }}
+ </div>
+ </div>
+ </div>
+ {{ footer_assets }}
+ <script async src="search_index.js" type="text/javascript"></script>
+ </body>
+</html>'
+)
+
+enum HighlightTokenTyp {
+ unone
+ boolean
+ builtin
+ char
+ comment
+ function
+ keyword
+ name
+ number
+ operator
+ punctuation
+ string
+ symbol
+ none_
+ module_
+ prefix
+}
+
+struct SearchModuleResult {
+ description string
+ link string
+}
+
+struct SearchResult {
+ prefix string
+ badge string
+ description string
+ link string
+}
+
+fn (vd VDoc) render_search_index(out Output) {
+ mut js_search_index := strings.new_builder(200)
+ mut js_search_data := strings.new_builder(200)
+ js_search_index.write_string('var searchModuleIndex = [')
+ js_search_data.write_string('var searchModuleData = [')
+ for i, title in vd.search_module_index {
+ data := vd.search_module_data[i]
+ js_search_index.write_string('"$title",')
+ js_search_data.write_string('["$data.description","$data.link"],')
+ }
+ js_search_index.writeln('];')
+ js_search_index.write_string('var searchIndex = [')
+ js_search_data.writeln('];')
+ js_search_data.write_string('var searchData = [')
+ for i, title in vd.search_index {
+ data := vd.search_data[i]
+ js_search_index.write_string('"$title",')
+ // array instead of object to reduce file size
+ js_search_data.write_string('["$data.badge","$data.description","$data.link","$data.prefix"],')
+ }
+ js_search_index.writeln('];')
+ js_search_data.writeln('];')
+ out_file_path := os.join_path(out.path, 'search_index.js')
+ os.write_file(out_file_path, js_search_index.str() + js_search_data.str()) or { panic(err) }
+}
+
+fn (mut vd VDoc) render_static_html(out Output) {
+ vd.assets = map{
+ 'doc_css': vd.get_resource(css_js_assets[0], out)
+ 'normalize_css': vd.get_resource(css_js_assets[1], out)
+ 'doc_js': vd.get_resource(css_js_assets[2], out)
+ 'dark_mode_js': vd.get_resource(css_js_assets[3], out)
+ 'light_icon': vd.get_resource('light.svg', out)
+ 'dark_icon': vd.get_resource('dark.svg', out)
+ 'menu_icon': vd.get_resource('menu.svg', out)
+ 'arrow_icon': vd.get_resource('arrow.svg', out)
+ }
+}
+
+fn (vd VDoc) get_resource(name string, out Output) string {
+ cfg := vd.cfg
+ path := os.join_path(res_path, name)
+ mut res := os.read_file(path) or { panic('vdoc: could not read $path') }
+ /*
+ if minify {
+ if name.ends_with('.js') {
+ res = js_compress(res)
+ } else {
+ res = res.split_into_lines().map(it.trim_space()).join('')
+ }
+ }
+ */
+ // TODO: Make SVG inline for now
+ if cfg.inline_assets || path.ends_with('.svg') {
+ return res
+ } else {
+ output_path := os.join_path(out.path, name)
+ if !os.exists(output_path) {
+ println('Generating $out.typ in "$output_path"')
+ os.write_file(output_path, res) or { panic(err) }
+ }
+ return name
+ }
+}
+
+fn (mut vd VDoc) collect_search_index(out Output) {
+ cfg := vd.cfg
+ for doc in vd.docs {
+ mod := doc.head.name
+ vd.search_module_index << mod
+ comments := if cfg.include_examples {
+ doc.head.merge_comments()
+ } else {
+ doc.head.merge_comments_without_examples()
+ }
+ vd.search_module_data << SearchModuleResult{
+ description: trim_doc_node_description(comments)
+ link: vd.get_file_name(mod, out)
+ }
+ for _, dn in doc.contents {
+ vd.create_search_results(mod, dn, out)
+ }
+ }
+}
+
+fn (mut vd VDoc) create_search_results(mod string, dn doc.DocNode, out Output) {
+ cfg := vd.cfg
+ if dn.kind == .const_group {
+ return
+ }
+ comments := if cfg.include_examples {
+ dn.merge_comments()
+ } else {
+ dn.merge_comments_without_examples()
+ }
+ dn_description := trim_doc_node_description(comments)
+ vd.search_index << dn.name
+ vd.search_data << SearchResult{
+ prefix: if dn.parent_name != '' { '$dn.kind ($dn.parent_name)' } else { '$dn.kind ' }
+ description: dn_description
+ badge: mod
+ link: vd.get_file_name(mod, out) + '#' + get_node_id(dn)
+ }
+ for child in dn.children {
+ vd.create_search_results(mod, child, out)
+ }
+}
+
+fn (vd VDoc) write_content(cn &doc.DocNode, d &doc.Doc, mut hw strings.Builder) {
+ cfg := vd.cfg
+ base_dir := os.dir(os.real_path(cfg.input_path))
+ file_path_name := if cfg.is_multi {
+ cn.file_path.replace('$base_dir/', '')
+ } else {
+ os.file_name(cn.file_path)
+ }
+ src_link := get_src_link(vd.manifest.repo_url, file_path_name, cn.pos.line_nr + 1)
+ if cn.content.len != 0 || (cn.name == 'Constants') {
+ hw.write_string(doc_node_html(cn, src_link, false, cfg.include_examples, d.table))
+ }
+ for child in cn.children {
+ child_file_path_name := child.file_path.replace('$base_dir/', '')
+ child_src_link := get_src_link(vd.manifest.repo_url, child_file_path_name,
+ child.pos.line_nr + 1)
+ hw.write_string(doc_node_html(child, child_src_link, false, cfg.include_examples,
+ d.table))
+ }
+}
+
+fn (vd VDoc) gen_html(d doc.Doc) string {
+ cfg := vd.cfg
+ mut symbols_toc := strings.new_builder(200)
+ mut modules_toc := strings.new_builder(200)
+ mut contents := strings.new_builder(200)
+ dcs_contents := d.contents.arr()
+ // generate toc first
+ contents.writeln(doc_node_html(d.head, '', true, cfg.include_examples, d.table))
+ if is_module_readme(d.head) {
+ write_toc(d.head, mut symbols_toc)
+ }
+ for cn in dcs_contents {
+ vd.write_content(&cn, &d, mut contents)
+ write_toc(cn, mut symbols_toc)
+ } // write head
+ // write css
+ version := if vd.manifest.version.len != 0 { vd.manifest.version } else { '' }
+ header_name := if cfg.is_multi && vd.docs.len > 1 {
+ os.file_name(os.real_path(cfg.input_path))
+ } else {
+ d.head.name
+ }
+ // write nav1
+ if cfg.is_multi || vd.docs.len > 1 {
+ mut submod_prefix := ''
+ for i, dc in vd.docs {
+ if i - 1 >= 0 && dc.head.name.starts_with(submod_prefix + '.') {
+ continue
+ }
+ names := dc.head.name.split('.')
+ submod_prefix = if names.len > 1 { names[0] } else { dc.head.name }
+ mut href_name := './${dc.head.name}.html'
+ if (cfg.is_vlib && dc.head.name == 'builtin' && !cfg.include_readme)
+ || dc.head.name == 'README' {
+ href_name = './index.html'
+ } else if submod_prefix !in vd.docs.map(it.head.name) {
+ href_name = '#'
+ }
+ submodules := vd.docs.filter(it.head.name.starts_with(submod_prefix + '.'))
+ dropdown := if submodules.len > 0 { vd.assets['arrow_icon'] } else { '' }
+ active_class := if dc.head.name == d.head.name { ' active' } else { '' }
+ modules_toc.write_string('<li class="open$active_class"><div class="menu-row">$dropdown<a href="$href_name">$submod_prefix</a></div>')
+ for j, cdoc in submodules {
+ if j == 0 {
+ modules_toc.write_string('<ul>')
+ }
+ submod_name := cdoc.head.name.all_after(submod_prefix + '.')
+ sub_selected_classes := if cdoc.head.name == d.head.name {
+ ' class="active"'
+ } else {
+ ''
+ }
+ modules_toc.write_string('<li$sub_selected_classes><a href="./${cdoc.head.name}.html">$submod_name</a></li>')
+ if j == submodules.len - 1 {
+ modules_toc.write_string('</ul>')
+ }
+ }
+ modules_toc.write_string('</li>')
+ }
+ }
+ modules_toc_str := modules_toc.str()
+ symbols_toc_str := symbols_toc.str()
+ result := html_content.replace('{{ title }}', d.head.name).replace('{{ head_name }}',
+ header_name).replace('{{ version }}', version).replace('{{ light_icon }}', vd.assets['light_icon']).replace('{{ dark_icon }}',
+ vd.assets['dark_icon']).replace('{{ menu_icon }}', vd.assets['menu_icon']).replace('{{ head_assets }}',
+ if cfg.inline_assets {
+ '\n${tabs[0]}<style>' + vd.assets['doc_css'] + '</style>\n${tabs[0]}<style>' +
+ vd.assets['normalize_css'] + '</style>\n${tabs[0]}<script>' +
+ vd.assets['dark_mode_js'] + '</script>'
+ } else {
+ '\n${tabs[0]}<link rel="stylesheet" href="' + vd.assets['doc_css'] +
+ '" />\n${tabs[0]}<link rel="stylesheet" href="' + vd.assets['normalize_css'] +
+ '" />\n${tabs[0]}<script src="' + vd.assets['dark_mode_js'] + '"></script>'
+ }).replace('{{ toc_links }}', if cfg.is_multi || vd.docs.len > 1 {
+ modules_toc_str
+ } else {
+ symbols_toc_str
+ }).replace('{{ contents }}', contents.str()).replace('{{ right_content }}', if cfg.is_multi
+ && vd.docs.len > 1 && d.head.name != 'README' {
+ '<div class="doc-toc"><ul>' + symbols_toc_str + '</ul></div>'
+ } else {
+ ''
+ }).replace('{{ footer_content }}', gen_footer_text(d, !cfg.no_timestamp)).replace('{{ footer_assets }}',
+ if cfg.inline_assets {
+ '<script>' + vd.assets['doc_js'] + '</script>'
+ } else {
+ '<script src="' + vd.assets['doc_js'] + '"></script>'
+ })
+ return result
+}
+
+fn get_src_link(repo_url string, file_name string, line_nr int) string {
+ mut url := urllib.parse(repo_url) or { return '' }
+ if url.path.len <= 1 || file_name.len == 0 {
+ return ''
+ }
+ url.path = url.path.trim_right('/') + match url.host {
+ 'github.com' { '/blob/master/$file_name' }
+ 'gitlab.com' { '/-/blob/master/$file_name' }
+ 'git.sir.ht' { '/tree/master/$file_name' }
+ else { '' }
+ }
+ if url.path == '/' {
+ return ''
+ }
+ url.fragment = 'L$line_nr'
+ return url.str()
+}
+
+fn html_highlight(code string, tb &ast.Table) string {
+ builtin := ['bool', 'string', 'i8', 'i16', 'int', 'i64', 'i128', 'byte', 'u16', 'u32', 'u64',
+ 'u128', 'rune', 'f32', 'f64', 'int_literal', 'float_literal', 'byteptr', 'voidptr', 'any']
+ highlight_code := fn (tok token.Token, typ HighlightTokenTyp) string {
+ lit := if typ in [.unone, .operator, .punctuation] {
+ tok.kind.str()
+ } else if typ == .string {
+ "'$tok.lit'"
+ } else if typ == .char {
+ '`$tok.lit`'
+ } else {
+ tok.lit
+ }
+ if typ in [.unone, .name] {
+ return lit
+ }
+ return '<span class="token $typ">$lit</span>'
+ }
+ mut s := scanner.new_scanner(code, .parse_comments, &pref.Preferences{})
+ mut tok := s.scan()
+ mut next_tok := s.scan()
+ mut buf := strings.new_builder(200)
+ mut i := 0
+ for i < code.len {
+ if i == tok.pos {
+ mut tok_typ := HighlightTokenTyp.unone
+ match tok.kind {
+ .name {
+ if tok.lit in builtin || tb.known_type(tok.lit) {
+ tok_typ = .builtin
+ } else if next_tok.kind == .lcbr {
+ tok_typ = .symbol
+ } else if next_tok.kind == .lpar {
+ tok_typ = .function
+ } else {
+ tok_typ = .name
+ }
+ }
+ .comment {
+ tok_typ = .comment
+ }
+ .chartoken {
+ tok_typ = .char
+ }
+ .string {
+ tok_typ = .string
+ }
+ .number {
+ tok_typ = .number
+ }
+ .key_true, .key_false {
+ tok_typ = .boolean
+ }
+ .lpar, .lcbr, .rpar, .rcbr, .lsbr, .rsbr, .semicolon, .colon, .comma, .dot {
+ tok_typ = .punctuation
+ }
+ else {
+ if token.is_key(tok.lit) || token.is_decl(tok.kind) {
+ tok_typ = .keyword
+ } else if tok.kind == .decl_assign || tok.kind.is_assign() || tok.is_unary()
+ || tok.kind.is_relational() || tok.kind.is_infix() {
+ tok_typ = .operator
+ }
+ }
+ }
+ buf.write_string(highlight_code(tok, tok_typ))
+ if next_tok.kind != .eof {
+ i = tok.pos + tok.len
+ tok = next_tok
+ next_tok = s.scan()
+ } else {
+ break
+ }
+ } else {
+ buf.write_b(code[i])
+ i++
+ }
+ }
+ return buf.str()
+}
+
+fn doc_node_html(dn doc.DocNode, link string, head bool, include_examples bool, tb &ast.Table) string {
+ mut dnw := strings.new_builder(200)
+ head_tag := if head { 'h1' } else { 'h2' }
+ comments := dn.merge_comments_without_examples()
+ // Allow README.md to go through unescaped except for script tags
+ escaped_html := if head && is_module_readme(dn) {
+ // Strip markdown [TOC] directives, since we generate our own.
+ stripped := comments.replace('[TOC]', '')
+ markdown_escape_script_tags(stripped)
+ } else {
+ html_tag_escape(comments)
+ }
+ md_content := markdown.to_html(escaped_html)
+ highlighted_code := html_highlight(dn.content, tb)
+ node_class := if dn.kind == .const_group { ' const' } else { '' }
+ sym_name := get_sym_name(dn)
+ has_deprecated := 'deprecated' in dn.tags
+ mut tags := dn.tags.filter(it != 'deprecated')
+ tags.sort()
+ mut node_id := get_node_id(dn)
+ mut hash_link := if !head { ' <a href="#$node_id">#</a>' } else { '' }
+ if head && is_module_readme(dn) {
+ node_id = 'readme_$node_id'
+ hash_link = ' <a href="#$node_id">#</a>'
+ }
+ dnw.writeln('${tabs[1]}<section id="$node_id" class="doc-node$node_class">')
+ if dn.name.len > 0 {
+ if dn.kind == .const_group {
+ dnw.write_string('${tabs[2]}<div class="title"><$head_tag>$sym_name$hash_link</$head_tag>')
+ } else {
+ dnw.write_string('${tabs[2]}<div class="title"><$head_tag>$dn.kind $sym_name$hash_link</$head_tag>')
+ }
+ if link.len != 0 {
+ dnw.write_string('<a class="link" rel="noreferrer" target="_blank" href="$link">$link_svg</a>')
+ }
+ dnw.write_string('</div>')
+ }
+ if tags.len > 0 || has_deprecated {
+ mut attributes := if has_deprecated {
+ '<div class="attribute attribute-deprecated">deprecated</div>'
+ } else {
+ ''
+ }
+ attributes += tags.map('<div class="attribute">$it</div>').join('')
+ dnw.writeln('<div class="attributes">$attributes</div>')
+ }
+ if !head && dn.content.len > 0 {
+ dnw.writeln('<pre class="signature"><code>$highlighted_code</code></pre>')
+ }
+ // do not mess with md_content further, its formatting is important, just output it 1:1 !
+ dnw.writeln('$md_content\n')
+ // Write examples if any found
+ examples := dn.examples()
+ if include_examples && examples.len > 0 {
+ example_title := if examples.len > 1 { 'Examples' } else { 'Example' }
+ dnw.writeln('<section class="doc-node examples"><h4>$example_title</h4>')
+ for example in examples {
+ // hl_example := html_highlight(example, tb)
+ dnw.writeln('<pre><code class="language-v">$example</code></pre>')
+ }
+ dnw.writeln('</section>')
+ }
+ dnw.writeln('</section>')
+ dnw_str := dnw.str()
+ return dnw_str
+}
+
+fn html_tag_escape(str string) string {
+ excaped_string := str.replace_each(['<', '&lt;', '>', '&gt;'])
+ mut re := regex.regex_opt(r'`.+[(&lt;)(&gt;)].+`') or { regex.RE{} }
+ if re.find_all_str(excaped_string).len > 0 {
+ return str
+ }
+ return excaped_string
+}
+
+/*
+fn js_compress(str string) string {
+ mut js := strings.new_builder(200)
+ lines := str.split_into_lines()
+ rules := [') {', ' = ', ', ', '{ ', ' }', ' (', '; ', ' + ', ' < ', ' - ', ' || ', ' var',
+ ': ', ' >= ', ' && ', ' else if', ' === ', ' !== ', ' else ']
+ clean := ['){', '=', ',', '{', '}', '(', ';', '+', '<', '-', '||', 'var', ':', '>=', '&&',
+ 'else if', '===', '!==', 'else']
+ for line in lines {
+ mut trimmed := line.trim_space()
+ if trimmed.starts_with('//') || (trimmed.starts_with('/*') && trimmed.ends_with('*/')) {
+ continue
+ }
+ for i in 0 .. rules.len - 1 {
+ trimmed = trimmed.replace(rules[i], clean[i])
+ }
+ js.write_string(trimmed)
+ }
+ js_str := js.str()
+ return js_str
+}
+*/
+fn write_toc(dn doc.DocNode, mut toc strings.Builder) {
+ mut toc_slug := if dn.name.len == 0 || dn.content.len == 0 { '' } else { slug(dn.name) }
+ if toc_slug == '' && dn.children.len > 0 {
+ if dn.children[0].name == '' {
+ toc_slug = slug(dn.name)
+ } else {
+ toc_slug = slug(dn.name + '.' + dn.children[0].name)
+ }
+ }
+ if is_module_readme(dn) {
+ if dn.comments.len == 0 || (dn.comments.len > 0 && dn.comments[0].text.len == 0) {
+ return
+ }
+ toc.write_string('<li class="open"><a href="#readme_$toc_slug">README</a>')
+ } else if dn.name != 'Constants' {
+ toc.write_string('<li class="open"><a href="#$toc_slug">$dn.kind $dn.name</a>')
+ toc.writeln(' <ul>')
+ for child in dn.children {
+ cname := dn.name + '.' + child.name
+ toc.writeln('<li><a href="#${slug(cname)}">$child.kind $child.name</a></li>')
+ }
+ toc.writeln('</ul>')
+ } else {
+ toc.write_string('<li class="open"><a href="#$toc_slug">$dn.name</a>')
+ }
+ toc.writeln('</li>')
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/html_tag_escape_test.v b/v_windows/v/old/cmd/tools/vdoc/html_tag_escape_test.v
new file mode 100644
index 0000000..64a7d1c
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/html_tag_escape_test.v
@@ -0,0 +1,6 @@
+module main
+
+fn test_html_tag_escape() {
+ assert html_tag_escape('<abc>') == '&lt;abc&gt;'
+ assert html_tag_escape('`<abc>`') == '`<abc>`'
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/markdown.v b/v_windows/v/old/cmd/tools/vdoc/markdown.v
new file mode 100644
index 0000000..cc24ad2
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/markdown.v
@@ -0,0 +1,55 @@
+module main
+
+import strings
+import v.doc
+
+fn markdown_escape_script_tags(str string) string {
+ return str.replace_each(['<script>', '`', '</script>', '`'])
+}
+
+fn (vd VDoc) gen_markdown(d doc.Doc, with_toc bool) string {
+ mut hw := strings.new_builder(200)
+ mut cw := strings.new_builder(200)
+ hw.writeln('# $d.head.content\n')
+ if d.head.comments.len > 0 {
+ comments := if vd.cfg.include_examples {
+ d.head.merge_comments()
+ } else {
+ d.head.merge_comments_without_examples()
+ }
+ hw.writeln('$comments\n')
+ }
+ if with_toc {
+ hw.writeln('## Contents')
+ }
+ vd.write_markdown_content(d.contents.arr(), mut cw, mut hw, 0, with_toc)
+ footer_text := gen_footer_text(d, !vd.cfg.no_timestamp)
+ cw.writeln('#### $footer_text')
+ return hw.str() + '\n' + cw.str()
+}
+
+fn (vd VDoc) write_markdown_content(contents []doc.DocNode, mut cw strings.Builder, mut hw strings.Builder, indent int, with_toc bool) {
+ for cn in contents {
+ if with_toc && cn.name.len > 0 {
+ hw.writeln(' '.repeat(2 * indent) + '- [${slug(cn.name)}](#$cn.name)')
+ cw.writeln('## $cn.name')
+ }
+ if cn.content.len > 0 {
+ comments := cn.merge_comments_without_examples()
+ cw.writeln('```v\n$cn.content\n```\n$comments\n')
+ // Write examples if any found
+ examples := cn.examples()
+ if vd.cfg.include_examples && examples.len > 0 {
+ example_title := if examples.len > 1 { 'Examples' } else { 'Example' }
+ cw.writeln('$example_title\n```v\n')
+ for example in examples {
+ cw.writeln('$example\n')
+ }
+ cw.writeln('```\n')
+ }
+ cw.writeln(r'[[Return to contents]](#Contents)')
+ cw.writeln('')
+ }
+ vd.write_markdown_content(cn.children, mut cw, mut hw, indent + 1, with_toc)
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/arrow.svg b/v_windows/v/old/cmd/tools/vdoc/resources/arrow.svg
new file mode 100644
index 0000000..2a0456f
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/arrow.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" class="dropdown-arrow" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M7 10l5 5 5-5z"/></svg> \ No newline at end of file
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/dark-mode.js b/v_windows/v/old/cmd/tools/vdoc/resources/dark-mode.js
new file mode 100644
index 0000000..075dbb5
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/dark-mode.js
@@ -0,0 +1,6 @@
+(function() {
+ var html = document.getElementsByTagName('html')[0];
+ if (localStorage.getItem('dark-mode') === 'true') {
+ html.classList.add('dark');
+ }
+})();
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/dark.svg b/v_windows/v/old/cmd/tools/vdoc/resources/dark.svg
new file mode 100644
index 0000000..2067d05
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/dark.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" class="dark-icon" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><g><rect fill="none" height="24" width="24"/></g><g><g><g><path d="M11.1,12.08C8.77,7.57,10.6,3.6,11.63,2.01C6.27,2.2,1.98,6.59,1.98,12c0,0.14,0.02,0.28,0.02,0.42 C2.62,12.15,3.29,12,4,12c1.66,0,3.18,0.83,4.1,2.15C9.77,14.63,11,16.17,11,18c0,1.52-0.87,2.83-2.12,3.51 c0.98,0.32,2.03,0.5,3.11,0.5c3.5,0,6.58-1.8,8.37-4.52C18,17.72,13.38,16.52,11.1,12.08z"/></g><path d="M7,16l-0.18,0C6.4,14.84,5.3,14,4,14c-1.66,0-3,1.34-3,3s1.34,3,3,3c0.62,0,2.49,0,3,0c1.1,0,2-0.9,2-2 C9,16.9,8.1,16,7,16z"/></g></g></svg>
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/doc.css b/v_windows/v/old/cmd/tools/vdoc/resources/doc.css
new file mode 100644
index 0000000..a63d357
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/doc.css
@@ -0,0 +1,725 @@
+:root {
+ --background-color: #fff;
+ --link-color: #2779bd;
+ --text-color: #000;
+ --ref-symbol-color: #dae1e7;
+ --ref-symbol-hover-color: #b8c2cc;
+ --title-bottom-line-color: #f1f5f8;
+ --footer-top-line-color: #f1f5f8;
+ --footer-text-color: #616161;
+ --code-signature-border-color: #a0aec0;
+ --menu-background-color: #4b6c88;
+ --menu-text-color: #fff;
+ --menu-indent-line-color: #3b3b3b66;
+ --menu-indent-line-active-color: #00000066;
+ --menu-scrollbar-color: #a0aec0;
+ --menu-toggle-icon-color: #fff;
+ --menu-toggle-icon-hover-color: #00000044;
+ --menu-search-background-color: #00000044;
+ --menu-search-font-color: #fff;
+ --menu-search-result-background-hover-color: #00000021;
+ --menu-search-separator-color: #00000044;
+ --menu-search-title-text-color: #d5efff;
+ --menu-search-badge-background-color: #00000044;
+ --menu-search-badge-background-hover-color: #0000004d;
+ --toc-text-color: #2779bd;
+ --toc-indicator-color: #4299e1;
+ --code-default-text-color: #5c6e74;
+ --code-background-color: #edf2f7;
+ --code-keyword-text-color: #2b6cb0;
+ --code-builtin-text-color: #0a0a0a;
+ --code-function-text-color: #319795;
+ --code-comment-text-color: #93a1a1;
+ --code-punctuation-text-color: #999999;
+ --code-symbol-text-color: #702459;
+ --code-operator-text-color: #a67f59;
+ --attribute-deprecated-background-color: #f59f0b48;
+ --attribute-deprecated-text-color: #92400e;
+ --attribute-text-color: #000000af;
+}
+:root.dark .dark-icon {
+ display: none;
+}
+:root:not(.dark) .light-icon {
+ display: none;
+}
+
+.dark body {
+ --background-color: #1a202c;
+ --text-color: #fff;
+ --link-color: #90cdf4;
+ --ref-symbol-color: #2d3748;
+ --ref-symbol-hover-color: #4a5568;
+ --title-bottom-line-color: #2d3748;
+ --footer-top-line-color: #2d3748;
+ --footer-text-color: #bbd3e1;
+ --code-signature-border-color: #4a5568;
+ --menu-background-color: #2d3748;
+ --menu-text-color: #fff;
+ --menu-indent-line-color: #4a5568;
+ --menu-indent-line-active-color: #90cdf4; /*#4a5568*/
+ --menu-scrollbar-color: #4a5568;
+ --menu-toggle-icon-color: #fff;
+ --menu-search-background-color: #4a5568;
+ --menu-search-font-color: #fff;
+ --menu-search-separator-color: #4a5568;
+ --menu-search-title-text-color: #90cdf4;
+ --menu-search-badge-background-color: #4a5568;
+ --menu-search-badge-background-hover-color: #4a5568;
+ --toc-text-color: #90cdf4;
+ --toc-indicator-color: #4299e1;
+ --code-default-text-color: #cbd5e0;
+ --code-background-color: #2d3748;
+ --code-builtin-text-color: #68d391;
+ --code-keyword-text-color: #63b3ed;
+ --code-function-text-color: #4fd1c5;
+ --code-comment-text-color: #a0aec0;
+ --code-punctuation-text-color: #a0aec0;
+ --code-symbol-text-color: #ed64a6;
+ --attribute-background-color: #ffffff20;
+ --attribute-text-color: #ffffffaf;
+ --attribute-deprecated-text-color: #fef3c7;
+}
+html {
+ height: 100%;
+}
+body {
+ margin: 0;
+ font-family: Roboto, -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica,
+ Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
+ background-color: #fff;
+ background-color: var(--background-color);
+ color: #000;
+ color: var(--text-color);
+ height: 100%;
+}
+#page {
+ height: 100%;
+ padding-top: 56px;
+ box-sizing: border-box;
+ overflow: hidden;
+}
+
+/** Reset for menus */
+.doc-nav ul,
+.doc-toc ul {
+ list-style: none;
+ padding: 0;
+ margin: 0;
+}
+
+/* Left nav */
+.doc-nav {
+ position: fixed;
+ width: 100%;
+ left: 0;
+ right: 0;
+ top: 0;
+ display: flex;
+ background-color: #4b6c88;
+ background-color: var(--menu-background-color);
+ color: #fff;
+ color: var(--menu-text-color);
+ flex-direction: column;
+ overflow-y: auto;
+ height: 100vh;
+ z-index: 10;
+ scrollbar-width: thin;
+ scrollbar-color: #a0aec0 transparent;
+ scrollbar-color: var(--menu-scrollbar-color) transparent;
+ font-family: "Work Sans", sans-serif;
+}
+*::-webkit-scrollbar {
+ width: 4px;
+ height: 4px;
+}
+*::-webkit-scrollbar-track {
+ background: transparent;
+}
+*::-webkit-scrollbar-thumb {
+ background-color: #a0aec0;
+ background-color: var(--menu-scrollbar-color);
+ border: 3px solid transparent;
+}
+.doc-nav .content li {
+ line-height: 1.8;
+}
+.doc-nav .content.show {
+ display: flex;
+}
+.doc-nav .content.hidden {
+ display: none;
+}
+.doc-nav #toggle-menu {
+ cursor: pointer;
+ padding: 0.3rem;
+ fill: #fff;
+ fill: var(--menu-toggle-icon-color);
+}
+.doc-nav > .heading-container {
+ position: relative;
+ /* IE11 */
+ position: sticky;
+ position: -webkit-sticky;
+ top: 0;
+ background-color: #4b6c88;
+ background-color: var(--menu-background-color);
+ z-index: 10;
+}
+.doc-nav > .heading-container > .heading {
+ display: flex;
+ flex-direction: column;
+}
+.doc-nav > .heading-container > .heading > .info {
+ display: flex;
+ padding: 0 1rem;
+ height: 56px;
+}
+.doc-nav > .heading-container > .heading > .info > .module {
+ font-size: 1.6rem;
+ font-weight: 500;
+ margin: 0;
+}
+.doc-nav > .heading-container > .heading > .info > .toggle-version-container {
+ display: flex;
+ align-items: center;
+}
+.doc-nav
+ > .heading-container
+ > .heading
+ > .info
+ > .toggle-version-container
+ > #dark-mode-toggle {
+ cursor: pointer;
+ fill: #fff;
+ display: flex;
+ visibility: hidden;
+}
+.doc-nav
+ > .heading-container
+ > .heading
+ > .info
+ > .toggle-version-container
+ > #dark-mode-toggle
+ > svg {
+ width: 1.2rem;
+ height: 1.2rem;
+}
+.doc-nav > .heading-container > .heading > #search {
+ margin: 0.6rem 1.2rem 1rem 1.2rem;
+ border: none;
+ border-radius: 0.2rem;
+ padding: 0.5rem 1rem;
+ outline: none;
+ background-color: #00000044;
+ background-color: var(--menu-search-background-color);
+ color: #fff;
+ color: var(--menu-search-text-color);
+}
+.doc-nav > .heading-container > .heading > #search::placeholder {
+ color: #edf2f7;
+ text-transform: uppercase;
+ font-size: 12px;
+ font-weight: 600;
+}
+.doc-nav > .heading-container > .heading > #search:-ms-input-placeholder {
+ color: #edf2f7;
+ text-transform: uppercase;
+ font-size: 12px;
+ font-weight: 600;
+}
+.doc-nav > .content {
+ padding: 0 2rem 2rem 2rem;
+ display: flex;
+ flex-direction: column;
+}
+.doc-nav > .content > ul > li.active {
+ font-weight: 600;
+}
+.doc-nav > .content > ul > li.open ul {
+ display: initial;
+}
+.doc-nav > .content > ul > li.open > .menu-row > .dropdown-arrow {
+ transform: initial;
+}
+.doc-nav > .content > ul > li > .menu-row {
+ display: flex;
+ align-items: center;
+}
+.doc-nav > .content > ul > li > .menu-row > .dropdown-arrow {
+ transform: rotate(-90deg);
+ height: 18px;
+ width: 18px;
+ margin-left: calc(-18px - 0.3rem);
+ margin-right: 0.3rem;
+ cursor: pointer;
+ fill: #fff;
+ pointer-events: all;
+}
+.doc-nav > .content > ul > li > ul {
+ margin: 0.4rem 0;
+ display: none;
+}
+.doc-nav > .content > ul > li > ul > li {
+ border-color: #ffffff66;
+ border-color: var(--menu-indent-line-color);
+ border-left-width: 1.7px;
+ border-left-style: solid;
+ padding-left: 0.7rem;
+}
+.doc-nav > .content > ul > li > ul > li.active {
+ border-color: #00000066;
+ border-color: var(--menu-search-result-hover-background-color);
+}
+.doc-nav > .content a {
+ color: #fff;
+ color: var(--menu-text-color);
+ text-decoration: none;
+ user-select: none;
+}
+.doc-nav > .content a:hover {
+ text-decoration: underline;
+}
+.doc-nav .search.hidden {
+ display: none;
+}
+.doc-nav .search li {
+ line-height: 1.5;
+}
+.doc-nav > .search .result:hover {
+ background-color: #00000021;
+ background-color: var(--menu-search-result-background-hover-color);
+}
+.doc-nav > .search .result:hover > .link > .definition > .badge {
+ background-color: #0000004d;
+ background-color: var(--menu-search-badge-background-hover-color);
+}
+.doc-nav > .search .result > .link {
+ padding: 0.5rem 1.4rem;
+ text-decoration: none;
+ color: #fff;
+ color: var(--menu-text-color);
+ display: block;
+}
+.doc-nav > .search .result > .link > .definition {
+ display: flex;
+}
+.doc-nav > .search .result > .link > .definition > .title {
+ color: #90cdf4;
+ color: var(--menu-search-title-text-color);
+ font-size: 0.875rem;
+ font-weight: 500;
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+}
+.doc-nav > .search .result > .link > .definition > .badge {
+ font-size: 0.75rem;
+ display: inline-flex;
+ padding: 0 0.5rem;
+ background-color: #00000044;
+ background-color: var(--menu-search-badge-background-color);
+ margin-left: auto;
+ align-items: center;
+ border-radius: 9999px;
+ font-weight: 500;
+}
+.doc-nav > .search .result > .link > .description {
+ font-family: Roboto, -apple-system, BlinkMacSystemFont, "Segoe UI", Helvetica,
+ Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
+ font-size: 0.75rem;
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+ margin-top: 0.25rem;
+}
+.doc-nav > .search > hr.separator {
+ margin: 0.5rem 0;
+ border-color: #00000044;
+ border-color: var(--menu-search-separator-color);
+ box-sizing: content-box;
+ height: 0;
+ border-width: 0;
+ border-top-width: 1px;
+ border-style: solid;
+ overflow: visible;
+}
+
+/* Main content */
+.doc-scrollview {
+ height: 100%;
+ overflow-y: scroll;
+}
+.doc-container {
+ display: flex;
+ flex-direction: column-reverse;
+}
+.doc-content {
+ display: flex;
+ flex-direction: column;
+ padding: 1rem;
+ overflow: hidden;
+}
+.doc-content img {
+ width: auto;
+ max-width: 100%;
+}
+.doc-content p {
+ line-height: 1.4;
+}
+.doc-content a {
+ color: #2779bd;
+ color: var(--link-color);
+}
+.doc-content > .doc-node {
+ padding: 5rem 0 2rem 0;
+ margin-top: -4rem;
+ overflow: hidden;
+ word-break: break-all; /* IE11 */
+ word-break: break-word;
+}
+.doc-content > .doc-node.const:nth-child(2) {
+ padding-bottom: 0 !important;
+}
+.doc-content > .doc-node.const:not(:first-child) {
+ padding-top: 4rem;
+}
+.doc-content > .doc-node.const:not(:last-child) {
+ padding-bottom: 2rem;
+}
+.doc-content > .timestamp {
+ font-size: 0.8rem;
+ color: #b8c2cc;
+ color: var(--timestamp-color);
+}
+.doc-content > .doc-node > .title {
+ display: flex;
+ font-family: "Work Sans", sans-serif;
+ font-weight: 500;
+ padding: 0.3rem;
+ align-items: center;
+ margin-bottom: 1rem;
+ border-bottom: 1px solid #f1f5f8;
+ border-bottom: 1px solid var(--title-bottom-line-color);
+}
+.doc-content > .doc-node > .attributes {
+ margin-bottom: 0.6rem;
+}
+.doc-content > .doc-node > .attributes > .attribute {
+ display: inline-block;
+ border-radius: 100px;
+ padding: 0.3rem 0.6rem;
+ background-color: var(--code-background-color);
+ color: var(--attribute-text-color);
+ margin-right: 0.8rem;
+}
+.doc-content > .doc-node > .attributes > .attribute-deprecated {
+ background-color: var(--attribute-deprecated-background-color);
+ color: var(--attribute-deprecated-text-color);
+}
+.doc-content > .doc-node > .title > .link {
+ display: flex;
+ margin-left: auto;
+ fill: #dae1e7;
+ fill: var(--ref-symbol-color);
+}
+.doc-content > .doc-node > .title > .link:hover {
+ fill: #b8c2cc;
+ fill: var(--ref-symbol-hover-color);
+}
+.doc-content > .doc-node h1 {
+ font-size: 2rem;
+}
+.doc-content > .doc-node h2 {
+ font-size: 1.3rem;
+}
+.doc-content > .doc-node .signature {
+ border-color: #a0aec0;
+ border-color: var(--code-signature-border-color);
+ border-left-width: 3px;
+ border-left-style: solid;
+}
+.doc-content > .doc-node > ul > li .task-list-item-checkbox {
+ margin-right: 0.5rem;
+}
+.doc-content > .doc-node > .title h1,
+.doc-content > .doc-node > .title h2,
+.doc-content > .doc-node > .title h3,
+.doc-content > .doc-node > .title h4,
+.doc-content > .doc-node > .title h5,
+.doc-content > .doc-node > .title h6 {
+ font-weight: 500;
+ margin: 0;
+}
+.doc-content > .doc-node > .title h1 a,
+.doc-content > .doc-node > .title h2 a,
+.doc-content > .doc-node > .title h3 a,
+.doc-content > .doc-node > .title h4 a,
+.doc-content > .doc-node > .title h5 a,
+.doc-content > .doc-node > .title h6 a {
+ text-decoration: none;
+ color: #dae1e7;
+ color: var(--ref-symbol-color);
+}
+.doc-content > .doc-node > .title h1 a:hover,
+.doc-content > .doc-node > .title h2 a:hover,
+.doc-content > .doc-node > .title h3 a:hover,
+.doc-content > .doc-node > .title h4 a:hover,
+.doc-content > .doc-node > .title h5 a:hover,
+.doc-content > .doc-node > .title h6 a:hover {
+ color: #b8c2cc;
+ color: var(--ref-symbol-hover-color);
+}
+.doc-content > .footer {
+ padding-top: 1rem;
+ margin-top: auto;
+ bottom: 1rem;
+ color: #616161;
+ color: var(--footer-text-color);
+ border-color: #f1f5f8;
+ border-color: var(--footer-top-line-color);
+ border-top-style: solid;
+ border-top-width: 1px;
+ font-size: 0.8rem;
+ font-weight: 500;
+}
+
+/* Right menu */
+.doc-toc {
+ right: 0;
+ top: 0;
+ height: 100%;
+ overflow-y: auto;
+ padding: 1rem 1rem 0 1rem;
+ width: 100%;
+ box-sizing: border-box;
+ -ms-overflow-style: none;
+ scrollbar-width: none;
+ font-family: "Work Sans", sans-serif;
+}
+.doc-toc::-webkit-scrollbar {
+ display: none;
+}
+.doc-toc li {
+ line-height: 1.5;
+}
+.doc-toc a {
+ color: #2779bd;
+ color: var(--toc-text-color);
+ font-size: 0.9rem;
+ font-weight: 600;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ display: block;
+ text-decoration: none;
+ border-left-width: 2px;
+ border-left-style: solid;
+ border-color: transparent;
+ padding-left: 0.4rem;
+}
+.doc-toc a:hover {
+ text-decoration: underline;
+}
+.doc-toc a.active {
+ border-color: #4299e1;
+ border-color: var(--toc-indicator-color);
+}
+.doc-toc li ul {
+ margin: 0.2rem 0 0.2rem;
+ font-size: 0.7rem;
+ list-style: none;
+}
+.doc-toc li ul a {
+ font-weight: 400;
+ padding-left: 0.8rem;
+}
+
+/* Code highlight */
+pre,
+code,
+pre code {
+ color: #5c6e74;
+ color: var(--code-default-text-color);
+ font-size: 0.948em;
+ text-shadow: none;
+ font-family: monospace;
+ background-color: #edf2f7;
+ background-color: var(--code-background-color);
+ border-radius: 0.25rem;
+}
+pre code {
+ direction: ltr;
+ text-align: left;
+ white-space: pre;
+ word-spacing: normal;
+ word-break: normal;
+ line-height: 1.5;
+ -moz-tab-size: 4;
+ -o-tab-size: 4;
+ tab-size: 4;
+ -webkit-hyphens: none;
+ -moz-hyphens: none;
+ -ms-hyphens: none;
+ hyphens: none;
+ display: block;
+ overflow-x: auto;
+ padding: 1rem;
+}
+code {
+ padding: 0.2rem;
+ vertical-align: middle;
+}
+pre {
+ overflow: auto;
+ margin: 0;
+}
+.namespace {
+ opacity: 0.7;
+}
+.token.comment {
+ color: #93a1a1;
+ color: var(--code-comment-text-color);
+}
+.token.punctuation {
+ color: #999999;
+ color: var(--code-punctuation-text-color);
+}
+.token.number,
+.token.symbol {
+ color: #702459;
+ color: var(--code-symbol-text-color);
+}
+.token.string,
+.token.char,
+.token.builtin {
+ color: #38a169;
+ color: var(--code-builtin-text-color);
+}
+.token.operator,
+.token.entity,
+.token.url,
+.language-css .token.string,
+.style .token.string {
+ color: #a67f59;
+ color: var(--code-operator-text-color);
+ background: transparent;
+}
+.token.boolean,
+.token.keyword {
+ color: #2b6cb0;
+ color: var(--code-keyword-text-color);
+}
+.token.function {
+ color: #319795;
+ color: var(--code-function-text-color);
+}
+
+/* Medium screen and up */
+@media (min-width: 768px) {
+ *::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+ }
+ *::-webkit-scrollbar-thumb {
+ border: 3px solid transparent;
+ }
+ .doc-container {
+ flex-direction: row;
+ }
+ .doc-content {
+ font-size: 0.95rem;
+ flex: 1;
+ padding: 0rem 2rem 1rem 2rem;
+ }
+ .doc-toc {
+ position: relative;
+ /* IE11 */
+ position: sticky;
+ position: -webkit-sticky;
+ align-self: flex-start;
+ top: 56px;
+ height: auto;
+ height: 100vh;
+ min-width: 200px;
+ width: auto;
+ max-width: 300px;
+ }
+ .doc-toc > ul {
+ padding-bottom: 1rem;
+ }
+}
+
+@media (max-width: 1023px) {
+ .doc-nav.hidden {
+ height: auto;
+ }
+ .doc-nav.hidden #search {
+ display: none;
+ }
+ .doc-nav .search.mobile-hidden {
+ display: none;
+ }
+ .doc-nav > .heading-container > .heading > .info {
+ align-items: center;
+ }
+ .doc-nav > .heading-container > .heading > .info > .toggle-version-container {
+ flex-grow: 1;
+ padding: 0 1rem;
+ justify-content: space-between;
+ }
+}
+
+@media (min-width: 1024px) {
+ #page {
+ padding-top: 0;
+ }
+ .doc-nav {
+ width: 300px;
+ }
+ .doc-nav #toggle-menu {
+ display: none;
+ }
+ .doc-nav > .heading-container > .heading > .info {
+ height: auto;
+ padding: 1rem 2rem 0 2rem;
+ flex-direction: column-reverse;
+ justify-content: center;
+ }
+ .doc-nav > .heading-container > .heading > .info > .toggle-version-container {
+ align-items: center;
+ margin-bottom: 0.2rem;
+ display: flex;
+ flex-direction: row-reverse;
+ }
+ .doc-nav
+ > .heading-container
+ > .heading
+ > .info
+ > .toggle-version-container
+ > #dark-mode-toggle {
+ margin-right: auto;
+ }
+ .doc-nav .content.show,
+ .doc-nav .content.hidden {
+ display: flex;
+ }
+ .doc-content > .doc-node.const:nth-child(2) {
+ padding-bottom: 0 !important;
+ }
+ .doc-content > .doc-node.const:not(:first-child) {
+ padding-top: 0;
+ }
+ .doc-content > .doc-node.const:not(:last-child) {
+ padding-bottom: 1rem;
+ }
+ .doc-container {
+ margin-left: 300px;
+ }
+ .doc-node {
+ padding-top: 1rem !important;
+ margin-top: 0 !important;
+ }
+ .doc-toc {
+ top: 0;
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/doc.js b/v_windows/v/old/cmd/tools/vdoc/resources/doc.js
new file mode 100644
index 0000000..c355d7e
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/doc.js
@@ -0,0 +1,235 @@
+(function () {
+ if (document.body.scrollIntoView) {
+ var docnav = document.querySelector('.doc-nav');
+ var active = docnav.querySelector('li.active');
+ if (active) {
+ active.scrollIntoView({ block: 'center', inline: 'nearest' });
+ }
+ }
+ setupScrollSpy();
+ setupMobileToggle();
+ setupDarkMode();
+ setupSearch();
+ setupCollapse();
+})();
+
+function setupScrollSpy() {
+ var sectionPositions = [];
+ var sections = document.querySelectorAll('section');
+ sections.forEach(function (section) {
+ sectionPositions.push(section.offsetTop);
+ });
+ var scrollPos = 0;
+ window.addEventListener('scroll', function (_) {
+ // Reset classes
+ document.querySelectorAll('.doc-toc a[class="active"]').forEach(function (link) {
+ link.classList.remove('active');
+ });
+ // Set current menu link as active
+ var scrollPosition = document.documentElement.scrollTop || document.body.scrollTop;
+ for (var i = 0; i < sectionPositions.length; i++) {
+ var section = sections[i];
+ var position = sectionPositions[i];
+ if (position >= scrollPosition) {
+ var link = document.querySelector('.doc-toc a[href="#' + section.id + '"]');
+ if (link) {
+ link.classList.add('active');
+ var docToc = document.querySelector('.doc-toc');
+ var tocHeight = docToc.clientHeight;
+ var scrollTop = docToc.scrollTop;
+ if ((document.body.getBoundingClientRect()).top < scrollPos && scrollTop < link.offsetTop - 10) {
+ docToc.scrollTop = link.clientHeight + link.offsetTop - tocHeight + 10;
+ } else if (scrollTop > link.offsetTop - 10) {
+ docToc.scrollTop = link.offsetTop - 10;
+ }
+ }
+ break;
+ }
+ }
+ scrollPos = (document.body.getBoundingClientRect()).top;
+ });
+}
+
+function setupMobileToggle() {
+ var toggle = document.getElementById('toggle-menu');
+ toggle.addEventListener('click', function (_) {
+ var docNav = document.querySelector('.doc-nav');
+ var isHidden = docNav.classList.contains('hidden');
+ docNav.classList.toggle('hidden');
+ var search = document.querySelector('.doc-nav > .search');
+ console.log(search);
+ var searchHasResults = search.classList.contains('has-results');
+ if (isHidden && searchHasResults) {
+ search.classList.remove('mobile-hidden');
+ } else {
+ search.classList.add('mobile-hidden');
+ }
+ var content = document.querySelector('.doc-nav .content');
+ content.classList.toggle('hidden');
+ content.classList.toggle('show');
+ });
+}
+
+function setupDarkMode() {
+ var html = document.getElementsByTagName('html')[0];
+ var darkModeToggle = document.getElementById('dark-mode-toggle');
+ darkModeToggle.addEventListener('click', function () {
+ html.classList.toggle('dark');
+ var isDarkModeEnabled = html.classList.contains('dark');
+ localStorage.setItem('dark-mode', isDarkModeEnabled);
+ darkModeToggle.setAttribute('aria-checked', isDarkModeEnabled)
+ });
+ // Check if css var() is supported and enable dark mode toggle
+ if (window.CSS && CSS.supports('color', 'var(--fake-var)')) {
+ darkModeToggle.style.visibility = 'unset';
+ }
+}
+
+function setupSearch() {
+ var searchInput = document.getElementById('search');
+ var onInputChange = debounce(function (e) {
+ var searchValue = e.target.value.toLowerCase();
+ var menu = document.querySelector('.doc-nav > .content');
+ var search = document.querySelector('.doc-nav > .search');
+ if (searchValue === '') {
+ // reset to default
+ menu.style.display = '';
+ if (!search.classList.contains('hidden')) {
+ search.classList.add('hidden');
+ search.classList.remove('has-results');
+ }
+ } else if (searchValue.length >= 2) {
+ // search for less than 2 characters can display too much results
+ search.innerHTML = '';
+ menu.style.display = 'none';
+ if (search.classList.contains('hidden')) {
+ search.classList.remove('hidden');
+ search.classList.remove('mobile-hidden');
+ search.classList.add('has-results');
+ }
+ // cache length for performance
+ var foundModule = false;
+ var searchModuleIndexLength = searchModuleIndex.length;
+ var ul = document.createElement('ul');
+ search.appendChild(ul);
+ for (var i = 0; i < searchModuleIndexLength; i++) {
+ // no toLowerCase needed because modules are always lowercase
+ var title = searchModuleIndex[i];
+ if (title.indexOf(searchValue) === -1) {
+ continue
+ }
+ foundModule = true;
+ // [description, link]
+ var data = searchModuleData[i];
+ var description = data[0];
+ var link = data[1];
+ var el = createSearchResult({
+ link: link,
+ title: title,
+ description: description,
+ badge: 'module',
+ });
+ ul.appendChild(el);
+ }
+ if (foundModule) {
+ var hr = document.createElement('hr');
+ hr.classList.add('separator');
+ search.appendChild(hr);
+ }
+ var searchIndexLength = searchIndex.length;
+ var results = [];
+ for (var i = 0; i < searchIndexLength; i++) {
+ var title = searchIndex[i];
+ if (title.toLowerCase().indexOf(searchValue) === -1) {
+ continue
+ }
+ // [badge, description, link]
+ var data = searchData[i];
+ var badge = data[0];
+ var description = data[1];
+ var link = data[2];
+ var prefix = data[3];
+ results.push({
+ badge: badge,
+ description: description,
+ link: link,
+ title: prefix + ' ' + title,
+ });
+ }
+ results.sort(function (a, b) {
+ if (a.title < b.title) {
+ return -1;
+ }
+ if (a.title > b.title) {
+ return 1;
+ }
+ return 0;
+ });
+ var ul = document.createElement('ul');
+ search.appendChild(ul);
+ for (var i = 0; i < results.length; i++) {
+ var result = results[i];
+ var el = createSearchResult(result);
+ ul.appendChild(el);
+ }
+ }
+ });
+ searchInput.addEventListener('input', onInputChange);
+}
+
+function createSearchResult(data) {
+ var li = document.createElement('li');
+ li.classList.add('result');
+ var a = document.createElement('a');
+ a.href = data.link;
+ a.classList.add('link');
+ li.appendChild(a);
+ var definition = document.createElement('div');
+ definition.classList.add('definition');
+ a.appendChild(definition);
+ if (data.description) {
+ var description = document.createElement('div');
+ description.classList.add('description');
+ description.textContent = data.description;
+ a.appendChild(description);
+ }
+ var title = document.createElement('span');
+ title.classList.add('title');
+ title.textContent = data.title;
+ definition.appendChild(title);
+ var badge = document.createElement('badge');
+ badge.classList.add('badge');
+ badge.textContent = data.badge;
+ definition.appendChild(badge);
+ return li;
+}
+
+function setupCollapse() {
+ var dropdownArrows = document.querySelectorAll('.dropdown-arrow');
+ for (var i = 0; i < dropdownArrows.length; i++) {
+ var dropdownArrow = dropdownArrows[i];
+ dropdownArrow.addEventListener('click', function (e) {
+ var parent = e.target.parentElement.parentElement.parentElement;
+ parent.classList.toggle('open');
+ });
+ }
+}
+
+function debounce(func, timeout) {
+ var timer;
+ return (...args) => {
+ const next = () => func(...args);
+ if (timer) {
+ clearTimeout(timer);
+ }
+ timer = setTimeout(next, timeout > 0 ? timeout : 300);
+ }
+}
+
+document.addEventListener('keypress', (ev) => {
+ if (ev.key == '/') {
+ let search = document.getElementById('search');
+ ev.preventDefault();
+ search.focus();
+ }
+});
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-192x192.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-192x192.png
new file mode 100644
index 0000000..a674500
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-192x192.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-512x512.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-512x512.png
new file mode 100644
index 0000000..fe7294e
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/android-chrome-512x512.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/apple-touch-icon.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/apple-touch-icon.png
new file mode 100644
index 0000000..d2bedd5
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/apple-touch-icon.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/browserconfig.xml b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/browserconfig.xml
new file mode 100644
index 0000000..b3930d0
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/browserconfig.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="utf-8"?>
+<browserconfig>
+ <msapplication>
+ <tile>
+ <square150x150logo src="/mstile-150x150.png"/>
+ <TileColor>#da532c</TileColor>
+ </tile>
+ </msapplication>
+</browserconfig>
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-16x16.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-16x16.png
new file mode 100644
index 0000000..ed11964
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-16x16.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-32x32.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-32x32.png
new file mode 100644
index 0000000..083808f
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon-32x32.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon.ico b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon.ico
new file mode 100644
index 0000000..5123c5e
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/favicon.ico
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-144x144.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-144x144.png
new file mode 100644
index 0000000..f34f872
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-144x144.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-150x150.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-150x150.png
new file mode 100644
index 0000000..d511595
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-150x150.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x150.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x150.png
new file mode 100644
index 0000000..ec8e25f
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x150.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x310.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x310.png
new file mode 100644
index 0000000..8b98e30
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-310x310.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-70x70.png b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-70x70.png
new file mode 100644
index 0000000..4740338
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/mstile-70x70.png
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/safari-pinned-tab.svg b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/safari-pinned-tab.svg
new file mode 100644
index 0000000..8580c38
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/safari-pinned-tab.svg
@@ -0,0 +1,39 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
+ "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<svg version="1.0" xmlns="http://www.w3.org/2000/svg"
+ width="700.000000pt" height="700.000000pt" viewBox="0 0 700.000000 700.000000"
+ preserveAspectRatio="xMidYMid meet">
+<metadata>
+Created by potrace 1.11, written by Peter Selinger 2001-2013
+</metadata>
+<g transform="translate(0.000000,700.000000) scale(0.100000,-0.100000)"
+fill="#000000" stroke="none">
+<path d="M580 6459 c-45 -10 -80 -50 -80 -94 0 -18 20 -89 44 -156 63 -178
+287 -801 371 -1034 62 -172 123 -341 205 -570 39 -110 81 -225 92 -255 19 -52
+62 -173 157 -440 23 -63 44 -122 48 -130 3 -8 7 -19 9 -25 4 -18 354 -989 369
+-1025 8 -19 30 -82 50 -140 20 -58 55 -154 77 -215 22 -60 51 -144 65 -185 14
+-41 29 -79 34 -85 5 -5 9 -17 9 -27 0 -9 7 -32 15 -50 8 -18 53 -139 99 -268
+186 -518 281 -784 326 -910 72 -205 97 -240 200 -289 l55 -25 795 -2 c437 -1
+809 0 826 3 50 8 88 51 118 133 15 41 34 91 41 110 7 19 49 136 94 260 176
+496 213 597 231 645 29 76 472 1309 474 1320 1 6 10 27 19 49 9 21 17 41 17
+45 0 3 32 94 71 201 39 107 72 200 73 205 2 6 6 17 9 25 4 8 38 105 78 215 39
+110 100 281 136 380 76 208 133 366 173 480 29 84 292 814 437 1215 208 573
+204 560 154 611 -26 25 -56 35 -101 31 -6 -1 -102 -10 -185 -17 -27 -3 -70 -7
+-95 -10 -25 -3 -74 -7 -110 -10 -36 -3 -81 -7 -100 -10 -19 -2 -64 -7 -100
+-10 -82 -7 -152 -14 -210 -20 -25 -3 -74 -7 -110 -10 -36 -3 -81 -7 -100 -10
+-19 -2 -64 -7 -100 -10 -147 -13 -262 -24 -315 -30 -16 -2 -64 -6 -105 -10
+-102 -9 -140 -20 -196 -58 -75 -51 -101 -92 -156 -248 -28 -79 -51 -148 -53
+-154 -1 -5 -5 -17 -8 -25 -4 -8 -40 -112 -81 -230 -82 -239 -197 -567 -256
+-735 -21 -60 -59 -171 -85 -245 -26 -74 -64 -184 -85 -245 -22 -60 -89 -254
+-150 -430 -61 -176 -120 -345 -131 -375 -11 -30 -45 -130 -77 -222 -65 -192
+-51 -195 -126 19 -24 71 -72 207 -106 303 -34 96 -81 231 -105 300 -24 69 -71
+204 -105 300 -34 96 -65 186 -70 200 -4 14 -34 97 -65 185 -31 88 -65 185 -75
+215 -17 52 -70 203 -210 600 -97 276 -141 403 -150 430 -77 237 -117 309 -199
+362 -54 33 -91 44 -186 53 -73 6 -130 12 -205 20 -25 3 -74 7 -110 10 -36 3
+-85 8 -110 11 -25 3 -70 7 -100 9 -30 3 -75 7 -100 10 -25 3 -72 7 -105 10
+-33 3 -80 7 -105 10 -25 3 -72 7 -105 10 -33 3 -82 8 -110 10 -27 3 -71 8 -97
+10 -27 3 -72 7 -100 10 -83 9 -137 14 -218 21 -41 3 -77 7 -80 8 -3 2 -21 -1
+-40 -5z"/>
+</g>
+</svg>
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/favicons/site.webmanifest b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/site.webmanifest
new file mode 100644
index 0000000..b20abb7
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/favicons/site.webmanifest
@@ -0,0 +1,19 @@
+{
+ "name": "",
+ "short_name": "",
+ "icons": [
+ {
+ "src": "/android-chrome-192x192.png",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "/android-chrome-512x512.png",
+ "sizes": "512x512",
+ "type": "image/png"
+ }
+ ],
+ "theme_color": "#ffffff",
+ "background_color": "#ffffff",
+ "display": "standalone"
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/light.svg b/v_windows/v/old/cmd/tools/vdoc/resources/light.svg
new file mode 100644
index 0000000..21606c9
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/light.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" class="light-icon" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M6.76 4.84l-1.8-1.79-1.41 1.41 1.79 1.79 1.42-1.41zM4 10.5H1v2h3v-2zm9-9.95h-2V3.5h2V.55zm7.45 3.91l-1.41-1.41-1.79 1.79 1.41 1.41 1.79-1.79zm-3.21 13.7l1.79 1.8 1.41-1.41-1.8-1.79-1.4 1.4zM20 10.5v2h3v-2h-3zm-8-5c-3.31 0-6 2.69-6 6s2.69 6 6 6 6-2.69 6-6-2.69-6-6-6zm-1 16.95h2V19.5h-2v2.95zm-7.45-3.91l1.41 1.41 1.79-1.8-1.41-1.41-1.79 1.8z"/></svg>
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/link.svg b/v_windows/v/old/cmd/tools/vdoc/resources/link.svg
new file mode 100644
index 0000000..60d93bb
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/link.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M3.9 12c0-1.71 1.39-3.1 3.1-3.1h4V7H7c-2.76 0-5 2.24-5 5s2.24 5 5 5h4v-1.9H7c-1.71 0-3.1-1.39-3.1-3.1zM8 13h8v-2H8v2zm9-6h-4v1.9h4c1.71 0 3.1 1.39 3.1 3.1s-1.39 3.1-3.1 3.1h-4V17h4c2.76 0 5-2.24 5-5s-2.24-5-5-5z"/></svg>
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/menu.svg b/v_windows/v/old/cmd/tools/vdoc/resources/menu.svg
new file mode 100644
index 0000000..c069b00
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/menu.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" id="toggle-menu" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0z" fill="none"/><path d="M3 18h18v-2H3v2zm0-5h18v-2H3v2zm0-7v2h18V6H3z"/></svg>
diff --git a/v_windows/v/old/cmd/tools/vdoc/resources/normalize.css b/v_windows/v/old/cmd/tools/vdoc/resources/normalize.css
new file mode 100644
index 0000000..d9fbe67
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/resources/normalize.css
@@ -0,0 +1,171 @@
+/*! normalize.css v8.0.1 | MIT License | github.com/necolas/normalize.css */
+html {
+ line-height: 1.15;
+ -webkit-text-size-adjust: 100%;
+}
+
+main {
+ display: block;
+}
+
+h1 {
+ font-size: 2em;
+ margin: 0.67em 0;
+}
+
+hr {
+ box-sizing: content-box;
+ height: 0;
+ overflow: visible;
+}
+
+pre {
+ font-family: monospace, monospace;
+ font-size: 1em;
+}
+
+a {
+ background-color: transparent;
+}
+
+abbr[title] {
+ border-bottom: none;
+ text-decoration: underline;
+ text-decoration: underline dotted;
+}
+
+b,
+strong {
+ font-weight: bolder;
+}
+
+kbd,
+samp {
+ font-family: monospace, monospace;
+ font-size: 1em;
+}
+
+small {
+ font-size: 80%;
+}
+
+sub,
+sup {
+ font-size: 75%;
+ line-height: 0;
+ position: relative;
+ vertical-align: baseline;
+}
+
+sub {
+ bottom: -0.25em;
+}
+
+sup {
+ top: -0.5em;
+}
+
+img {
+ border-style: none;
+}
+
+button,
+input,
+optgroup,
+select,
+textarea {
+ font-family: inherit;
+ font-size: 100%;
+ line-height: 1.15;
+ margin: 0;
+}
+
+button,
+input {
+ overflow: visible;
+}
+
+button,
+select {
+ text-transform: none;
+}
+
+button,
+[type="button"],
+[type="reset"],
+[type="submit"] {
+ -webkit-appearance: button;
+}
+
+button::-moz-focus-inner,
+[type="button"]::-moz-focus-inner,
+[type="reset"]::-moz-focus-inner,
+[type="submit"]::-moz-focus-inner {
+ border-style: none;
+ padding: 0;
+}
+
+button:-moz-focusring,
+[type="button"]:-moz-focusring,
+[type="reset"]:-moz-focusring,
+[type="submit"]:-moz-focusring {
+ outline: 1px dotted ButtonText;
+}
+
+fieldset {
+ padding: 0.35em 0.75em 0.625em;
+}
+
+legend {
+ box-sizing: border-box;
+ color: inherit;
+ display: table;
+ max-width: 100%;
+ padding: 0;
+ white-space: normal;
+}
+
+progress {
+ vertical-align: baseline;
+}
+
+textarea {
+ overflow: auto;
+}
+
+[type="checkbox"],
+[type="radio"] {
+ box-sizing: border-box;
+ padding: 0;
+}
+
+[type="number"]::-webkit-inner-spin-button,
+[type="number"]::-webkit-outer-spin-button {
+ height: auto;
+}
+
+[type="search"] {
+ -webkit-appearance: textfield;
+ outline-offset: -2px;
+}
+
+[type="search"]::-webkit-search-decoration {
+ -webkit-appearance: none;
+}
+
+::-webkit-file-upload-button {
+ -webkit-appearance: button;
+ font: inherit;
+}
+
+summary {
+ display: list-item;
+}
+
+template {
+ display: none;
+}
+
+[hidden] {
+ display: none;
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.out b/v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.out
new file mode 100644
index 0000000..41b75a2
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.out
@@ -0,0 +1 @@
+vdoc: No documentation found for /v/vmaster/cmd/tools/vdoc/tests/testdata/project1/main.v
diff --git a/v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.v b/v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.v
new file mode 100644
index 0000000..1a1b527
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/tests/testdata/project1/main.v
@@ -0,0 +1,8 @@
+const (
+ source_root = 'temp'
+)
+
+// funky - comment for function below
+fn funky() {
+ println('hi')
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/tests/vdoc_file_test.v b/v_windows/v/old/cmd/tools/vdoc/tests/vdoc_file_test.v
new file mode 100644
index 0000000..49a0130
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/tests/vdoc_file_test.v
@@ -0,0 +1,72 @@
+import os
+import rand
+import term
+import v.util.vtest
+import v.util.diff
+
+const vexe = @VEXE
+
+const vroot = @VMODROOT
+
+const diff_cmd = find_diff_cmd()
+
+fn find_diff_cmd() string {
+ return diff.find_working_diff_command() or { '' }
+}
+
+fn test_vet() {
+ os.setenv('VCOLORS', 'never', true)
+ os.chdir(vroot)
+ test_dir := 'cmd/tools/vdoc/tests/testdata'
+ main_files := get_main_files_in_dir(test_dir)
+ fails := check_path(vexe, test_dir, main_files)
+ assert fails == 0
+}
+
+fn get_main_files_in_dir(dir string) []string {
+ mut mfiles := os.walk_ext(dir, '.v')
+ mfiles.sort()
+ return mfiles
+}
+
+fn check_path(vexe string, dir string, tests []string) int {
+ mut nb_fail := 0
+ paths := vtest.filter_vtest_only(tests, basepath: vroot)
+ for path in paths {
+ program := path
+ print(path + ' ')
+ res := os.execute('$vexe doc $program')
+ if res.exit_code < 0 {
+ panic(res.output)
+ }
+ mut expected := os.read_file(program.replace('main.v', 'main.out')) or { panic(err) }
+ expected = clean_line_endings(expected)
+ found := clean_line_endings(res.output)
+ if expected != found {
+ println(term.red('FAIL'))
+ println('============')
+ println('expected:')
+ println(expected)
+ println('============')
+ println('found:')
+ println(found)
+ println('============\n')
+ println('diff:')
+ println(diff.color_compare_strings(diff_cmd, rand.ulid(), found, expected))
+ println('============\n')
+ nb_fail++
+ } else {
+ println(term.green('OK'))
+ }
+ }
+ return nb_fail
+}
+
+fn clean_line_endings(s string) string {
+ mut res := s.trim_space()
+ res = res.replace(' \n', '\n')
+ res = res.replace(' \r\n', '\n')
+ res = res.replace('\r\n', '\n')
+ res = res.trim('\n')
+ return res
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/utils.v b/v_windows/v/old/cmd/tools/vdoc/utils.v
new file mode 100644
index 0000000..4b07373
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/utils.v
@@ -0,0 +1,275 @@
+module main
+
+import os
+import v.doc
+import term
+import v.ast
+import v.scanner
+import v.token
+import strings
+import v.pref
+
+[inline]
+fn slug(title string) string {
+ return title.replace(' ', '-')
+}
+
+fn escape(str string) string {
+ return str.replace_each(['"', '\\"', '\r\n', '\\n', '\n', '\\n', '\t', '\\t'])
+}
+
+fn get_sym_name(dn doc.DocNode) string {
+ sym_name := if dn.parent_name.len > 0 && dn.parent_name != 'void' {
+ '($dn.parent_name) $dn.name'
+ } else {
+ dn.name
+ }
+ return sym_name
+}
+
+fn get_node_id(dn doc.DocNode) string {
+ tag := if dn.parent_name.len > 0 && dn.parent_name != 'void' {
+ '${dn.parent_name}.$dn.name'
+ } else {
+ dn.name
+ }
+ return slug(tag)
+}
+
+fn is_module_readme(dn doc.DocNode) bool {
+ if dn.comments.len > 0 && dn.content == 'module $dn.name' {
+ return true
+ }
+ return false
+}
+
+fn trim_doc_node_description(description string) string {
+ mut dn_description := description.replace_each(['\r\n', '\n', '"', '\\"'])
+ // 80 is enough to fill one line
+ if dn_description.len > 80 {
+ dn_description = dn_description[..80]
+ }
+ if dn_description.contains('\n') {
+ dn_description = dn_description.split('\n')[0]
+ }
+ // if \ is last character, it ends with \" which leads to a JS error
+ if dn_description.ends_with('\\') {
+ dn_description = dn_description.trim_right('\\')
+ }
+ return dn_description
+}
+
+fn set_output_type_from_str(format string) OutputType {
+ output_type := match format {
+ 'htm', 'html' { OutputType.html }
+ 'md', 'markdown' { OutputType.markdown }
+ 'json' { OutputType.json }
+ 'stdout' { OutputType.stdout }
+ else { OutputType.plaintext }
+ }
+ return output_type
+}
+
+fn get_ignore_paths(path string) ?[]string {
+ ignore_file_path := os.join_path(path, '.vdocignore')
+ ignore_content := os.read_file(ignore_file_path) or {
+ return error_with_code('ignore file not found.', 1)
+ }
+ mut res := []string{}
+ if ignore_content.trim_space().len > 0 {
+ rules := ignore_content.split_into_lines().map(it.trim_space())
+ mut final := []string{}
+ for rule in rules {
+ if rule.contains('*.') || rule.contains('**') {
+ println('vdoc: Wildcards in ignore rules are not allowed for now.')
+ continue
+ }
+ final << rule
+ }
+ res = final.map(os.join_path(path, it.trim_right('/')))
+ } else {
+ mut dirs := os.ls(path) or { return []string{} }
+ res = dirs.map(os.join_path(path, it)).filter(os.is_dir(it))
+ }
+ return res.map(it.replace('/', os.path_separator))
+}
+
+fn is_included(path string, ignore_paths []string) bool {
+ if path.len == 0 {
+ return true
+ }
+ for ignore_path in ignore_paths {
+ if !path.contains(ignore_path) {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+fn get_modules_list(path string, ignore_paths2 []string) []string {
+ files := os.ls(path) or { return []string{} }
+ mut ignore_paths := get_ignore_paths(path) or { []string{} }
+ ignore_paths << ignore_paths2
+ mut dirs := []string{}
+ for file in files {
+ fpath := os.join_path(path, file)
+ if os.is_dir(fpath) && is_included(fpath, ignore_paths) && !os.is_link(path) {
+ dirs << get_modules_list(fpath, ignore_paths.filter(it.starts_with(fpath)))
+ } else if fpath.ends_with('.v') && !fpath.ends_with('_test.v') {
+ if path in dirs {
+ continue
+ }
+ dirs << path
+ }
+ }
+ dirs.sort()
+ return dirs
+}
+
+fn gen_footer_text(d &doc.Doc, include_timestamp bool) string {
+ footer_text := 'Powered by vdoc.'
+ if !include_timestamp {
+ return footer_text
+ }
+ generated_time := d.time_generated
+ time_str := '$generated_time.day $generated_time.smonth() $generated_time.year $generated_time.hhmmss()'
+ return '$footer_text Generated on: $time_str'
+}
+
+fn color_highlight(code string, tb &ast.Table) string {
+ builtin := ['bool', 'string', 'i8', 'i16', 'int', 'i64', 'i128', 'byte', 'u16', 'u32', 'u64',
+ 'u128', 'rune', 'f32', 'f64', 'int_literal', 'float_literal', 'byteptr', 'voidptr', 'any']
+ highlight_code := fn (tok token.Token, typ HighlightTokenTyp) string {
+ mut lit := ''
+ match typ {
+ .unone, .operator, .punctuation {
+ lit = tok.kind.str()
+ }
+ .string {
+ use_double_quote := tok.lit.contains("'") && !tok.lit.contains('"')
+ unescaped_val := tok.lit.replace('\\\\', '\x01').replace_each(["\\'", "'", '\\"',
+ '"',
+ ])
+ if use_double_quote {
+ s := unescaped_val.replace_each(['\x01', '\\\\', '"', '\\"'])
+ lit = term.yellow('"$s"')
+ } else {
+ s := unescaped_val.replace_each(['\x01', '\\\\', "'", "\\'"])
+ lit = term.yellow("'$s'")
+ }
+ }
+ .char {
+ lit = term.yellow('`$tok.lit`')
+ }
+ .keyword {
+ lit = term.bright_blue(tok.lit)
+ }
+ .builtin, .symbol {
+ lit = term.green(tok.lit)
+ }
+ .function {
+ lit = term.cyan(tok.lit)
+ }
+ .number, .module_ {
+ lit = term.bright_blue(tok.lit)
+ }
+ .boolean {
+ lit = term.bright_magenta(tok.lit)
+ }
+ .none_ {
+ lit = term.red(tok.lit)
+ }
+ .prefix {
+ lit = term.magenta(tok.lit)
+ }
+ else {
+ lit = tok.lit
+ }
+ }
+ return lit
+ }
+ mut s := scanner.new_scanner(code, .parse_comments, &pref.Preferences{ is_fmt: true })
+ mut prev_prev := token.Token{}
+ mut prev := token.Token{}
+ mut tok := s.scan()
+ mut next_tok := s.scan()
+ mut buf := strings.new_builder(200)
+ mut i := 0
+ for i < code.len {
+ if i == tok.pos {
+ mut tok_typ := HighlightTokenTyp.unone
+ match tok.kind {
+ .name {
+ if (tok.lit in builtin || tb.known_type(tok.lit))
+ && (next_tok.kind != .lpar || prev.kind !in [.key_fn, .rpar]) {
+ tok_typ = .builtin
+ } else if
+ next_tok.kind in [.lcbr, .rpar, .eof, .comma, .pipe, .name, .rcbr, .assign, .key_pub, .key_mut, .pipe, .comma]
+ && prev.kind in [.name, .amp, .rsbr, .key_type, .assign, .dot, .question, .rpar, .key_struct, .key_enum, .pipe, .key_interface]
+ && (tok.lit[0].ascii_str().is_upper() || prev_prev.lit in ['C', 'JS']) {
+ tok_typ = .symbol
+ } else if next_tok.kind in [.lpar, .lt] {
+ tok_typ = .function
+ } else if next_tok.kind == .dot {
+ if tok.lit in ['C', 'JS'] {
+ tok_typ = .prefix
+ } else {
+ if tok.lit[0].ascii_str().is_upper() {
+ tok_typ = .symbol
+ } else {
+ tok_typ = .module_
+ }
+ }
+ } else if tok.lit in ['r', 'c'] && next_tok.kind == .string {
+ tok_typ = .prefix
+ } else {
+ tok_typ = .name
+ }
+ }
+ .comment {
+ tok_typ = .comment
+ }
+ .chartoken {
+ tok_typ = .char
+ }
+ .string {
+ tok_typ = .string
+ }
+ .number {
+ tok_typ = .number
+ }
+ .key_true, .key_false {
+ tok_typ = .boolean
+ }
+ .lpar, .lcbr, .rpar, .rcbr, .lsbr, .rsbr, .semicolon, .colon, .comma, .dot {
+ tok_typ = .punctuation
+ }
+ .key_none {
+ tok_typ = .none_
+ }
+ else {
+ if token.is_key(tok.lit) || token.is_decl(tok.kind) {
+ tok_typ = .keyword
+ } else if tok.kind == .decl_assign || tok.kind.is_assign() || tok.is_unary()
+ || tok.kind.is_relational() || tok.kind.is_infix() {
+ tok_typ = .operator
+ }
+ }
+ }
+ buf.write_string(highlight_code(tok, tok_typ))
+ if prev_prev.kind == .eof || prev.kind == .eof || next_tok.kind == .eof {
+ break
+ }
+ prev_prev = prev
+ prev = tok
+ i = tok.pos + tok.len
+ tok = next_tok
+ next_tok = s.scan()
+ } else {
+ buf.write_b(code[i])
+ i++
+ }
+ }
+ return buf.str()
+}
diff --git a/v_windows/v/old/cmd/tools/vdoc/vdoc.v b/v_windows/v/old/cmd/tools/vdoc/vdoc.v
new file mode 100644
index 0000000..f4f0264
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoc/vdoc.v
@@ -0,0 +1,511 @@
+module main
+
+import markdown
+import os
+import os.cmdline
+import time
+import strings
+import sync
+import runtime
+import v.doc
+import v.pref
+import v.vmod
+import json
+import term
+
+const (
+ allowed_formats = ['md', 'markdown', 'json', 'text', 'stdout', 'html', 'htm']
+ vexe = pref.vexe_path()
+ vroot = os.dir(vexe)
+ tabs = ['\t\t', '\t\t\t\t\t\t', '\t\t\t\t\t\t\t']
+)
+
+enum OutputType {
+ unset
+ html
+ markdown
+ json
+ plaintext
+ stdout
+}
+
+struct VDoc {
+ cfg Config [required]
+mut:
+ docs []doc.Doc
+ assets map[string]string
+ manifest vmod.Manifest
+ search_index []string
+ search_data []SearchResult
+ search_module_index []string // search results are split into a module part and the rest
+ search_module_data []SearchModuleResult
+}
+
+struct Config {
+mut:
+ pub_only bool = true
+ show_loc bool // for plaintext
+ is_color bool
+ is_multi bool
+ is_vlib bool
+ is_verbose bool
+ include_readme bool
+ include_examples bool = true
+ inline_assets bool
+ no_timestamp bool
+ output_path string
+ output_type OutputType = .unset
+ input_path string
+ symbol_name string
+ platform doc.Platform
+}
+
+//
+struct Output {
+mut:
+ path string
+ typ OutputType = .unset
+}
+
+struct ParallelDoc {
+ d doc.Doc
+ out Output
+}
+
+fn (vd VDoc) gen_json(d doc.Doc) string {
+ cfg := vd.cfg
+ mut jw := strings.new_builder(200)
+ comments := if cfg.include_examples {
+ d.head.merge_comments()
+ } else {
+ d.head.merge_comments_without_examples()
+ }
+ jw.write_string('{"module_name":"$d.head.name","description":"${escape(comments)}","contents":')
+ jw.write_string(json.encode(d.contents.keys().map(d.contents[it])))
+ jw.write_string(',"generator":"vdoc","time_generated":"$d.time_generated.str()"}')
+ return jw.str()
+}
+
+fn (vd VDoc) gen_plaintext(d doc.Doc) string {
+ cfg := vd.cfg
+ mut pw := strings.new_builder(200)
+ if cfg.is_color {
+ content_arr := d.head.content.split(' ')
+ pw.writeln('${term.bright_blue(content_arr[0])} ${term.green(content_arr[1])}\n')
+ } else {
+ pw.writeln('$d.head.content\n')
+ }
+ comments := if cfg.include_examples {
+ d.head.merge_comments()
+ } else {
+ d.head.merge_comments_without_examples()
+ }
+ if comments.trim_space().len > 0 && !cfg.pub_only {
+ pw.writeln(comments.split_into_lines().map(' ' + it).join('\n'))
+ }
+ vd.write_plaintext_content(d.contents.arr(), mut pw)
+ return pw.str()
+}
+
+fn (vd VDoc) write_plaintext_content(contents []doc.DocNode, mut pw strings.Builder) {
+ cfg := vd.cfg
+ for cn in contents {
+ if cn.content.len > 0 {
+ if cfg.is_color {
+ pw.writeln(color_highlight(cn.content, vd.docs[0].table))
+ } else {
+ pw.writeln(cn.content)
+ }
+ if cn.comments.len > 0 && !cfg.pub_only {
+ comments := if cfg.include_examples {
+ cn.merge_comments()
+ } else {
+ cn.merge_comments_without_examples()
+ }
+ pw.writeln(comments.trim_space().split_into_lines().map(' ' + it).join('\n'))
+ }
+ if cfg.show_loc {
+ pw.writeln('Location: $cn.file_path:${cn.pos.line_nr + 1}\n')
+ }
+ }
+ vd.write_plaintext_content(cn.children, mut pw)
+ }
+}
+
+fn (vd VDoc) render_doc(d doc.Doc, out Output) (string, string) {
+ name := vd.get_file_name(d.head.name, out)
+ output := match out.typ {
+ .html { vd.gen_html(d) }
+ .markdown { vd.gen_markdown(d, true) }
+ .json { vd.gen_json(d) }
+ else { vd.gen_plaintext(d) }
+ }
+ return name, output
+}
+
+// get_file_name returns the final file name from a module name
+fn (vd VDoc) get_file_name(mod string, out Output) string {
+ cfg := vd.cfg
+ mut name := mod
+ // since builtin is generated first, ignore it
+ if (cfg.is_vlib && mod == 'builtin' && !cfg.include_readme) || mod == 'README' {
+ name = 'index'
+ } else if !cfg.is_multi && !os.is_dir(out.path) {
+ name = os.file_name(out.path)
+ }
+ name = name + match out.typ {
+ .html { '.html' }
+ .markdown { '.md' }
+ .json { '.json' }
+ else { '.txt' }
+ }
+ return name
+}
+
+fn (vd VDoc) work_processor(mut work sync.Channel, mut wg sync.WaitGroup) {
+ for {
+ mut pdoc := ParallelDoc{}
+ if !work.pop(&pdoc) {
+ break
+ }
+ file_name, content := vd.render_doc(pdoc.d, pdoc.out)
+ output_path := os.join_path(pdoc.out.path, file_name)
+ println('Generating $pdoc.out.typ in "$output_path"')
+ os.write_file(output_path, content) or { panic(err) }
+ }
+ wg.done()
+}
+
+fn (vd VDoc) render_parallel(out Output) {
+ vjobs := runtime.nr_jobs()
+ mut work := sync.new_channel<ParallelDoc>(u32(vd.docs.len))
+ mut wg := sync.new_waitgroup()
+ for i in 0 .. vd.docs.len {
+ p_doc := ParallelDoc{vd.docs[i], out}
+ work.push(&p_doc)
+ }
+ work.close()
+ wg.add(vjobs)
+ for _ in 0 .. vjobs {
+ go vd.work_processor(mut work, mut wg)
+ }
+ wg.wait()
+}
+
+fn (vd VDoc) render(out Output) map[string]string {
+ mut docs := map[string]string{}
+ for doc in vd.docs {
+ name, output := vd.render_doc(doc, out)
+ docs[name] = output.trim_space()
+ }
+ vd.vprintln('Rendered: ' + docs.keys().str())
+ return docs
+}
+
+fn (vd VDoc) get_readme(path string) string {
+ mut fname := ''
+ for name in ['readme', 'README'] {
+ if os.exists(os.join_path(path, '${name}.md')) {
+ fname = name
+ break
+ }
+ }
+ if fname == '' {
+ return ''
+ }
+ readme_path := os.join_path(path, '${fname}.md')
+ vd.vprintln('Reading README file from $readme_path')
+ readme_contents := os.read_file(readme_path) or { '' }
+ return readme_contents
+}
+
+fn (vd VDoc) emit_generate_err(err IError) {
+ cfg := vd.cfg
+ mut err_msg := err.msg
+ if err.code == 1 {
+ mod_list := get_modules_list(cfg.input_path, []string{})
+ println('Available modules:\n==================')
+ for mod in mod_list {
+ println(mod.all_after('vlib/').all_after('modules/').replace('/', '.'))
+ }
+ err_msg += ' Use the `-m` flag when generating docs from a directory that has multiple modules.'
+ }
+ eprintln(err_msg)
+}
+
+fn (mut vd VDoc) generate_docs_from_file() {
+ cfg := vd.cfg
+ mut out := Output{
+ path: cfg.output_path
+ typ: cfg.output_type
+ }
+ if out.path.len == 0 {
+ if cfg.output_type == .unset {
+ out.typ = .stdout
+ } else {
+ vd.vprintln('No output path has detected. Using input path instead.')
+ out.path = cfg.input_path
+ }
+ } else if out.typ == .unset {
+ vd.vprintln('Output path detected. Identifying output type..')
+ ext := os.file_ext(out.path)
+ out.typ = set_output_type_from_str(ext.all_after('.'))
+ }
+ if cfg.include_readme && out.typ !in [.html, .stdout] {
+ eprintln('vdoc: Including README.md for doc generation is supported on HTML output, or when running directly in the terminal.')
+ exit(1)
+ }
+ dir_path := if cfg.is_vlib {
+ vroot
+ } else if os.is_dir(cfg.input_path) {
+ cfg.input_path
+ } else {
+ os.dir(cfg.input_path)
+ }
+ manifest_path := os.join_path(dir_path, 'v.mod')
+ if os.exists(manifest_path) {
+ vd.vprintln('Reading v.mod info from $manifest_path')
+ if manifest := vmod.from_file(manifest_path) {
+ vd.manifest = manifest
+ }
+ }
+ if cfg.include_readme {
+ readme_contents := vd.get_readme(dir_path)
+ comment := doc.DocComment{
+ text: readme_contents
+ }
+ if out.typ == .stdout {
+ println(markdown.to_plain(readme_contents))
+ } else if out.typ == .html && cfg.is_multi {
+ vd.docs << doc.Doc{
+ head: doc.DocNode{
+ name: 'README'
+ comments: [comment]
+ }
+ time_generated: time.now()
+ }
+ }
+ }
+ dirs := if cfg.is_multi {
+ get_modules_list(cfg.input_path, []string{})
+ } else {
+ [cfg.input_path]
+ }
+ for dirpath in dirs {
+ vd.vprintln('Generating $out.typ docs for "$dirpath"')
+ mut dcs := doc.generate(dirpath, cfg.pub_only, true, cfg.platform, cfg.symbol_name) or {
+ vd.emit_generate_err(err)
+ exit(1)
+ }
+ if dcs.contents.len == 0 {
+ continue
+ }
+ if cfg.is_multi || (!cfg.is_multi && cfg.include_readme) {
+ readme_contents := vd.get_readme(dirpath)
+ comment := doc.DocComment{
+ text: readme_contents
+ }
+ dcs.head.comments = [comment]
+ }
+ if cfg.pub_only {
+ for name, dc in dcs.contents {
+ dcs.contents[name].content = dc.content.all_after('pub ')
+ for i, cc in dc.children {
+ dcs.contents[name].children[i].content = cc.content.all_after('pub ')
+ }
+ }
+ }
+ vd.docs << dcs
+ }
+ // Important. Let builtin be in the top of the module list
+ // if we are generating docs for vlib.
+ if cfg.is_vlib {
+ mut docs := vd.docs.filter(it.head.name == 'builtin')
+ docs << vd.docs.filter(it.head.name != 'builtin')
+ vd.docs = docs
+ }
+ vd.vprintln('Rendering docs...')
+ if out.path.len == 0 || out.path == 'stdout' {
+ if out.typ == .html {
+ vd.render_static_html(out)
+ }
+ outputs := vd.render(out)
+ if outputs.len == 0 {
+ eprintln('vdoc: No documentation found for ${dirs[0]}')
+ exit(1)
+ } else {
+ first := outputs.keys()[0]
+ println(outputs[first])
+ }
+ } else {
+ if !os.exists(out.path) {
+ os.mkdir_all(out.path) or { panic(err) }
+ } else if !os.is_dir(out.path) {
+ out.path = os.real_path('.')
+ }
+ if cfg.is_multi {
+ out.path = os.join_path(out.path, '_docs')
+ if !os.exists(out.path) {
+ os.mkdir(out.path) or { panic(err) }
+ } else {
+ for fname in css_js_assets {
+ existing_asset_path := os.join_path(out.path, fname)
+ if os.exists(existing_asset_path) {
+ os.rm(existing_asset_path) or { panic(err) }
+ }
+ }
+ }
+ }
+ if out.typ == .html {
+ vd.render_static_html(out)
+ }
+ vd.render_parallel(out)
+ if out.typ == .html {
+ println('Creating search index...')
+ vd.collect_search_index(out)
+ vd.render_search_index(out)
+ // move favicons to target directory
+ println('Copying favicons...')
+ favicons := os.ls(favicons_path) or { panic(err) }
+ for favicon in favicons {
+ favicon_path := os.join_path(favicons_path, favicon)
+ destination_path := os.join_path(out.path, favicon)
+ os.cp(favicon_path, destination_path) or { panic(err) }
+ }
+ }
+ }
+}
+
+fn (vd VDoc) vprintln(str string) {
+ if vd.cfg.is_verbose {
+ println('vdoc: $str')
+ }
+}
+
+fn parse_arguments(args []string) Config {
+ mut cfg := Config{}
+ cfg.is_color = term.can_show_color_on_stdout()
+ for i := 0; i < args.len; i++ {
+ arg := args[i]
+ current_args := args[i..]
+ match arg {
+ '-all' {
+ cfg.pub_only = false
+ }
+ '-f' {
+ format := cmdline.option(current_args, '-f', '')
+ if format !in allowed_formats {
+ allowed_str := allowed_formats.join(', ')
+ eprintln('vdoc: "$format" is not a valid format. Only $allowed_str are allowed.')
+ exit(1)
+ }
+ cfg.output_type = set_output_type_from_str(format)
+ i++
+ }
+ '-color' {
+ cfg.is_color = true
+ }
+ '-no-color' {
+ cfg.is_color = false
+ }
+ '-inline-assets' {
+ cfg.inline_assets = true
+ }
+ '-l' {
+ cfg.show_loc = true
+ }
+ '-m' {
+ cfg.is_multi = true
+ }
+ '-o' {
+ opath := cmdline.option(current_args, '-o', '')
+ cfg.output_path = if opath == 'stdout' { opath } else { os.real_path(opath) }
+ i++
+ }
+ '-os' {
+ platform_str := cmdline.option(current_args, '-os', '')
+ if platform_str == 'cross' {
+ eprintln('`v doc -os cross` is not supported yet.')
+ exit(1)
+ }
+ selected_platform := doc.platform_from_string(platform_str) or {
+ eprintln(err.msg)
+ exit(1)
+ }
+ cfg.platform = selected_platform
+ i++
+ }
+ '-no-timestamp' {
+ cfg.no_timestamp = true
+ }
+ '-no-examples' {
+ cfg.include_examples = false
+ }
+ '-readme' {
+ cfg.include_readme = true
+ }
+ '-v' {
+ cfg.is_verbose = true
+ }
+ else {
+ if cfg.input_path.len < 1 {
+ cfg.input_path = arg
+ } else if !cfg.is_multi {
+ // Symbol name filtering should not be enabled
+ // in multi-module documentation mode.
+ cfg.symbol_name = arg
+ }
+ if i == args.len - 1 {
+ break
+ }
+ }
+ }
+ }
+ // Correct from configuration from user input
+ if cfg.output_path == 'stdout' && cfg.output_type == .html {
+ cfg.inline_assets = true
+ }
+ $if windows {
+ cfg.input_path = cfg.input_path.replace('/', os.path_separator)
+ } $else {
+ cfg.input_path = cfg.input_path.replace('\\', os.path_separator)
+ }
+ is_path := cfg.input_path.ends_with('.v') || cfg.input_path.split(os.path_separator).len > 1
+ || cfg.input_path == '.'
+ if cfg.input_path.trim_right('/') == 'vlib' {
+ cfg.is_vlib = true
+ cfg.is_multi = true
+ cfg.input_path = os.join_path(vroot, 'vlib')
+ } else if !is_path {
+ // TODO vd.vprintln('Input "$cfg.input_path" is not a valid path. Looking for modules named "$cfg.input_path"...')
+ mod_path := doc.lookup_module(cfg.input_path) or {
+ eprintln('vdoc: $err')
+ exit(1)
+ }
+ cfg.input_path = mod_path
+ }
+ return cfg
+}
+
+fn main() {
+ if os.args.len < 2 || '-h' in os.args || '-help' in os.args || '--help' in os.args
+ || os.args[1..] == ['doc', 'help'] {
+ os.system('$vexe help doc')
+ exit(0)
+ }
+ args := os.args[2..].clone()
+ cfg := parse_arguments(args)
+ if cfg.input_path.len == 0 {
+ eprintln('vdoc: No input path found.')
+ exit(1)
+ }
+ // Config is immutable from this point on
+ mut vd := VDoc{
+ cfg: cfg
+ manifest: vmod.Manifest{
+ repo_url: ''
+ }
+ }
+ vd.vprintln('Setting output type to "$cfg.output_type"')
+ vd.generate_docs_from_file()
+}
diff --git a/v_windows/v/old/cmd/tools/vdoctor.exe b/v_windows/v/old/cmd/tools/vdoctor.exe
new file mode 100644
index 0000000..b023ce4
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoctor.exe
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vdoctor.v b/v_windows/v/old/cmd/tools/vdoctor.v
new file mode 100644
index 0000000..7eb8901
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vdoctor.v
@@ -0,0 +1,264 @@
+import os
+import time
+import term
+import v.util.version
+import runtime
+
+struct App {
+mut:
+ report_lines []string
+ cached_cpuinfo map[string]string
+}
+
+fn (mut a App) println(s string) {
+ a.report_lines << s
+}
+
+fn (mut a App) collect_info() {
+ mut os_kind := os.user_os()
+ mut arch_details := []string{}
+ arch_details << '$runtime.nr_cpus() cpus'
+ if runtime.is_32bit() {
+ arch_details << '32bit'
+ }
+ if runtime.is_64bit() {
+ arch_details << '64bit'
+ }
+ if runtime.is_big_endian() {
+ arch_details << 'big endian'
+ }
+ if runtime.is_little_endian() {
+ arch_details << 'little endian'
+ }
+ if os_kind == 'macos' {
+ arch_details << a.cmd(command: 'sysctl -n machdep.cpu.brand_string')
+ }
+ if os_kind == 'linux' {
+ mut cpu_details := ''
+ if cpu_details == '' {
+ cpu_details = a.cpu_info('model name')
+ }
+ if cpu_details == '' {
+ cpu_details = a.cpu_info('hardware')
+ }
+ if cpu_details == '' {
+ cpu_details = os.uname().machine
+ }
+ arch_details << cpu_details
+ }
+ if os_kind == 'windows' {
+ arch_details << a.cmd(
+ command: 'wmic cpu get name /format:table'
+ line: 1
+ )
+ }
+ //
+ mut os_details := ''
+ wsl_check := a.cmd(command: 'cat /proc/sys/kernel/osrelease')
+ if os_kind == 'linux' {
+ os_details = a.get_linux_os_name()
+ if a.cpu_info('flags').contains('hypervisor') {
+ if wsl_check.contains('microsoft') {
+ // WSL 2 is a Managed VM and Full Linux Kernel
+ // See https://docs.microsoft.com/en-us/windows/wsl/compare-versions
+ os_details += ' (WSL 2)'
+ } else {
+ os_details += ' (VM)'
+ }
+ }
+ // WSL 1 is NOT a Managed VM and Full Linux Kernel
+ // See https://docs.microsoft.com/en-us/windows/wsl/compare-versions
+ if wsl_check.contains('Microsoft') {
+ os_details += ' (WSL)'
+ }
+ // From https://unix.stackexchange.com/a/14346
+ awk_cmd := '[ "$(awk \'\$5=="/" {print \$1}\' </proc/1/mountinfo)" != "$(awk \'\$5=="/" {print \$1}\' </proc/$$/mountinfo)" ] ; echo \$?'
+ if a.cmd(command: awk_cmd) == '0' {
+ os_details += ' (chroot)'
+ }
+ } else if os_kind == 'macos' {
+ mut details := []string{}
+ details << a.cmd(command: 'sw_vers -productName')
+ details << a.cmd(command: 'sw_vers -productVersion')
+ details << a.cmd(command: 'sw_vers -buildVersion')
+ os_details = details.join(', ')
+ } else if os_kind == 'windows' {
+ wmic_info := a.cmd(
+ command: 'wmic os get * /format:value'
+ line: -1
+ )
+ p := a.parse(wmic_info, '=')
+ caption, build_number, os_arch := p['caption'], p['buildnumber'], p['osarchitecture']
+ os_details = '$caption v$build_number $os_arch'
+ } else {
+ ouname := os.uname()
+ os_details = '$ouname.release, $ouname.version'
+ }
+ a.line('OS', '$os_kind, $os_details')
+ a.line('Processor', arch_details.join(', '))
+ a.line('CC version', a.cmd(command: 'cc --version'))
+ a.println('')
+ getwd := os.getwd()
+ vmodules := os.vmodules_dir()
+ vexe := os.getenv('VEXE')
+ vroot := os.dir(vexe)
+ os.chdir(vroot)
+ a.line('getwd', getwd)
+ a.line('vmodules', vmodules)
+ a.line('vroot', vroot)
+ a.line('vexe', vexe)
+ a.line('vexe mtime', time.unix(os.file_last_mod_unix(vexe)).str())
+ a.line('is vroot writable', is_writable_dir(vroot).str())
+ a.line('is vmodules writable', is_writable_dir(vmodules).str())
+ a.line('V full version', version.full_v_version(true))
+ vtmp := os.getenv('VTMP')
+ if vtmp != '' {
+ a.line('env VTMP', '"$vtmp"')
+ }
+ vflags := os.getenv('VFLAGS')
+ if vflags != '' {
+ a.line('env VFLAGS', '"$vflags"')
+ }
+ a.println('')
+ a.line('Git version', a.cmd(command: 'git --version'))
+ a.line('Git vroot status', a.git_info())
+ a.line('.git/config present', os.is_file('.git/config').str())
+ //
+ a.report_tcc_version('thirdparty/tcc')
+}
+
+struct CmdConfig {
+ line int
+ command string
+}
+
+fn (mut a App) cmd(c CmdConfig) string {
+ x := os.execute(c.command)
+ if x.exit_code < 0 {
+ return 'N/A'
+ }
+ if x.exit_code == 0 {
+ if c.line < 0 {
+ return x.output
+ }
+ output := x.output.split_into_lines()
+ if output.len > 0 && output.len > c.line {
+ return output[c.line]
+ }
+ }
+ return 'Error: $x.output'
+}
+
+fn (mut a App) line(label string, value string) {
+ a.println('$label: ${term.colorize(term.bold, value)}')
+}
+
+fn (app &App) parse(config string, sep string) map[string]string {
+ mut m := map[string]string{}
+ lines := config.split_into_lines()
+ for line in lines {
+ sline := line.trim_space()
+ if sline.len == 0 || sline[0] == `#` {
+ continue
+ }
+ x := sline.split(sep)
+ if x.len < 2 {
+ continue
+ }
+ m[x[0].trim_space().to_lower()] = x[1].trim_space().trim('"')
+ }
+ return m
+}
+
+fn (mut a App) get_linux_os_name() string {
+ mut os_details := ''
+ linux_os_methods := ['os-release', 'lsb_release', 'kernel', 'uname']
+ for m in linux_os_methods {
+ match m {
+ 'os-release' {
+ if !os.is_file('/etc/os-release') {
+ continue
+ }
+ lines := os.read_file('/etc/os-release') or { continue }
+ vals := a.parse(lines, '=')
+ if vals['PRETTY_NAME'] == '' {
+ continue
+ }
+ os_details = vals['PRETTY_NAME']
+ break
+ }
+ 'lsb_release' {
+ exists := a.cmd(command: 'type lsb_release')
+ if exists.starts_with('Error') {
+ continue
+ }
+ os_details = a.cmd(command: 'lsb_release -d -s')
+ break
+ }
+ 'kernel' {
+ if !os.is_file('/proc/version') {
+ continue
+ }
+ os_details = a.cmd(command: 'cat /proc/version')
+ break
+ }
+ 'uname' {
+ ouname := os.uname()
+ os_details = '$ouname.release, $ouname.version'
+ break
+ }
+ else {}
+ }
+ }
+ return os_details
+}
+
+fn (mut a App) cpu_info(key string) string {
+ if a.cached_cpuinfo.len > 0 {
+ return a.cached_cpuinfo[key]
+ }
+ info := os.execute('cat /proc/cpuinfo')
+ if info.exit_code != 0 {
+ return '`cat /proc/cpuinfo` could not run'
+ }
+ a.cached_cpuinfo = a.parse(info.output, ':')
+ return a.cached_cpuinfo[key]
+}
+
+fn (mut a App) git_info() string {
+ mut out := a.cmd(command: 'git -C . describe --abbrev=8 --dirty --always --tags').trim_space()
+ os.execute('git -C . remote add V_REPO https://github.com/vlang/v') // ignore failure (i.e. remote exists)
+ os.execute('git -C . fetch V_REPO')
+ commit_count := a.cmd(command: 'git rev-list @{0}...V_REPO/master --right-only --count').int()
+ if commit_count > 0 {
+ out += ' ($commit_count commit(s) behind V master)'
+ }
+ return out
+}
+
+fn (mut a App) report_tcc_version(tccfolder string) {
+ if !os.is_file(os.join_path(tccfolder, '.git', 'config')) {
+ a.line(tccfolder, 'N/A')
+ return
+ }
+ tcc_branch_name := a.cmd(command: 'git -C $tccfolder rev-parse --abbrev-ref HEAD')
+ tcc_commit := a.cmd(command: 'git -C $tccfolder describe --abbrev=8 --dirty --always --tags')
+ a.line('$tccfolder status', '$tcc_branch_name $tcc_commit')
+}
+
+fn (mut a App) report_info() {
+ for x in a.report_lines {
+ println(x)
+ }
+}
+
+fn is_writable_dir(path string) bool {
+ res := os.is_writable_folder(path) or { false }
+ return res
+}
+
+fn main() {
+ mut app := App{}
+ app.collect_info()
+ app.report_info()
+}
diff --git a/v_windows/v/old/cmd/tools/vfmt.v b/v_windows/v/old/cmd/tools/vfmt.v
new file mode 100644
index 0000000..ae10f1a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vfmt.v
@@ -0,0 +1,334 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module main
+
+import os
+import os.cmdline
+import rand
+import term
+import v.ast
+import v.pref
+import v.fmt
+import v.util
+import v.util.diff
+import v.parser
+import vhelp
+
+// FormatOptions holds the parsed command line flags of `v fmt`.
+struct FormatOptions {
+	is_l       bool // list files whose formatting differs from vfmt's
+	is_c       bool // NB: This refers to the '-c' fmt flag, NOT the C backend
+	is_w       bool // write the formatted output back over the source file
+	is_diff    bool // display a diff between the original and formatted file
+	is_verbose bool
+	is_all     bool
+	is_debug   bool
+	is_noerror bool
+	is_verify  bool // exit(1) if the file is not vfmt'ed
+	is_worker  bool // true *only* in the worker processes. NB: workers can crash.
+}
+
+const (
+	// The token is built by concatenation, so the literal never appears
+	// verbatim in this file (vfmt can safely format its own source).
+	formatted_file_token = '\@\@\@' + 'FORMATTED_FILE: '
+	vtmp_folder          = util.get_vtmp_folder() // where workers write formatted copies
+	term_colors          = term.can_show_color_on_stderr()
+)
+
+// main implements `v fmt`: it parses the command line flags, then spawns one
+// worker sub process (this same executable, with -worker) per .v file, and
+// post processes each worker's result according to the chosen flags.
+fn main() {
+	// if os.getenv('VFMT_ENABLE') == '' {
+	// eprintln('v fmt is disabled for now')
+	// exit(1)
+	// }
+	toolexe := os.executable()
+	util.set_vroot_folder(os.dir(os.dir(os.dir(toolexe))))
+	args := util.join_env_vflags_and_os_args()
+	mut foptions := FormatOptions{
+		is_c: '-c' in args
+		is_l: '-l' in args
+		is_w: '-w' in args
+		is_diff: '-diff' in args
+		is_verbose: '-verbose' in args || '--verbose' in args
+		is_all: '-all' in args || '--all' in args
+		is_worker: '-worker' in args
+		is_debug: '-debug' in args
+		is_noerror: '-noerror' in args
+		is_verify: '-verify' in args
+	}
+	if term_colors {
+		os.setenv('VCOLORS', 'always', true)
+	}
+	if foptions.is_verbose {
+		eprintln('vfmt foptions: $foptions')
+	}
+	if foptions.is_worker {
+		// -worker should be added by a parent vfmt process.
+		// We launch a sub process for each file because
+		// the v compiler can do an early exit if it detects
+		// a syntax error, but we want to process ALL passed
+		// files if possible.
+		foptions.format_file(cmdline.option(args, '-worker', ''))
+		exit(0)
+	}
+	// we are NOT a worker at this stage, i.e. we are a parent vfmt process
+	possible_files := cmdline.only_non_options(cmdline.options_after(args, ['fmt']))
+	if foptions.is_verbose {
+		eprintln('vfmt toolexe: $toolexe')
+		eprintln('vfmt args: ' + os.args.str())
+		eprintln('vfmt env_vflags_and_os_args: ' + args.str())
+		eprintln('vfmt possible_files: ' + possible_files.str())
+	}
+	files := util.find_all_v_files(possible_files) or {
+		verror(err.msg)
+		return
+	}
+	// no files given and stdin is not a terminal => format the piped input
+	if os.is_atty(0) == 0 && files.len == 0 {
+		foptions.format_pipe()
+		exit(0)
+	}
+	if files.len == 0 || '-help' in args || '--help' in args {
+		vhelp.show_topic('fmt')
+		exit(0)
+	}
+	// keep only the flags (strip the file arguments), so they can be
+	// forwarded to each worker process
+	mut cli_args_no_files := []string{}
+	for a in os.args {
+		if a !in files {
+			cli_args_no_files << a
+		}
+	}
+	mut errors := 0
+	for file in files {
+		fpath := os.real_path(file)
+		mut worker_command_array := cli_args_no_files.clone()
+		worker_command_array << ['-worker', util.quote_path(fpath)]
+		worker_cmd := worker_command_array.join(' ')
+		if foptions.is_verbose {
+			eprintln('vfmt worker_cmd: $worker_cmd')
+		}
+		worker_result := os.execute(worker_cmd)
+		// Guard against a possibly crashing worker process.
+		if worker_result.exit_code != 0 {
+			eprintln(worker_result.output)
+			if worker_result.exit_code == 1 {
+				eprintln('Internal vfmt error while formatting file: ${file}.')
+			}
+			errors++
+			continue
+		}
+		if worker_result.output.len > 0 {
+			// a successful worker prints the formatted file path after formatted_file_token
+			if worker_result.output.contains(formatted_file_token) {
+				wresult := worker_result.output.split(formatted_file_token)
+				formatted_warn_errs := wresult[0]
+				formatted_file_path := wresult[1].trim_right('\n\r')
+				foptions.post_process_file(fpath, formatted_file_path) or { errors = errors + 1 }
+				if formatted_warn_errs.len > 0 {
+					eprintln(formatted_warn_errs)
+				}
+				continue
+			}
+		}
+		errors++
+	}
+	if errors > 0 {
+		eprintln('Encountered a total of: $errors errors.')
+		// exit code depends on the mode: -noerror always succeeds,
+		// -verify fails with 1, -c fails with 2
+		if foptions.is_noerror {
+			exit(0)
+		}
+		if foptions.is_verify {
+			exit(1)
+		}
+		if foptions.is_c {
+			exit(2)
+		}
+		exit(1)
+	}
+}
+
+// format_file parses and reformats a single `file`; it runs inside a worker
+// sub process. The formatted content is written to a unique file under the V
+// tmp folder, and its path is announced on stderr after formatted_file_token,
+// so the parent vfmt process can pick it up.
+fn (foptions &FormatOptions) format_file(file string) {
+	mut prefs := pref.new_preferences()
+	prefs.is_fmt = true
+	if foptions.is_verbose {
+		eprintln('vfmt2 running fmt.fmt over file: $file')
+	}
+	table := ast.new_table()
+	// checker := checker.new_checker(table, prefs)
+	file_ast := parser.parse_file(file, table, .parse_comments, prefs)
+	// checker.check(file_ast)
+	formatted_content := fmt.fmt(file_ast, table, prefs, foptions.is_debug)
+	file_name := os.file_name(file)
+	ulid := rand.ulid()
+	// a random ulid in the name prevents clashes between parallel workers
+	vfmt_output_path := os.join_path(vtmp_folder, 'vfmt_${ulid}_$file_name')
+	os.write_file(vfmt_output_path, formatted_content) or { panic(err) }
+	if foptions.is_verbose {
+		eprintln('fmt.fmt worked and $formatted_content.len bytes were written to $vfmt_output_path .')
+	}
+	eprintln('$formatted_file_token$vfmt_output_path')
+}
+
+// format_pipe reads V source code from stdin, reformats it, and prints the
+// result to stdout. Used when `v fmt` is run at the end of a shell pipe.
+fn (foptions &FormatOptions) format_pipe() {
+	mut prefs := pref.new_preferences()
+	prefs.is_fmt = true
+	if foptions.is_verbose {
+		eprintln('vfmt2 running fmt.fmt over stdin')
+	}
+	input_text := os.get_raw_lines_joined()
+	table := ast.new_table()
+	// checker := checker.new_checker(table, prefs)
+	file_ast := parser.parse_text(input_text, '', table, .parse_comments, prefs)
+	// checker.check(file_ast)
+	formatted_content := fmt.fmt(file_ast, table, prefs, foptions.is_debug)
+	print(formatted_content)
+	if foptions.is_verbose {
+		eprintln('fmt.fmt worked and $formatted_content.len bytes were written to stdout.')
+	}
+}
+
+// print_compiler_options dumps the given compiler preferences to stderr,
+// for debugging purposes.
+// NB: the duplicated `out_name` line that used to be here was removed.
+fn print_compiler_options(compiler_params &pref.Preferences) {
+	eprintln(' os: ' + compiler_params.os.str())
+	eprintln(' ccompiler: $compiler_params.ccompiler')
+	eprintln(' path: $compiler_params.path ')
+	eprintln(' out_name: $compiler_params.out_name ')
+	eprintln(' vroot: $compiler_params.vroot ')
+	eprintln('lookup_path: $compiler_params.lookup_path ')
+	eprintln(' cflags: $compiler_params.cflags ')
+	eprintln(' is_test: $compiler_params.is_test ')
+	eprintln(' is_script: $compiler_params.is_script ')
+}
+
+// post_process_file compares the original `file` with its formatted version
+// at `formatted_file_path`, and acts according to the chosen flags:
+// -diff prints a colored diff; -verify returns an error when the file is not
+// vfmt'ed; -c errors on any difference; -l lists files needing formatting;
+// -w overwrites the original; the default prints the formatted content.
+fn (foptions &FormatOptions) post_process_file(file string, formatted_file_path string) ? {
+	if formatted_file_path.len == 0 {
+		return
+	}
+	if foptions.is_diff {
+		diff_cmd := diff.find_working_diff_command() or {
+			eprintln(err)
+			return
+		}
+		if foptions.is_verbose {
+			eprintln('Using diff command: $diff_cmd')
+		}
+		// NB: named diff_content, NOT `diff`, to avoid shadowing the imported `diff` module
+		diff_content := diff.color_compare_files(diff_cmd, file, formatted_file_path)
+		if diff_content.len > 0 {
+			println(diff_content)
+		}
+		return
+	}
+	if foptions.is_verify {
+		diff_cmd := diff.find_working_diff_command() or {
+			eprintln(err)
+			return
+		}
+		x := diff.color_compare_files(diff_cmd, file, formatted_file_path)
+		if x.len != 0 {
+			println("$file is not vfmt'ed")
+			// the error text is unused by the caller; it only increments the error count
+			return error('')
+		}
+		return
+	}
+	fc := os.read_file(file) or {
+		eprintln('File $file could not be read')
+		return
+	}
+	formatted_fc := os.read_file(formatted_file_path) or {
+		eprintln('File $formatted_file_path could not be read')
+		return
+	}
+	is_formatted_different := fc != formatted_fc
+	if foptions.is_c {
+		if is_formatted_different {
+			eprintln('File is not formatted: $file')
+			return error('')
+		}
+		return
+	}
+	if foptions.is_l {
+		if is_formatted_different {
+			eprintln('File needs formatting: $file')
+		}
+		return
+	}
+	if foptions.is_w {
+		if is_formatted_different {
+			os.mv_by_cp(formatted_file_path, file) or { panic(err) }
+			eprintln('Reformatted file: $file')
+		} else {
+			eprintln('Already formatted file: $file')
+		}
+		return
+	}
+	// default: just print the formatted content to stdout
+	print(formatted_fc)
+}
+
+// str returns a human readable representation of the format options;
+// it is used by the `-verbose` debug output.
+// Fixes: the return value now follows `return` on the same line (a bare
+// `return` followed by the expression on the next line is invalid V),
+// and a stray `"` before the closing `}` was removed.
+fn (f FormatOptions) str() string {
+	return 'FormatOptions{ is_l: $f.is_l, is_w: $f.is_w, is_diff: $f.is_diff, is_verbose: $f.is_verbose,' +
+		' is_all: $f.is_all, is_worker: $f.is_worker, is_debug: $f.is_debug, is_noerror: $f.is_noerror,' +
+		' is_verify: $f.is_verify }'
+}
+
+// file_to_mod_name_and_is_module_file returns the module name declared in
+// `file`, and whether the file belongs to a module other than `main`.
+// When the file can not be read, it defaults to ('main', false).
+fn file_to_mod_name_and_is_module_file(file string) (string, bool) {
+	mut mod_name := 'main'
+	mut is_module_file := false
+	flines := read_source_lines(file) or { return mod_name, is_module_file }
+	for fline in flines {
+		line := fline.trim_space()
+		if line.starts_with('module ') {
+			if !line.starts_with('module main') {
+				is_module_file = true
+				mod_name = line.replace('module ', ' ').trim_space()
+			}
+			// only the first `module` declaration matters
+			break
+		}
+	}
+	return mod_name, is_module_file
+}
+
+// read_source_lines returns the lines of `file`, or an error when it can not be read.
+fn read_source_lines(file string) ?[]string {
+	return os.read_lines(file) or { return error('can not read $file') }
+}
+
+// get_compile_name_of_potential_v_project returns:
+// a) the file's folder, if file is part of a v project
+// b) the file itself, if the file is a standalone v program
+fn get_compile_name_of_potential_v_project(file string) string {
+	pfolder := os.real_path(os.dir(file))
+	// a .v project has many 'module main' files in one folder
+	// if there is only one .v file, then it must be a standalone
+	all_files_in_pfolder := os.ls(pfolder) or { panic(err) }
+	mut vfiles := []string{}
+	for f in all_files_in_pfolder {
+		vf := os.join_path(pfolder, f)
+		// skip hidden files, non-.v files, and folders that happen to end in .v
+		if f.starts_with('.') || !f.ends_with('.v') || os.is_dir(vf) {
+			continue
+		}
+		vfiles << vf
+	}
+	if vfiles.len == 1 {
+		return file
+	}
+	// /////////////////////////////////////////////////////////////
+	// At this point, we know there are many .v files in the folder
+	// We will have to read them all, and if there are more than one
+	// containing `fn main` then the folder contains multiple standalone
+	// v programs. If only one contains `fn main` then the folder is
+	// a project folder, that should be compiled with `v pfolder`.
+	mut main_fns := 0
+	for f in vfiles {
+		slines := read_source_lines(f) or { panic(err) }
+		for line in slines {
+			if line.contains('fn main()') {
+				main_fns++
+				// two or more `fn main` => standalone programs, not a project
+				if main_fns > 1 {
+					return file
+				}
+			}
+		}
+	}
+	return pfolder
+}
+
+// verror reports the vfmt error message `s`, then exits.
+[noreturn]
+fn verror(s string) {
+	util.verror('vfmt error', s)
+}
diff --git a/v_windows/v/old/cmd/tools/vpm.exe b/v_windows/v/old/cmd/tools/vpm.exe
new file mode 100644
index 0000000..2f4b9c9
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vpm.exe
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vpm.v b/v_windows/v/old/cmd/tools/vpm.v
new file mode 100644
index 0000000..a503d24
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vpm.v
@@ -0,0 +1,601 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module main
+
+import os
+import os.cmdline
+import net.http
+import json
+import vhelp
+import v.vmod
+
+const (
+	// default_vpm_server_urls is used when no -server-url options are given
+	default_vpm_server_urls = ['https://vpm.vlang.io']
+	valid_vpm_commands = ['help', 'search', 'install', 'update', 'upgrade', 'outdated',
+		'list', 'remove', 'show']
+	// excluded_dirs are ~/.vmodules subfolders that are not modules themselves
+	excluded_dirs = ['cache', 'vlib']
+	supported_vcs_systems = ['git', 'hg']
+	supported_vcs_folders = ['.git', '.hg']
+	supported_vcs_update_cmds = map{
+		'git': 'git pull'
+		'hg': 'hg pull --update'
+	}
+	supported_vcs_install_cmds = map{
+		'git': 'git clone --depth=1'
+		'hg': 'hg clone'
+	}
+	// the command sequences used by get_outdated/0 to detect whether a
+	// module checkout is behind its upstream repository
+	supported_vcs_outdated_steps = map{
+		'git': ['git fetch', 'git rev-parse @', 'git rev-parse @{u}']
+		'hg': ['hg incoming']
+	}
+)
+
+// Mod is the module metadata record, JSON-decoded from the /jsmod/:name
+// endpoint of a vpm server (see get_module_meta_info/1).
+struct Mod {
+	id           int
+	name         string
+	url          string
+	nr_downloads int
+	vcs          string
+}
+
+// Vmod is the subset of v.mod fields that vpm needs, filled in by parse_vmod/1.
+struct Vmod {
+mut:
+	name    string
+	version string
+	deps    []string
+}
+
+// main dispatches the vpm subcommand given on the command line.
+fn main() {
+	init_settings()
+	// This tool is intended to be launched by the v frontend,
+	// which provides the path to V inside os.getenv('VEXE')
+	// args are: vpm [options] SUBCOMMAND module names
+	params := cmdline.only_non_options(os.args[1..])
+	verbose_println('cli params: $params')
+	if params.len < 1 {
+		vpm_help()
+		exit(5)
+	}
+	vpm_command := params[0]
+	mut module_names := params[1..]
+	ensure_vmodules_dir_exist()
+	// println('module names: ') println(module_names)
+	match vpm_command {
+		'help' {
+			vpm_help()
+		}
+		'search' {
+			vpm_search(module_names)
+		}
+		'install' {
+			// with no explicit modules, install the dependencies of the local v.mod
+			if module_names.len == 0 && os.exists('./v.mod') {
+				println('Detected v.mod file inside the project directory. Using it...')
+				manifest := vmod.from_file('./v.mod') or { panic(err) }
+				module_names = manifest.dependencies
+			}
+			vpm_install(module_names)
+		}
+		'update' {
+			vpm_update(module_names)
+		}
+		'upgrade' {
+			vpm_upgrade()
+		}
+		'outdated' {
+			vpm_outdated()
+		}
+		'list' {
+			vpm_list()
+		}
+		'remove' {
+			vpm_remove(module_names)
+		}
+		'show' {
+			vpm_show(module_names)
+		}
+		else {
+			println('Error: you tried to run "v $vpm_command"')
+			println('... but the v package management tool vpm only knows about these commands:')
+			for validcmd in valid_vpm_commands {
+				println('    v $validcmd')
+			}
+			exit(3)
+		}
+	}
+}
+
+// vpm_search prints the modules registered on the vpm server, whose names
+// contain all given keywords, marking the ones already installed locally.
+fn vpm_search(keywords []string) {
+	// module names use '-' where users may type '_'
+	search_keys := keywords.map(it.replace('_', '-'))
+	if settings.is_help {
+		vhelp.show_topic('search')
+		exit(0)
+	}
+	if search_keys.len == 0 {
+		println('´v search´ requires *at least one* keyword.')
+		exit(2)
+	}
+	modules := get_all_modules()
+	installed_modules := get_installed_modules()
+	joined := search_keys.join(', ')
+	mut index := 0
+	for mod in modules {
+		// TODO for some reason .filter results in substr error, so do it manually
+		for k in search_keys {
+			if !mod.contains(k) {
+				continue
+			}
+			if index == 0 {
+				println('Search results for "$joined":\n')
+			}
+			index++
+			mut parts := mod.split('.')
+			// in case the author isn't present
+			if parts.len == 1 {
+				parts << parts[0]
+				parts[0] = ' '
+			} else {
+				parts[0] = ' by ${parts[0]} '
+			}
+			installed := if mod in installed_modules { ' (installed)' } else { '' }
+			println('${index}. ${parts[1]}${parts[0]}[$mod]$installed')
+			break
+		}
+	}
+	if index == 0 {
+		// nothing matched on the server; check vlib for a same-named builtin module
+		vexe := os.getenv('VEXE')
+		vroot := os.real_path(os.dir(vexe))
+		mut messages := ['No module(s) found for `$joined` .']
+		for vlibmod in search_keys {
+			if os.is_dir(os.join_path(vroot, 'vlib', vlibmod)) {
+				messages << 'There is already an existing "$vlibmod" module in vlib, so you can just `import $vlibmod` .'
+			}
+		}
+		for m in messages {
+			println(m)
+		}
+	} else {
+		println('\nUse "v install author_name.module_name" to install the module.')
+	}
+}
+
+// vpm_install downloads and installs each of the given modules into
+// ~/.vmodules, resolving their v.mod dependencies recursively. Modules that
+// are already installed are updated instead. Exits with 1 on any failure.
+fn vpm_install(module_names []string) {
+	if settings.is_help {
+		vhelp.show_topic('install')
+		exit(0)
+	}
+	if module_names.len == 0 {
+		println('´v install´ requires *at least one* module name.')
+		exit(2)
+	}
+	mut errors := 0
+	for n in module_names {
+		name := n.trim_space().replace('_', '-')
+		mod := get_module_meta_info(name) or {
+			errors++
+			println('Errors while retrieving meta data for module $name:')
+			println(err)
+			continue
+		}
+		mut vcs := mod.vcs
+		if vcs == '' {
+			// default to the first supported vcs, i.e. git
+			vcs = supported_vcs_systems[0]
+		}
+		if vcs !in supported_vcs_systems {
+			errors++
+			println('Skipping module "$name", since it uses an unsupported VCS {$vcs} .')
+			continue
+		}
+		// `author.mod-name` => `author/mod_name` under the vmodules folder
+		mod_name_as_path := mod.name.replace('.', os.path_separator).replace('-', '_').to_lower()
+		final_module_path := os.real_path(os.join_path(settings.vmodules_path, mod_name_as_path))
+		if os.exists(final_module_path) {
+			vpm_update([name])
+			continue
+		}
+		println('Installing module "$name" from $mod.url to $final_module_path ...')
+		vcs_install_cmd := supported_vcs_install_cmds[vcs]
+		cmd := '$vcs_install_cmd "$mod.url" "$final_module_path"'
+		verbose_println('      command: $cmd')
+		cmdres := os.execute(cmd)
+		if cmdres.exit_code != 0 {
+			errors++
+			println('Failed installing module "$name" to "$final_module_path" .')
+			verbose_println('Failed command: $cmd')
+			verbose_println('Failed command output:\n$cmdres.output')
+			continue
+		}
+		resolve_dependencies(name, final_module_path, module_names)
+	}
+	if errors > 0 {
+		exit(1)
+	}
+}
+
+// vpm_update runs the vcs update command (e.g. `git pull`) for each given
+// module, or for all installed modules when none are given, then resolves
+// their dependencies. Exits with 1 on any failure.
+fn vpm_update(m []string) {
+	mut module_names := m.clone()
+	if settings.is_help {
+		vhelp.show_topic('update')
+		exit(0)
+	}
+	if module_names.len == 0 {
+		module_names = get_installed_modules()
+	}
+	mut errors := 0
+	for name in module_names {
+		final_module_path := valid_final_path_of_existing_module(name) or { continue }
+		// NB: the working directory is changed and not restored afterwards
+		os.chdir(final_module_path)
+		println('Updating module "$name"...')
+		verbose_println('  work folder: $final_module_path')
+		vcs := vcs_used_in_dir(final_module_path) or { continue }
+		vcs_cmd := supported_vcs_update_cmds[vcs[0]]
+		verbose_println('    command: $vcs_cmd')
+		vcs_res := os.execute('$vcs_cmd')
+		if vcs_res.exit_code != 0 {
+			errors++
+			println('Failed updating module "$name".')
+			verbose_println('Failed command: $vcs_cmd')
+			verbose_println('Failed details:\n$vcs_res.output')
+			continue
+		} else {
+			verbose_println('    $vcs_res.output.trim_space()')
+		}
+		resolve_dependencies(name, final_module_path, module_names)
+	}
+	if errors > 0 {
+		exit(1)
+	}
+}
+
+// get_outdated returns the names of the installed modules that are behind
+// their upstream repositories, or an error when a vcs command fails.
+fn get_outdated() ?[]string {
+	module_names := get_installed_modules()
+	mut outdated := []string{}
+	for name in module_names {
+		final_module_path := valid_final_path_of_existing_module(name) or { continue }
+		os.chdir(final_module_path)
+		vcs := vcs_used_in_dir(final_module_path) or { continue }
+		vcs_cmd_steps := supported_vcs_outdated_steps[vcs[0]]
+		mut outputs := []string{}
+		for step in vcs_cmd_steps {
+			res := os.execute(step)
+			if res.exit_code < 0 {
+				verbose_println('Error command: $step')
+				verbose_println('Error details:\n$res.output')
+				return error('Error while checking latest commits for "$name".')
+			}
+			if vcs[0] == 'hg' {
+				// NOTE(review): `hg incoming` exits 0 when there ARE incoming
+				// changes and 1 when there are none - confirm this `== 1`
+				// check is not inverted.
+				if res.exit_code == 1 {
+					outdated << name
+				}
+			} else {
+				outputs << res.output
+			}
+		}
+		// for git: outputs[1] is the local HEAD hash, outputs[2] the upstream one
+		if vcs[0] == 'git' && outputs[1] != outputs[2] {
+			outdated << name
+		}
+	}
+	return outdated
+}
+
+// vpm_upgrade updates every installed module that has new upstream commits.
+fn vpm_upgrade() {
+	outdated := get_outdated() or { exit(1) }
+	if outdated.len == 0 {
+		println('Modules are up to date.')
+	} else {
+		vpm_update(outdated)
+	}
+}
+
+// vpm_outdated lists the installed modules that have new upstream commits.
+fn vpm_outdated() {
+	outdated := get_outdated() or { exit(1) }
+	if outdated.len == 0 {
+		println('Modules are up to date.')
+		return
+	}
+	println('Outdated modules:')
+	for name in outdated {
+		println(' $name')
+	}
+}
+
+// vpm_list prints the names of all installed modules.
+fn vpm_list() {
+	installed := get_installed_modules()
+	if installed.len == 0 {
+		println('You have no modules installed.')
+		exit(0)
+	}
+	println('Installed modules:')
+	for name in installed {
+		println(' $name')
+	}
+}
+
+// vpm_remove deletes the folders of the given installed modules, cleaning up
+// the author folder too, when it becomes empty.
+fn vpm_remove(module_names []string) {
+	if settings.is_help {
+		vhelp.show_topic('remove')
+		exit(0)
+	}
+	if module_names.len == 0 {
+		println('´v remove´ requires *at least one* module name.')
+		exit(2)
+	}
+	for name in module_names {
+		final_module_path := valid_final_path_of_existing_module(name) or { continue }
+		println('Removing module "$name"...')
+		verbose_println('removing folder $final_module_path')
+		os.rmdir_all(final_module_path) or {
+			verbose_println('error while removing "$final_module_path": $err.msg')
+		}
+		// delete author directory if it is empty
+		author := name.split('.')[0]
+		author_dir := os.real_path(os.join_path(settings.vmodules_path, author))
+		if !os.exists(author_dir) {
+			continue
+		}
+		if os.is_dir_empty(author_dir) {
+			verbose_println('removing author folder $author_dir')
+			os.rmdir(author_dir) or {
+				verbose_println('error while removing "$author_dir": $err.msg')
+			}
+		}
+	}
+}
+
+// valid_final_path_of_existing_module maps a module name like `author.mod`
+// to its real path under ~/.vmodules. It returns none (after printing an
+// explanation) when the module folder is missing, is not a folder, or is
+// not a checkout of a supported vcs.
+fn valid_final_path_of_existing_module(name string) ?string {
+	mod_name_as_path := name.replace('.', os.path_separator).replace('-', '_').to_lower()
+	name_of_vmodules_folder := os.join_path(settings.vmodules_path, mod_name_as_path)
+	final_module_path := os.real_path(name_of_vmodules_folder)
+	if !os.exists(final_module_path) {
+		println('No module with name "$name" exists at $name_of_vmodules_folder')
+		return none
+	}
+	if !os.is_dir(final_module_path) {
+		println('Skipping "$name_of_vmodules_folder", since it is not a folder.')
+		return none
+	}
+	vcs_used_in_dir(final_module_path) or {
+		println('Skipping "$name_of_vmodules_folder", since it does not use a supported vcs.')
+		return none
+	}
+	return final_module_path
+}
+
+// ensure_vmodules_dir_exist creates the vmodules folder, if it is missing.
+fn ensure_vmodules_dir_exist() {
+	if os.is_dir(settings.vmodules_path) {
+		return
+	}
+	println('Creating $settings.vmodules_path/ ...')
+	os.mkdir(settings.vmodules_path) or { panic(err) }
+}
+
+// vpm_help shows the help topic for the vpm command itself.
+fn vpm_help() {
+	vhelp.show_topic('vpm')
+}
+
+// vcs_used_in_dir returns the version control systems detected in `dir`
+// (e.g. ['git']), based on the presence of their metadata folders, or
+// none when no supported vcs folder is found.
+fn vcs_used_in_dir(dir string) ?[]string {
+	mut found := []string{}
+	for repo_subfolder in supported_vcs_folders {
+		checked_folder := os.real_path(os.join_path(dir, repo_subfolder))
+		if !os.is_dir(checked_folder) {
+			continue
+		}
+		// '.git' => 'git', '.hg' => 'hg'
+		found << repo_subfolder.replace('.', '')
+	}
+	if found.len == 0 {
+		return none
+	}
+	return found
+}
+
+// get_installed_modules scans the vmodules folder and returns the installed
+// module names: either short official names (folders with a v.mod and a git
+// checkout), or 'author.module' pairs for nested folders under a vcs.
+fn get_installed_modules() []string {
+	dirs := os.ls(settings.vmodules_path) or { return [] }
+	mut modules := []string{}
+	for dir in dirs {
+		adir := os.join_path(settings.vmodules_path, dir)
+		if dir in excluded_dirs || !os.is_dir(adir) {
+			continue
+		}
+		if os.exists(os.join_path(adir, 'v.mod')) && os.exists(os.join_path(adir, '.git', 'config')) {
+			// an official vlang module with a short module name, like `vsl`, `ui` or `markdown`
+			modules << dir
+			continue
+		}
+		author := dir
+		mods := os.ls(adir) or { continue }
+		for m in mods {
+			// only count subfolders that are actual vcs checkouts
+			vcs_used_in_dir(os.join_path(adir, m)) or { continue }
+			modules << '${author}.$m'
+		}
+	}
+	return modules
+}
+
+// get_all_modules returns the names of all modules registered on the vpm
+// server, by scraping the anchor tags of its html index page.
+fn get_all_modules() []string {
+	url := get_working_server_url()
+	r := http.get(url) or { panic(err) }
+	if r.status_code != 200 {
+		println('Failed to search vpm.vlang.io. Status code: $r.status_code')
+		exit(1)
+	}
+	s := r.text
+	mut read_len := 0
+	mut modules := []string{}
+	for read_len < s.len {
+		mut start_token := '<a href="/mod'
+		end_token := '</a>'
+		// get the start index of the module entry
+		mut start_index := s.index_after(start_token, read_len)
+		if start_index == -1 {
+			break
+		}
+		// get the index of the end of anchor (a) opening tag
+		// we use the previous start_index to make sure we are getting a module and not just a random 'a' tag
+		start_token = '">'
+		start_index = s.index_after(start_token, start_index) + start_token.len
+		// get the index of the end of module entry
+		end_index := s.index_after(end_token, start_index)
+		if end_index == -1 {
+			break
+		}
+		modules << s[start_index..end_index]
+		read_len = end_index
+		if read_len >= s.len {
+			break
+		}
+	}
+	return modules
+}
+
+// resolve_dependencies reads the v.mod file of a freshly installed/updated
+// module, and installs any dependencies that the user did not already request.
+fn resolve_dependencies(name string, module_path string, module_names []string) {
+	vmod_path := os.join_path(module_path, 'v.mod')
+	if !os.exists(vmod_path) {
+		return
+	}
+	data := os.read_file(vmod_path) or { return }
+	vmod := parse_vmod(data)
+	// filter out dependencies that were already specified by the user
+	deps := vmod.deps.filter(it !in module_names)
+	if deps.len == 0 {
+		return
+	}
+	println('Resolving $deps.len dependencies for module "$name"...')
+	verbose_println('Found dependencies: $deps')
+	vpm_install(deps)
+}
+
+// parse_vmod extracts the name, version and deps fields from the raw v.mod
+// file content `data`, using simple string scanning (not a full parser).
+fn parse_vmod(data string) Vmod {
+	keys := ['name', 'version', 'deps']
+	mut m := map{
+		'name': ''
+		'version': ''
+		'deps': ''
+	}
+	for key in keys {
+		mut key_index := data.index('$key:') or { continue }
+		key_index += key.len + 1
+		// take everything up to the end of the line, stripping quotes and brackets
+		m[key] = data[key_index..data.index_after('\n', key_index)].trim_space().replace("'",
+			'').replace('[', '').replace(']', '')
+	}
+	mut vmod := Vmod{}
+	vmod.name = m['name']
+	vmod.version = m['version']
+	if m['deps'].len > 0 {
+		vmod.deps = m['deps'].split(',')
+	}
+	return vmod
+}
+
+// get_working_server_url returns the first reachable vpm server url, from
+// the user supplied -server-url options, or the default servers; it panics
+// when none of them respond.
+fn get_working_server_url() string {
+	server_urls := if settings.server_urls.len > 0 {
+		settings.server_urls
+	} else {
+		default_vpm_server_urls
+	}
+	for url in server_urls {
+		verbose_println('Trying server url: $url')
+		// a cheap HEAD request is enough to check reachability
+		http.head(url) or {
+			verbose_println('                   $url failed.')
+			continue
+		}
+		return url
+	}
+	panic('No responding vpm server found. Please check your network connectivity and try again later.')
+}
+
+// settings context:
+// VpmSettings holds the global, run-wide vpm configuration,
+// filled in once by init_settings().
+struct VpmSettings {
+mut:
+	is_help       bool
+	is_verbose    bool
+	server_urls   []string // from repeated -server-url options
+	vmodules_path string   // where modules get installed
+}
+
+const (
+	// settings is the single, globally shared VpmSettings instance;
+	// it is mutated only once, at startup, by init_settings().
+	settings = &VpmSettings{}
+)
+
+// init_settings fills in the global `settings` instance, based on the
+// command line options and the environment.
+fn init_settings() {
+	// cast away the const immutability of `settings`, so the fields can be
+	// initialised here exactly once
+	mut s := &VpmSettings(0)
+	unsafe {
+		s = settings
+	}
+	s.is_help = '-h' in os.args || '--help' in os.args || 'help' in os.args
+	s.is_verbose = '-v' in os.args
+	s.server_urls = cmdline.options(os.args, '-server-url')
+	s.vmodules_path = os.vmodules_dir()
+}
+
+// verbose_println prints `s`, but only when vpm was run with -v.
+fn verbose_println(s string) {
+	if !settings.is_verbose {
+		return
+	}
+	println(s)
+}
+
+// get_module_meta_info queries the vpm servers for the metadata of module
+// `name`, returning the first valid response, or an error aggregating all
+// the failures.
+// NOTE(review): this iterates default_vpm_server_urls directly, ignoring
+// any user supplied settings.server_urls - confirm whether that is intended.
+fn get_module_meta_info(name string) ?Mod {
+	mut errors := []string{}
+	for server_url in default_vpm_server_urls {
+		modurl := server_url + '/jsmod/$name'
+		verbose_println('Retrieving module metadata from: $modurl ...')
+		r := http.get(modurl) or {
+			errors << 'Http server did not respond to our request for ${modurl}.'
+			errors << 'Error details: $err'
+			continue
+		}
+		if r.status_code == 404 || r.text.trim_space() == '404' {
+			errors << 'Skipping module "$name", since $server_url reported that "$name" does not exist.'
+			continue
+		}
+		if r.status_code != 200 {
+			errors << 'Skipping module "$name", since $server_url responded with $r.status_code http status code. Please try again later.'
+			continue
+		}
+		s := r.text
+		// a JSON object response must start with '{'
+		if s.len > 0 && s[0] != `{` {
+			errors << 'Invalid json data'
+			errors << s.trim_space().limit(100) + '...'
+			continue
+		}
+		mod := json.decode(Mod, s) or {
+			errors << 'Skipping module "$name", since its information is not in json format.'
+			continue
+		}
+		if '' == mod.url || '' == mod.name {
+			errors << 'Skipping module "$name", since it is missing name or url information.'
+			continue
+		}
+		return mod
+	}
+	return error(errors.join_lines())
+}
+
+// vpm_show prints detailed information about each given module: from its
+// local v.mod when installed, otherwise from the vpm server metadata.
+fn vpm_show(module_names []string) {
+	installed_modules := get_installed_modules()
+	for module_name in module_names {
+		if module_name !in installed_modules {
+			// not installed => show the (shorter) server side information
+			module_meta_info := get_module_meta_info(module_name) or { continue }
+			print('
+Name: $module_meta_info.name
+Homepage: $module_meta_info.url
+Downloads: $module_meta_info.nr_downloads
+Installed: False
+--------
+')
+			continue
+		}
+		path := os.join_path(os.vmodules_dir(), module_name)
+		mod := vmod.from_file(os.join_path(path, 'v.mod')) or { continue }
+		print('Name: $mod.name
+Version: $mod.version
+Description: $mod.description
+Homepage: $mod.repo_url
+Author: $mod.author
+License: $mod.license
+Location: $path
+Requires: ${mod.dependencies.join(', ')}
+--------
+')
+	}
+}
diff --git a/v_windows/v/old/cmd/tools/vrepl.exe b/v_windows/v/old/cmd/tools/vrepl.exe
new file mode 100644
index 0000000..356bd4d
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vrepl.exe
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vrepl.v b/v_windows/v/old/cmd/tools/vrepl.v
new file mode 100644
index 0000000..99ea224
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vrepl.v
@@ -0,0 +1,390 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module main
+
+import os
+import term
+import rand
+import readline
+import os.cmdline
+import v.util.version
+
+// Repl accumulates the state of one interactive REPL session: everything the
+// user has typed so far, split by kind, so a complete V program can be
+// regenerated and recompiled after each input line.
+struct Repl {
+mut:
+	readline readline.Readline
+	indent   int  // indentation level
+	in_func  bool // are we inside a new custom user function
+	line     string // the current line entered by the user
+	//
+	modules        []string // all the import modules
+	includes       []string // all the #include statements
+	functions      []string // all the user function declarations
+	functions_name []string // all the user function names
+	lines          []string // all the other lines/statements
+	temp_lines     []string // all the temporary expressions/printlns
+	vstartup_lines []string // lines in the `VSTARTUP` file
+}
+
+// is_stdin_a_pipe is true when input is piped in, rather than typed at a terminal.
+const is_stdin_a_pipe = (os.is_atty(0) == 0)
+
+// vexe is the path to the V compiler, provided by the V frontend.
+const vexe = os.getenv('VEXE')
+
+// vstartup optionally names a V file whose lines are preloaded at REPL startup.
+const vstartup = os.getenv('VSTARTUP')
+
+// new_repl returns a fresh Repl instance, with a default set of imported
+// modules, and with the `VSTARTUP` file contents (if any) preloaded.
+fn new_repl() Repl {
+	return Repl{
+		readline: readline.Readline{}
+		modules: ['os', 'time', 'math']
+		// a missing/unreadable VSTARTUP file degrades to an empty line list
+		vstartup_lines: os.read_file(vstartup) or { '' }.trim_right('\n\r').split_into_lines()
+	}
+}
+
+// checks scans the current input line, inserting newlines around `{` and `}`
+// found outside of string literals, while tracking the brace nesting level in
+// r.indent and whether the user is in the middle of typing a function.
+// It returns true when the line is part of a multiline construct, i.e. when
+// more input is needed, or a multiline block has just been completed.
+fn (mut r Repl) checks() bool {
+	mut in_string := false
+	was_indent := r.indent > 0
+	for i := 0; i < r.line.len; i++ {
+		// toggle string state on unescaped single quotes
+		if r.line[i] == `\'` && (i == 0 || r.line[i - 1] != `\\`) {
+			in_string = !in_string
+		}
+		if r.line[i] == `{` && !in_string {
+			r.line = r.line[..i + 1] + '\n' + r.line[i + 1..]
+			i++
+			r.indent++
+		}
+		if r.line[i] == `}` && !in_string {
+			r.line = r.line[..i] + '\n' + r.line[i..]
+			i++
+			r.indent--
+			if r.indent == 0 {
+				// the closing brace ended the user function definition
+				r.in_func = false
+			}
+		}
+		// a top level `fn` starts a user function definition
+		if i + 2 < r.line.len && r.indent == 0 && r.line[i + 1] == `f` && r.line[i + 2] == `n` {
+			r.in_func = true
+		}
+	}
+	return r.in_func || (was_indent && r.indent <= 0) || r.indent > 0
+}
+
+// function_call reports whether `line` calls one of the user defined
+// functions, while not being the definition of such a function itself.
+fn (r &Repl) function_call(line string) bool {
+	for fname in r.functions_name {
+		is_function_definition := line.replace(' ', '').starts_with('$fname:=')
+		if line.starts_with(fname) && !is_function_definition {
+			return true
+		}
+	}
+	return false
+}
+
+// current_source_code assembles the complete V program accumulated so far:
+// imports, VSTARTUP lines, #includes, user functions and statements.
+// `should_add_temp_lines` also appends the temporary expressions/printlns;
+// when `not_add_print` is false, print statements from the VSTARTUP file
+// are filtered out, so they do not re-run on every recompilation.
+fn (r &Repl) current_source_code(should_add_temp_lines bool, not_add_print bool) string {
+	mut all_lines := []string{}
+	for mod in r.modules {
+		all_lines << 'import $mod\n'
+	}
+	if vstartup != '' {
+		mut lines := []string{}
+		if !not_add_print {
+			lines = r.vstartup_lines.filter(!it.starts_with('print'))
+		} else {
+			lines = r.vstartup_lines
+		}
+		all_lines << lines
+	}
+	all_lines << r.includes
+	all_lines << r.functions
+	all_lines << r.lines
+
+	if should_add_temp_lines {
+		all_lines << r.temp_lines
+	}
+	return all_lines.join('\n')
+}
+
+// repl_help prints the V version banner and the list of available REPL commands.
+fn repl_help() {
+	println(version.full_v_version(false))
+	println('
+	|help                   Displays this information.
+	|list                   Show the program so far.
+	|reset                  Clears the accumulated program, so you can start a fresh.
+	|Ctrl-C, Ctrl-D, exit   Exits the REPL.
+	|clear                  Clears the screen.
+'.strip_margin())
+}
+
+// run_repl is the interactive main loop of the REPL: it reads one line at a
+// time, classifies it (command, function definition, statement, or printable
+// expression), regenerates the accumulated program into a temp .v file,
+// compiles and runs it, and prints the output. `workdir` is where the temp
+// files are written; `vrepl_prefix` makes their names unique per session.
+fn run_repl(workdir string, vrepl_prefix string) {
+	if !is_stdin_a_pipe {
+		println(version.full_v_version(false))
+		println('Use Ctrl-C or ${term.highlight_command('exit')} to exit, or ${term.highlight_command('help')} to see other available commands')
+	}
+
+	// run the optional VSTARTUP file first, showing its output
+	if vstartup != '' {
+		result := repl_run_vfile(vstartup) or {
+			os.Result{
+				output: '$vstartup file not found'
+			}
+		}
+		print('\n')
+		print_output(result)
+	}
+
+	file := os.join_path(workdir, '.${vrepl_prefix}vrepl.v')
+	temp_file := os.join_path(workdir, '.${vrepl_prefix}vrepl_temp.v')
+	mut prompt := '>>> '
+	defer {
+		if !is_stdin_a_pipe {
+			println('')
+		}
+		cleanup_files([file, temp_file])
+	}
+	mut r := new_repl()
+	for {
+		// '...' prompt while inside an unclosed block
+		if r.indent == 0 {
+			prompt = '>>> '
+		} else {
+			prompt = '... '
+		}
+		oline := r.get_one_line(prompt) or { break }
+		line := oline.trim_space()
+		if line == '' && oline.ends_with('\n') {
+			continue
+		}
+		if line.len <= -1 || line == '' || line == 'exit' {
+			break
+		}
+		r.line = line
+		if r.line == '\n' {
+			continue
+		}
+		// REPL meta commands:
+		if r.line == 'clear' {
+			term.erase_clear()
+			continue
+		}
+		if r.line == 'help' {
+			repl_help()
+			continue
+		}
+		// remember the names of user defined functions (both `x := fn(...)`
+		// assignments and plain `fn name(...)` declarations)
+		if r.line.contains(':=') && r.line.contains('fn(') {
+			r.in_func = true
+			r.functions_name << r.line.all_before(':= fn(').trim_space()
+		}
+		if r.line.starts_with('fn') {
+			r.in_func = true
+			r.functions_name << r.line.all_after('fn').all_before('(').trim_space()
+		}
+		was_func := r.in_func
+		if r.checks() {
+			// multiline input: accumulate the split lines, and keep reading
+			// until the block is closed
+			for rline in r.line.split('\n') {
+				if r.in_func || was_func {
+					r.functions << rline
+				} else {
+					r.temp_lines << rline
+				}
+			}
+			if r.indent > 0 {
+				continue
+			}
+			r.line = ''
+		}
+		if r.line == 'debug_repl' {
+			eprintln('repl: $r')
+			continue
+		}
+		if r.line == 'reset' {
+			r = new_repl()
+			continue
+		}
+		if r.line == 'list' {
+			source_code := r.current_source_code(true, true)
+			println('//////////////////////////////////////////////////////////////////////////////////////')
+			println(source_code)
+			println('//////////////////////////////////////////////////////////////////////////////////////')
+			continue
+		}
+		// Save the source only if the user is printing something,
+		// but don't add this print call to the `lines` array,
+		// so that it doesn't get called during the next print.
+		if r.line.starts_with('=') {
+			r.line = 'println(' + r.line[1..] + ')'
+		}
+		if r.line.starts_with('print') {
+			source_code := r.current_source_code(false, false) + '\n$r.line\n'
+			os.write_file(file, source_code) or { panic(err) }
+			s := repl_run_vfile(file) or { return }
+			print_output(s)
+		} else {
+			mut temp_line := r.line
+			mut temp_flag := false
+			func_call := r.function_call(r.line)
+			// strip string literal contents, so their characters do not
+			// confuse the statement-detection heuristics below
+			filter_line := r.line.replace(r.line.find_between("'", "'"), '').replace(r.line.find_between('"',
+				'"'), '')
+			possible_statement_patterns := [
+				'++',
+				'--',
+				'<<',
+				'//',
+				'/*',
+				'fn ',
+				'pub ',
+				'mut ',
+				'enum ',
+				'const ',
+				'struct ',
+				'interface ',
+				'import ',
+				'#include ',
+				'for ',
+				'or ',
+				'insert',
+				'delete',
+				'prepend',
+				'sort',
+				'clear',
+				'trim',
+			]
+			mut is_statement := false
+			// an odd number of '=' outside strings suggests an assignment
+			if filter_line.count('=') % 2 == 1 {
+				is_statement = true
+			} else {
+				for pattern in possible_statement_patterns {
+					if filter_line.contains(pattern) {
+						is_statement = true
+						break
+					}
+				}
+			}
+			// NB: starting a line with 2 spaces escapes the println heuristic
+			if oline.starts_with('  ') {
+				is_statement = true
+			}
+			// bare expressions get wrapped in println(), so their value shows
+			if !is_statement && !func_call && r.line != '' {
+				temp_line = 'println($r.line)'
+				temp_flag = true
+			}
+			mut temp_source_code := ''
+			if temp_line.starts_with('import ') {
+				mod := r.line.fields()[1]
+				if mod !in r.modules {
+					temp_source_code = '$temp_line\n' + r.current_source_code(false, true)
+				}
+			} else if temp_line.starts_with('#include ') {
+				temp_source_code = '$temp_line\n' + r.current_source_code(false, false)
+			} else {
+				// drop previously recorded loops/conditionals that print,
+				// so they do not print again on this recompilation
+				for i, l in r.lines {
+					if (l.starts_with('for ') || l.starts_with('if ')) && l.contains('println') {
+						r.lines.delete(i)
+						break
+					}
+				}
+				temp_source_code = r.current_source_code(true, false) + '\n$temp_line\n'
+			}
+			os.write_file(temp_file, temp_source_code) or { panic(err) }
+			s := repl_run_vfile(temp_file) or { return }
+			// on success, promote the temporary lines into the persistent program
+			if !func_call && s.exit_code == 0 && !temp_flag {
+				for r.temp_lines.len > 0 {
+					if !r.temp_lines[0].starts_with('print') {
+						r.lines << r.temp_lines[0]
+					}
+					r.temp_lines.delete(0)
+				}
+				if r.line.starts_with('import ') {
+					mod := r.line.fields()[1]
+					if mod !in r.modules {
+						r.modules << mod
+					}
+				} else if r.line.starts_with('#include ') {
+					r.includes << r.line
+				} else {
+					r.lines << r.line
+				}
+			} else {
+				// compilation failed: discard the temporary lines
+				for r.temp_lines.len > 0 {
+					r.temp_lines.delete(0)
+				}
+			}
+			print_output(s)
+		}
+	}
+}
+
// print_output prints the output of a REPL child process, stripping the
// temporary file names from error/warning locations, so that the shown
// messages (and the .repl tests that depend on them) remain stable.
fn print_output(s os.Result) {
	for oline in s.output.trim_right('\n\r').split_into_lines() {
		if oline.contains('.vrepl_temp.v:') {
			// Hide the temporary file name: keep only what follows
			// the first space after the location prefix.
			rest := oline.all_after('.vrepl_temp.v:')
			space_idx := rest.index(' ') or {
				// no space found - print the remainder and stop
				// processing any further lines
				println(rest)
				return
			}
			println(rest[space_idx + 1..])
			continue
		}
		if oline.contains('.vrepl.v:') {
			// Ensure that .vrepl.v: is at the start, ignore the path.
			// This is needed to have stable .repl tests.
			pos := oline.index('.vrepl.v:') or { return }
			println(oline[pos..])
			continue
		}
		println(oline)
	}
}
+
// main is the entry point of the V REPL tool. It reads the optional
// -replfolder/-replprefix options, checks that $VEXE points at an
// existing V executable, then starts the REPL loop.
fn main() {
	// Support for the parameters replfolder and replprefix is needed
	// so that the repl can be launched in parallel by several different
	// threads by the REPL test runner.
	args := cmdline.options_after(os.args, ['repl'])
	replfolder := os.real_path(cmdline.option(args, '-replfolder', os.temp_dir()))
	// a random prefix by default, so parallel REPL instances do not clash:
	replprefix := cmdline.option(args, '-replprefix', 'noprefix.${rand.ulid()}.')
	if !os.exists(os.getenv('VEXE')) {
		println('Usage:')
		println(' VEXE=vexepath vrepl\n')
		println(' ... where vexepath is the full path to the v executable file')
		return
	}
	run_repl(replfolder, replprefix)
}
+
// rerror reports a REPL error message on stdout, flushing it so the
// message is visible immediately, even when the output is piped.
fn rerror(s string) {
	msg := 'V repl error: $s'
	println(msg)
	os.flush()
}
+
// get_one_line reads a single line of input for the REPL.
// When stdin is a pipe (non interactive use, e.g. in the REPL tests),
// it reads a raw line directly, returning `none` on EOF (empty read).
// Otherwise it uses the readline module, showing `prompt` to the user.
fn (mut r Repl) get_one_line(prompt string) ?string {
	if is_stdin_a_pipe {
		iline := os.get_raw_line()
		if iline.len == 0 {
			// EOF - the feeding pipe was closed
			return none
		}
		return iline
	}
	rline := r.readline.read_line(prompt) or { return none }
	return rline
}
+
// cleanup_files removes the given generated .v source files, plus the
// per-platform compilation artifacts derived from them (the executable,
// and with msvc also the .ilk/.pdb debug files).
// Removal errors are deliberately ignored - the files may not exist.
fn cleanup_files(files []string) {
	for file in files {
		os.rm(file) or {}
		// The artifact names are derived by stripping the 2 character
		// long `.v` extension. Guard against degenerate short names,
		// so that the slicing below can never panic.
		if file.len <= 2 {
			continue
		}
		base := file[..file.len - 2]
		$if windows {
			os.rm(base + '.exe') or {}
			$if msvc {
				os.rm(base + '.ilk') or {}
				os.rm(base + '.pdb') or {}
			}
		} $else {
			os.rm(base) or {}
		}
	}
}
+
// repl_run_vfile compiles and runs the given REPL temporary .v file
// with `v -repl run`, returning the captured execution result.
// A negative exit code means the V process could not be launched at
// all; that is reported through rerror, and returned as an error.
fn repl_run_vfile(file string) ?os.Result {
	$if trace_repl_temp_files ? {
		eprintln('>> repl_run_vfile file: $file')
	}
	s := os.execute('"$vexe" -repl run "$file"')
	if s.exit_code < 0 {
		rerror(s.output)
		return error(s.output)
	}
	return s
}
diff --git a/v_windows/v/old/cmd/tools/vself.exe b/v_windows/v/old/cmd/tools/vself.exe
new file mode 100644
index 0000000..0ca7946
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vself.exe
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vself.v b/v_windows/v/old/cmd/tools/vself.v
new file mode 100644
index 0000000..06d2189
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vself.v
@@ -0,0 +1,89 @@
+module main
+
+import os
+import os.cmdline
+import v.pref
+import v.util.recompilation
+
+const is_debug = os.args.contains('-debug')
+
// main implements `v self` - recompiling the V compiler with itself.
// It builds cmd/v to `v2` (or to the binary given with -o), and then,
// unless -o was used, swaps the freshly built executable in place of
// the current one, keeping a `v_old` backup.
fn main() {
	vexe := pref.vexe_path()
	vroot := os.dir(vexe)
	// `v self` only makes sense for a V installed from source:
	recompilation.must_be_enabled(vroot, 'Please install V from source, to use `v self` .')
	os.chdir(vroot)
	os.setenv('VCOLORS', 'always', true)
	// everything between the executable name and `self` are build options:
	self_idx := os.args.index('self')
	args := os.args[1..self_idx]
	jargs := args.join(' ')
	obinary := cmdline.option(args, '-o', '')
	// without an explicit -o, build to a temporary `v2` binary:
	sargs := if obinary != '' { jargs } else { '$jargs -o v2' }
	cmd := '$vexe $sargs cmd/v'
	options := if args.len > 0 { '($sargs)' } else { '' }
	println('V self compiling ${options}...')
	compile(vroot, cmd)
	if obinary != '' {
		// When -o was given, there is no need to backup/rename the original.
		// The user just wants an independent copy of v, and so we are done.
		return
	}
	backup_old_version_and_rename_newer() or { panic(err.msg) }
	println('V built successfully!')
}
+
// compile runs the given self-compilation command line, exiting with
// an error message when the compilation fails; any compiler output is
// echoed (trimmed) on success.
fn compile(vroot string, cmd string) {
	result := os.execute_or_exit(cmd)
	if result.exit_code != 0 {
		eprintln('cannot compile to `$vroot`: \n$result.output')
		exit(1)
	}
	if result.output.len > 0 {
		println(result.output.trim_space())
	}
}
+
// list_folder shows the v* artifacts in the current folder, but only
// when `v self` was run with -debug. The optional bmessage is printed
// before the listing, and message is printed after it.
fn list_folder(bmessage string, message string) {
	if !is_debug {
		return
	}
	if bmessage.len > 0 {
		println(bmessage)
	}
	listing_cmd := if os.user_os() == 'windows' { 'dir v*.exe' } else { 'ls -lartd v*' }
	os.system(listing_cmd)
	println(message)
}
+
// backup_old_version_and_rename_newer swaps the freshly compiled `v2`
// executable in place of the current `v` executable, keeping the old
// one as `v_old`. Errors during the backup steps are collected and
// reported at the end, but do not abort the swap itself.
fn backup_old_version_and_rename_newer() ?bool {
	mut errors := []string{}
	short_v_file := if os.user_os() == 'windows' { 'v.exe' } else { 'v' }
	short_v2_file := if os.user_os() == 'windows' { 'v2.exe' } else { 'v2' }
	short_bak_file := if os.user_os() == 'windows' { 'v_old.exe' } else { 'v_old' }
	v_file := os.real_path(short_v_file)
	v2_file := os.real_path(short_v2_file)
	bak_file := os.real_path(short_bak_file)

	// remove any stale backup first:
	list_folder('before:', 'removing $bak_file ...')
	if os.exists(bak_file) {
		os.rm(bak_file) or { errors << 'failed removing $bak_file: $err.msg' }
	}

	list_folder('', 'moving $v_file to $bak_file ...')
	os.mv(v_file, bak_file) or { errors << err.msg }

	// NOTE(review): v_file was just moved to bak_file above; this extra
	// removal looks like a best effort cleanup for when the mv failed -
	// confirm before simplifying.
	list_folder('', 'removing $v_file ...')
	os.rm(v_file) or {}

	// mv_by_cp presumably copies then removes (TODO confirm); a failure
	// here is fatal, since it would leave no `v` executable at all:
	list_folder('', 'moving $v2_file to $v_file ...')
	os.mv_by_cp(v2_file, v_file) or { panic(err.msg) }

	list_folder('after:', '')

	if errors.len > 0 {
		eprintln('backup errors:\n >> ' + errors.join('\n >> '))
	}
	return true
}
diff --git a/v_windows/v/old/cmd/tools/vsetup-freetype.v b/v_windows/v/old/cmd/tools/vsetup-freetype.v
new file mode 100644
index 0000000..c17ee67
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vsetup-freetype.v
@@ -0,0 +1,28 @@
+module main
+
+import os
+import v.pref
+
// the repository providing prebuilt freetype binaries for Windows:
const freetype_repo_url = 'https://github.com/ubawurinna/freetype-windows-binaries'

// where the binaries are cloned to, relative to V's root folder:
const freetype_folder = os.join_path('thirdparty', 'freetype')
+
// main sets up the `thirdparty/freetype` folder on Windows, by shallow
// cloning the prebuilt freetype binaries repository into V's root.
// On other OSes it only prints a message.
fn main() {
	$if windows {
		println('Setup freetype...')
		vroot := os.dir(pref.vexe_path())
		os.chdir(vroot)
		if os.is_dir(freetype_folder) {
			println('Thirdparty "freetype" is already installed.')
		} else {
			// --depth=1 keeps the download minimal:
			s := os.execute('git clone --depth=1 $freetype_repo_url $freetype_folder')
			if s.exit_code != 0 {
				panic(s.output)
			}
			println(s.output)
			println('Thirdparty "freetype" installed successfully.')
		}
	} $else {
		println('It is only for Windows to setup thirdparty "freetype".')
	}
}
diff --git a/v_windows/v/old/cmd/tools/vsymlink.v b/v_windows/v/old/cmd/tools/vsymlink.v
new file mode 100644
index 0000000..c5d7d8a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vsymlink.v
@@ -0,0 +1,182 @@
+import os
+import v.pref
+import v.util
+
+$if windows {
+ $if tinyc {
+ #flag -ladvapi32
+ #flag -luser32
+ }
+}
// main creates a `v` symlink (or a .bat wrapper on Windows), so that V
// can be run from anywhere. V's temporary folder is cleaned up on exit
// through the registered atexit handler.
fn main() {
	C.atexit(cleanup_vtmp_folder)
	vexe := os.real_path(pref.vexe_path())
	$if windows {
		setup_symlink_windows(vexe)
	} $else {
		setup_symlink_unix(vexe)
	}
}
+
// cleanup_vtmp_folder removes V's temporary folder; failures are
// ignored, since this is only a best effort cleanup on exit.
fn cleanup_vtmp_folder() {
	os.rmdir_all(util.get_vtmp_folder()) or {}
}
+
// setup_symlink_unix symlinks vexe as /usr/local/bin/v, or - when
// running under Termux on Android - into Termux's own bin folder.
// On failure it asks the user to retry with sudo and exits.
fn setup_symlink_unix(vexe string) {
	// Termux keeps its writable bin folder under /data/data/com.termux/files:
	mut link_path := '/data/data/com.termux/files/usr/bin/v'
	if !os.is_dir('/data/data/com.termux/files') {
		link_dir := '/usr/local/bin'
		if !os.exists(link_dir) {
			os.mkdir_all(link_dir) or { panic(err) }
		}
		link_path = link_dir + '/v'
	}
	// remove any stale link first, then create the fresh one:
	os.rm(link_path) or {}
	os.symlink(vexe, link_path) or {
		eprintln('Failed to create symlink "$link_path". Try again with sudo.')
		exit(1)
	}
}
+
// setup_symlink_windows creates .\.bin\v.exe (a symlink to vexe), or a
// .\.bin\v.bat wrapper when a native symlink cannot be made, and then
// adds that .bin folder to the system %PATH% through the registry,
// broadcasting WM_SETTINGCHANGE so running processes pick it up.
fn setup_symlink_windows(vexe string) {
	$if windows {
		// Create a symlink in a new local folder (.\.bin\.v.exe)
		// Puts `v` in %PATH% without polluting it with anything else (like make.bat).
		// This will make `v` available on cmd.exe, PowerShell, and MinGW(MSYS)/WSL/Cygwin
		vdir := os.real_path(os.dir(vexe))
		vsymlinkdir := os.join_path(vdir, '.bin')
		mut vsymlink := os.join_path(vsymlinkdir, 'v.exe')
		// Remove old symlink first (v could have been moved, symlink rerun)
		if !os.exists(vsymlinkdir) {
			os.mkdir(vsymlinkdir) or { panic(err) }
		} else {
			if os.exists(vsymlink) {
				os.rm(vsymlink) or { panic(err) }
			} else {
				// no v.exe - remove a stale v.bat wrapper instead, if any:
				vsymlink = os.join_path(vsymlinkdir, 'v.bat')
				if os.exists(vsymlink) {
					os.rm(vsymlink) or { panic(err) }
				}
				vsymlink = os.join_path(vsymlinkdir, 'v.exe')
			}
		}
		// First, try to create a native symlink at .\.bin\v.exe
		// NOTE(review): the argument order here (vsymlink, vexe) is the
		// reverse of setup_symlink_unix's os.symlink(vexe, link_path) -
		// confirm it matches the os.symlink signature of this V version.
		os.symlink(vsymlink, vexe) or {
			// typically only fails if you're on a network drive (VirtualBox)
			// do batch file creation instead
			eprintln('Could not create a native symlink: $err')
			eprintln('Creating a batch file instead...')
			vsymlink = os.join_path(vsymlinkdir, 'v.bat')
			if os.exists(vsymlink) {
				os.rm(vsymlink) or { panic(err) }
			}
			os.write_file(vsymlink, '@echo off\n$vexe %*') or { panic(err) }
			eprintln('$vsymlink file written.')
		}
		if !os.exists(vsymlink) {
			warn_and_exit('Could not create $vsymlink')
		}
		println('Symlink $vsymlink to $vexe created.')
		println('Checking system %PATH%...')
		reg_sys_env_handle := get_reg_sys_env_handle() or {
			warn_and_exit(err.msg)
			return
		}
		// TODO: Fix defers inside ifs
		// defer {
		// C.RegCloseKey(reg_sys_env_handle)
		// }
		// if the above succeeded, and we cannot get the value, it may simply be empty
		sys_env_path := get_reg_value(reg_sys_env_handle, 'Path') or { '' }
		// normalize the existing entries, stripping trailing separators:
		current_sys_paths := sys_env_path.split(os.path_delimiter).map(it.trim('/$os.path_separator'))
		// prepend the symlink dir, deduplicating the remaining entries:
		mut new_paths := [vsymlinkdir]
		for p in current_sys_paths {
			if p == '' {
				continue
			}
			if p !in new_paths {
				new_paths << p
			}
		}
		new_sys_env_path := new_paths.join(';')
		if new_sys_env_path == sys_env_path {
			println('System %PATH% was already configured.')
		} else {
			println('System %PATH% was not configured.')
			println('Adding symlink directory to system %PATH%...')
			set_reg_value(reg_sys_env_handle, 'Path', new_sys_env_path) or {
				C.RegCloseKey(reg_sys_env_handle)
				warn_and_exit(err.msg)
			}
			println('Done.')
		}
		println('Notifying running processes to update their Environment...')
		send_setting_change_msg('Environment') or {
			eprintln(err)
			C.RegCloseKey(reg_sys_env_handle)
			warn_and_exit('You might need to run this again to have the `v` command in your %PATH%')
		}
		C.RegCloseKey(reg_sys_env_handle)
		println('Done.')
		println('Note: Restart your shell/IDE to load the new %PATH%.')
		println('After restarting your shell/IDE, give `v version` a try in another directory!')
	}
}
+
// warn_and_exit prints the given message to stderr and terminates
// the process with exit code 1.
fn warn_and_exit(err string) {
	eprintln(err)
	exit(1)
}
+
// get_reg_sys_env_handle opens the current user's `Environment`
// registry key for querying and setting values, returning the raw
// HKEY handle. The caller must close it with C.RegCloseKey.
// On non Windows platforms it always returns an error.
fn get_reg_sys_env_handle() ?voidptr {
	$if windows { // wrap for cross-compile compat
		// open the registry key
		reg_key_path := 'Environment'
		reg_env_key := voidptr(0) // or HKEY (HANDLE)
		// access mask 1 | 2 == KEY_QUERY_VALUE | KEY_SET_VALUE
		if C.RegOpenKeyEx(os.hkey_current_user, reg_key_path.to_wide(), 0, 1 | 2, &reg_env_key) != 0 {
			return error('Could not open "$reg_key_path" in the registry')
		}
		return reg_env_key
	}
	return error('not on windows')
}
+
// get_reg_value reads the string value stored under $key in the given
// (already opened) registry key, returning it as a V string.
// On non Windows platforms it always returns an error.
fn get_reg_value(reg_env_key voidptr, key string) ?string {
	$if windows {
		// query the value (shortcut the sizing step)
		// NOTE(review): reg_value_size is passed to RegQueryValueExW as a
		// byte count, but the buffer holds u16 wide chars - confirm that
		// 4095 bytes is really enough for the longest expected %PATH%.
		reg_value_size := u32(4095) // this is the max length (not for the registry, but for the system %PATH%)
		mut reg_value := unsafe { &u16(malloc(int(reg_value_size))) }
		if C.RegQueryValueExW(reg_env_key, key.to_wide(), 0, 0, reg_value, &reg_value_size) != 0 {
			return error('Unable to get registry value for "$key".')
		}
		return unsafe { string_from_wide(reg_value) }
	}
	return error('not on windows')
}
+
// set_reg_value stores $value under $key in the given (already opened)
// registry key, as an expandable (REG_EXPAND_SZ) string.
// On non Windows platforms it always returns an error.
fn set_reg_value(reg_key voidptr, key string, value string) ?bool {
	$if windows {
		// value.len * 2 - the data size is in bytes, and each wide char is 2 bytes:
		if C.RegSetValueExW(reg_key, key.to_wide(), 0, C.REG_EXPAND_SZ, value.to_wide(),
			value.len * 2) != 0 {
			return error('Unable to set registry value for "$key". %PATH% may be too long.')
		}
		return true
	}
	return error('not on windows')
}
+
// send_setting_change_msg broadcasts a WM_SETTINGCHANGE message to all
// listening windows (explorer.exe in particular), letting them know
// that the system environment has changed and should be reloaded.
// A 5 second timeout is used, so hung windows cannot block us forever.
fn send_setting_change_msg(message_data string) ?bool {
	$if windows {
		if C.SendMessageTimeoutW(os.hwnd_broadcast, os.wm_settingchange, 0, unsafe { &u32(message_data.to_wide()) },
			os.smto_abortifhung, 5000, 0) == 0 {
			return error('Could not broadcast WM_SETTINGCHANGE')
		}
		return true
	}
	return error('not on windows')
}
diff --git a/v_windows/v/old/cmd/tools/vtest-all.v b/v_windows/v/old/cmd/tools/vtest-all.v
new file mode 100644
index 0000000..d865872
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vtest-all.v
@@ -0,0 +1,187 @@
+module main
+
+import os
+import term
+import time
+
// path to the V executable, provided by the V launcher:
const vexe = os.getenv('VEXE')

// the root folder of the V installation:
const vroot = os.dir(vexe)

// all CLI arguments, joined into a single string:
const args_string = os.args[1..].join(' ')

// the V options that were given *before* the `test-all` subcommand:
const vargs = args_string.all_before('test-all')

// when set, produced artifacts are kept after the runs:
const vtest_nocleanup = os.getenv('VTEST_NOCLEANUP').bool()
+
// main runs every `v test-all` check in sequence, then prints a
// summary of the passed/failed commands, exiting with 1 on any failure.
fn main() {
	mut commands := get_all_commands()
	// summary
	sw := time.new_stopwatch()
	for mut cmd in commands {
		cmd.run()
	}
	spent := sw.elapsed().milliseconds()
	oks := commands.filter(it.ecode == 0)
	fails := commands.filter(it.ecode != 0)
	println('')
	println(term.header_left(term_highlight('Summary of `v test-all`:'), '-'))
	println(term_highlight('Total runtime: $spent ms'))
	for ocmd in oks {
		// fall back to the raw command line, when no custom message was set:
		msg := if ocmd.okmsg != '' { ocmd.okmsg } else { ocmd.line }
		println(term.colorize(term.green, '> OK: $msg '))
	}
	for fcmd in fails {
		msg := if fcmd.errmsg != '' { fcmd.errmsg } else { fcmd.line }
		println(term.failed('> Failed:') + ' $msg')
	}
	if fails.len > 0 {
		exit(1)
	}
}
+
// Command describes a single check performed by `v test-all`,
// together with its result and cleanup information.
struct Command {
mut:
	line   string // the shell command line to execute
	label  string // when set, the label will be printed *before* cmd.line is executed
	ecode  int    // the exit code of the executed command
	okmsg  string // message shown on success; falls back to `line` when empty
	errmsg string // message shown on failure; falls back to `line` when empty
	rmfile string // artifact the command should produce; removed after the run
}
+
// get_all_commands builds the full list of checks that `v test-all`
// performs: self-compilation in several modes, fmt/vet/test suites,
// tool and example builds, etc. Some checks are platform specific.
fn get_all_commands() []Command {
	mut res := []Command{}
	res << Command{
		line: '$vexe examples/hello_world.v'
		okmsg: 'V can compile hello world.'
		rmfile: 'examples/hello_world'
	}
	res << Command{
		line: '$vexe -o hhww.c examples/hello_world.v'
		okmsg: 'V can output a .c file, without compiling further.'
		rmfile: 'hhww.c'
	}
	$if linux || macos {
		res << Command{
			line: '$vexe -o - examples/hello_world.v | grep "#define V_COMMIT_HASH" > /dev/null'
			okmsg: 'V prints the generated source code to stdout with `-o -` .'
		}
	}
	res << Command{
		line: '$vexe -o vtmp cmd/v'
		okmsg: 'V can compile itself.'
		rmfile: 'vtmp'
	}
	res << Command{
		line: '$vexe -o vtmp_werror -cstrict cmd/v'
		okmsg: 'V can compile itself with -cstrict.'
		rmfile: 'vtmp_werror'
	}
	res << Command{
		line: '$vexe -o vtmp_autofree -autofree cmd/v'
		okmsg: 'V can compile itself with -autofree.'
		rmfile: 'vtmp_autofree'
	}
	res << Command{
		line: '$vexe -o vtmp_prealloc -prealloc cmd/v'
		okmsg: 'V can compile itself with -prealloc.'
		rmfile: 'vtmp_prealloc'
	}
	res << Command{
		line: '$vexe -o vtmp_unused -skip-unused cmd/v'
		okmsg: 'V can compile itself with -skip-unused.'
		rmfile: 'vtmp_unused'
	}
	$if linux {
		res << Command{
			line: '$vexe -cc gcc -keepc -freestanding -o bel vlib/os/bare/bare_example_linux.v'
			okmsg: 'V can compile with -freestanding on Linux with GCC.'
			rmfile: 'bel'
		}
	}
	res << Command{
		line: '$vexe $vargs -progress test-cleancode'
		okmsg: 'All .v files are invariant when processed with `v fmt`'
	}
	res << Command{
		line: '$vexe $vargs -progress test-fmt'
		okmsg: 'All .v files can be processed with `v fmt`. NB: the result may not always be compilable, but `v fmt` should not crash.'
	}
	res << Command{
		line: '$vexe $vargs -progress test-self'
		okmsg: 'There are no _test.v file regressions.'
	}
	res << Command{
		line: '$vexe $vargs -progress -W build-tools'
		okmsg: 'All tools can be compiled.'
	}
	res << Command{
		line: '$vexe $vargs -progress -W build-examples'
		okmsg: 'All examples can be compiled.'
	}
	res << Command{
		line: '$vexe check-md -hide-warnings .'
		label: 'Check ```v ``` code examples and formatting of .MD files...'
		okmsg: 'All .md files look good.'
	}
	res << Command{
		line: '$vexe install nedpals.args'
		okmsg: '`v install` works.'
	}
	// NB: test that a program that depends on thirdparty libraries with its
	// own #flags (tetris depends on gg, which uses sokol) can be compiled
	// with -usecache:
	res << Command{
		line: '$vexe -usecache examples/tetris/tetris.v'
		okmsg: '`v -usecache` works.'
		rmfile: 'examples/tetris/tetris'
	}
	$if macos || linux {
		res << Command{
			line: '$vexe -o v.c cmd/v && cc -Werror v.c && rm -rf a.out'
			label: 'v.c should be buildable with no warnings...'
			okmsg: 'v.c can be compiled without warnings. This is good :)'
			rmfile: 'v.c'
		}
	}
	return res
}
+
// run executes the command, records its exit code and timing, and then
// removes the produced artifact (unless VTEST_NOCLEANUP is set).
// A missing expected artifact is treated as a failure (ecode 999).
fn (mut cmd Command) run() {
	// Changing the current directory is needed for some of the compiler tests,
	// vlib/v/tests/local_test.v and vlib/v/tests/repl/repl_test.v
	os.chdir(vroot)
	if cmd.label != '' {
		println(term.header_left(cmd.label, '*'))
	}
	sw := time.new_stopwatch()
	cmd.ecode = os.system(cmd.line)
	spent := sw.elapsed().milliseconds()
	println('> Running: "$cmd.line" took: $spent ms ... ' +
		if cmd.ecode != 0 { term.failed('FAILED') } else { term_highlight('OK') })
	if vtest_nocleanup {
		return
	}
	if cmd.rmfile != '' {
		mut file_existed := rm_existing(cmd.rmfile)
		if os.user_os() == 'windows' {
			// on Windows, the artifact may carry an .exe suffix:
			file_existed = file_existed || rm_existing(cmd.rmfile + '.exe')
		}
		if !file_existed {
			eprintln('Expected file did not exist: $cmd.rmfile')
			cmd.ecode = 999
		}
	}
}
+
// rm_existing tries to remove `path`, ignoring any removal errors.
// It reports whether the file existed before the removal attempt.
fn rm_existing(path string) bool {
	if !os.exists(path) {
		return false
	}
	os.rm(path) or {}
	return true
}
+
// term_highlight returns s colorized in bold yellow, for emphasis
// in the summary output.
fn term_highlight(s string) string {
	return term.colorize(term.yellow, term.colorize(term.bold, s))
}
diff --git a/v_windows/v/old/cmd/tools/vtest-cleancode.v b/v_windows/v/old/cmd/tools/vtest-cleancode.v
new file mode 100644
index 0000000..3c849b7
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vtest-cleancode.v
@@ -0,0 +1,102 @@
+module main
+
+import os
+import testing
+import v.util
+import arrays
+
// Folders vetted by `v vet`, plus the lists of files that are known to
// fail `v vet`/`v fmt -verify`, with the reason for each exception.
const (
	vet_known_failing_exceptions = []string{}
	vet_folders = [
		'vlib/sqlite',
		'vlib/v',
		'vlib/x/json2',
		'vlib/x/ttf',
		'cmd/v',
		'cmd/tools',
		'examples/2048',
		'examples/tetris',
		'examples/term.ui',
	]
	verify_known_failing_exceptions = [
		// Handcrafted meaningful formatting of code parts (mostly arrays)
		'examples/sokol/02_cubes_glsl/cube_glsl.v',
		'examples/sokol/03_march_tracing_glsl/rt_glsl.v',
		'examples/sokol/04_multi_shader_glsl/rt_glsl.v',
		'examples/sokol/05_instancing_glsl/rt_glsl.v',
		'examples/sokol/06_obj_viewer/show_obj.v',
		'vlib/gg/m4/graphic.v',
		'vlib/gg/m4/m4_test.v',
		'vlib/gg/m4/matrix.v',
		'vlib/sqlite/orm.v' /* mut c &int -> mut c int */,
		'vlib/builtin/int_test.v' /* special number formatting that should be tested */,
		// TODOs and unfixed vfmt bugs
		'vlib/builtin/int.v' /* TODO byteptr: vfmt converts `pub fn (nn byteptr) str() string {` to `nn &byte` and that conflicts with `nn byte` */,
		'vlib/builtin/string_charptr_byteptr_helpers.v' /* TODO byteptr: a temporary shim to ease the byteptr=>&byte transition */,
		'vlib/v/tests/interop_test.v', /* bad comment formatting */
		'vlib/v/gen/js/tests/js.v', /* local `hello` fn, gets replaced with module `hello` aliased as `hl` */
	]
	vfmt_verify_list = [
		'cmd/',
		'examples/',
		'tutorials/',
		'vlib/',
	]
	vfmt_known_failing_exceptions = arrays.merge(verify_known_failing_exceptions, [
		'vlib/regex/regex_test.v' /* contains meaningfull formatting of the test case data */,
		'vlib/readline/readline_test.v' /* vfmt eats `{ Readline }` from `import readline { Readline }` */,
		'vlib/glm/glm.v' /* `mut res &f32` => `mut res f32`, which then fails to compile */,
		'vlib/fontstash/fontstash_structs.v' /* eats fn arg names for inline callback types in struct field declarations */,
		'vlib/crypto/sha512/sha512block_generic.v' /* formatting of large constant arrays wraps to too many lines */,
		'vlib/crypto/aes/const.v' /* formatting of large constant arrays wraps to too many lines */,
	])
)

// Tool environment: the V executable, its root, and whether `-fix`
// was requested (fix files in place, instead of just verifying them).
const (
	vexe = os.getenv('VEXE')
	vroot = os.dir(vexe)
	is_fix = '-fix' in os.args
)
+
// main runs `v test-cleancode`, forwarding any V options given before
// the `test-cleancode` subcommand to the vetting sessions.
fn main() {
	pass_args := os.args[1..].join(' ').all_before('test-cleancode')
	v_test_vetting(pass_args)
}
+
// tsession prepares and runs one testing session for the given tool
// (`v vet` or `v fmt`) over the given file list, skipping the known
// failing exceptions, and returns the finished session for inspection.
fn tsession(vargs string, tool_source string, tool_cmd string, tool_args string, flist []string, slist []string) testing.TestSession {
	os.chdir(vroot)
	title_message := 'running $tool_cmd over most .v files'
	testing.eheader(title_message)
	mut test_session := testing.new_test_session('$vargs $tool_args', false)
	test_session.files << flist
	test_session.skip_files << slist
	util.prepare_tool_when_needed(tool_source)
	// note that util.prepare_tool_when_needed will put its temporary files
	// in the VTMP from the test session too, so they will be cleaned up
	// at the end
	test_session.test()
	eprintln(test_session.benchmark.total_message(title_message))
	return test_session
}
+
// v_test_vetting runs a `v vet` session over vet_folders and a
// `v fmt -verify` (or `v fmt -w`, with -fix) session over the vfmt
// verify list, and exits with 1 when either session had failures.
fn v_test_vetting(vargs string) {
	expanded_vet_list := util.find_all_v_files(vet_folders) or { return }
	vet_session := tsession(vargs, 'vvet', 'v vet', 'vet', expanded_vet_list, vet_known_failing_exceptions)
	//
	fmt_cmd, fmt_args := if is_fix { 'v fmt -w', 'fmt -w' } else { 'v fmt -verify', 'fmt -verify' }
	vfmt_list := util.find_all_v_files(vfmt_verify_list) or { return }
	exceptions := util.find_all_v_files(vfmt_known_failing_exceptions) or { return }
	verify_session := tsession(vargs, 'vfmt.v', fmt_cmd, fmt_args, vfmt_list, exceptions)
	//
	if vet_session.benchmark.nfail > 0 || verify_session.benchmark.nfail > 0 {
		eprintln('\n')
		if vet_session.benchmark.nfail > 0 {
			eprintln('WARNING: `v vet` failed $vet_session.benchmark.nfail times.')
		}
		if verify_session.benchmark.nfail > 0 {
			eprintln('WARNING: `v fmt -verify` failed $verify_session.benchmark.nfail times.')
		}
		exit(1)
	}
}
diff --git a/v_windows/v/old/cmd/tools/vtest-fmt.v b/v_windows/v/old/cmd/tools/vtest-fmt.v
new file mode 100644
index 0000000..b1ebb81
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vtest-fmt.v
@@ -0,0 +1,43 @@
+module main
+
+import os
+import testing
+import v.util
+
// Files that are known to fail `v fmt`, with the reason for each:
const (
	known_failing_exceptions = [
		'vlib/crypto/aes/const.v' /* const array wrapped in too many lines */,
	]
)
+
// main runs `v test-fmt`, forwarding any V options given before the
// `test-fmt` subcommand to the formatting test session.
fn main() {
	vargs := os.args[1..].join(' ').all_before('test-fmt')
	v_test_formatting(vargs)
}
+
// v_test_formatting runs `v fmt -worker` over every eligible .v file,
// skipping the known failing exceptions, and exits with 1 when any
// file fails to format.
fn v_test_formatting(vargs string) {
	all_v_files := v_files()
	util.prepare_tool_when_needed('vfmt.v')
	testing.eheader('Run "v fmt" over all .v files')
	mut vfmt_test_session := testing.new_test_session('$vargs fmt -worker', false)
	vfmt_test_session.files << all_v_files
	vfmt_test_session.skip_files << known_failing_exceptions
	vfmt_test_session.test()
	eprintln(vfmt_test_session.benchmark.total_message('running vfmt over V files'))
	if vfmt_test_session.benchmark.nfail > 0 {
		eprintln('\nWARNING: v fmt failed $vfmt_test_session.benchmark.nfail times.\n')
		exit(1)
	}
}
+
// v_files returns all .v files under the current folder, excluding the
// ones in vlib/v/cgen/tests.
fn v_files() []string {
	all_test_files := os.walk_ext('.', '.v')
	return all_test_files.filter(!it.starts_with('./vlib/v/cgen/tests'))
}
diff --git a/v_windows/v/old/cmd/tools/vtest-parser.v b/v_windows/v/old/cmd/tools/vtest-parser.v
new file mode 100644
index 0000000..e1e829a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vtest-parser.v
@@ -0,0 +1,289 @@
+import os
+import flag
+import term
+import time
+import v.parser
+import v.ast
+import v.pref
+
// Tool environment, plus the special exit codes used by the worker
// processes, with human readable descriptions for the supervisor.
const (
	vexe = pref.vexe_path()
	vroot = os.dir(vexe)
	support_color = term.can_show_color_on_stderr() && term.can_show_color_on_stdout()
	ecode_timeout = 101
	ecode_memout = 102
	ecode_details = map{
		-1:  'worker executable not found'
		101: 'too slow'
		102: 'too memory hungry'
	}
)
+
// Context holds the shared state of `v test-parser`; the same struct
// is used both by the supervisor process and by its worker processes
// (see is_worker), so some fields are only meaningful in one role.
struct Context {
mut:
	is_help    bool
	is_worker  bool // true in a child process, that parses one file and may crash freely
	is_verbose bool
	is_silent  bool // do not print any status/progress during processing, just failures.
	is_linear  bool // print linear progress log, without trying to do term cursor up + \r msg. Easier to use in a CI job
	timeout_ms int
	myself     string   // path to this executable, so the supervisor can launch worker processes
	all_paths  []string // all files given to the supervisor process
	path       string   // the current path, given to a worker process
	cut_index  int      // the cut position in the source from context.path
	max_index  int      // the maximum index (equivalent to the file content length)
	// parser context in the worker processes:
	table     ast.Table
	scope     ast.Scope
	pref      &pref.Preferences
	period_ms int  // print periodic progress
	stop_print bool // stop printing the periodic progress
}
+
// main dispatches on the process role: a worker parses a single file
// truncated at cut_index (and may crash/panic/time out freely), while
// the supervisor spawns a worker per cut position of each file, and
// summarizes the resulting failures and panics.
fn main() {
	mut context := process_cli_args()
	if context.is_worker {
		pid := os.getpid()
		context.log('> worker ${pid:5} starts parsing at cut_index: ${context.cut_index:5} | $context.path')
		// A worker's process job is to try to parse a single given file in context.path.
		// It can crash/panic freely.
		context.table = ast.new_table()
		context.scope = &ast.Scope{
			parent: 0
		}
		context.pref = &pref.Preferences{
			output_mode: .silent
		}
		mut source := os.read_file(context.path) ?
		source = source[..context.cut_index]

		// self-terminate when parsing takes longer than timeout_ms:
		go fn (ms int) {
			time.sleep(ms * time.millisecond)
			exit(ecode_timeout)
		}(context.timeout_ms)
		_ := parser.parse_text(source, context.path, context.table, .skip_comments, context.pref)
		context.log('> worker ${pid:5} finished parsing $context.path')
		exit(0)
	} else {
		// The process supervisor should NOT crash/panic, unlike the workers.
		// It's job, is to:
		// 1) start workers
		// 2) accumulate results
		// 3) produce a summary at the end
		context.expand_all_paths()
		mut fails := 0
		mut panics := 0
		sw := time.new_stopwatch()
		for path in context.all_paths {
			filesw := time.new_stopwatch()
			context.start_printing()
			new_fails, new_panics := context.process_whole_file_in_worker(path)
			fails += new_fails
			panics += new_panics
			context.stop_printing()
			context.info('File: ${path:-30} | new_fails: ${new_fails:5} | new_panics: ${new_panics:5} | Elapsed time: ${filesw.elapsed().milliseconds()}ms')
		}
		non_panics := fails - panics
		context.info('Total files processed: ${context.all_paths.len:5} | Errors found: ${fails:5} | Panics: ${panics:5} | Non panics: ${non_panics:5} | Elapsed time: ${sw.elapsed().milliseconds()}ms')
		if fails > 0 {
			exit(1)
		}
		exit(0)
	}
}
+
// process_cli_args parses the command line flags into a new Context,
// printing usage and exiting early for -h, or when the supervisor was
// given no paths at all.
fn process_cli_args() &Context {
	mut context := &Context{
		pref: pref.new_preferences()
	}
	context.myself = os.executable()
	mut fp := flag.new_flag_parser(os.args_after('test-parser'))
	fp.application(os.file_name(context.myself))
	fp.version('0.0.1')
	fp.description('Test the V parser, by parsing each .v file in each PATH,\n' +
		'as if it was typed character by character by the user.\n' +
		'A PATH can be either a folder, or a specific .v file.\n' +
		'NB: you *have to quote* the PATH, if it contains spaces/punctuation.')
	fp.arguments_description('PATH1 PATH2 ...')
	fp.skip_executable()
	context.is_help = fp.bool('help', `h`, false, 'Show help/usage screen.')
	context.is_verbose = fp.bool('verbose', `v`, false, 'Be more verbose.')
	context.is_silent = fp.bool('silent', `S`, false, 'Do not print progress at all.')
	context.is_linear = fp.bool('linear', `L`, false, 'Print linear progress log. Suitable for CI.')
	context.period_ms = fp.int('progress_ms', `s`, 500, 'print a status report periodically, the period is given in milliseconds.')
	context.is_worker = fp.bool('worker', `w`, false, 'worker specific flag - is this a worker process, that can crash/panic.')
	context.cut_index = fp.int('cut_index', `c`, 1, 'worker specific flag - cut index in the source file, everything before that will be parsed, the rest - ignored.')
	context.timeout_ms = fp.int('timeout_ms', `t`, 250, 'worker specific flag - timeout in ms; a worker taking longer, will self terminate.')
	context.path = fp.string('path', `p`, '', 'worker specific flag - path to the current source file, which will be parsed.')
	//
	if context.is_help {
		println(fp.usage())
		exit(0)
	}
	context.all_paths = fp.finalize() or {
		context.error(err.msg)
		exit(1)
	}
	if !context.is_worker && context.all_paths.len == 0 {
		println(fp.usage())
		exit(0)
	}
	return context
}
+
+// ////////////////
// bold returns msg in bold, when color output is supported.
fn bold(msg string) string {
	return if support_color { term.bold(msg) } else { msg }
}
+
// red returns msg in red, when color output is supported.
fn red(msg string) string {
	return if support_color { term.red(msg) } else { msg }
}
+
// yellow returns msg in yellow, when color output is supported.
fn yellow(msg string) string {
	return if support_color { term.yellow(msg) } else { msg }
}
+
// info prints an informational message, unconditionally.
fn (mut context Context) info(msg string) {
	println(msg)
}
+
// log prints a timestamped info message to stderr, but only in
// verbose mode.
fn (mut context Context) log(msg string) {
	if !context.is_verbose {
		return
	}
	label := yellow('info')
	ts := time.now().format_ss_micro()
	eprintln('$label: $ts | $msg')
}
+
// error prints an error message to stderr, with a colored label.
fn (mut context Context) error(msg string) {
	label := red('error')
	eprintln('$label: $msg')
}
+
// expand_all_paths replaces context.all_paths with the concrete list
// of .v/.vsh files: folders are walked recursively, unsupported file
// types and missing paths are reported and dropped.
fn (mut context Context) expand_all_paths() {
	context.log('> context.all_paths before: $context.all_paths')
	mut files := []string{}
	for path in context.all_paths {
		if os.is_dir(path) {
			files << os.walk_ext(path, '.v')
			files << os.walk_ext(path, '.vsh')
			continue
		}
		if !path.ends_with('.v') && !path.ends_with('.vv') && !path.ends_with('.vsh') {
			context.error('`v test-parser` can only be used on .v/.vv/.vsh files.\nOffending file: "$path".')
			continue
		}
		if !os.exists(path) {
			context.error('"$path" does not exist.')
			continue
		}
		files << path
	}
	context.all_paths = files
	context.log('> context.all_paths after: $context.all_paths')
}
+
// process_whole_file_in_worker launches one worker process per cut
// position in `path` (i.e. once per character), and counts how many
// workers failed and how many of those failures were V panics.
// It returns (fails, panics).
fn (mut context Context) process_whole_file_in_worker(path string) (int, int) {
	context.path = path // needed for the progress bar
	context.log('> context.process_whole_file_in_worker path: $path')
	if !(os.is_file(path) && os.is_readable(path)) {
		context.error('$path is not readable')
		return 1, 0
	}
	source := os.read_file(path) or { '' }
	if source == '' {
		// an empty file is a valid .v file
		return 0, 0
	}
	len := source.len - 1
	mut fails := 0
	mut panics := 0
	context.max_index = len
	for i in 0 .. len {
		verbosity := if context.is_verbose { '-v' } else { '' }
		context.cut_index = i // needed for the progress bar
		cmd := '"$context.myself" $verbosity --worker --timeout_ms ${context.timeout_ms:5} --cut_index ${i:5} --path "$path" '
		context.log(cmd)
		mut res := os.execute(cmd)
		context.log('worker exit_code: $res.exit_code | worker output:\n$res.output')
		if res.exit_code != 0 {
			fails++
			mut is_panic := false
			if res.output.contains('V panic:') {
				is_panic = true
				panics++
			}
			// derive the human readable line:col of the cut position:
			part := source[..i]
			line := part.count('\n') + 1
			last_line := part.all_after_last('\n')
			col := last_line.len
			err := if is_panic {
				red('parser failure: panic')
			} else {
				red('parser failure: crash, ${ecode_details[res.exit_code]}')
			}
			path_to_line := bold('$path:$line:$col:')
			err_line := last_line.trim_left('\t')
			println('$path_to_line $err')
			println('\t$line | $err_line')
			println('')
			eprintln(res.output)
		}
	}
	return fails, panics
}
+
// start_printing launches the background goroutine that periodically
// reports the current progress, until stop_printing is called.
fn (mut context Context) start_printing() {
	context.stop_print = false
	if !context.is_linear && !context.is_silent {
		// reserve a line, that print_status will overwrite in place:
		println('\n')
	}
	go context.print_periodic_status()
}
+
// stop_printing signals the periodic status goroutine to stop, then
// sleeps a fraction of the period, giving it time to notice the flag.
fn (mut context Context) stop_printing() {
	context.stop_print = true
	time.sleep(time.millisecond * context.period_ms / 5)
}
+
// print_status prints one progress line (current file and cut index),
// either appending (linear mode) or rewriting the last terminal line.
// Nothing is printed in silent mode, or before any real progress.
fn (mut context Context) print_status() {
	if context.is_silent {
		return
	}
	// skip the initial state, before any file has been processed:
	if (context.cut_index == 1) && (context.max_index == 0) {
		return
	}
	msg := '> ${context.path:-30} | index: ${context.cut_index:5}/${context.max_index - 1:5}'
	if context.is_linear {
		eprintln(msg)
		return
	}
	term.cursor_up(1)
	eprint('\r $msg\n')
}
+
// print_periodic_status runs in its own goroutine, reprinting the
// progress every period_ms (checking stop_print in 1/10th slices of
// the period, so stopping is responsive), until stop_print is set.
fn (mut context Context) print_periodic_status() {
	context.print_status()
	mut printed_at_least_once := false
	for !context.stop_print {
		context.print_status()
		for i := 0; i < 10 && !context.stop_print; i++ {
			time.sleep(time.millisecond * context.period_ms / 10)
			// once real progress is visible, refresh at least once early:
			if context.cut_index > 50 && !printed_at_least_once {
				context.print_status()
				printed_at_least_once = true
			}
		}
	}
	context.print_status()
}
diff --git a/v_windows/v/old/cmd/tools/vtest-self.v b/v_windows/v/old/cmd/tools/vtest-self.v
new file mode 100644
index 0000000..99cc83c
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vtest-self.v
@@ -0,0 +1,220 @@
+module main
+
+import os
+import testing
+import v.pref
+
+const github_job = os.getenv('GITHUB_JOB')
+
+const (
+ skip_test_files = [
+ 'vlib/context/deadline_test.v' /* sometimes blocks */,
+ 'vlib/mysql/mysql_orm_test.v' /* mysql not installed */,
+ 'vlib/pg/pg_orm_test.v' /* pg not installed */,
+ ]
+ skip_fsanitize_too_slow = [
+ // These tests are too slow to be run in the CI on each PR/commit
+ // in the sanitized modes:
+ 'vlib/v/compiler_errors_test.v',
+ 'vlib/v/doc/doc_test.v',
+ 'vlib/v/fmt/fmt_test.v',
+ 'vlib/v/fmt/fmt_keep_test.v',
+ 'vlib/v/fmt/fmt_vlib_test.v',
+ 'vlib/v/live/live_test.v',
+ 'vlib/v/parser/v_parser_test.v',
+ 'vlib/v/scanner/scanner_test.v',
+ 'vlib/v/tests/inout/compiler_test.v',
+ 'vlib/v/tests/prod_test.v',
+ 'vlib/v/tests/profile/profile_test.v',
+ 'vlib/v/tests/repl/repl_test.v',
+ 'vlib/v/tests/valgrind/valgrind_test.v',
+ ]
+ skip_with_fsanitize_memory = [
+ 'vlib/net/tcp_simple_client_server_test.v',
+ 'vlib/net/http/cookie_test.v',
+ 'vlib/net/http/http_test.v',
+ 'vlib/net/http/status_test.v',
+ 'vlib/net/http/http_httpbin_test.v',
+ 'vlib/net/http/header_test.v',
+ 'vlib/net/udp_test.v',
+ 'vlib/net/tcp_test.v',
+ 'vlib/orm/orm_test.v',
+ 'vlib/sqlite/sqlite_test.v',
+ 'vlib/sqlite/sqlite_orm_test.v',
+ 'vlib/v/tests/orm_sub_struct_test.v',
+ 'vlib/v/tests/orm_sub_array_struct_test.v',
+ 'vlib/vweb/tests/vweb_test.v',
+ 'vlib/vweb/request_test.v',
+ 'vlib/net/http/request_test.v',
+ 'vlib/vweb/route_test.v',
+ 'vlib/net/websocket/websocket_test.v',
+ 'vlib/crypto/rand/crypto_rand_read_test.v',
+ ]
+ skip_with_fsanitize_address = [
+ 'vlib/net/websocket/websocket_test.v',
+ ]
+ skip_with_fsanitize_undefined = [
+ 'do_not_remove',
+ ]
+ skip_with_werror = [
+ 'do_not_remove',
+ ]
+ skip_with_asan_compiler = [
+ 'do_not_remove',
+ ]
+ skip_with_msan_compiler = [
+ 'do_not_remove',
+ ]
+ skip_on_musl = [
+ 'vlib/v/tests/profile/profile_test.v',
+ ]
+ skip_on_ubuntu_musl = [
+ //'vlib/v/gen/js/jsgen_test.v',
+ 'vlib/net/http/cookie_test.v',
+ 'vlib/net/http/http_test.v',
+ 'vlib/net/http/status_test.v',
+ 'vlib/net/websocket/ws_test.v',
+ 'vlib/sqlite/sqlite_test.v',
+ 'vlib/sqlite/sqlite_orm_test.v',
+ 'vlib/orm/orm_test.v',
+ 'vlib/v/tests/orm_sub_struct_test.v',
+ 'vlib/v/tests/orm_sub_array_struct_test.v',
+ 'vlib/clipboard/clipboard_test.v',
+ 'vlib/vweb/tests/vweb_test.v',
+ 'vlib/vweb/request_test.v',
+ 'vlib/net/http/request_test.v',
+ 'vlib/vweb/route_test.v',
+ 'vlib/net/websocket/websocket_test.v',
+ 'vlib/net/http/http_httpbin_test.v',
+ 'vlib/net/http/header_test.v',
+ ]
+ skip_on_linux = [
+ 'do_not_remove',
+ ]
+ skip_on_non_linux = [
+ 'do_not_remove',
+ ]
+ skip_on_windows = [
+ 'vlib/orm/orm_test.v',
+ 'vlib/v/tests/orm_sub_struct_test.v',
+ 'vlib/net/websocket/ws_test.v',
+ 'vlib/net/unix/unix_test.v',
+ 'vlib/net/websocket/websocket_test.v',
+ 'vlib/vweb/tests/vweb_test.v',
+ 'vlib/vweb/request_test.v',
+ 'vlib/net/http/request_test.v',
+ 'vlib/vweb/route_test.v',
+ ]
+ skip_on_non_windows = [
+ 'do_not_remove',
+ ]
+ skip_on_macos = [
+ 'do_not_remove',
+ ]
+ skip_on_non_macos = [
+ 'do_not_remove',
+ ]
+)
+
+// NB: musl misses openssl, thus the http tests can not be done there
+// NB: http_httpbin_test.v: fails with 'cgen error: json: map_string_string is not struct'
+fn main() {
+ vexe := pref.vexe_path()
+ vroot := os.dir(vexe)
+ os.chdir(vroot)
+ args := os.args.clone()
+ args_string := args[1..].join(' ')
+ cmd_prefix := args_string.all_before('test-self')
+ title := 'testing vlib'
+ all_test_files := os.walk_ext(os.join_path(vroot, 'vlib'), '_test.v')
+ testing.eheader(title)
+ mut tsession := testing.new_test_session(cmd_prefix, true)
+ tsession.files << all_test_files.filter(!it.contains('testdata' + os.path_separator))
+ tsession.skip_files << skip_test_files
+
+ if github_job == 'windows-tcc' {
+ // TODO: fix these ASAP
+ tsession.skip_files << 'vlib/net/tcp_test.v'
+ tsession.skip_files << 'vlib/net/udp_test.v'
+ }
+
+ mut werror := false
+ mut sanitize_memory := false
+ mut sanitize_address := false
+ mut sanitize_undefined := false
+ mut asan_compiler := false
+ mut msan_compiler := false
+ for arg in args {
+ if arg.contains('-asan-compiler') {
+ asan_compiler = true
+ }
+ if arg.contains('-msan-compiler') {
+ msan_compiler = true
+ }
+ if arg.contains('-Werror') || arg.contains('-cstrict') {
+ werror = true
+ }
+ if arg.contains('-fsanitize=memory') {
+ sanitize_memory = true
+ }
+ if arg.contains('-fsanitize=address') {
+ sanitize_address = true
+ }
+ if arg.contains('-fsanitize=undefined') {
+ sanitize_undefined = true
+ }
+ }
+ if os.getenv('VTEST_RUN_FSANITIZE_TOO_SLOW').len == 0
+ && (sanitize_undefined || sanitize_memory || sanitize_address) {
+ tsession.skip_files << skip_fsanitize_too_slow
+ }
+ if werror {
+ tsession.skip_files << skip_with_werror
+ }
+ if sanitize_memory {
+ tsession.skip_files << skip_with_fsanitize_memory
+ }
+ if sanitize_address {
+ tsession.skip_files << skip_with_fsanitize_address
+ }
+ if sanitize_undefined {
+ tsession.skip_files << skip_with_fsanitize_undefined
+ }
+ if asan_compiler {
+ tsession.skip_files << skip_with_asan_compiler
+ }
+ if msan_compiler {
+ tsession.skip_files << skip_with_msan_compiler
+ }
+ // println(tsession.skip_files)
+ if os.getenv('V_CI_MUSL').len > 0 {
+ tsession.skip_files << skip_on_musl
+ }
+ if os.getenv('V_CI_UBUNTU_MUSL').len > 0 {
+ tsession.skip_files << skip_on_ubuntu_musl
+ }
+ $if !linux {
+ tsession.skip_files << skip_on_non_linux
+ }
+ $if linux {
+ tsession.skip_files << skip_on_linux
+ }
+ $if windows {
+ tsession.skip_files << skip_on_windows
+ }
+ $if !windows {
+ tsession.skip_files << skip_on_non_windows
+ }
+ $if macos {
+ tsession.skip_files << skip_on_macos
+ }
+ $if !macos {
+ tsession.skip_files << skip_on_non_macos
+ }
+ tsession.test()
+ eprintln(tsession.benchmark.total_message(title))
+ if tsession.benchmark.nfail > 0 {
+ eprintln('\nWARNING: failed $tsession.benchmark.nfail times.\n')
+ exit(1)
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/vtest.v b/v_windows/v/old/cmd/tools/vtest.v
new file mode 100644
index 0000000..d116d29
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vtest.v
@@ -0,0 +1,135 @@
+module main
+
+import os
+import os.cmdline
+import testing
+import v.pref
+
+fn main() {
+ args := os.args.clone()
+ if os.args.last() == 'test' {
+ show_usage()
+ return
+ }
+ args_to_executable := args[1..]
+ args_before := cmdline.options_before(args_to_executable, ['test'])
+ args_after := cmdline.options_after(args_to_executable, ['test'])
+ if args_after.join(' ') == 'v' {
+ eprintln('`v test v` has been deprecated.')
+ eprintln('Use `v test-all` instead.')
+ exit(1)
+ }
+ backend_pos := args_before.index('-b')
+ backend := if backend_pos == -1 { '.c' } else { args_before[backend_pos + 1] } // this giant mess because closures are not implemented
+
+ mut ts := testing.new_test_session(args_before.join(' '), true)
+ for targ in args_after {
+ if os.is_dir(targ) {
+ // Fetch all tests from the directory
+ files, skip_files := should_test_dir(targ.trim_right(os.path_separator), backend)
+ ts.files << files
+ ts.skip_files << skip_files
+ continue
+ } else if os.exists(targ) {
+ match should_test(targ, backend) {
+ .test {
+ ts.files << targ
+ continue
+ }
+ .skip {
+ ts.files << targ
+ ts.skip_files << targ
+ continue
+ }
+ .ignore {}
+ }
+ } else {
+ eprintln('\nUnrecognized test file `$targ`.\n `v test` can only be used with folders and/or _test.v files.\n')
+ show_usage()
+ exit(1)
+ }
+ }
+ testing.header('Testing...')
+ ts.test()
+ println(ts.benchmark.total_message('all V _test.v files'))
+ if ts.failed {
+ exit(1)
+ }
+}
+
+fn show_usage() {
+ println('Usage:')
+ println(' A)')
+ println(' v test folder/ : run all v tests in the given folder.')
+ println(' v -stats test folder/ : the same, but print more stats.')
+ println(' B)')
+ println(' v test file_test.v : run test functions in a given test file.')
+ println(' v -stats test file_test.v : as above, but with more stats.')
+ println(' NB: you can also give many and mixed folder/ file_test.v arguments after `v test` .')
+ println('')
+}
+
+pub fn should_test_dir(path string, backend string) ([]string, []string) { // return is (files, skip_files)
+ mut files := os.ls(path) or { return []string{}, []string{} }
+ mut local_path_separator := os.path_separator
+ if path.ends_with(os.path_separator) {
+ local_path_separator = ''
+ }
+ mut res_files := []string{}
+ mut skip_files := []string{}
+ for file in files {
+ p := path + local_path_separator + file
+ if os.is_dir(p) && !os.is_link(p) {
+ if file == 'testdata' {
+ continue
+ }
+ ret_files, ret_skip_files := should_test_dir(p, backend)
+ res_files << ret_files
+ skip_files << ret_skip_files
+ } else if os.exists(p) {
+ match should_test(p, backend) {
+ .test {
+ res_files << p
+ }
+ .skip {
+ res_files << p
+ skip_files << p
+ }
+ .ignore {}
+ }
+ }
+ }
+ return res_files, skip_files
+}
+
+enum ShouldTestStatus {
+ test // do test
+ skip
+ ignore
+}
+
+fn should_test(path string, backend string) ShouldTestStatus {
+ if path.ends_with('_test.v') {
+ return .test
+ }
+ if path.ends_with('.v') && path.count('.') == 2 {
+ if !path.all_before_last('.v').all_before_last('.').ends_with('_test') {
+ return .ignore
+ }
+ backend_arg := path.all_before_last('.v').all_after_last('.')
+ arch := pref.arch_from_string(backend_arg) or { pref.Arch._auto }
+ if arch == pref.get_host_arch() {
+ return .test
+ } else if arch == ._auto {
+ if backend_arg == 'c' { // .c.v
+ return if backend == 'c' { ShouldTestStatus.test } else { ShouldTestStatus.skip }
+ }
+ if backend_arg == 'js' {
+ return if backend == 'js' { ShouldTestStatus.test } else { ShouldTestStatus.skip }
+ }
+ } else {
+ return .skip
+ }
+ }
+ return .ignore
+}
diff --git a/v_windows/v/old/cmd/tools/vtracev.v b/v_windows/v/old/cmd/tools/vtracev.v
new file mode 100644
index 0000000..1666188
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vtracev.v
@@ -0,0 +1,17 @@
+module main
+
+import os
+import v.pref
+
+fn main() {
+ vexe := pref.vexe_path()
+ vroot := os.dir(vexe)
+ os.chdir(vroot)
+ os.setenv('VCOLORS', 'always', true)
+ self_idx := os.args.index('tracev')
+ args := os.args[1..self_idx]
+ args_str := args.join(' ')
+ options := if args.len > 0 { '($args_str)' } else { '' }
+ println('Compiling a `tracev` executable ${options}...')
+ os.system('$vexe -cg -d trace_parser -d trace_checker -d trace_gen -o tracev $args_str cmd/v')
+}
diff --git a/v_windows/v/old/cmd/tools/vup.exe b/v_windows/v/old/cmd/tools/vup.exe
new file mode 100644
index 0000000..cdaa765
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vup.exe
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vup.v b/v_windows/v/old/cmd/tools/vup.v
new file mode 100644
index 0000000..db6684a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vup.v
@@ -0,0 +1,164 @@
+module main
+
+import os
+import v.pref
+import v.util.version
+import v.util.recompilation
+
+struct App {
+ is_verbose bool
+ is_prod bool
+ vexe string
+ vroot string
+}
+
+fn new_app() App {
+ vexe := os.real_path(pref.vexe_path())
+ vroot := os.dir(vexe)
+ return App{
+ is_verbose: '-v' in os.args
+ is_prod: '-prod' in os.args
+ vexe: vexe
+ vroot: vroot
+ }
+}
+
+fn main() {
+ app := new_app()
+ recompilation.must_be_enabled(app.vroot, 'Please install V from source, to use `v up` .')
+ os.chdir(app.vroot)
+ println('Updating V...')
+ app.update_from_master()
+ v_hash := version.githash(false)
+ current_hash := version.githash(true)
+ // println(v_hash)
+ // println(current_hash)
+ if v_hash == current_hash {
+ app.show_current_v_version()
+ return
+ }
+ $if windows {
+ app.backup('cmd/tools/vup.exe')
+ }
+ app.recompile_v()
+ app.recompile_vup()
+ app.show_current_v_version()
+}
+
+fn (app App) vprintln(s string) {
+ if app.is_verbose {
+ println(s)
+ }
+}
+
+fn (app App) update_from_master() {
+ app.vprintln('> updating from master ...')
+ if !os.exists('.git') {
+ // initialize as if it had been cloned
+ app.git_command('init')
+ app.git_command('remote add origin https://github.com/vlang/v')
+ app.git_command('fetch')
+ app.git_command('reset --hard origin/master')
+ app.git_command('clean --quiet -xdf --exclude v.exe --exclude cmd/tools/vup.exe')
+ } else {
+ // pull latest
+ app.git_command('pull https://github.com/vlang/v master')
+ }
+}
+
+fn (app App) recompile_v() {
+ // NB: app.vexe is more reliable than just v (which may be a symlink)
+ opts := if app.is_prod { '-prod' } else { '' }
+ vself := '"$app.vexe" $opts self'
+ app.vprintln('> recompiling v itself with `$vself` ...')
+ self_result := os.execute(vself)
+ if self_result.exit_code == 0 {
+ println(self_result.output.trim_space())
+ return
+ } else {
+ app.vprintln('`$vself` failed, running `make`...')
+ app.vprintln(self_result.output.trim_space())
+ }
+ app.make(vself)
+}
+
+fn (app App) recompile_vup() {
+ vup_result := os.execute('"$app.vexe" -g cmd/tools/vup.v')
+ if vup_result.exit_code != 0 {
+ eprintln('recompiling vup.v failed:')
+ eprintln(vup_result.output)
+ }
+}
+
+fn (app App) make(vself string) {
+ mut make := 'make'
+ $if windows {
+ make = 'make.bat'
+ }
+ make_result := os.execute(make)
+ if make_result.exit_code != 0 {
+ eprintln('> $make failed:')
+ eprintln('> make output:')
+ eprintln(make_result.output)
+ return
+ }
+ app.vprintln(make_result.output)
+}
+
+fn (app App) show_current_v_version() {
+ vout := os.execute('"$app.vexe" version')
+ if vout.exit_code >= 0 {
+ mut vversion := vout.output.trim_space()
+ if vout.exit_code == 0 {
+ latest_v_commit := vversion.split(' ').last().all_after('.')
+ latest_v_commit_time := os.execute('git show -s --format=%ci $latest_v_commit')
+ if latest_v_commit_time.exit_code == 0 {
+ vversion += ', timestamp: ' + latest_v_commit_time.output.trim_space()
+ }
+ }
+ println('Current V version:')
+ println(vversion)
+ }
+}
+
+fn (app App) backup(file string) {
+ backup_file := '${file}_old.exe'
+ if os.exists(backup_file) {
+ os.rm(backup_file) or { eprintln('failed removing $backup_file: $err.msg') }
+ }
+ os.mv(file, backup_file) or { eprintln('failed moving $file: $err.msg') }
+}
+
+fn (app App) git_command(command string) {
+ app.vprintln('git_command: git $command')
+ git_result := os.execute('git $command')
+ if git_result.exit_code < 0 {
+ app.get_git()
+ // Try it again with (maybe) git installed
+ os.execute_or_exit('git $command')
+ }
+ if git_result.exit_code != 0 {
+ eprintln(git_result.output)
+ exit(1)
+ }
+ app.vprintln(git_result.output)
+}
+
+fn (app App) get_git() {
+ $if windows {
+ println('Downloading git 32 bit for Windows, please wait.')
+ // We'll use 32 bit because maybe someone out there is using 32-bit windows
+ res_download := os.execute('bitsadmin.exe /transfer "vgit" https://github.com/git-for-windows/git/releases/download/v2.30.0.windows.2/Git-2.30.0.2-32-bit.exe "$os.getwd()/git32.exe"')
+ if res_download.exit_code != 0 {
+ eprintln('Unable to install git automatically: please install git manually')
+ panic(res_download.output)
+ }
+ res_git32 := os.execute('$os.getwd()/git32.exe')
+ if res_git32.exit_code != 0 {
+ eprintln('Unable to install git automatically: please install git manually')
+ panic(res_git32.output)
+ }
+ } $else { // Probably some kind of *nix, usually need to get using a package manager.
+ eprintln("error: Install `git` using your system's package manager")
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.out b/v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.out
new file mode 100644
index 0000000..e10d511
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.out
@@ -0,0 +1,2 @@
+cmd/tools/vvet/tests/array_init_one_val.vv:2: error: Use `var == value` instead of `var in [value]`
+NB: You can run `v fmt -w file.v` to fix these errors automatically
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.vv b/v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.vv
new file mode 100644
index 0000000..2aa3514
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/array_init_one_val.vv
@@ -0,0 +1,5 @@
+fn main() {
+ if 1 in [1] {
+ println('hello world')
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.out b/v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.out
new file mode 100644
index 0000000..b307e20
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.out
@@ -0,0 +1,6 @@
+cmd/tools/vvet/tests/indent_with_space.vv:2: error: Looks like you are using spaces for indentation.
+cmd/tools/vvet/tests/indent_with_space.vv:10: error: Looks like you are using spaces for indentation.
+cmd/tools/vvet/tests/indent_with_space.vv:17: error: Looks like you are using spaces for indentation.
+cmd/tools/vvet/tests/indent_with_space.vv:20: error: Looks like you are using spaces for indentation.
+cmd/tools/vvet/tests/indent_with_space.vv:22: error: Looks like you are using spaces for indentation.
+NB: You can run `v fmt -w file.v` to fix these errors automatically
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.vv b/v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.vv
new file mode 100644
index 0000000..9b466ef
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/indent_with_space.vv
@@ -0,0 +1,24 @@
+fn main() {
+ _ = 1 == 2
+}
+
+fn block_comments() {
+ /* tab to indent the comment
+ spaces before
+ also spaces before
+ same here */
+ /* spaces for comment indentation (ouch)
+ and inside too
+ */
+}
+
+fn space_inside_strings() {
+ // Plain strings
+ str := "Bad space usage for variable indentation.
+ Here it's fine.
+ Here too."
+ str2 := 'linebreak and space\n inside'
+ // String interpolation
+ si1 := 'Error here $foo
+ and not here'
+}
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/module_file_test.out b/v_windows/v/old/cmd/tools/vvet/tests/module_file_test.out
new file mode 100644
index 0000000..b033e71
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/module_file_test.out
@@ -0,0 +1,5 @@
+cmd/tools/vvet/tests/module_file_test.vv:7: warning: Function documentation seems to be missing for "pub fn foo() string".
+cmd/tools/vvet/tests/module_file_test.vv:13: warning: A function name is missing from the documentation of "pub fn bar() string".
+cmd/tools/vvet/tests/module_file_test.vv:35: warning: Function documentation seems to be missing for "pub fn (f Foo) foo() string".
+cmd/tools/vvet/tests/module_file_test.vv:46: warning: A function name is missing from the documentation of "pub fn (f Foo) fooo() string".
+cmd/tools/vvet/tests/module_file_test.vv:52: warning: The documentation for "pub fn (f Foo) boo() string" seems incomplete. \ No newline at end of file
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/module_file_test.vv b/v_windows/v/old/cmd/tools/vvet/tests/module_file_test.vv
new file mode 100644
index 0000000..f0f5b24
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/module_file_test.vv
@@ -0,0 +1,55 @@
+module foo
+
+struct Foo {
+ foo int
+}
+
+pub fn foo() string {
+ // Missing doc
+ return 'foo'
+}
+
+// foo does bar
+pub fn bar() string {
+ // not using convention style: '// <fn name>'
+ return 'bar'
+}
+
+// fooo does x
+pub fn fooo() string {
+ // Documented
+ return 'fooo'
+}
+
+// booo does x
+fn booo() string {
+ // Documented, but not pub
+ return 'booo'
+}
+
+fn boo() string {
+ // Missing doc
+ return 'boo'
+}
+
+pub fn (f Foo) foo() string {
+ // Missing doc
+ return f.fo()
+}
+
+fn (f Foo) fo() string {
+ // Missing doc, but not pub
+ return 'foo'
+}
+
+// wrong doc
+pub fn (f Foo) fooo() string {
+ // not using convention
+ return f.fo()
+}
+
+// boo
+pub fn (f Foo) boo() string {
+ // Incomplete doc
+ return f.fo()
+}
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.out b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.out
new file mode 100644
index 0000000..dbda99a
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.out
@@ -0,0 +1,2 @@
+cmd/tools/vvet/tests/parens_space_a.vv:1: error: Looks like you are adding a space after `(`
+NB: You can run `v fmt -w file.v` to fix these errors automatically
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.vv b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.vv
new file mode 100644
index 0000000..2b3b508
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_a.vv
@@ -0,0 +1,4 @@
+fn main() {
+ _ = 1 + ( 1 + 2)
+}
+
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.out b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.out
new file mode 100644
index 0000000..d1d8791
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.out
@@ -0,0 +1,2 @@
+cmd/tools/vvet/tests/parens_space_b.vv:1: error: Looks like you are adding a space before `)`
+NB: You can run `v fmt -w file.v` to fix these errors automatically
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.vv b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.vv
new file mode 100644
index 0000000..9ea8932
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/parens_space_b.vv
@@ -0,0 +1,4 @@
+fn main() {
+ _ = 1 + (1 + 2 )
+}
+
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/trailing_space.out b/v_windows/v/old/cmd/tools/vvet/tests/trailing_space.out
new file mode 100644
index 0000000..1899a21
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/trailing_space.out
@@ -0,0 +1,7 @@
+cmd/tools/vvet/tests/trailing_space.vv:5: error: Looks like you have trailing whitespace.
+cmd/tools/vvet/tests/trailing_space.vv:6: error: Looks like you have trailing whitespace.
+cmd/tools/vvet/tests/trailing_space.vv:7: error: Looks like you have trailing whitespace.
+cmd/tools/vvet/tests/trailing_space.vv:8: error: Looks like you have trailing whitespace.
+cmd/tools/vvet/tests/trailing_space.vv:9: error: Looks like you have trailing whitespace.
+cmd/tools/vvet/tests/trailing_space.vv:13: error: Looks like you have trailing whitespace.
+cmd/tools/vvet/tests/trailing_space.vv:15: error: Looks like you have trailing whitespace.
diff --git a/v_windows/v/old/cmd/tools/vvet/tests/trailing_space.vv b/v_windows/v/old/cmd/tools/vvet/tests/trailing_space.vv
new file mode 100644
index 0000000..4fe733e
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/tests/trailing_space.vv
@@ -0,0 +1,16 @@
+// NB: This file has and *should* have trailing spaces.
+// When making changes, please ensure they are not removed.
+
+fn after_comments() {
+ // spaces after line comments give errors
+ /*
+ in block comments
+ too
+ */
+}
+
+fn main() {
+ var := 'error about the spaces right there'
+ no_err := "inside multi line strings it's fine.
+but not after"
+}
diff --git a/v_windows/v/old/cmd/tools/vvet/vet_test.v b/v_windows/v/old/cmd/tools/vvet/vet_test.v
new file mode 100644
index 0000000..3291f20
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/vet_test.v
@@ -0,0 +1,72 @@
+import os
+import rand
+import term
+import v.util.vtest
+import v.util.diff
+
+const diff_cmd = find_diff_cmd()
+
+fn find_diff_cmd() string {
+ res := diff.find_working_diff_command() or { '' }
+ return res
+}
+
+fn test_vet() {
+ vexe := os.getenv('VEXE')
+ vroot := os.dir(vexe)
+ os.chdir(vroot)
+ test_dir := 'cmd/tools/vvet/tests'
+ tests := get_tests_in_dir(test_dir)
+ fails := check_path(vexe, test_dir, tests)
+ assert fails == 0
+}
+
+fn get_tests_in_dir(dir string) []string {
+ files := os.ls(dir) or { panic(err) }
+ mut tests := files.filter(it.ends_with('.vv'))
+ tests.sort()
+ return tests
+}
+
+fn check_path(vexe string, dir string, tests []string) int {
+ mut nb_fail := 0
+ paths := vtest.filter_vtest_only(tests, basepath: dir)
+ for path in paths {
+ program := path
+ print(path + ' ')
+ // -force is needed so that `v vet` would not skip the regression files
+ res := os.execute('$vexe vet -force -nocolor $program')
+ if res.exit_code < 0 {
+ panic(res.output)
+ }
+ mut expected := os.read_file(program.replace('.vv', '') + '.out') or { panic(err) }
+ expected = clean_line_endings(expected)
+ found := clean_line_endings(res.output)
+ if expected != found {
+ println(term.red('FAIL'))
+ println('============')
+ println('expected:')
+ println(expected)
+ println('============')
+ println('found:')
+ println(found)
+ println('============\n')
+ println('diff:')
+ println(diff.color_compare_strings(diff_cmd, rand.ulid(), found, expected))
+ println('============\n')
+ nb_fail++
+ } else {
+ println(term.green('OK'))
+ }
+ }
+ return nb_fail
+}
+
+fn clean_line_endings(s string) string {
+ mut res := s.trim_space()
+ res = res.replace(' \n', '\n')
+ res = res.replace(' \r\n', '\n')
+ res = res.replace('\r\n', '\n')
+ res = res.trim('\n')
+ return res
+}
diff --git a/v_windows/v/old/cmd/tools/vvet/vvet.v b/v_windows/v/old/cmd/tools/vvet/vvet.v
new file mode 100644
index 0000000..fd04b40
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vvet/vvet.v
@@ -0,0 +1,256 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license that can be found in the LICENSE file.
+module main
+
+import os
+import os.cmdline
+import v.vet
+import v.pref
+import v.parser
+import v.token
+import v.ast
+import term
+
+struct Vet {
+ opt Options
+mut:
+ errors []vet.Error
+ warns []vet.Error
+ file string
+}
+
+struct Options {
+ is_force bool
+ is_werror bool
+ is_verbose bool
+ show_warnings bool
+ use_color bool
+}
+
+const term_colors = term.can_show_color_on_stderr()
+
+fn main() {
+ vet_options := cmdline.options_after(os.args, ['vet'])
+ mut vt := Vet{
+ opt: Options{
+ is_force: '-force' in vet_options
+ is_werror: '-W' in vet_options
+ is_verbose: '-verbose' in vet_options || '-v' in vet_options
+ show_warnings: '-hide-warnings' !in vet_options && '-w' !in vet_options
+ use_color: '-color' in vet_options || (term_colors && '-nocolor' !in vet_options)
+ }
+ }
+ mut paths := cmdline.only_non_options(vet_options)
+ vtmp := os.getenv('VTMP')
+ if vtmp != '' {
+ // `v test-cleancode` passes also `-o tmpfolder` as well as all options in VFLAGS
+ paths = paths.filter(!it.starts_with(vtmp))
+ }
+ for path in paths {
+ if !os.exists(path) {
+ eprintln('File/folder $path does not exist')
+ continue
+ }
+ if os.is_file(path) {
+ vt.vet_file(path)
+ }
+ if os.is_dir(path) {
+ vt.vprintln("vetting folder: '$path' ...")
+ vfiles := os.walk_ext(path, '.v')
+ vvfiles := os.walk_ext(path, '.vv')
+ mut files := []string{}
+ files << vfiles
+ files << vvfiles
+ for file in files {
+ vt.vet_file(file)
+ }
+ }
+ }
+ vfmt_err_count := vt.errors.filter(it.fix == .vfmt).len
+ if vt.opt.show_warnings {
+ for w in vt.warns {
+ eprintln(vt.e2string(w))
+ }
+ }
+ for err in vt.errors {
+ eprintln(vt.e2string(err))
+ }
+ if vfmt_err_count > 0 {
+ eprintln('NB: You can run `v fmt -w file.v` to fix these errors automatically')
+ }
+ if vt.errors.len > 0 {
+ exit(1)
+ }
+}
+
+// vet_file vets the file read from `path`.
+fn (mut vt Vet) vet_file(path string) {
+ if path.contains('/tests/') && !vt.opt.is_force {
+ // skip all /tests/ files, since usually their content is not
+ // important enough to be documented/vetted, and they may even
+ // contain intentionally invalid code.
+ vt.vprintln("skipping test file: '$path' ...")
+ return
+ }
+ vt.file = path
+ mut prefs := pref.new_preferences()
+ prefs.is_vet = true
+ prefs.is_vsh = path.ends_with('.vsh')
+ table := ast.new_table()
+ vt.vprintln("vetting file '$path'...")
+ _, errors := parser.parse_vet_file(path, table, prefs)
+ // Transfer errors from scanner and parser
+ vt.errors << errors
+ // Scan each line in file for things to improve
+ source_lines := os.read_lines(vt.file) or { []string{} }
+ for lnumber, line in source_lines {
+ vt.vet_line(source_lines, line, lnumber)
+ }
+}
+
+// vet_line vets the contents of `line` from `vet.file`.
+fn (mut vt Vet) vet_line(lines []string, line string, lnumber int) {
+ // Vet public functions
+ if line.starts_with('pub fn') || (line.starts_with('fn ') && !(line.starts_with('fn C.')
+ || line.starts_with('fn main'))) {
+ // Scan function declarations for missing documentation
+ is_pub_fn := line.starts_with('pub fn')
+ if lnumber > 0 {
+ collect_tags := fn (line string) []string {
+ mut cleaned := line.all_before('/')
+ cleaned = cleaned.replace_each(['[', '', ']', '', ' ', ''])
+ return cleaned.split(',')
+ }
+ ident_fn_name := fn (line string) string {
+ mut fn_idx := line.index(' fn ') or { return '' }
+ if line.len < fn_idx + 5 {
+ return ''
+ }
+ mut tokens := line[fn_idx + 4..].split(' ')
+ // Skip struct identifier
+ if tokens.first().starts_with('(') {
+ fn_idx = line.index(')') or { return '' }
+ tokens = line[fn_idx..].split(' ')
+ if tokens.len > 1 {
+ tokens = [tokens[1]]
+ }
+ }
+ if tokens.len > 0 {
+ return tokens[0].all_before('(')
+ }
+ return ''
+ }
+ mut line_above := lines[lnumber - 1]
+ mut tags := []string{}
+ if !line_above.starts_with('//') {
+ mut grab := true
+ for j := lnumber - 1; j >= 0; j-- {
+ prev_line := lines[j]
+ if prev_line.contains('}') { // We've looked back to the above scope, stop here
+ break
+ } else if prev_line.starts_with('[') {
+ tags << collect_tags(prev_line)
+ continue
+ } else if prev_line.starts_with('//') { // Single-line comment
+ grab = false
+ break
+ }
+ }
+ if grab {
+ clean_line := line.all_before_last('{').trim(' ')
+ if is_pub_fn {
+ vt.warn('Function documentation seems to be missing for "$clean_line".',
+ lnumber, .doc)
+ }
+ }
+ } else {
+ fn_name := ident_fn_name(line)
+ mut grab := true
+ for j := lnumber - 1; j >= 0; j-- {
+ prev_line := lines[j]
+ if prev_line.contains('}') { // We've looked back to the above scope, stop here
+ break
+ } else if prev_line.starts_with('// $fn_name ') {
+ grab = false
+ break
+ } else if prev_line.starts_with('// $fn_name') {
+ grab = false
+ if is_pub_fn {
+ clean_line := line.all_before_last('{').trim(' ')
+ vt.warn('The documentation for "$clean_line" seems incomplete.',
+ lnumber, .doc)
+ }
+ break
+ } else if prev_line.starts_with('[') {
+ tags << collect_tags(prev_line)
+ continue
+ } else if prev_line.starts_with('//') { // Single-line comment
+ continue
+ }
+ }
+ if grab {
+ clean_line := line.all_before_last('{').trim(' ')
+ if is_pub_fn {
+ vt.warn('A function name is missing from the documentation of "$clean_line".',
+ lnumber, .doc)
+ }
+ }
+ }
+ }
+ }
+}
+
+fn (vt &Vet) vprintln(s string) {
+ if !vt.opt.is_verbose {
+ return
+ }
+ println(s)
+}
+
+fn (vt &Vet) e2string(err vet.Error) string {
+ mut kind := '$err.kind:'
+ mut location := '$err.file_path:$err.pos.line_nr:'
+ if vt.opt.use_color {
+ kind = match err.kind {
+ .warning { term.magenta(kind) }
+ .error { term.red(kind) }
+ }
+ kind = term.bold(kind)
+ location = term.bold(location)
+ }
+ return '$location $kind $err.message'
+}
+
+fn (mut vt Vet) error(msg string, line int, fix vet.FixKind) {
+ pos := token.Position{
+ line_nr: line + 1
+ }
+ vt.errors << vet.Error{
+ message: msg
+ file_path: vt.file
+ pos: pos
+ kind: .error
+ fix: fix
+ typ: .default
+ }
+}
+
+fn (mut vt Vet) warn(msg string, line int, fix vet.FixKind) {
+ pos := token.Position{
+ line_nr: line + 1
+ }
+ mut w := vet.Error{
+ message: msg
+ file_path: vt.file
+ pos: pos
+ kind: .warning
+ fix: fix
+ typ: .default
+ }
+ if vt.opt.is_werror {
+ w.kind = .error
+ vt.errors << w
+ } else {
+ vt.warns << w
+ }
+}
diff --git a/v_windows/v/old/cmd/tools/vwatch.exe b/v_windows/v/old/cmd/tools/vwatch.exe
new file mode 100644
index 0000000..ade1298
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vwatch.exe
Binary files differ
diff --git a/v_windows/v/old/cmd/tools/vwatch.v b/v_windows/v/old/cmd/tools/vwatch.v
new file mode 100644
index 0000000..0ac243d
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vwatch.v
@@ -0,0 +1,381 @@
+module main
+
+import os
+import time
+import term
+import flag
+
+// Maximum time in seconds that one worker process keeps scanning before it exits.
+const scan_timeout_s = get_scan_timeout_seconds()
+
+// How many times a single worker restarts the V compiler before it is recycled
+// by the manager (workers exit with 255 when this limit is hit).
+const max_v_cycles = 1000
+
+// How many times per second the watched folders are polled for changes.
+const scan_frequency_hz = 4
+
+// Interval in milliseconds between two consecutive scans.
+const scan_period_ms = 1000 / scan_frequency_hz
+
+// Upper bound on scan iterations for one worker, derived from the timeout.
+const max_scan_cycles = scan_timeout_s * scan_frequency_hz
+
+// get_scan_timeout_seconds returns the VWATCH_TIMEOUT environment value in
+// seconds; when unset/zero, it defaults to 5 minutes, or to effectively
+// unlimited when compiled with -gc boehm.
+fn get_scan_timeout_seconds() int {
+	env_vw_timeout := os.getenv('VWATCH_TIMEOUT').int()
+	if env_vw_timeout != 0 {
+		return env_vw_timeout
+	}
+	$if gcboehm ? {
+		return 35000000 // over 1 year
+	} $else {
+		return 5 * 60
+	}
+}
+
+//
+// Implements `v watch file.v` , `v watch run file.v` etc.
+// With this command, V will collect all .v files that are needed for the
+// compilation, then it will enter an infinite loop, monitoring them for
+// changes.
+//
+// When a change is detected, it will stop the current process, if it is
+// still running, then rerun/recompile/etc.
+//
+// In effect, this makes it easy to have an editor session and a separate
+// terminal, running just `v watch run file.v`, and you will see your
+// changes right after you save your .v file in your editor.
+//
+//
+// Since -gc boehm is not available on all platforms yet,
+// and this program leaks ~8MB/minute without it, the implementation here
+// is done similarly to vfmt in 2 modes, in the same executable:
+//
+// a) A parent/manager process that only manages a single worker
+// process. The parent process does mostly nothing except restarting
+// workers, thus it does not leak much.
+//
+// b) A worker process, doing the actual monitoring/polling.
+// NB: *workers are started with the --vwatchworker option*
+//
+// Worker processes will run for a limited number of iterations, then
+// they will do exit(255), and then the parent will start a new worker.
+// Exiting by any other code will cause the parent to also exit with the
+// same error code. This limits how much memory a worker process can leak,
+// even without using the garbage collection mode.
+//
+
+// VFileStat pairs a watched file path with its last known modification time.
+struct VFileStat {
+	path  string // full real path of the watched file
+	mtime int    // unix modification timestamp (os.file_last_mod_unix)
+}
+
+// free releases the heap memory of the path string manually.
+// Marked [unsafe], since the compiler cannot verify the manual free.
+[unsafe]
+fn (mut vfs VFileStat) free() {
+	unsafe { vfs.path.free() }
+}
+
+// RerunCommand tells the compilation runner loop what to do next.
+enum RerunCommand {
+	restart // kill the current child process group and recompile/rerun
+	quit    // stop the runner loop entirely
+}
+
+// Context is the shared state of the watcher; a single global instance
+// (see ccontext) is used by both the manager and the worker mode.
+struct Context {
+mut:
+	pid             int  // the pid of the current process; useful while debugging manager/worker interactions
+	is_worker       bool // true in the workers, false in the manager process
+	check_period_ms int = scan_period_ms
+	vexe            string
+	affected_paths  []string
+	vfiles          []VFileStat
+	opts            []string
+	rerun_channel   chan RerunCommand
+	child_process   &os.Process
+	is_exiting      bool     // set by SIGINT/Ctrl-C
+	v_cycles        int      // how many times the worker has restarted the V compiler
+	scan_cycles     int      // how many times the worker has scanned for source file changes
+	clear_terminal  bool     // whether to clear the terminal before each re-run
+	silent          bool     // when true, watch will not print a timestamp line before each re-run
+	add_files       []string // path to additional files that have to be watched for changes
+	ignore_exts     []string // extensions of files that will be ignored, even if they change (useful for sqlite.db files for example)
+	cmd_before_run  string   // a command to run before each re-run
+	cmd_after_run   string   // a command to run after each re-run
+}
+
+// elog prints a debug trace line, prefixed with the process pid.
+// The whole function compiles to nothing unless `-d debug_vwatch` is passed.
+[if debug_vwatch ?]
+fn (mut context Context) elog(msg string) {
+	eprintln('> vwatch $context.pid, $msg')
+}
+
+// str returns a single-line, human readable summary of the context state,
+// used for debugging. NB: the original output was missing the closing `}`
+// of the `Context{ ... }` wrapper; it is balanced now.
+fn (context &Context) str() string {
+	return 'Context{ pid: $context.pid, is_worker: $context.is_worker, check_period_ms: $context.check_period_ms, vexe: $context.vexe, opts: $context.opts, is_exiting: $context.is_exiting, vfiles: $context.vfiles }'
+}
+
+// get_stats_for_affected_vfiles returns fresh modification time stats for all
+// files that can affect the compilation. On the first call, it determines the
+// folders to watch by asking V itself to list the needed source files
+// (-print-v-files), and caches their parent folders in context.affected_paths;
+// later calls just rescan those cached folders.
+fn (mut context Context) get_stats_for_affected_vfiles() []VFileStat {
+	if context.affected_paths.len == 0 {
+		mut apaths := map[string]bool{}
+		// The next command will make V parse the program, and print all .v files,
+		// needed for its compilation, without actually compiling it.
+		copts := context.opts.join(' ')
+		cmd := '"$context.vexe" -silent -print-v-files $copts'
+		// context.elog('> cmd: $cmd')
+		mut paths := []string{}
+		if context.add_files.len > 0 && context.add_files[0] != '' {
+			// files explicitly added on the command line with --add
+			paths << context.add_files
+		}
+		vfiles := os.execute(cmd)
+		if vfiles.exit_code == 0 {
+			paths_trimmed := vfiles.output.trim_space()
+			paths << paths_trimmed.split('\n')
+		}
+		// deduplicate down to the set of parent folders:
+		for vf in paths {
+			apaths[os.real_path(os.dir(vf))] = true
+		}
+		context.affected_paths = apaths.keys()
+		// context.elog('vfiles paths to be scanned: $context.affected_paths')
+	}
+	// scan all files in the found folders
+	mut newstats := []VFileStat{}
+	for path in context.affected_paths {
+		mut files := os.ls(path) or { []string{} }
+		for pf in files {
+			pf_ext := os.file_ext(pf).to_lower()
+			if pf_ext in ['', '.bak', '.exe', '.dll', '.so', '.def'] {
+				continue
+			}
+			if pf_ext in context.ignore_exts {
+				continue
+			}
+			// skip editor lock/backup files, like `.#main.v` and `main.v~`
+			if pf.starts_with('.#') {
+				continue
+			}
+			if pf.ends_with('~') {
+				continue
+			}
+			f := os.join_path(path, pf)
+			fullpath := os.real_path(f)
+			mtime := os.file_last_mod_unix(fullpath)
+			newstats << VFileStat{fullpath, mtime}
+		}
+	}
+	// always add the v compiler itself, so that if it is recompiled with `v self`
+	// the watcher will rerun the compilation too
+	newstats << VFileStat{context.vexe, os.file_last_mod_unix(context.vexe)}
+	return newstats
+}
+
+// get_changed_vfiles rescans all watched files, and returns how many of them
+// are new or have a different mtime compared to the previous scan.
+// The freshly gathered stats replace context.vfiles.
+fn (mut context Context) get_changed_vfiles() int {
+	mut changed := 0
+	newfiles := context.get_stats_for_affected_vfiles()
+	for vfs in newfiles {
+		mut found := false
+		for existing_vfs in context.vfiles {
+			if existing_vfs.path == vfs.path {
+				found = true
+				if existing_vfs.mtime != vfs.mtime {
+					context.elog('> new updates for file: $vfs')
+					changed++
+				}
+				break
+			}
+		}
+		if !found {
+			// a brand new file appeared since the last scan
+			changed++
+		}
+	}
+	context.vfiles = newfiles
+	if changed > 0 {
+		context.elog('> get_changed_vfiles: $changed')
+	}
+	return changed
+}
+
+// change_detection_loop polls the watched files every check_period_ms.
+// When changes are found, it asks the runner loop (over rerun_channel) to
+// restart the compilation. Once the worker reaches its maximum number of
+// recompilation or scan cycles, it exits with code 255, so that the manager
+// replaces it with a fresh worker process (bounding any memory leaks).
+fn change_detection_loop(ocontext &Context) {
+	// cast away the reference immutability; this loop mutates the context
+	mut context := unsafe { ocontext }
+	for {
+		if context.v_cycles >= max_v_cycles || context.scan_cycles >= max_scan_cycles {
+			context.is_exiting = true
+			context.kill_pgroup()
+			// give the runner loop a moment to notice is_exiting and return
+			time.sleep(50 * time.millisecond)
+			exit(255)
+		}
+		if context.is_exiting {
+			return
+		}
+		changes := context.get_changed_vfiles()
+		if changes > 0 {
+			context.rerun_channel <- RerunCommand.restart
+		}
+		time.sleep(context.check_period_ms * time.millisecond)
+		context.scan_cycles++
+	}
+}
+
+// kill_pgroup stops the whole process group of the current child process
+// (the child is started with use_pgroup), then waits on it to reap it.
+fn (mut context Context) kill_pgroup() {
+	if context.child_process == 0 {
+		// no child process has been started yet
+		return
+	}
+	if context.child_process.is_alive() {
+		context.child_process.signal_pgkill()
+	}
+	context.child_process.wait()
+}
+
+// run_before_cmd executes the optional --before command, prior to each re-run.
+fn (mut context Context) run_before_cmd() {
+	if context.cmd_before_run == '' {
+		return
+	}
+	context.elog('> run_before_cmd: "$context.cmd_before_run"')
+	os.system(context.cmd_before_run)
+}
+
+// run_after_cmd executes the optional --after command, once a re-run finishes.
+fn (mut context Context) run_after_cmd() {
+	if context.cmd_after_run == '' {
+		return
+	}
+	context.elog('> run_after_cmd: "$context.cmd_after_run"')
+	os.system(context.cmd_after_run)
+}
+
+// compilation_runner_loop is the heart of the worker process. After the
+// change detection loop reports the initial file set (the first message on
+// rerun_channel), it repeatedly: optionally clears the terminal, runs the
+// --before command, starts `v <opts>` as a child process in its own process
+// group, then polls both the child and rerun_channel — killing and
+// restarting the child on .restart, and returning on .quit or is_exiting.
+fn (mut context Context) compilation_runner_loop() {
+	cmd := '"$context.vexe" ${context.opts.join(' ')}'
+	// block until the change detection loop sends its first restart command
+	_ := <-context.rerun_channel
+	for {
+		context.elog('>> loop: v_cycles: $context.v_cycles')
+		if context.clear_terminal {
+			term.clear()
+		}
+		context.run_before_cmd()
+		timestamp := time.now().format_ss_milli()
+		context.child_process = os.new_process(context.vexe)
+		context.child_process.use_pgroup = true
+		context.child_process.set_args(context.opts)
+		context.child_process.run()
+		if !context.silent {
+			eprintln('$timestamp: $cmd | pid: ${context.child_process.pid:7d} | reload cycle: ${context.v_cycles:5d}')
+		}
+		for {
+			mut notalive_count := 0
+			mut cmds := []RerunCommand{}
+			for {
+				if context.is_exiting {
+					return
+				}
+				if !context.child_process.is_alive() {
+					context.child_process.wait()
+					notalive_count++
+					if notalive_count == 1 {
+						// a short lived process finished, do cleanup:
+						context.run_after_cmd()
+					}
+				}
+				select {
+					action := <-context.rerun_channel {
+						// accumulate commands; only act on them on the timeout tick
+						cmds << action
+						if action == .quit {
+							context.kill_pgroup()
+							return
+						}
+					}
+					100 * time.millisecond {
+						// timeout tick: restart the child if any .restart arrived
+						should_restart := RerunCommand.restart in cmds
+						cmds = []
+						if should_restart {
+							// context.elog('>>>>>>>> KILLING $context.child_process.pid')
+							context.kill_pgroup()
+							break
+						}
+					}
+				}
+			}
+			if !context.child_process.is_alive() {
+				context.child_process.wait()
+				context.child_process.close()
+				if notalive_count == 0 {
+					// a long running process was killed, do cleanup:
+					context.run_after_cmd()
+				}
+				break
+			}
+		}
+		context.v_cycles++
+	}
+}
+
+// ccontext is the single global Context instance. A global is needed, so that
+// the signal handler in worker_main (which cannot capture locals) can reach it.
+const ccontext = Context{
+	child_process: 0
+}
+
+// main parses the command line options, then runs either as the manager
+// process (the default), or as a monitoring worker (--vwatchworker).
+fn main() {
+	dump(scan_timeout_s)
+	// manager and worker share the single global context (see ccontext)
+	mut context := unsafe { &Context(voidptr(&ccontext)) }
+	context.pid = os.getpid()
+	context.vexe = os.getenv('VEXE')
+
+	mut fp := flag.new_flag_parser(os.args[1..])
+	fp.application('v watch')
+	if os.args[1] == 'watch' {
+		// invoked as `v watch ...` => skip the `watch` subcommand itself
+		fp.skip_executable()
+	}
+	fp.version('0.0.2')
+	fp.description('Collect all .v files needed for a compilation, then re-run the compilation when any of the source changes.')
+	fp.arguments_description('[--silent] [--clear] [--ignore .db] [--add /path/to/a/file.v] [run] program.v')
+	fp.allow_unknown_args()
+	fp.limit_free_args_to_at_least(1)
+	context.is_worker = fp.bool('vwatchworker', 0, false, 'Internal flag. Used to distinguish vwatch manager and worker processes.')
+	context.silent = fp.bool('silent', `s`, false, 'Be more silent; do not print the watch timestamp before each re-run.')
+	context.clear_terminal = fp.bool('clear', `c`, false, 'Clears the terminal before each re-run.')
+	context.add_files = fp.string('add', `a`, '', 'Add more files to be watched. Useful with `v watch -add=/tmp/feature.v run cmd/v /tmp/feature.v`, if you change *both* the compiler, and the feature.v file.').split(',')
+	context.ignore_exts = fp.string('ignore', `i`, '', 'Ignore files having these extensions. Useful with `v watch -ignore=.db run server.v`, if your server writes to an sqlite.db file in the same folder.').split(',')
+	show_help := fp.bool('help', `h`, false, 'Show this help screen.')
+	context.cmd_before_run = fp.string('before', 0, '', 'A command to execute *before* each re-run.')
+	context.cmd_after_run = fp.string('after', 0, '', 'A command to execute *after* each re-run.')
+	if show_help {
+		println(fp.usage())
+		exit(0)
+	}
+	// everything not recognised above is forwarded to the V compiler itself
+	remaining_options := fp.finalize() or {
+		eprintln('Error: $err')
+		exit(1)
+	}
+	context.opts = remaining_options
+	context.elog('>>> context.pid: $context.pid')
+	context.elog('>>> context.vexe: $context.vexe')
+	context.elog('>>> context.opts: $context.opts')
+	context.elog('>>> context.is_worker: $context.is_worker')
+	context.elog('>>> context.clear_terminal: $context.clear_terminal')
+	context.elog('>>> context.add_files: $context.add_files')
+	context.elog('>>> context.ignore_exts: $context.ignore_exts')
+	if context.is_worker {
+		context.worker_main()
+	} else {
+		context.manager_main()
+	}
+}
+
+// manager_main repeatedly (re)starts worker processes, forwarding the original
+// command line arguments plus --vwatchworker. A worker that exits with status
+// 255 is considered recycled, and is simply replaced by a fresh one; any other
+// exit ends the manager loop as well.
+fn (mut context Context) manager_main() {
+	myexecutable := os.executable()
+	mut worker_opts := ['--vwatchworker']
+	worker_opts << os.args[2..]
+	for {
+		mut worker_process := os.new_process(myexecutable)
+		worker_process.set_args(worker_opts)
+		worker_process.run()
+		for worker_process.is_alive() {
+			time.sleep(200 * time.millisecond)
+		}
+		worker_process.wait()
+		recycled := worker_process.status == .exited && worker_process.code == 255
+		worker_process.close()
+		if !recycled {
+			break
+		}
+	}
+}
+
+// worker_main runs in the worker process: it installs a Ctrl-C/SIGINT handler,
+// starts the compilation runner loop in a separate thread, then runs the
+// change detection loop on the main thread.
+fn (mut context Context) worker_main() {
+	context.rerun_channel = chan RerunCommand{cap: 10}
+	os.signal_opt(.int, fn (_ os.Signal) {
+		// signal handlers cannot capture locals; reach the global context instead
+		mut context := unsafe { &Context(voidptr(&ccontext)) }
+		context.is_exiting = true
+		context.kill_pgroup()
+	}) or { panic(err) }
+	go context.compilation_runner_loop()
+	change_detection_loop(context)
+}
diff --git a/v_windows/v/old/cmd/tools/vwipe-cache.v b/v_windows/v/old/cmd/tools/vwipe-cache.v
new file mode 100644
index 0000000..988b7c2
--- /dev/null
+++ b/v_windows/v/old/cmd/tools/vwipe-cache.v
@@ -0,0 +1,13 @@
+module main
+
+import os
+import v.vcache
+
+// main removes the whole V cache folder (see v.vcache), then reports where it was.
+fn main() {
+	mut cm := vcache.new_cache_manager([])
+	cpath := cm.basepath
+	if os.is_dir(cpath) && os.exists(cpath) {
+		os.rmdir_all(cpath) or {}
+	}
+	println('V cache folder $cpath was wiped.')
+}