Diffstat (limited to 'v_windows/v/vlib/net')
-rw-r--r--  v_windows/v/vlib/net/aasocket.c.v  104
-rw-r--r--  v_windows/v/vlib/net/address.v  258
-rw-r--r--  v_windows/v/vlib/net/address_darwin.c.v  74
-rw-r--r--  v_windows/v/vlib/net/address_default.c.v  32
-rw-r--r--  v_windows/v/vlib/net/address_freebsd.c.v  77
-rw-r--r--  v_windows/v/vlib/net/address_linux.c.v  63
-rw-r--r--  v_windows/v/vlib/net/address_test.v  98
-rw-r--r--  v_windows/v/vlib/net/address_windows.c.v  58
-rw-r--r--  v_windows/v/vlib/net/afunix.h  26
-rw-r--r--  v_windows/v/vlib/net/common.v  129
-rw-r--r--  v_windows/v/vlib/net/conv/conv.c.v  21
-rw-r--r--  v_windows/v/vlib/net/conv/conv_default.c.v  46
-rw-r--r--  v_windows/v/vlib/net/conv/conv_windows.c.v  21
-rw-r--r--  v_windows/v/vlib/net/errors.v  70
-rw-r--r--  v_windows/v/vlib/net/ftp/ftp.v  265
-rw-r--r--  v_windows/v/vlib/net/ftp/ftp_test.v  50
-rw-r--r--  v_windows/v/vlib/net/html/README.md  16
-rw-r--r--  v_windows/v/vlib/net/html/data_structures.v  91
-rw-r--r--  v_windows/v/vlib/net/html/dom.v  189
-rw-r--r--  v_windows/v/vlib/net/html/dom_test.v  56
-rw-r--r--  v_windows/v/vlib/net/html/html.v  18
-rw-r--r--  v_windows/v/vlib/net/html/html_test.v  15
-rw-r--r--  v_windows/v/vlib/net/html/parser.v  260
-rw-r--r--  v_windows/v/vlib/net/html/parser_test.v  41
-rw-r--r--  v_windows/v/vlib/net/html/tag.v  68
-rw-r--r--  v_windows/v/vlib/net/http/backend_nix.c.v  74
-rw-r--r--  v_windows/v/vlib/net/http/backend_windows.c.v  28
-rw-r--r--  v_windows/v/vlib/net/http/chunked/dechunk.v  72
-rw-r--r--  v_windows/v/vlib/net/http/cookie.v  413
-rw-r--r--  v_windows/v/vlib/net/http/cookie_test.v  468
-rw-r--r--  v_windows/v/vlib/net/http/download.v  18
-rw-r--r--  v_windows/v/vlib/net/http/download_nix.c.v  52
-rw-r--r--  v_windows/v/vlib/net/http/download_windows.c.v  29
-rw-r--r--  v_windows/v/vlib/net/http/header.v  698
-rw-r--r--  v_windows/v/vlib/net/http/header_test.v  387
-rw-r--r--  v_windows/v/vlib/net/http/http.v  186
-rw-r--r--  v_windows/v/vlib/net/http/http_httpbin_test.v  95
-rw-r--r--  v_windows/v/vlib/net/http/http_test.v  56
-rw-r--r--  v_windows/v/vlib/net/http/method.v  48
-rw-r--r--  v_windows/v/vlib/net/http/request.v  324
-rw-r--r--  v_windows/v/vlib/net/http/request_test.v  138
-rw-r--r--  v_windows/v/vlib/net/http/response.v  152
-rw-r--r--  v_windows/v/vlib/net/http/response_test.v  36
-rw-r--r--  v_windows/v/vlib/net/http/server.v  123
-rw-r--r--  v_windows/v/vlib/net/http/server_test.v  90
-rw-r--r--  v_windows/v/vlib/net/http/status.v  255
-rw-r--r--  v_windows/v/vlib/net/http/status_test.v  49
-rw-r--r--  v_windows/v/vlib/net/http/version.v  40
-rw-r--r--  v_windows/v/vlib/net/ipv6_v6only.h  5
-rw-r--r--  v_windows/v/vlib/net/net_nix.c.v  26
-rw-r--r--  v_windows/v/vlib/net/net_windows.c.v  780
-rw-r--r--  v_windows/v/vlib/net/openssl/c.v  120
-rw-r--r--  v_windows/v/vlib/net/openssl/openssl.v  32
-rw-r--r--  v_windows/v/vlib/net/openssl/ssl_connection.v  268
-rw-r--r--  v_windows/v/vlib/net/smtp/smtp.v  190
-rw-r--r--  v_windows/v/vlib/net/smtp/smtp_test.v  89
-rw-r--r--  v_windows/v/vlib/net/socket_options.c.v  50
-rw-r--r--  v_windows/v/vlib/net/tcp.v  420
-rw-r--r--  v_windows/v/vlib/net/tcp_read_line.v  90
-rw-r--r--  v_windows/v/vlib/net/tcp_simple_client_server_test.v  150
-rw-r--r--  v_windows/v/vlib/net/tcp_test.v  100
-rw-r--r--  v_windows/v/vlib/net/udp.v  289
-rw-r--r--  v_windows/v/vlib/net/udp_test.v  67
-rw-r--r--  v_windows/v/vlib/net/unix/aasocket.c.v  104
-rw-r--r--  v_windows/v/vlib/net/unix/common.v  128
-rw-r--r--  v_windows/v/vlib/net/unix/stream_nix.v  288
-rw-r--r--  v_windows/v/vlib/net/unix/unix_test.v  50
-rw-r--r--  v_windows/v/vlib/net/urllib/urllib.v  1095
-rw-r--r--  v_windows/v/vlib/net/urllib/urllib_test.v  51
-rw-r--r--  v_windows/v/vlib/net/urllib/values.v  87
-rw-r--r--  v_windows/v/vlib/net/util.v  27
-rw-r--r--  v_windows/v/vlib/net/websocket/events.v  227
-rw-r--r--  v_windows/v/vlib/net/websocket/handshake.v  185
-rw-r--r--  v_windows/v/vlib/net/websocket/io.v  100
-rw-r--r--  v_windows/v/vlib/net/websocket/message.v  295
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/README.md  20
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client.v  33
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client_wss.v  35
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_server.v  27
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/docker-compose.yml  21
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/Dockerfile  5
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/check_results.py  46
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingclient.json  22
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingserver.json  14
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/Dockerfile  9
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/check_results.py  35
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/fuzzingserver.json  16
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.crt  19
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.csr  16
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.key  27
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.pem  19
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/local_run/Dockerfile  12
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client.v  33
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client_wss.v  35
-rw-r--r--  v_windows/v/vlib/net/websocket/tests/autobahn/ws_test/Dockerfile  12
-rw-r--r--  v_windows/v/vlib/net/websocket/uri.v  16
-rw-r--r--  v_windows/v/vlib/net/websocket/utils.v  54
-rw-r--r--  v_windows/v/vlib/net/websocket/websocket_client.v  488
-rw-r--r--  v_windows/v/vlib/net/websocket/websocket_nix.c.v  10
-rw-r--r--  v_windows/v/vlib/net/websocket/websocket_server.v  189
-rw-r--r--  v_windows/v/vlib/net/websocket/websocket_test.v  122
-rw-r--r--  v_windows/v/vlib/net/websocket/websocket_windows.c.v  12
102 files changed, 12637 insertions, 0 deletions
diff --git a/v_windows/v/vlib/net/aasocket.c.v b/v_windows/v/vlib/net/aasocket.c.v
new file mode 100644
index 0000000..60418c3
--- /dev/null
+++ b/v_windows/v/vlib/net/aasocket.c.v
@@ -0,0 +1,104 @@
+module net
+
+$if windows {
+ // This is mainly here for tcc on windows
+ // which apparently doesn't have this definition
+ #include "@VROOT/vlib/net/ipv6_v6only.h"
+}
+
+// Select represents a select operation
+enum Select {
+ read
+ write
+ except
+}
+
+// SocketType lists the available socket types
+pub enum SocketType {
+ udp = C.SOCK_DGRAM
+ tcp = C.SOCK_STREAM
+ seqpacket = C.SOCK_SEQPACKET
+}
+
+// AddrFamily lists the available address families
+pub enum AddrFamily {
+ unix = C.AF_UNIX
+ ip = C.AF_INET
+ ip6 = C.AF_INET6
+ unspec = C.AF_UNSPEC
+}
+
+fn C.socket(domain AddrFamily, typ SocketType, protocol int) int
+
+// fn C.setsockopt(sockfd int, level int, optname int, optval voidptr, optlen C.socklen_t) int
+fn C.setsockopt(sockfd int, level int, optname int, optval voidptr, optlen u32) int
+
+fn C.htonl(hostlong u32) int
+
+fn C.htons(netshort u16) int
+
+// fn C.bind(sockfd int, addr &C.sockaddr, addrlen C.socklen_t) int
+// use voidptr for arg 2 because sockaddr is a generic descriptor for any kind of socket operation,
+// it can also take sockaddr_in depending on the type of socket used in arg 1
+fn C.bind(sockfd int, addr &Addr, addrlen u32) int
+
+fn C.listen(sockfd int, backlog int) int
+
+// fn C.accept(sockfd int, addr &C.sockaddr, addrlen &C.socklen_t) int
+fn C.accept(sockfd int, addr &Addr, addrlen &u32) int
+
+fn C.getaddrinfo(node &char, service &char, hints &C.addrinfo, res &&C.addrinfo) int
+
+fn C.freeaddrinfo(info &C.addrinfo)
+
+// fn C.connect(sockfd int, addr &C.sockaddr, addrlen C.socklen_t) int
+fn C.connect(sockfd int, addr &Addr, addrlen u32) int
+
+// fn C.send(sockfd int, buf voidptr, len size_t, flags int) size_t
+fn C.send(sockfd int, buf voidptr, len size_t, flags int) int
+
+// fn C.sendto(sockfd int, buf voidptr, len size_t, flags int, dest_add &C.sockaddr, addrlen C.socklen_t) size_t
+fn C.sendto(sockfd int, buf voidptr, len size_t, flags int, dest_add &Addr, addrlen u32) int
+
+// fn C.recv(sockfd int, buf voidptr, len size_t, flags int) size_t
+fn C.recv(sockfd int, buf voidptr, len size_t, flags int) int
+
+// fn C.recvfrom(sockfd int, buf voidptr, len size_t, flags int, src_addr &C.sockaddr, addrlen &C.socklen_t) size_t
+fn C.recvfrom(sockfd int, buf voidptr, len size_t, flags int, src_addr &Addr, addrlen &u32) int
+
+fn C.shutdown(socket int, how int) int
+
+fn C.ntohs(netshort u16) int
+
+// fn C.getpeername(sockfd int, addr &C.sockaddr, addlen &C.socklen_t) int
+fn C.getpeername(sockfd int, addr &Addr, addlen &u32) int
+
+fn C.inet_ntop(af AddrFamily, src voidptr, dst &char, dst_size int) &char
+
+fn C.WSAAddressToStringA(lpsaAddress &Addr, dwAddressLength u32, lpProtocolInfo voidptr, lpszAddressString &char, lpdwAddressStringLength &u32) int
+
+// fn C.getsockname(sockfd int, addr &C.sockaddr, addrlen &C.socklen_t) int
+fn C.getsockname(sockfd int, addr &C.sockaddr, addrlen &u32) int
+
+fn C.getsockopt(sockfd int, level int, optname int, optval voidptr, optlen &u32) int
+
+// defined in builtin
+// fn C.read() int
+// fn C.close() int
+
+fn C.ioctlsocket(s int, cmd int, argp &u32) int
+
+fn C.fcntl(fd int, cmd int, arg ...voidptr) int
+
+fn C.@select(ndfs int, readfds &C.fd_set, writefds &C.fd_set, exceptfds &C.fd_set, timeout &C.timeval) int
+
+fn C.FD_ZERO(fdset &C.fd_set)
+
+fn C.FD_SET(fd int, fdset &C.fd_set)
+
+fn C.FD_ISSET(fd int, fdset &C.fd_set) bool
+
+fn C.inet_pton(family AddrFamily, saddr &char, addr voidptr) int
+
+[typedef]
+pub struct C.fd_set {}
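A minimal sketch of how the enums and C bindings declared above are typically combined; `new_raw_tcp_socket` is a hypothetical helper name used only for illustration (the real constructors live elsewhere in the module), and `socket_error` is the wrapper defined later in errors.v:

```v
// hypothetical helper inside module net, for illustration only
fn new_raw_tcp_socket() ?int {
	// AddrFamily.ip / SocketType.tcp map to AF_INET / SOCK_STREAM
	handle := socket_error(C.socket(AddrFamily.ip, SocketType.tcp, 0)) ?
	return handle
}
```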
diff --git a/v_windows/v/vlib/net/address.v b/v_windows/v/vlib/net/address.v
new file mode 100644
index 0000000..af1a000
--- /dev/null
+++ b/v_windows/v/vlib/net/address.v
@@ -0,0 +1,258 @@
+module net
+
+import io.util
+import os
+
+union AddrData {
+ Unix
+ Ip
+ Ip6
+}
+
+const (
+ addr_ip6_any = [16]byte{init: byte(0)}
+ addr_ip_any = [4]byte{init: byte(0)}
+)
+
+fn new_ip6(port u16, addr [16]byte) Addr {
+ a := Addr{
+ f: u16(AddrFamily.ip6)
+ addr: AddrData{
+ Ip6: Ip6{
+ port: u16(C.htons(port))
+ }
+ }
+ }
+
+ copy(a.addr.Ip6.addr[0..], addr[0..])
+
+ return a
+}
+
+fn new_ip(port u16, addr [4]byte) Addr {
+ a := Addr{
+ f: u16(AddrFamily.ip)
+ addr: AddrData{
+ Ip: Ip{
+ port: u16(C.htons(port))
+ }
+ }
+ }
+
+ copy(a.addr.Ip6.addr[0..], addr[0..])
+
+ return a
+}
+
+fn temp_unix() ?Addr {
+ // create a temp file to get a filename
+ // close it
+ // remove it
+ // then reuse the filename
+ mut file, filename := util.temp_file() ?
+ file.close()
+ os.rm(filename) ?
+ addrs := resolve_addrs(filename, .unix, .udp) ?
+ return addrs[0]
+}
+
+pub fn (a Addr) family() AddrFamily {
+ return AddrFamily(a.f)
+}
+
+const (
+ max_ip_len = 24
+ max_ip6_len = 46
+)
+
+fn (a Ip) str() string {
+ buf := []byte{len: net.max_ip_len, init: 0}
+
+ res := &char(C.inet_ntop(.ip, &a.addr, buf.data, buf.len))
+
+ if res == 0 {
+ return '<Unknown>'
+ }
+
+ saddr := buf.bytestr()
+ port := C.ntohs(a.port)
+
+ return '$saddr:$port'
+}
+
+fn (a Ip6) str() string {
+ buf := []byte{len: net.max_ip6_len, init: 0}
+
+ res := &char(C.inet_ntop(.ip6, &a.addr, buf.data, buf.len))
+
+ if res == 0 {
+ return '<Unknown>'
+ }
+
+ saddr := buf.bytestr()
+ port := C.ntohs(a.port)
+
+ return '[$saddr]:$port'
+}
+
+const aoffset = __offsetof(Addr, addr)
+
+fn (a Addr) len() u32 {
+ match a.family() {
+ .ip {
+ return sizeof(Ip) + net.aoffset
+ }
+ .ip6 {
+ return sizeof(Ip6) + net.aoffset
+ }
+ .unix {
+ return sizeof(Unix) + net.aoffset
+ }
+ else {
+ panic('Unknown address family')
+ }
+ }
+}
+
+pub fn resolve_addrs(addr string, family AddrFamily, @type SocketType) ?[]Addr {
+ match family {
+ .ip, .ip6, .unspec {
+ return resolve_ipaddrs(addr, family, @type)
+ }
+ .unix {
+ resolved := Unix{}
+
+ if addr.len > max_unix_path {
+ return error('net: resolve_addrs Unix socket address is too long')
+ }
+
+ // Copy the unix path into the address struct
+ unsafe {
+ C.memcpy(&resolved.path, addr.str, addr.len)
+ }
+
+ return [Addr{
+ f: u16(AddrFamily.unix)
+ addr: AddrData{
+ Unix: resolved
+ }
+ }]
+ }
+ }
+}
+
+pub fn resolve_addrs_fuzzy(addr string, @type SocketType) ?[]Addr {
+ if addr.len == 0 {
+ return none
+ }
+
+ // Use a small heuristic to figure out what address family this is
+ // (out of the ones that we support)
+
+ if addr.contains(':') {
+ // Colon is a reserved character in unix paths
+ // so this must be an ip address
+ return resolve_addrs(addr, .unspec, @type)
+ }
+
+ return resolve_addrs(addr, .unix, @type)
+}
+
+pub fn resolve_ipaddrs(addr string, family AddrFamily, typ SocketType) ?[]Addr {
+ address, port := split_address(addr) ?
+
+ if addr[0] == `:` {
+ // Use in6addr_any
+ return [new_ip6(port, net.addr_ip6_any)]
+ }
+
+ mut hints := C.addrinfo{
+ // ai_family: int(family)
+ // ai_socktype: int(typ)
+ // ai_flags: C.AI_PASSIVE
+ }
+ hints.ai_family = int(family)
+ hints.ai_socktype = int(typ)
+ hints.ai_flags = C.AI_PASSIVE
+ hints.ai_protocol = 0
+ hints.ai_addrlen = 0
+ hints.ai_addr = voidptr(0)
+ hints.ai_canonname = voidptr(0)
+ hints.ai_next = voidptr(0)
+ results := &C.addrinfo(0)
+
+ sport := '$port'
+
+ // This might look silly but is recommended by MSDN
+ $if windows {
+ socket_error(0 - C.getaddrinfo(&char(address.str), &char(sport.str), &hints, &results)) ?
+ } $else {
+ x := C.getaddrinfo(&char(address.str), &char(sport.str), &hints, &results)
+ wrap_error(x) ?
+ }
+
+ defer {
+ C.freeaddrinfo(results)
+ }
+
+ // Now that we have our linked list of addresses
+ // convert them into an array
+ mut addresses := []Addr{}
+
+ for result := results; !isnil(result); result = result.ai_next {
+ match AddrFamily(result.ai_family) {
+ .ip, .ip6 {
+ new_addr := Addr{
+ addr: AddrData{
+ Ip6: Ip6{}
+ }
+ }
+ unsafe {
+ C.memcpy(&new_addr, result.ai_addr, result.ai_addrlen)
+ }
+ addresses << new_addr
+ }
+ else {
+ panic('Unexpected address family $result.ai_family')
+ }
+ }
+ }
+
+ return addresses
+}
+
+fn (a Addr) str() string {
+ match AddrFamily(a.f) {
+ .ip {
+ unsafe {
+ return a.addr.Ip.str()
+ }
+ }
+ .ip6 {
+ unsafe {
+ return a.addr.Ip6.str()
+ }
+ }
+ .unix {
+ unsafe {
+ return tos_clone(a.addr.Unix.path[0..max_unix_path].data)
+ }
+ }
+ .unspec {
+ return '<.unspec>'
+ }
+ }
+}
+
+pub fn addr_from_socket_handle(handle int) Addr {
+ addr := Addr{
+ addr: AddrData{
+ Ip6: Ip6{}
+ }
+ }
+ size := sizeof(addr)
+
+ C.getsockname(handle, voidptr(&addr), &size)
+
+ return addr
+}
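As a hedged usage sketch of the resolver above: `resolve_addrs_fuzzy` takes the IP path whenever the string contains a colon, and each returned `Addr` prints through its `str` method. The host and port here are example values only:

```v oksyntax
import net

fn main() {
	// 'vlang.io:80' is just an example host:port pair
	addrs := net.resolve_addrs_fuzzy('vlang.io:80', .tcp) or { panic(err) }
	for addr in addrs {
		println(addr) // uses Addr.str(), e.g. 1.2.3.4:80 or [::1]:80
	}
}
```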
diff --git a/v_windows/v/vlib/net/address_darwin.c.v b/v_windows/v/vlib/net/address_darwin.c.v
new file mode 100644
index 0000000..041ccf2
--- /dev/null
+++ b/v_windows/v/vlib/net/address_darwin.c.v
@@ -0,0 +1,74 @@
+module net
+
+const max_unix_path = 104
+
+struct C.addrinfo {
+mut:
+ ai_family int
+ ai_socktype int
+ ai_flags int
+ ai_protocol int
+ ai_addrlen int
+ ai_addr voidptr
+ ai_canonname voidptr
+ ai_next voidptr
+}
+
+struct C.sockaddr_in6 {
+mut:
+ // 1 + 1 + 2 + 4 + 16 + 4 = 28;
+ sin6_len byte // 1
+ sin6_family byte // 1
+ sin6_port u16 // 2
+ sin6_flowinfo u32 // 4
+ sin6_addr [16]byte // 16
+ sin6_scope_id u32 // 4
+}
+
+struct C.sockaddr_in {
+mut:
+ sin_len byte
+ sin_family byte
+ sin_port u16
+ sin_addr u32
+ sin_zero [8]char
+}
+
+struct C.sockaddr_un {
+mut:
+ sun_len byte
+ sun_family byte
+ sun_path [max_unix_path]char
+}
+
+[_pack: '1']
+struct Ip6 {
+ port u16
+ flow_info u32
+ addr [16]byte
+ scope_id u32
+}
+
+[_pack: '1']
+struct Ip {
+ port u16
+ addr [4]byte
+ // Pad to size so that socket functions
+ // don't complain to us (see in.h and bind())
+ // TODO(emily): I would really like to use
+ // some constant calculations here
+ // so that this doesn't have to be hardcoded
+ sin_pad [8]byte
+}
+
+struct Unix {
+ path [max_unix_path]char
+}
+
+[_pack: '1']
+struct Addr {
+pub:
+ len u8
+ f u8
+ addr AddrData
+}
diff --git a/v_windows/v/vlib/net/address_default.c.v b/v_windows/v/vlib/net/address_default.c.v
new file mode 100644
index 0000000..95942cc
--- /dev/null
+++ b/v_windows/v/vlib/net/address_default.c.v
@@ -0,0 +1,32 @@
+module net
+
+const max_unix_path = 104
+
+struct C.addrinfo {
+mut:
+ ai_family int
+ ai_socktype int
+ ai_flags int
+ ai_protocol int
+ ai_addrlen int
+ ai_addr voidptr
+ ai_canonname voidptr
+ ai_next voidptr
+}
+
+struct C.sockaddr_in {
+ sin_family byte
+ sin_port u16
+ sin_addr u32
+}
+
+struct C.sockaddr_in6 {
+ sin6_family byte
+ sin6_port u16
+ sin6_addr [4]u32
+}
+
+struct C.sockaddr_un {
+ sun_family byte
+ sun_path [max_unix_path]char
+}
diff --git a/v_windows/v/vlib/net/address_freebsd.c.v b/v_windows/v/vlib/net/address_freebsd.c.v
new file mode 100644
index 0000000..bc84665
--- /dev/null
+++ b/v_windows/v/vlib/net/address_freebsd.c.v
@@ -0,0 +1,77 @@
+module net
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+const max_unix_path = 104
+
+struct C.addrinfo {
+mut:
+ ai_family int
+ ai_socktype int
+ ai_flags int
+ ai_protocol int
+ ai_addrlen int
+ ai_addr voidptr
+ ai_canonname voidptr
+ ai_next voidptr
+}
+
+struct C.sockaddr_in6 {
+mut:
+ // 1 + 1 + 2 + 4 + 16 + 4 = 28;
+ sin6_len byte // 1
+ sin6_family byte // 1
+ sin6_port u16 // 2
+ sin6_flowinfo u32 // 4
+ sin6_addr [16]byte // 16
+ sin6_scope_id u32 // 4
+}
+
+struct C.sockaddr_in {
+mut:
+ sin_len byte
+ sin_family byte
+ sin_port u16
+ sin_addr u32
+ sin_zero [8]char
+}
+
+struct C.sockaddr_un {
+mut:
+ sun_len byte
+ sun_family byte
+ sun_path [max_unix_path]char
+}
+
+[_pack: '1']
+struct Ip6 {
+ port u16
+ flow_info u32
+ addr [16]byte
+ scope_id u32
+}
+
+[_pack: '1']
+struct Ip {
+ port u16
+ addr [4]byte
+ // Pad to size so that socket functions
+ // don't complain to us (see in.h and bind())
+ // TODO(emily): I would really like to use
+ // some constant calculations here
+ // so that this doesn't have to be hardcoded
+ sin_pad [8]byte
+}
+
+struct Unix {
+ path [max_unix_path]char
+}
+
+[_pack: '1']
+struct Addr {
+pub:
+ len u8
+ f u8
+ addr AddrData
+}
diff --git a/v_windows/v/vlib/net/address_linux.c.v b/v_windows/v/vlib/net/address_linux.c.v
new file mode 100644
index 0000000..a6f7a97
--- /dev/null
+++ b/v_windows/v/vlib/net/address_linux.c.v
@@ -0,0 +1,63 @@
+module net
+
+const max_unix_path = 108
+
+struct C.addrinfo {
+mut:
+ ai_family int
+ ai_socktype int
+ ai_flags int
+ ai_protocol int
+ ai_addrlen int
+ ai_addr voidptr
+ ai_canonname voidptr
+ ai_next voidptr
+}
+
+struct C.sockaddr_in {
+ sin_family u16
+ sin_port u16
+ sin_addr u32
+}
+
+struct C.sockaddr_in6 {
+ sin6_family u16
+ sin6_port u16
+ sin6_addr [4]u32
+}
+
+struct C.sockaddr_un {
+ sun_family u16
+ sun_path [max_unix_path]char
+}
+
+[_pack: '1']
+struct Ip6 {
+ port u16
+ flow_info u32
+ addr [16]byte
+ scope_id u32
+}
+
+[_pack: '1']
+struct Ip {
+ port u16
+ addr [4]byte
+ // Pad to size so that socket functions
+ // don't complain to us (see in.h and bind())
+ // TODO(emily): I would really like to use
+ // some constant calculations here
+ // so that this doesn't have to be hardcoded
+ sin_pad [8]byte
+}
+
+struct Unix {
+ path [max_unix_path]byte
+}
+
+[_pack: '1']
+struct Addr {
+pub:
+ f u16
+ addr AddrData
+}
diff --git a/v_windows/v/vlib/net/address_test.v b/v_windows/v/vlib/net/address_test.v
new file mode 100644
index 0000000..5b3aab0
--- /dev/null
+++ b/v_windows/v/vlib/net/address_test.v
@@ -0,0 +1,98 @@
+module net
+
+$if windows {
+ $if msvc {
+ // Force these to be included before afunix!
+ #include <winsock2.h>
+ #include <ws2tcpip.h>
+ #include <afunix.h>
+ } $else {
+ #include "@VROOT/vlib/net/afunix.h"
+ }
+} $else {
+ #include <sys/un.h>
+}
+
+fn test_diagnostics() {
+ dump(aoffset)
+ eprintln('--------')
+ in6 := C.sockaddr_in6{}
+ our_ip6 := Ip6{}
+ $if macos {
+ dump(__offsetof(C.sockaddr_in6, sin6_len))
+ }
+ dump(__offsetof(C.sockaddr_in6, sin6_family))
+ dump(__offsetof(C.sockaddr_in6, sin6_port))
+ dump(__offsetof(C.sockaddr_in6, sin6_addr))
+ $if macos {
+ dump(sizeof(in6.sin6_len))
+ }
+ dump(sizeof(in6.sin6_family))
+ dump(sizeof(in6.sin6_port))
+ dump(sizeof(in6.sin6_addr))
+ dump(sizeof(in6))
+ eprintln('')
+ dump(__offsetof(Ip6, port))
+ dump(__offsetof(Ip6, addr))
+ dump(sizeof(our_ip6.port))
+ dump(sizeof(our_ip6.addr))
+ dump(sizeof(our_ip6))
+ eprintln('--------')
+ in4 := C.sockaddr_in{}
+ our_ip4 := Ip{}
+ $if macos {
+ dump(__offsetof(C.sockaddr_in, sin_len))
+ }
+ dump(__offsetof(C.sockaddr_in, sin_family))
+ dump(__offsetof(C.sockaddr_in, sin_port))
+ dump(__offsetof(C.sockaddr_in, sin_addr))
+ $if macos {
+ dump(sizeof(in4.sin_len))
+ }
+ dump(sizeof(in4.sin_family))
+ dump(sizeof(in4.sin_port))
+ dump(sizeof(in4.sin_addr))
+ dump(sizeof(in4))
+ eprintln('')
+ dump(__offsetof(Ip, port))
+ dump(__offsetof(Ip, addr))
+ dump(sizeof(our_ip4.port))
+ dump(sizeof(our_ip4.addr))
+ dump(sizeof(our_ip4))
+ eprintln('--------')
+ dump(__offsetof(C.sockaddr_un, sun_path))
+ dump(__offsetof(Unix, path))
+ eprintln('--------')
+}
+
+fn test_sizes_unix_sun_path() {
+ x1 := C.sockaddr_un{}
+ x2 := Unix{}
+ assert sizeof(x1.sun_path) == sizeof(x2.path)
+}
+
+fn test_offsets_ipv6() {
+ assert __offsetof(C.sockaddr_in6, sin6_addr) == __offsetof(Ip6, addr) + aoffset
+ assert __offsetof(C.sockaddr_in6, sin6_port) == __offsetof(Ip6, port) + aoffset
+}
+
+fn test_offsets_ipv4() {
+ assert __offsetof(C.sockaddr_in, sin_addr) == __offsetof(Ip, addr) + aoffset
+ assert __offsetof(C.sockaddr_in, sin_port) == __offsetof(Ip, port) + aoffset
+}
+
+fn test_offsets_unix() {
+ assert __offsetof(C.sockaddr_un, sun_path) == __offsetof(Unix, path) + aoffset
+}
+
+fn test_sizes_ipv6() {
+ assert sizeof(C.sockaddr_in6) == sizeof(Ip6) + aoffset
+}
+
+fn test_sizes_ipv4() {
+ assert sizeof(C.sockaddr_in) == sizeof(Ip) + aoffset
+}
+
+fn test_sizes_unix() {
+ assert sizeof(C.sockaddr_un) == sizeof(Unix) + aoffset
+}
diff --git a/v_windows/v/vlib/net/address_windows.c.v b/v_windows/v/vlib/net/address_windows.c.v
new file mode 100644
index 0000000..e50fdb4
--- /dev/null
+++ b/v_windows/v/vlib/net/address_windows.c.v
@@ -0,0 +1,58 @@
+module net
+
+const max_unix_path = 108
+
+struct C.addrinfo {
+mut:
+ ai_family int
+ ai_socktype int
+ ai_flags int
+ ai_protocol int
+ ai_addrlen int
+ ai_addr voidptr
+ ai_canonname voidptr
+ ai_next voidptr
+}
+
+struct C.sockaddr_in {
+ sin_family u16
+ sin_port u16
+ sin_addr u32
+}
+
+struct C.sockaddr_in6 {
+ sin6_family u16
+ sin6_port u16
+ sin6_addr [4]u32
+}
+
+struct C.sockaddr_un {
+ sun_family u16
+ sun_path [max_unix_path]char
+}
+
+[_pack: '1']
+struct Ip6 {
+ port u16
+ flow_info u32
+ addr [16]byte
+ scope_id u32
+}
+
+[_pack: '1']
+struct Ip {
+ port u16
+ addr [4]byte
+ sin_pad [8]byte
+}
+
+struct Unix {
+ path [max_unix_path]byte
+}
+
+[_pack: '1']
+struct Addr {
+pub:
+ f u16
+ addr AddrData
+}
diff --git a/v_windows/v/vlib/net/afunix.h b/v_windows/v/vlib/net/afunix.h
new file mode 100644
index 0000000..5fedca2
--- /dev/null
+++ b/v_windows/v/vlib/net/afunix.h
@@ -0,0 +1,26 @@
+/**
+ * This file has no copyright assigned and is placed in the Public Domain.
+ * This file is part of the mingw-w64 runtime package.
+ * No warranty is given; refer to the file DISCLAIMER.PD within this package.
+ */
+
+#ifndef _AFUNIX_
+#define _AFUNIX_
+
+#define UNIX_PATH_MAX 108
+
+#if !defined(ADDRESS_FAMILY)
+#define UNDEF_ADDRESS_FAMILY
+#define ADDRESS_FAMILY unsigned short
+#endif
+
+typedef struct sockaddr_un {
+ ADDRESS_FAMILY sun_family;
+ char sun_path[UNIX_PATH_MAX];
+} SOCKADDR_UN, *PSOCKADDR_UN;
+
+#if defined(UNDEF_ADDRESS_FAMILY)
+#undef ADDRESS_FAMILY
+#endif
+
+#endif /* _AFUNIX_ */
diff --git a/v_windows/v/vlib/net/common.v b/v_windows/v/vlib/net/common.v
new file mode 100644
index 0000000..aab8f16
--- /dev/null
+++ b/v_windows/v/vlib/net/common.v
@@ -0,0 +1,129 @@
+module net
+
+import time
+
+// no_deadline should be given to functions when no deadline is wanted (i.e. all functions
+// return instantly)
+const no_deadline = time.Time{
+ unix: 0
+}
+
+// no_timeout should be given to functions when no timeout is wanted (i.e. all functions
+// return instantly)
+pub const no_timeout = time.Duration(0)
+
+// infinite_timeout should be given to functions when an infinite_timeout is wanted (i.e. functions
+// only ever return with data)
+pub const infinite_timeout = time.infinite
+
+// shutdown shuts down a socket and closes it
+fn shutdown(handle int) ? {
+ $if windows {
+ C.shutdown(handle, C.SD_BOTH)
+ socket_error(C.closesocket(handle)) ?
+ } $else {
+ C.shutdown(handle, C.SHUT_RDWR)
+ socket_error(C.close(handle)) ?
+ }
+}
+
+// Select waits for an io operation (specified by parameter `test`) to be available
+fn @select(handle int, test Select, timeout time.Duration) ?bool {
+ set := C.fd_set{}
+
+ C.FD_ZERO(&set)
+ C.FD_SET(handle, &set)
+
+ seconds := timeout / time.second
+ microseconds := time.Duration(timeout - (seconds * time.second)).microseconds()
+
+ mut tt := C.timeval{
+ tv_sec: u64(seconds)
+ tv_usec: u64(microseconds)
+ }
+
+ mut timeval_timeout := &tt
+
+ // infinite timeout is signaled by passing null as the timeout to
+ // select
+ if timeout == net.infinite_timeout {
+ timeval_timeout = &C.timeval(0)
+ }
+
+ match test {
+ .read {
+ socket_error(C.@select(handle + 1, &set, C.NULL, C.NULL, timeval_timeout)) ?
+ }
+ .write {
+ socket_error(C.@select(handle + 1, C.NULL, &set, C.NULL, timeval_timeout)) ?
+ }
+ .except {
+ socket_error(C.@select(handle + 1, C.NULL, C.NULL, &set, timeval_timeout)) ?
+ }
+ }
+
+ return C.FD_ISSET(handle, &set)
+}
+
+// select_with_retry will retry the select if it fails due to an
+// interrupted system call. This can happen with signals, for
+// example the Boehm GC uses signals internally during garbage
+// collection
+[inline]
+fn select_with_retry(handle int, test Select, timeout time.Duration) ?bool {
+ mut retries := 10
+ for retries > 0 {
+ ready := @select(handle, test, timeout) or {
+ if err.code == 4 {
+ // signal! let's retry at most 10 times
+ // suspend the thread with sleep to let the GC get
+ // cycles in case the Boehm GC is interrupting
+ time.sleep(1 * time.millisecond)
+ retries -= 1
+ continue
+ }
+ // we got other error
+ return err
+ }
+ return ready
+ }
+ return error('failed to @select more than 10 times due to interrupted system calls')
+}
+
+// wait_for_common wraps the common wait code
+fn wait_for_common(handle int, deadline time.Time, timeout time.Duration, test Select) ? {
+ if deadline.unix == 0 {
+ // do not accept negative timeout
+ if timeout < 0 {
+ return err_timed_out
+ }
+ ready := select_with_retry(handle, test, timeout) ?
+ if ready {
+ return
+ }
+ return err_timed_out
+ }
+ // Convert the deadline into a timeout
+ // and use that
+ d_timeout := deadline.unix - time.now().unix
+ if d_timeout < 0 {
+ // deadline is in the past so this has already
+ // timed out
+ return err_timed_out
+ }
+ ready := select_with_retry(handle, test, timeout) ?
+ if ready {
+ return
+ }
+ return err_timed_out
+}
+
+// wait_for_write waits for a write io operation to be available
+fn wait_for_write(handle int, deadline time.Time, timeout time.Duration) ? {
+ return wait_for_common(handle, deadline, timeout, .write)
+}
+
+// wait_for_read waits for a read io operation to be available
+fn wait_for_read(handle int, deadline time.Time, timeout time.Duration) ? {
+ return wait_for_common(handle, deadline, timeout, .read)
+}
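A rough sketch of how these helpers combine, assuming it lives inside module net (where `time` is already imported); `read_with_timeout`, `handle` and `buf` are illustrative names only, not functions defined in this file:

```v
// hypothetical helper: wait for readability with a relative timeout, then read
fn read_with_timeout(handle int, mut buf []byte) ?int {
	// pass no_deadline so only the 500 ms relative timeout applies
	wait_for_read(handle, no_deadline, 500 * time.millisecond) ?
	return socket_error(C.recv(handle, voidptr(buf.data), buf.len, 0))
}
```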
diff --git a/v_windows/v/vlib/net/conv/conv.c.v b/v_windows/v/vlib/net/conv/conv.c.v
new file mode 100644
index 0000000..e29741a
--- /dev/null
+++ b/v_windows/v/vlib/net/conv/conv.c.v
@@ -0,0 +1,21 @@
+module conv
+
+// host to net 32 (htonl)
+pub fn htn32(host &u32) u32 {
+ return C.htonl(host)
+}
+
+// host to net 16 (htons)
+pub fn htn16(host &u16) u16 {
+ return C.htons(host)
+}
+
+// net to host 32 (ntohl)
+pub fn nth32(host &u32) u32 {
+ return C.ntohl(host)
+}
+
+// net to host 16 (ntohs)
+pub fn nth16(host &u16) u16 {
+ return C.ntohs(host)
+}
diff --git a/v_windows/v/vlib/net/conv/conv_default.c.v b/v_windows/v/vlib/net/conv/conv_default.c.v
new file mode 100644
index 0000000..8e8c582
--- /dev/null
+++ b/v_windows/v/vlib/net/conv/conv_default.c.v
@@ -0,0 +1,46 @@
+module conv
+
+#include <arpa/inet.h>
+
+fn C.htonl(host u32) u32
+fn C.htons(host u16) u16
+
+fn C.ntohl(net u32) u32
+fn C.ntohs(net u16) u16
+
+struct Bytes {
+mut:
+ first u32
+ last u32
+}
+
+union LongLong {
+ Bytes
+ ll u64
+}
+
+// host to net 64 (htonll)
+pub fn htn64(host &u64) u64 {
+ mut ll := LongLong{
+ ll: host
+ }
+
+ unsafe {
+ ll.first = htn32(ll.first)
+ ll.last = htn32(ll.last)
+ }
+ return unsafe { ll.ll }
+}
+
+// net to host 64 (ntohll)
+pub fn nth64(net &u64) u64 {
+ mut ll := LongLong{
+ ll: net
+ }
+
+ unsafe {
+ ll.first = nth32(ll.first)
+ ll.last = nth32(ll.last)
+ }
+ return unsafe { ll.ll }
+}
diff --git a/v_windows/v/vlib/net/conv/conv_windows.c.v b/v_windows/v/vlib/net/conv/conv_windows.c.v
new file mode 100644
index 0000000..15827f7
--- /dev/null
+++ b/v_windows/v/vlib/net/conv/conv_windows.c.v
@@ -0,0 +1,21 @@
+module conv
+
+#include <winsock2.h>
+
+fn C.htonll(host u64) u64
+fn C.htonl(host u32) u32
+fn C.htons(host u16) u16
+
+fn C.ntohll(net u64) u64
+fn C.ntohl(net u32) u32
+fn C.ntohs(net u16) u16
+
+// host to net 64 (htonll)
+pub fn htn64(host &u64) u64 {
+ return C.htonll(host)
+}
+
+// net to host 64 (ntohll)
+pub fn nth64(host &u64) u64 {
+ return C.ntohll(host)
+}
diff --git a/v_windows/v/vlib/net/errors.v b/v_windows/v/vlib/net/errors.v
new file mode 100644
index 0000000..f6ada74
--- /dev/null
+++ b/v_windows/v/vlib/net/errors.v
@@ -0,0 +1,70 @@
+module net
+
+const (
+ errors_base = 0
+)
+
+// Well defined errors that are returned from socket functions
+pub const (
+ err_new_socket_failed = error_with_code('net: new_socket failed to create socket',
+ errors_base + 1)
+ err_option_not_settable = error_with_code('net: set_option_xxx option not settable',
+ errors_base + 2)
+ err_option_wrong_type = error_with_code('net: set_option_xxx option wrong type',
+ errors_base + 3)
+ err_port_out_of_range = error_with_code('', errors_base + 5)
+ err_no_udp_remote = error_with_code('', errors_base + 6)
+ err_connect_failed = error_with_code('net: connect failed', errors_base + 7)
+ err_connect_timed_out = error_with_code('net: connect timed out', errors_base + 8)
+ err_timed_out = error_with_code('net: op timed out', errors_base + 9)
+ err_timed_out_code = errors_base + 9
+)
+
+pub fn socket_error(potential_code int) ?int {
+ $if windows {
+ if potential_code < 0 {
+ last_error_int := C.WSAGetLastError()
+ last_error := wsa_error(last_error_int)
+ return error_with_code('net: socket error: ($last_error_int) $last_error',
+ int(last_error))
+ }
+ } $else {
+ if potential_code < 0 {
+ last_error := error_code()
+ return error_with_code('net: socket error: $last_error', last_error)
+ }
+ }
+
+ return potential_code
+}
+
+pub fn wrap_error(error_code int) ? {
+ $if windows {
+ enum_error := wsa_error(error_code)
+ return error_with_code('net: socket error: $enum_error', error_code)
+ } $else {
+ if error_code == 0 {
+ return
+ }
+ return error_with_code('net: socket error: $error_code', error_code)
+ }
+}
+
+// wrap_read_result takes a read result; if it is 0 (graceful
+// connection termination), it returns none
+// e.g. res := wrap_read_result(C.recv(c.sock.handle, voidptr(buf_ptr), len, 0))?
+[inline]
+fn wrap_read_result(result int) ?int {
+ if result == 0 {
+ return none
+ }
+ return result
+}
+
+[inline]
+fn wrap_write_result(result int) ?int {
+ if result == 0 {
+ return none
+ }
+ return result
+}
diff --git a/v_windows/v/vlib/net/ftp/ftp.v b/v_windows/v/vlib/net/ftp/ftp.v
new file mode 100644
index 0000000..41b2cde
--- /dev/null
+++ b/v_windows/v/vlib/net/ftp/ftp.v
@@ -0,0 +1,265 @@
+module ftp
+
+/*
+basic ftp module
+ RFC-959
+ https://tools.ietf.org/html/rfc959
+
+ Methods:
+ ftp.connect(host)
+ ftp.login(user, passw)
+ pwd := ftp.pwd()
+ ftp.cd(folder)
+ dtp := ftp.pasv()
+ ftp.dir()
+ ftp.get(file)
+ dtp.read()
+ dtp.close()
+ ftp.close()
+*/
+import net
+import io
+
+const (
+ connected = 220
+ specify_password = 331
+ logged_in = 230
+ login_first = 503
+ anonymous = 530
+ open_data_connection = 150
+ close_data_connection = 226
+ command_ok = 200
+ denied = 550
+ passive_mode = 227
+ complete = 226
+)
+
+struct DTP {
+mut:
+ conn &net.TcpConn
+ reader io.BufferedReader
+ ip string
+ port int
+}
+
+fn (mut dtp DTP) read() ?[]byte {
+ mut data := []byte{}
+ mut buf := []byte{len: 1024}
+ for {
+ len := dtp.reader.read(mut buf) or { break }
+ if len == 0 {
+ break
+ }
+ data << buf[..len]
+ }
+ return data
+}
+
+fn (mut dtp DTP) close() {
+ dtp.conn.close() or { panic(err) }
+}
+
+struct FTP {
+mut:
+ conn &net.TcpConn
+ reader io.BufferedReader
+ buffer_size int
+}
+
+pub fn new() FTP {
+ mut f := FTP{
+ conn: 0
+ }
+ f.buffer_size = 1024
+ return f
+}
+
+fn (mut zftp FTP) write(data string) ?int {
+ $if debug {
+ println('FTP.v >>> $data')
+ }
+ return zftp.conn.write('$data\r\n'.bytes())
+}
+
+fn (mut zftp FTP) read() ?(int, string) {
+ mut data := zftp.reader.read_line() ?
+ $if debug {
+ println('FTP.v <<< $data')
+ }
+ if data.len < 5 {
+ return 0, ''
+ }
+ code := data[..3].int()
+ if data[3] == `-` {
+ for {
+ data = zftp.reader.read_line() ?
+ if data[..3].int() == code && data[3] != `-` {
+ break
+ }
+ }
+ }
+ return code, data
+}
+
+pub fn (mut zftp FTP) connect(ip string) ?bool {
+ zftp.conn = net.dial_tcp('$ip:21') ?
+ zftp.reader = io.new_buffered_reader(reader: zftp.conn)
+ code, _ := zftp.read() ?
+ if code == ftp.connected {
+ return true
+ }
+ return false
+}
+
+pub fn (mut zftp FTP) login(user string, passwd string) ?bool {
+ zftp.write('USER $user') or {
+ $if debug {
+ println('ERROR sending user')
+ }
+ return false
+ }
+ mut code, _ := zftp.read() ?
+ if code == ftp.logged_in {
+ return true
+ }
+ if code != ftp.specify_password {
+ return false
+ }
+ zftp.write('PASS $passwd') or {
+ $if debug {
+ println('ERROR sending password')
+ }
+ return false
+ }
+ code, _ = zftp.read() ?
+ if code == ftp.logged_in {
+ return true
+ }
+ return false
+}
+
+pub fn (mut zftp FTP) close() ? {
+ zftp.write('QUIT') ?
+ zftp.conn.close() ?
+}
+
+pub fn (mut zftp FTP) pwd() ?string {
+ zftp.write('PWD') ?
+ _, data := zftp.read() ?
+ spl := data.split('"') // "
+ if spl.len >= 2 {
+ return spl[1]
+ }
+ return data
+}
+
+pub fn (mut zftp FTP) cd(dir string) ? {
+ zftp.write('CWD $dir') or { return }
+ mut code, mut data := zftp.read() ?
+ match int(code) {
+ ftp.denied {
+ $if debug {
+ println('CD $dir denied!')
+ }
+ }
+ ftp.complete {
+ code, data = zftp.read() ?
+ }
+ else {}
+ }
+ $if debug {
+ println('CD $data')
+ }
+}
+
+fn new_dtp(msg string) ?&DTP {
+ if !is_dtp_message_valid(msg) {
+ return error('Bad message')
+ }
+ ip, port := get_host_ip_from_dtp_message(msg)
+ mut dtp := &DTP{
+ ip: ip
+ port: port
+ conn: 0
+ }
+ conn := net.dial_tcp('$ip:$port') or { return error('Cannot connect to the data channel') }
+ dtp.conn = conn
+ dtp.reader = io.new_buffered_reader(reader: dtp.conn)
+ return dtp
+}
+
+fn (mut zftp FTP) pasv() ?&DTP {
+ zftp.write('PASV') ?
+ code, data := zftp.read() ?
+ $if debug {
+ println('pass: $data')
+ }
+ if code != ftp.passive_mode {
+ return error('passive mode not allowed')
+ }
+ dtp := new_dtp(data) ?
+ return dtp
+}
+
+pub fn (mut zftp FTP) dir() ?[]string {
+ mut dtp := zftp.pasv() or { return error('Cannot establish data connection') }
+ zftp.write('LIST') ?
+ code, _ := zftp.read() ?
+ if code == ftp.denied {
+ return error('`LIST` denied')
+ }
+ if code != ftp.open_data_connection {
+ return error('Data channel empty')
+ }
+ list_dir := dtp.read() ?
+ result, _ := zftp.read() ?
+ if result != ftp.close_data_connection {
+ println('`LIST` not ok')
+ }
+ dtp.close()
+ mut dir := []string{}
+ sdir := list_dir.bytestr()
+ for lfile in sdir.split('\n') {
+ if lfile.len > 1 {
+ dir << lfile.after(' ').trim_space()
+ }
+ }
+ return dir
+}
+
+pub fn (mut zftp FTP) get(file string) ?[]byte {
+ mut dtp := zftp.pasv() or { return error('Cannot establish data connection') }
+ zftp.write('RETR $file') ?
+ code, _ := zftp.read() ?
+ if code == ftp.denied {
+ return error('Permission denied')
+ }
+ if code != ftp.open_data_connection {
+ return error('Data connection not ready')
+ }
+ blob := dtp.read() ?
+ dtp.close()
+ return blob
+}
+
+fn is_dtp_message_valid(msg string) bool {
+ // An example of message:
+ // '227 Entering Passive Mode (209,132,183,61,48,218)'
+ return msg.contains('(') && msg.contains(')') && msg.contains(',')
+}
+
+fn get_host_ip_from_dtp_message(msg string) (string, int) {
+ mut par_start_idx := -1
+ mut par_end_idx := -1
+ for i, c in msg {
+ if c == `(` {
+ par_start_idx = i + 1
+ } else if c == `)` {
+ par_end_idx = i
+ }
+ }
+ data := msg[par_start_idx..par_end_idx].split(',')
+ ip := data[0..4].join('.')
+ port := data[4].int() * 256 + data[5].int()
+ return ip, port
+}
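A hedged usage sketch matching the method list in the module's header comment; the host, credentials and directory mirror the module's own test (ftp_test.v below) and are only examples:

```v oksyntax
import net.ftp

fn main() {
	ftp_example() or { panic(err) }
}

fn ftp_example() ? {
	mut zftp := ftp.new()
	defer {
		zftp.close() or {}
	}
	// host, credentials and paths are example values, as in ftp_test.v
	connected := zftp.connect('ftp.redhat.com') ?
	assert connected
	logged_in_ok := zftp.login('ftp', 'ftp') ? // anonymous login
	assert logged_in_ok
	println(zftp.pwd() ?)
	zftp.cd('/') ?
	entries := zftp.dir() ?
	println(entries.len)
}
```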
diff --git a/v_windows/v/vlib/net/ftp/ftp_test.v b/v_windows/v/vlib/net/ftp/ftp_test.v
new file mode 100644
index 0000000..a62316d
--- /dev/null
+++ b/v_windows/v/vlib/net/ftp/ftp_test.v
@@ -0,0 +1,50 @@
+import net.ftp
+
+fn test_ftp_cleint() {
+ $if !network ? {
+ return
+ }
+ // NB: this function makes network calls to external servers,
+ // that is why it is not a very good idea to run it in CI.
+ // If you want to run it manually, use:
+ // `v -d network vlib/net/ftp/ftp_test.v`
+ ftp_client_test_inside() or { panic(err) }
+}
+
+fn ftp_client_test_inside() ? {
+ mut zftp := ftp.new()
+ // eprintln(zftp)
+ defer {
+ zftp.close() or { panic(err) }
+ }
+ connect_result := zftp.connect('ftp.redhat.com') ?
+ assert connect_result
+ login_result := zftp.login('ftp', 'ftp') ?
+ assert login_result
+ pwd := zftp.pwd() ?
+ assert pwd.len > 0
+ zftp.cd('/') or {
+ assert false
+ return
+ }
+ dir_list1 := zftp.dir() or {
+ assert false
+ return
+ }
+ assert dir_list1.len > 0
+ zftp.cd('/suse/linux/enterprise/11Server/en/SAT-TOOLS/SRPMS/') or {
+ assert false
+ return
+ }
+ dir_list2 := zftp.dir() or {
+ assert false
+ return
+ }
+ assert dir_list2.len > 0
+ assert dir_list2.contains('katello-host-tools-3.3.5-8.sles11_4sat.src.rpm')
+ blob := zftp.get('katello-host-tools-3.3.5-8.sles11_4sat.src.rpm') or {
+ assert false
+ return
+ }
+ assert blob.len > 0
+}
diff --git a/v_windows/v/vlib/net/html/README.md b/v_windows/v/vlib/net/html/README.md
new file mode 100644
index 0000000..a92a6e6
--- /dev/null
+++ b/v_windows/v/vlib/net/html/README.md
@@ -0,0 +1,16 @@
+net/html is an HTML parser written in pure V.
+
+## Usage
+```v oksyntax
+import net.html
+
+fn main() {
+ doc := html.parse('<html><body><h1 class="title">Hello world!</h1></body></html>')
+ tag := doc.get_tag('h1')[0] // <h1>Hello world!</h1>
+ println(tag.name) // h1
+ println(tag.content) // Hello world!
+ println(tag.attributes) // {'class':'title'}
+ println(tag.str()) // <h1 class="title">Hello world!</h1>
+}
+```
+More examples can be found in [`parser_test.v`](parser_test.v) and [`html_test.v`](html_test.v).
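A further sketch (the file name is hypothetical), using `parse_file` together with the attribute lookups defined in dom.v:

```v oksyntax
import net.html

fn main() {
	mut doc := html.parse_file('index.html') // hypothetical local file
	for tag in doc.get_tag_by_attribute_value('class', 'title') {
		println(tag.content)
	}
}
```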
diff --git a/v_windows/v/vlib/net/html/data_structures.v b/v_windows/v/vlib/net/html/data_structures.v
new file mode 100644
index 0000000..688b756
--- /dev/null
+++ b/v_windows/v/vlib/net/html/data_structures.v
@@ -0,0 +1,91 @@
+module html
+
+const (
+ null_element = int(0x80000000)
+)
+
+struct Stack {
+mut:
+ elements []int
+ size int
+}
+
+[inline]
+fn is_null(data int) bool {
+ return data == html.null_element
+}
+
+[inline]
+fn (stack Stack) is_empty() bool {
+ return stack.size <= 0
+}
+
+fn (stack Stack) peek() int {
+ return if !stack.is_empty() { stack.elements[stack.size - 1] } else { html.null_element }
+}
+
+fn (mut stack Stack) pop() int {
+ mut to_return := html.null_element
+ if !stack.is_empty() {
+ to_return = stack.elements[stack.size - 1]
+ stack.size--
+ }
+ return to_return
+}
+
+fn (mut stack Stack) push(item int) {
+ if stack.elements.len > stack.size {
+ stack.elements[stack.size] = item
+ } else {
+ stack.elements << item
+ }
+ stack.size++
+}
+
+struct BTree {
+mut:
+ all_tags []Tag
+ node_pointer int
+ childrens [][]int
+ parents []int
+}
+
+fn (mut btree BTree) add_children(tag Tag) int {
+ btree.all_tags << tag
+ if btree.all_tags.len > 1 {
+ for btree.childrens.len <= btree.node_pointer {
+ mut temp_array := btree.childrens
+ temp_array << []int{}
+ btree.childrens = temp_array
+ }
+ btree.childrens[btree.node_pointer] << btree.all_tags.len - 1
+ for btree.parents.len < btree.all_tags.len {
+ mut temp_array := btree.parents
+ temp_array << 0
+ btree.parents = temp_array
+ }
+ btree.parents[btree.all_tags.len - 1] = btree.node_pointer
+ }
+ return btree.all_tags.len - 1
+}
+
+[inline]
+fn (btree BTree) get_children() []int {
+ return btree.childrens[btree.node_pointer]
+}
+
+[inline]
+fn (btree BTree) get_parent() int {
+ return btree.parents[btree.node_pointer]
+}
+
+[inline]
+fn (btree BTree) get_stored() Tag {
+ return btree.all_tags[btree.node_pointer]
+}
+
+fn (mut btree BTree) move_pointer(to int) {
+ if to < btree.all_tags.len {
+ btree.node_pointer = to
+ }
+}
diff --git a/v_windows/v/vlib/net/html/dom.v b/v_windows/v/vlib/net/html/dom.v
new file mode 100644
index 0000000..b145ddc
--- /dev/null
+++ b/v_windows/v/vlib/net/html/dom.v
@@ -0,0 +1,189 @@
+module html
+
+import os
+
+// The W3C Document Object Model (DOM) is a platform and language-neutral
+// interface that allows programs and scripts to dynamically access and
+// update the content, structure, and style of a document.
+//
+// https://www.w3.org/TR/WD-DOM/introduction.html
+pub struct DocumentObjectModel {
+mut:
+ root &Tag
+ constructed bool
+ btree BTree
+ all_tags []&Tag
+ all_attributes map[string][]&Tag
+ close_tags map[string]bool // TODO: add a counter to track how many times each tag is closed, so parsing can be done correctly
+ attributes map[string][]string
+ tag_attributes map[string][][]&Tag
+ tag_type map[string][]&Tag
+ debug_file os.File
+}
+
+[if debug]
+fn (mut dom DocumentObjectModel) print_debug(data string) {
+ $if debug {
+ if data.len > 0 {
+ dom.debug_file.writeln(data) or { panic(err) }
+ }
+ }
+}
+
+[inline]
+fn is_close_tag(tag &Tag) bool {
+ return tag.name.len > 0 && tag.name[0] == `/`
+}
+
+fn (mut dom DocumentObjectModel) where_is(item_name string, attribute_name string) int {
+ if attribute_name !in dom.attributes {
+ dom.attributes[attribute_name] = []string{}
+ }
+ mut string_array := dom.attributes[attribute_name]
+ mut counter := 0
+ for value in string_array {
+ if value == item_name {
+ return counter
+ }
+ counter++
+ }
+ string_array << item_name
+ dom.attributes[attribute_name] = string_array
+ return string_array.len - 1
+}
+
+fn (mut dom DocumentObjectModel) add_tag_attribute(tag &Tag) {
+ for attribute_name, _ in tag.attributes {
+ attribute_value := tag.attributes[attribute_name]
+ location := dom.where_is(attribute_value, attribute_name)
+ if attribute_name !in dom.tag_attributes {
+ dom.tag_attributes[attribute_name] = []
+ }
+ for {
+ mut temp_array := dom.tag_attributes[attribute_name]
+ temp_array << []&Tag{}
+ dom.tag_attributes[attribute_name] = temp_array
+ if location < dom.tag_attributes[attribute_name].len + 1 {
+ break
+ }
+ }
+ mut temp_array := dom.tag_attributes[attribute_name][location]
+ temp_array << tag
+ dom.tag_attributes[attribute_name][location] = temp_array
+ }
+}
+
+fn (mut dom DocumentObjectModel) add_tag_by_type(tag &Tag) {
+ tag_name := tag.name
+ if tag_name !in dom.tag_type {
+ dom.tag_type[tag_name] = [tag]
+ } else {
+ mut temp_array := dom.tag_type[tag_name]
+ temp_array << tag
+ dom.tag_type[tag_name] = temp_array
+ }
+}
+
+fn (mut dom DocumentObjectModel) add_tag_by_attribute(tag &Tag) {
+ for attribute_name in tag.attributes.keys() {
+ if attribute_name !in dom.all_attributes {
+ dom.all_attributes[attribute_name] = [tag]
+ } else {
+ mut temp_array := dom.all_attributes[attribute_name]
+ temp_array << tag
+ dom.all_attributes[attribute_name] = temp_array
+ }
+ }
+}
+
+fn (mut dom DocumentObjectModel) construct(tag_list []&Tag) {
+ dom.constructed = true
+ mut temp_map := map[string]int{}
+ mut temp_int := null_element
+ mut temp_string := ''
+ mut stack := Stack{}
+ dom.btree = BTree{}
+ dom.root = tag_list[0]
+ dom.all_tags = [tag_list[0]]
+ temp_map['0'] = dom.btree.add_children(tag_list[0])
+ stack.push(0)
+ root_index := 0
+ for index := 1; index < tag_list.len; index++ {
+ mut tag := tag_list[index]
+ dom.print_debug(tag.str())
+ if is_close_tag(tag) {
+ temp_int = stack.peek()
+ temp_string = tag.name[1..]
+ for !is_null(temp_int) && temp_string != tag_list[temp_int].name
+ && !tag_list[temp_int].closed {
+ dom.print_debug(temp_string + ' >> ' + tag_list[temp_int].name + ' ' +
+ (temp_string == tag_list[temp_int].name).str())
+ stack.pop()
+ temp_int = stack.peek()
+ }
+ temp_int = stack.peek()
+ temp_int = if !is_null(temp_int) { stack.pop() } else { root_index }
+ if is_null(temp_int) {
+ stack.push(root_index)
+ }
+ dom.print_debug('Removed ' + temp_string + ' -- ' + tag_list[temp_int].name)
+ } else if tag.name.len > 0 {
+ dom.add_tag_attribute(tag) // error here
+ dom.add_tag_by_attribute(tag)
+ dom.add_tag_by_type(tag)
+ dom.all_tags << tag
+ temp_int = stack.peek()
+ if !is_null(temp_int) {
+ dom.btree.move_pointer(temp_map[temp_int.str()])
+ temp_map[index.str()] = dom.btree.add_children(tag)
+ mut temp_tag := tag_list[temp_int]
+ position_in_parent := temp_tag.add_child(tag) // tag_list[temp_int] = temp_tag
+ tag.add_parent(temp_tag, position_in_parent)
+ /*
+ dom.print_debug("Added ${tag.name} as child of '" + tag_list[temp_int].name +
+ "' which now has ${dom.btree.get_children().len} childrens")
+ */
+ dom.print_debug("Added $tag.name as child of '" + temp_tag.name +
+ "' which now has $temp_tag.children.len childrens")
+ } else { // dom.new_root(tag)
+ stack.push(root_index)
+ }
+ temp_string = '/' + tag.name
+ if temp_string in dom.close_tags && !tag.closed { // if tag ends with />
+ dom.print_debug('Pushed ' + temp_string)
+ stack.push(index)
+ }
+ }
+ } // println(tag_list[root_index]) for debug purposes
+ dom.root = tag_list[0]
+}
+
+// get_tag_by_attribute_value retrieves all the tags in the document that have the given attribute name and value.
+pub fn (mut dom DocumentObjectModel) get_tag_by_attribute_value(name string, value string) []&Tag {
+ location := dom.where_is(value, name)
+ return if dom.tag_attributes[name].len > location {
+ dom.tag_attributes[name][location]
+ } else {
+ []&Tag{}
+ }
+}
+
+// get_tag retrieves all the tags in the document that have the given tag name.
+pub fn (dom DocumentObjectModel) get_tag(name string) []&Tag {
+ return if name in dom.tag_type { dom.tag_type[name] } else { []&Tag{} }
+}
+
+// get_tag_by_attribute retrieves all the tags in the document that has the given attribute name.
+pub fn (dom DocumentObjectModel) get_tag_by_attribute(name string) []&Tag {
+ return if name in dom.all_attributes { dom.all_attributes[name] } else { []&Tag{} }
+}
+
+// get_root returns the root of the document.
+pub fn (dom DocumentObjectModel) get_root() &Tag {
+ return dom.root
+}
+
+// get_tags returns all of the tags stored in the document.
+pub fn (dom DocumentObjectModel) get_tags() []&Tag {
+ return dom.all_tags
+}
diff --git a/v_windows/v/vlib/net/html/dom_test.v b/v_windows/v/vlib/net/html/dom_test.v
new file mode 100644
index 0000000..d4fd292
--- /dev/null
+++ b/v_windows/v/vlib/net/html/dom_test.v
@@ -0,0 +1,56 @@
+module html
+
+import strings
+
+fn generate_temp_html() string {
+ mut temp_html := strings.new_builder(200)
+ temp_html.write_string('<!doctype html><html><head><title>Giant String</title></head><body>')
+ for counter := 0; counter < 4; counter++ {
+ temp_html.write_string("<div id='name_$counter' ")
+ temp_html.write_string("class='several-$counter'>Look at $counter</div>")
+ }
+ temp_html.write_string('</body></html>')
+ return temp_html.str()
+}
+
+fn test_search_by_tag_type() {
+ dom := parse(generate_temp_html())
+ assert dom.get_tag('div').len == 4
+ assert dom.get_tag('head').len == 1
+ assert dom.get_tag('body').len == 1
+}
+
+fn test_search_by_attribute_value() {
+ mut dom := parse(generate_temp_html())
+ // println(temp_html)
+ print('Amount ')
+ println(dom.get_tag_by_attribute_value('id', 'name_0'))
+ assert dom.get_tag_by_attribute_value('id', 'name_0').len == 1
+}
+
+fn test_access_parent() {
+ mut dom := parse(generate_temp_html())
+ div_tags := dom.get_tag('div')
+ parent := div_tags[0].parent
+ assert parent != 0
+ for div_tag in div_tags {
+ assert div_tag.parent == parent
+ }
+}
+
+fn test_search_by_attributes() {
+ dom := parse(generate_temp_html())
+ assert dom.get_tag_by_attribute('id').len == 4
+}
+
+fn test_tags_used() {
+ dom := parse(generate_temp_html())
+ assert dom.get_tags().len == 9
+}
+
+fn test_access_tag_fields() {
+ dom := parse(generate_temp_html())
+ id_tags := dom.get_tag_by_attribute('id')
+ assert id_tags[0].name == 'div'
+ assert id_tags[1].attributes['class'] == 'several-1'
+}
diff --git a/v_windows/v/vlib/net/html/html.v b/v_windows/v/vlib/net/html/html.v
new file mode 100644
index 0000000..293b643
--- /dev/null
+++ b/v_windows/v/vlib/net/html/html.v
@@ -0,0 +1,18 @@
+module html
+
+import os
+
+// parse parses and returns the DOM from the given text.
+pub fn parse(text string) DocumentObjectModel {
+ mut parser := Parser{}
+ parser.parse_html(text)
+ return parser.get_dom()
+}
+
+// parse_file parses and returns the DOM from the contents of a file.
+pub fn parse_file(filename string) DocumentObjectModel {
+ content := os.read_file(filename) or { return DocumentObjectModel{
+ root: &Tag{}
+ } }
+ return parse(content)
+}
diff --git a/v_windows/v/vlib/net/html/html_test.v b/v_windows/v/vlib/net/html/html_test.v
new file mode 100644
index 0000000..51271cd
--- /dev/null
+++ b/v_windows/v/vlib/net/html/html_test.v
@@ -0,0 +1,15 @@
+module html
+
+fn test_parse() {
+ doc := parse('<html><body><h1 class="title">Hello world!</h1></body></html>')
+ tags := doc.get_tag('h1')
+ assert tags.len == 1
+ h1_tag := tags[0] // <h1>Hello world!</h1>
+ assert h1_tag.name == 'h1'
+ assert h1_tag.content == 'Hello world!'
+ assert h1_tag.attributes.len == 2
+ // TODO: do not remove. Attributes must not have an empty attr.
+ // assert h1_tag.attributes.len == 1
+ assert h1_tag.str() == '<h1 class="title" >Hello world!</h1>'
+ // assert h1_tag.str() == '<h1 class="title">Hello world!</h1>'
+}
diff --git a/v_windows/v/vlib/net/html/parser.v b/v_windows/v/vlib/net/html/parser.v
new file mode 100644
index 0000000..5b9bbd1
--- /dev/null
+++ b/v_windows/v/vlib/net/html/parser.v
@@ -0,0 +1,260 @@
+module html
+
+import os
+import strings
+
+struct LexicalAttributes {
+mut:
+ current_tag &Tag
+ open_tag bool
+ open_code bool
+ open_string int
+ open_comment bool
+ is_attribute bool
+ opened_code_type string
+ line_count int
+ lexeme_builder strings.Builder = strings.new_builder(100)
+ code_tags map[string]bool = {
+ 'script': true
+ 'style': true
+ }
+}
+
+// Parser is responsible for reading the HTML strings and converting them into a `DocumentObjectModel`.
+pub struct Parser {
+mut:
+ dom DocumentObjectModel
+ lexical_attributes LexicalAttributes = LexicalAttributes{
+ current_tag: &Tag{}
+ }
+ filename string = 'direct-parse'
+ initialized bool
+ tags []&Tag
+ debug_file os.File
+}
+
+// add_code_tag adds a tag whose content the parser should skip over.
+// For example, if you have HTML or XML with a custom tag like `<script>`, calling
+// `add_code_tag('script')` will make the content of all `script` tags be skipped,
+// so you still have their content, but it will not confuse the parser with its `>` or `<`.
+pub fn (mut parser Parser) add_code_tag(name string) {
+ if name.len <= 0 {
+ return
+ }
+ parser.lexical_attributes.code_tags[name] = true
+}
+
+[inline]
+fn (parser Parser) builder_str() string {
+ return parser.lexical_attributes.lexeme_builder.after(0)
+}
+
+[if debug]
+fn (mut parser Parser) print_debug(data string) {
+ $if debug {
+ if data.len > 0 {
+ parser.debug_file.writeln(data) or { panic(err) }
+ }
+ }
+}
+
+fn (mut parser Parser) verify_end_comment(remove bool) bool {
+ lexeme := parser.builder_str()
+ last := lexeme[lexeme.len - 1]
+ penultimate := lexeme[lexeme.len - 2]
+ is_end_comment := last == `-` && penultimate == `-`
+ if is_end_comment && remove {
+ parser.lexical_attributes.lexeme_builder.go_back(2)
+ }
+ return is_end_comment
+}
+
+fn blank_string(data string) bool {
+ mut count := 0
+ for chr in data {
+ if chr == 9 || chr == 32 {
+ count++
+ }
+ }
+ return count == data.len
+}
+
+// init initializes the parser.
+fn (mut parser Parser) init() {
+ if parser.initialized {
+ return
+ }
+ parser.dom = DocumentObjectModel{
+ debug_file: parser.debug_file
+ root: &Tag{}
+ }
+ parser.add_code_tag('')
+ parser.tags = []&Tag{}
+ parser.dom.close_tags['/!document'] = true
+ parser.lexical_attributes.current_tag = &Tag{}
+ parser.initialized = true
+}
+
+fn (mut parser Parser) generate_tag() {
+ if parser.lexical_attributes.open_tag {
+ return
+ }
+ if parser.lexical_attributes.current_tag.name.len > 0
+ || parser.lexical_attributes.current_tag.content.len > 0 {
+ parser.tags << parser.lexical_attributes.current_tag
+ }
+ parser.lexical_attributes.current_tag = &Tag{}
+}
+
+// split_parse parses the HTML fragment
+pub fn (mut parser Parser) split_parse(data string) {
+ parser.init()
+ for chr in data {
+ // returns true if byte is a " or '
+ is_quote := chr == `"` || chr == `'`
+ string_code := match chr {
+ `"` { 1 } // "
+ `'` { 2 } // '
+ else { 0 }
+ }
+ if parser.lexical_attributes.open_code { // verify everything needed to know whether open_code finishes and whether a string is open inside the code
+ parser.lexical_attributes.lexeme_builder.write_b(chr)
+ if parser.lexical_attributes.open_string > 0
+ && parser.lexical_attributes.open_string == string_code {
+ parser.lexical_attributes.open_string = 0
+ } else if is_quote {
+ parser.lexical_attributes.open_string = string_code
+ } else if chr == `>` { // only run this verification on a `>`; here we check whether the code tag is finished
+ name_close_tag := '</$parser.lexical_attributes.opened_code_type>'
+ if parser.builder_str().to_lower().ends_with(name_close_tag) {
+ parser.lexical_attributes.open_code = false
+ // need to modify lexeme_builder so the script text is added as content in the next loop (and does not cause an error in the dom)
+ parser.lexical_attributes.lexeme_builder.go_back(name_close_tag.len)
+ parser.lexical_attributes.current_tag.closed = true
+ parser.lexical_attributes.current_tag.close_type = .new_tag
+ }
+ }
+ } else if parser.lexical_attributes.open_comment {
+ if chr == `>` && parser.verify_end_comment(false) { // close tag '>'
+ // parser.print_debug(parser.builder_str() + " >> " + parser.lexical_attributes.line_count.str())
+ parser.lexical_attributes.lexeme_builder.go_back_to(0)
+ parser.lexical_attributes.open_comment = false
+ parser.lexical_attributes.open_tag = false
+ } else {
+ parser.lexical_attributes.lexeme_builder.write_b(chr)
+ }
+ } else if parser.lexical_attributes.open_string > 0 {
+ if parser.lexical_attributes.open_string == string_code {
+ parser.lexical_attributes.open_string = 0
+ parser.lexical_attributes.lexeme_builder.write_b(chr)
+ temp_lexeme := parser.builder_str()
+ if parser.lexical_attributes.current_tag.last_attribute != '' {
+ lattr := parser.lexical_attributes.current_tag.last_attribute
+ nval := temp_lexeme.substr(1, temp_lexeme.len - 1)
+ // parser.print_debug(lattr + " = " + temp_lexeme)
+ parser.lexical_attributes.current_tag.attributes[lattr] = nval
+ parser.lexical_attributes.current_tag.last_attribute = ''
+ } else {
+ parser.lexical_attributes.current_tag.attributes[temp_lexeme.to_lower()] = '' // parser.print_debug(temp_lexeme)
+ }
+ parser.lexical_attributes.lexeme_builder.go_back_to(0)
+ } else {
+ parser.lexical_attributes.lexeme_builder.write_b(chr)
+ }
+ } else if parser.lexical_attributes.open_tag {
+ if parser.lexical_attributes.lexeme_builder.len == 0 && is_quote {
+ parser.lexical_attributes.open_string = string_code
+ parser.lexical_attributes.lexeme_builder.write_b(chr)
+ } else if chr == `>` { // close tag >
+ complete_lexeme := parser.builder_str().to_lower()
+ parser.lexical_attributes.current_tag.closed = (complete_lexeme.len > 0
+ && complete_lexeme[complete_lexeme.len - 1] == `/`) // if equals to /
+ if complete_lexeme.len > 0 && complete_lexeme[0] == `/` {
+ parser.dom.close_tags[complete_lexeme] = true
+ }
+ /*
+ else if complete_lexeme.len > 0 && complete_lexeme[complete_lexeme.len - 1] == 47 { // if end tag like "/>"
+ parser.lexical_attributes.current_tag.closed = true
+ }
+ */
+ if parser.lexical_attributes.current_tag.name == '' {
+ parser.lexical_attributes.current_tag.name = complete_lexeme
+ } else if complete_lexeme != '/' {
+ parser.lexical_attributes.current_tag.attributes[complete_lexeme] = ''
+ }
+ parser.lexical_attributes.open_tag = false
+			parser.lexical_attributes.lexeme_builder.go_back_to(0)
+			// if the tag is a code tag (e.g. script or style), switch to code mode
+			if parser.lexical_attributes.current_tag.name in parser.lexical_attributes.code_tags {
+ parser.lexical_attributes.open_code = true
+ parser.lexical_attributes.opened_code_type = parser.lexical_attributes.current_tag.name
+ }
+ // parser.print_debug(parser.lexical_attributes.current_tag.name)
+ } else if chr !in [byte(9), ` `, `=`, `\n`] { // Tab, space, = and \n
+ parser.lexical_attributes.lexeme_builder.write_b(chr)
+			} else if chr != 10 { // chr is a tab, space or `=` here (newlines are ignored): finish the current lexeme
+ complete_lexeme := parser.builder_str().to_lower()
+ if parser.lexical_attributes.current_tag.name == '' {
+ parser.lexical_attributes.current_tag.name = complete_lexeme
+ } else {
+ parser.lexical_attributes.current_tag.attributes[complete_lexeme] = ''
+ parser.lexical_attributes.current_tag.last_attribute = ''
+ if chr == `=` { // if was a =
+ parser.lexical_attributes.current_tag.last_attribute = complete_lexeme
+ }
+ }
+ parser.lexical_attributes.lexeme_builder.go_back_to(0)
+ }
+ if parser.builder_str() == '!--' {
+ parser.lexical_attributes.open_comment = true
+ }
+ } else if chr == `<` { // open tag '<'
+ temp_string := parser.builder_str()
+ if parser.lexical_attributes.lexeme_builder.len >= 1 {
+ if parser.lexical_attributes.current_tag.name.len > 1
+					&& parser.lexical_attributes.current_tag.name[0] == 47 // 47 == `/`
+ && !blank_string(temp_string) {
+ parser.tags << &Tag{
+ name: 'text'
+ content: temp_string
+ }
+ } else {
+					parser.lexical_attributes.current_tag.content = temp_string // verify later which tag owns this content
+ }
+ }
+ // parser.print_debug(parser.lexical_attributes.current_tag.str())
+ parser.lexical_attributes.lexeme_builder.go_back_to(0)
+ parser.generate_tag()
+ parser.lexical_attributes.open_tag = true
+ } else {
+ parser.lexical_attributes.lexeme_builder.write_b(chr)
+ }
+ }
+}
+
+// parse_html parses the given HTML string
+pub fn (mut parser Parser) parse_html(data string) {
+ parser.init()
+ mut lines := data.split_into_lines()
+ for line in lines {
+ parser.lexical_attributes.line_count++
+ parser.split_parse(line)
+ }
+ parser.generate_tag()
+ parser.dom.debug_file = parser.debug_file
+ parser.dom.construct(parser.tags)
+}
+
+// finalize finishes the parsing stage.
+[inline]
+pub fn (mut parser Parser) finalize() {
+ parser.generate_tag()
+}
+
+// get_dom returns the parser's current DOM representation.
+pub fn (mut parser Parser) get_dom() DocumentObjectModel {
+ if !parser.dom.constructed {
+ parser.generate_tag()
+ parser.dom.construct(parser.tags)
+ }
+ return parser.dom
+}
diff --git a/v_windows/v/vlib/net/html/parser_test.v b/v_windows/v/vlib/net/html/parser_test.v
new file mode 100644
index 0000000..274a47c
--- /dev/null
+++ b/v_windows/v/vlib/net/html/parser_test.v
@@ -0,0 +1,41 @@
+module html
+
+import strings
+
+fn test_split_parse() {
+ mut parser := Parser{}
+ parser.init()
+ parser.split_parse('<!doctype htm')
+ parser.split_parse('l public')
+ parser.split_parse('><html><he')
+ parser.split_parse('ad><t')
+ parser.split_parse('itle> Hum... ')
+ parser.split_parse('A Tit')
+ parser.split_parse('\nle</ti\ntle>')
+ parser.split_parse('</\nhead><body>\t\t\t<h3>')
+ parser.split_parse('Nice Test!</h3>')
+ parser.split_parse('</bo\n\n\ndy></html>')
+ parser.finalize()
+ assert parser.tags.len == 11
+ assert parser.tags[3].content == ' Hum... A Tit\nle'
+}
+
+fn test_giant_string() {
+ mut temp_html := strings.new_builder(200)
+ mut parser := Parser{}
+ temp_html.write_string('<!doctype html><html><head><title>Giant String</title></head><body>')
+ for counter := 0; counter < 2000; counter++ {
+ temp_html.write_string("<div id='name_$counter' class='several-$counter'>Look at $counter</div>")
+ }
+ temp_html.write_string('</body></html>')
+ parser.parse_html(temp_html.str())
+ assert parser.tags.len == 4009
+}
+
+fn test_script_tag() {
+ mut parser := Parser{}
+ script_content := "\nvar googletag = googletag || {};\ngoogletag.cmd = googletag.cmd || [];if(3 > 5) {console.log('Birl');}\n"
+ temp_html := '<html><body><script>$script_content</script></body></html>'
+ parser.parse_html(temp_html)
+ assert parser.tags[2].content.len == script_content.replace('\n', '').len
+}
diff --git a/v_windows/v/vlib/net/html/tag.v b/v_windows/v/vlib/net/html/tag.v
new file mode 100644
index 0000000..62260c0
--- /dev/null
+++ b/v_windows/v/vlib/net/html/tag.v
@@ -0,0 +1,68 @@
+module html
+
+import strings
+
+enum CloseTagType {
+ in_name
+ new_tag
+}
+
+// Tag holds the information of an HTML tag.
+[heap]
+pub struct Tag {
+pub mut:
+ name string
+ content string
+ children []&Tag
+ attributes map[string]string // attributes will be like map[name]value
+ last_attribute string
+ parent &Tag = 0
+ position_in_parent int
+ closed bool
+ close_type CloseTagType = .in_name
+}
+
+fn (mut tag Tag) add_parent(t &Tag, position int) {
+ tag.position_in_parent = position
+ tag.parent = t
+}
+
+fn (mut tag Tag) add_child(t &Tag) int {
+ tag.children << t
+ return tag.children.len
+}
+
+// text returns the text contents of the tag.
+pub fn (tag Tag) text() string {
+ if tag.name.len >= 2 && tag.name[..2] == 'br' {
+ return '\n'
+ }
+ mut text_str := strings.new_builder(200)
+ text_str.write_string(tag.content.replace('\n', ''))
+ for child in tag.children {
+ text_str.write_string(child.text())
+ }
+ return text_str.str()
+}
+
+pub fn (tag &Tag) str() string {
+ mut html_str := strings.new_builder(200)
+ html_str.write_string('<$tag.name')
+ for key, value in tag.attributes {
+ html_str.write_string(' $key')
+ if value.len > 0 {
+ html_str.write_string('="$value"')
+ }
+ }
+ html_str.write_string(if tag.closed && tag.close_type == .in_name { '/>' } else { '>' })
+ html_str.write_string(tag.content)
+ if tag.children.len > 0 {
+ for child in tag.children {
+ html_str.write_string(child.str())
+ }
+ }
+ if !tag.closed || tag.close_type == .new_tag {
+ html_str.write_string('</$tag.name>')
+ }
+ return html_str.str()
+}
diff --git a/v_windows/v/vlib/net/http/backend_nix.c.v b/v_windows/v/vlib/net/http/backend_nix.c.v
new file mode 100644
index 0000000..1243442
--- /dev/null
+++ b/v_windows/v/vlib/net/http/backend_nix.c.v
@@ -0,0 +1,74 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import strings
+import net.openssl
+
+const (
+ is_used = openssl.is_used
+)
+
+fn (req &Request) ssl_do(port int, method Method, host_name string, path string) ?Response {
+ // ssl_method := C.SSLv23_method()
+ ctx := C.SSL_CTX_new(C.TLS_method())
+ C.SSL_CTX_set_verify_depth(ctx, 4)
+ flags := C.SSL_OP_NO_SSLv2 | C.SSL_OP_NO_SSLv3 | C.SSL_OP_NO_COMPRESSION
+ C.SSL_CTX_set_options(ctx, flags)
+ mut res := C.SSL_CTX_load_verify_locations(ctx, c'random-org-chain.pem', 0)
+ web := C.BIO_new_ssl_connect(ctx)
+ addr := host_name + ':' + port.str()
+ res = C.BIO_set_conn_hostname(web, addr.str)
+ ssl := &openssl.SSL(0)
+ C.BIO_get_ssl(web, &ssl)
+ preferred_ciphers := 'HIGH:!aNULL:!kRSA:!PSK:!SRP:!MD5:!RC4'
+ res = C.SSL_set_cipher_list(voidptr(ssl), &char(preferred_ciphers.str))
+ if res != 1 {
+ println('http: openssl: cipher failed')
+ }
+ res = C.SSL_set_tlsext_host_name(voidptr(ssl), host_name.str)
+ res = C.BIO_do_connect(web)
+ if res != 1 {
+ return error('cannot connect the endpoint')
+ }
+ res = C.BIO_do_handshake(web)
+ C.SSL_get_peer_certificate(voidptr(ssl))
+ res = C.SSL_get_verify_result(voidptr(ssl))
+ // /////
+ req_headers := req.build_request_headers(method, host_name, path)
+ $if trace_http_request ? {
+ eprintln('> $req_headers')
+ }
+ // println(req_headers)
+ C.BIO_puts(web, &char(req_headers.str))
+ mut content := strings.new_builder(100)
+ mut buff := [bufsize]byte{}
+ bp := unsafe { &buff[0] }
+ mut readcounter := 0
+ for {
+ readcounter++
+ len := unsafe { C.BIO_read(web, bp, bufsize) }
+ if len <= 0 {
+ break
+ }
+ $if debug_http ? {
+ eprintln('ssl_do, read ${readcounter:4d} | len: $len')
+ eprintln('-'.repeat(20))
+ eprintln(unsafe { tos(bp, len) })
+ eprintln('-'.repeat(20))
+ }
+ unsafe { content.write_ptr(bp, len) }
+ }
+ if web != 0 {
+ C.BIO_free_all(web)
+ }
+ if ctx != 0 {
+ C.SSL_CTX_free(ctx)
+ }
+ response_text := content.str()
+ $if trace_http_response ? {
+ eprintln('< $response_text')
+ }
+ return parse_response(response_text)
+}
diff --git a/v_windows/v/vlib/net/http/backend_windows.c.v b/v_windows/v/vlib/net/http/backend_windows.c.v
new file mode 100644
index 0000000..9181166
--- /dev/null
+++ b/v_windows/v/vlib/net/http/backend_windows.c.v
@@ -0,0 +1,28 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+#flag windows -I @VEXEROOT/thirdparty/vschannel
+#flag -l ws2_32 -l crypt32 -l secur32 -l user32
+#include "vschannel.c"
+
+fn C.new_tls_context() C.TlsContext
+
+fn (req &Request) ssl_do(port int, method Method, host_name string, path string) ?Response {
+ mut ctx := C.new_tls_context()
+ C.vschannel_init(&ctx)
+ mut buff := unsafe { malloc_noscan(C.vsc_init_resp_buff_size) }
+ addr := host_name
+ sdata := req.build_request_headers(method, host_name, path)
+ $if trace_http_request ? {
+ eprintln('> $sdata')
+ }
+ length := C.request(&ctx, port, addr.to_wide(), sdata.str, &buff)
+ C.vschannel_cleanup(&ctx)
+ response_text := unsafe { buff.vstring_with_len(length) }
+ $if trace_http_response ? {
+ eprintln('< $response_text')
+ }
+ return parse_response(response_text)
+}
diff --git a/v_windows/v/vlib/net/http/chunked/dechunk.v b/v_windows/v/vlib/net/http/chunked/dechunk.v
new file mode 100644
index 0000000..0e82586
--- /dev/null
+++ b/v_windows/v/vlib/net/http/chunked/dechunk.v
@@ -0,0 +1,72 @@
+module chunked
+
+import strings
+// See: https://en.wikipedia.org/wiki/Chunked_transfer_encoding
+// /////////////////////////////////////////////////////////////
+// The chunk size is transferred as a hexadecimal number
+// followed by \r\n as a line separator,
+// followed by a chunk of data of the given size.
+// The end is marked with a chunk with size 0.
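+// For example (illustrative input, not taken from this module's tests), the
+// chunked body '4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n' is decoded by decode()
+// below into 'Wikipedia'.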
+
+struct ChunkScanner {
+mut:
+ pos int
+ text string
+}
+
+fn (mut s ChunkScanner) read_chunk_size() int {
+ mut n := 0
+ for {
+ if s.pos >= s.text.len {
+ break
+ }
+ c := s.text[s.pos]
+ if !c.is_hex_digit() {
+ break
+ }
+ n = n << 4
+ n += int(unhex(c))
+ s.pos++
+ }
+ return n
+}
+
+fn unhex(c byte) byte {
+ if `0` <= c && c <= `9` {
+ return c - `0`
+ } else if `a` <= c && c <= `f` {
+ return c - `a` + 10
+ } else if `A` <= c && c <= `F` {
+ return c - `A` + 10
+ }
+ return 0
+}
+
+fn (mut s ChunkScanner) skip_crlf() {
+ s.pos += 2
+}
+
+fn (mut s ChunkScanner) read_chunk(chunksize int) string {
+ startpos := s.pos
+ s.pos += chunksize
+ return s.text[startpos..s.pos]
+}
+
+pub fn decode(text string) string {
+ mut sb := strings.new_builder(100)
+ mut cscanner := ChunkScanner{
+ pos: 0
+ text: text
+ }
+ for {
+ csize := cscanner.read_chunk_size()
+ if 0 == csize {
+ break
+ }
+ cscanner.skip_crlf()
+ sb.write_string(cscanner.read_chunk(csize))
+ cscanner.skip_crlf()
+ }
+ cscanner.skip_crlf()
+ return sb.str()
+}
diff --git a/v_windows/v/vlib/net/http/cookie.v b/v_windows/v/vlib/net/http/cookie.v
new file mode 100644
index 0000000..d647b3d
--- /dev/null
+++ b/v_windows/v/vlib/net/http/cookie.v
@@ -0,0 +1,413 @@
+// Copyright (c) 2019 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import time
+import strings
+
+pub struct Cookie {
+pub mut:
+ name string
+ value string
+ path string // optional
+ domain string // optional
+ expires time.Time // optional
+ raw_expires string // for reading cookies only. optional.
+ // max_age=0 means no 'Max-Age' attribute specified.
+ // max_age<0 means delete cookie now, equivalently 'Max-Age: 0'
+ // max_age>0 means Max-Age attribute present and given in seconds
+ max_age int
+ secure bool
+ http_only bool
+ same_site SameSite
+ raw string
+ unparsed []string // Raw text of unparsed attribute-value pairs
+}
+
+// SameSite allows a server to define a cookie attribute making it impossible for
+// the browser to send this cookie along with cross-site requests. The main
+// goal is to mitigate the risk of cross-origin information leakage, and provide
+// some protection against cross-site request forgery attacks.
+//
+// See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details.
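+//
+// For example, a header like 'Set-Cookie: id=1; SameSite=Strict' (illustrative)
+// is mapped to .same_site_strict_mode by parse_cookie below.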
+pub enum SameSite {
+ same_site_default_mode = 1
+ same_site_lax_mode
+ same_site_strict_mode
+ same_site_none_mode
+}
+
+// read_set_cookies parses all "Set-Cookie" values from the header `h` and
+// returns the successfully parsed Cookies.
+pub fn read_set_cookies(h map[string][]string) []&Cookie {
+ cookies_s := h['Set-Cookie']
+ cookie_count := cookies_s.len
+ if cookie_count == 0 {
+ return []
+ }
+ mut cookies := []&Cookie{}
+ for _, line in cookies_s {
+ c := parse_cookie(line) or { continue }
+ cookies << &c
+ }
+ return cookies
+}
+
+// read_cookies parses all "Cookie" values from the header `h` and
+// returns the successfully parsed Cookies.
+//
+// if `filter` isn't empty, only cookies of that name are returned
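+//
+// For example (illustrative), a header map with 'Cookie': ['foo=1; bar=2']
+// yields two cookies, or only the 'bar' cookie when filter == 'bar'.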
+pub fn read_cookies(h map[string][]string, filter string) []&Cookie {
+ lines := h['Cookie']
+ if lines.len == 0 {
+ return []
+ }
+ mut cookies := []&Cookie{}
+ for _, line_ in lines {
+ mut line := line_.trim_space()
+ mut part := ''
+ for line.len > 0 {
+ if line.index_any(';') > 0 {
+ line_parts := line.split(';')
+ part = line_parts[0]
+ line = line_parts[1]
+ } else {
+ part = line
+ line = ''
+ }
+ part = part.trim_space()
+ if part.len == 0 {
+ continue
+ }
+ mut name := part
+ mut val := ''
+ if part.contains('=') {
+ val_parts := part.split('=')
+ name = val_parts[0]
+ val = val_parts[1]
+ }
+ if !is_cookie_name_valid(name) {
+ continue
+ }
+ if filter != '' && filter != name {
+ continue
+ }
+ val = parse_cookie_value(val, true) or { continue }
+ cookies << &Cookie{
+ name: name
+ value: val
+ }
+ }
+ }
+ return cookies
+}
+
+// str returns the serialization of the cookie for use in a Cookie header
+// (if only name and value are set) or a Set-Cookie response
+// header (if other fields are set).
+//
+// If c.name is invalid, the empty string is returned.
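+//
+// For example, mirroring a case from cookie_test.v:
+// Cookie{name: 'cookie-4', value: 'four', path: '/restricted/'}.str()
+// returns 'cookie-4=four; path=/restricted/'.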
+pub fn (c &Cookie) str() string {
+ if !is_cookie_name_valid(c.name) {
+ return ''
+ }
+ // extra_cookie_length derived from typical length of cookie attributes
+ // see RFC 6265 Sec 4.1.
+ extra_cookie_length := 110
+ mut b := strings.new_builder(c.name.len + c.value.len + c.domain.len + c.path.len +
+ extra_cookie_length)
+ b.write_string(c.name)
+ b.write_string('=')
+ b.write_string(sanitize_cookie_value(c.value))
+ if c.path.len > 0 {
+ b.write_string('; path=')
+ b.write_string(sanitize_cookie_path(c.path))
+ }
+ if c.domain.len > 0 {
+ if valid_cookie_domain(c.domain) {
+ // A `domain` containing illegal characters is not
+ // sanitized but simply dropped which turns the cookie
+ // into a host-only cookie. A leading dot is okay
+ // but won't be sent.
+ mut d := c.domain
+ if d[0] == `.` {
+ d = d.substr(1, d.len)
+ }
+ b.write_string('; domain=')
+ b.write_string(d)
+ } else {
+ // TODO: Log invalid cookie domain warning
+ }
+ }
+ if c.expires.year > 1600 {
+ e := c.expires
+ time_str := '$e.weekday_str(), $e.day.str() $e.smonth() $e.year $e.hhmmss() GMT'
+ b.write_string('; expires=')
+ b.write_string(time_str)
+ }
+	// TODO: Fix this. Technically a max age of 0 or less should be 0.
+ // We need a way to not have a max age.
+ if c.max_age > 0 {
+ b.write_string('; Max-Age=')
+ b.write_string(c.max_age.str())
+ } else if c.max_age < 0 {
+ b.write_string('; Max-Age=0')
+ }
+ if c.http_only {
+ b.write_string('; HttpOnly')
+ }
+ if c.secure {
+ b.write_string('; Secure')
+ }
+ match c.same_site {
+ .same_site_default_mode {
+ b.write_string('; SameSite')
+ }
+ .same_site_none_mode {
+ b.write_string('; SameSite=None')
+ }
+ .same_site_lax_mode {
+ b.write_string('; SameSite=Lax')
+ }
+ .same_site_strict_mode {
+ b.write_string('; SameSite=Strict')
+ }
+ }
+ return b.str()
+}
+
+fn sanitize(valid fn (byte) bool, v string) string {
+ mut ok := true
+ for i in 0 .. v.len {
+ if valid(v[i]) {
+ continue
+ }
+ // TODO: Warn that we're dropping the invalid byte?
+ ok = false
+ break
+ }
+ if ok {
+ return v.clone()
+ }
+ return v.bytes().filter(valid(it)).bytestr()
+}
+
+fn sanitize_cookie_name(name string) string {
+ return name.replace_each(['\n', '-', '\r', '-'])
+}
+
+// https://tools.ietf.org/html/rfc6265#section-4.1.1
+// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
+// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+// ; US-ASCII characters excluding CTLs,
+// ; whitespace DQUOTE, comma, semicolon,
+// ; and backslash
+// We loosen this as spaces and commas are common in cookie values,
+// but we produce a quoted cookie-value when the value starts or ends
+// with a comma or space.
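+// For example, sanitize_cookie_value(' z') returns '" z"', while
+// sanitize_cookie_value('a z') is returned unchanged (see cookie_test.v).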
+pub fn sanitize_cookie_value(v string) string {
+ val := sanitize(valid_cookie_value_byte, v)
+ if v.len == 0 {
+ return v
+ }
+ // Check for the existence of a space or comma
+ if val.starts_with(' ') || val.ends_with(' ') || val.starts_with(',') || val.ends_with(',') {
+ return '"$v"'
+ }
+ return v
+}
+
+fn sanitize_cookie_path(v string) string {
+ return sanitize(valid_cookie_path_byte, v)
+}
+
+fn valid_cookie_value_byte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != `"` && b != `;` && b != `\\`
+}
+
+fn valid_cookie_path_byte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != `!`
+}
+
+fn valid_cookie_domain(v string) bool {
+ if is_cookie_domain_name(v) {
+ return true
+ }
+ // TODO
+ // valid_ip := net.parse_ip(v) or {
+ // false
+ // }
+ // if valid_ip {
+ // return true
+ // }
+ return false
+}
+
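+// is_cookie_domain_name reports whether `_s` looks like a valid cookie domain:
+// at most 255 bytes, labels of at most 63 bytes made of letters, digits and '-',
+// separated by '.', with an optional single leading dot.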
+pub fn is_cookie_domain_name(_s string) bool {
+ mut s := _s
+ if s.len == 0 {
+ return false
+ }
+ if s.len > 255 {
+ return false
+ }
+ if s[0] == `.` {
+ s = s.substr(1, s.len)
+ }
+ mut last := `.`
+ mut ok := false
+ mut part_len := 0
+ for i, _ in s {
+ c := s[i]
+ if (`a` <= c && c <= `z`) || (`A` <= c && c <= `Z`) {
+ // No '_' allowed here (in contrast to package net).
+ ok = true
+ part_len++
+ } else if `0` <= c && c <= `9` {
+ // fine
+ part_len++
+ } else if c == `-` {
+ // Byte before dash cannot be dot.
+ if last == `.` {
+ return false
+ }
+ part_len++
+ } else if c == `.` {
+ // Byte before dot cannot be dot, dash.
+ if last == `.` || last == `-` {
+ return false
+ }
+ if part_len > 63 || part_len == 0 {
+ return false
+ }
+ part_len = 0
+ } else {
+ return false
+ }
+ last = c
+ }
+ if last == `-` || part_len > 63 {
+ return false
+ }
+ return ok
+}
+
+fn parse_cookie_value(_raw string, allow_double_quote bool) ?string {
+ mut raw := _raw
+ // Strip the quotes, if present
+ if allow_double_quote && raw.len > 1 && raw[0] == `"` && raw[raw.len - 1] == `"` {
+ raw = raw.substr(1, raw.len - 1)
+ }
+ for i in 0 .. raw.len {
+ if !valid_cookie_value_byte(raw[i]) {
+ return error('http.cookie: invalid cookie value')
+ }
+ }
+ return raw
+}
+
+fn is_cookie_name_valid(name string) bool {
+ if name == '' {
+ return false
+ }
+ for b in name {
+ if b < 33 || b > 126 {
+ return false
+ }
+ }
+ return true
+}
+
+fn parse_cookie(line string) ?Cookie {
+ mut parts := line.trim_space().split(';')
+ if parts.len == 1 && parts[0] == '' {
+ return error('malformed cookie')
+ }
+ parts[0] = parts[0].trim_space()
+ keyval := parts[0].split('=')
+ if keyval.len != 2 {
+ return error('malformed cookie')
+ }
+ name := keyval[0]
+ raw_value := keyval[1]
+ if !is_cookie_name_valid(name) {
+ return error('malformed cookie')
+ }
+ value := parse_cookie_value(raw_value, true) or { return error('malformed cookie') }
+ mut c := Cookie{
+ name: name
+ value: value
+ raw: line
+ }
+ for i, _ in parts {
+ parts[i] = parts[i].trim_space()
+ if parts[i].len == 0 {
+ continue
+ }
+ mut attr := parts[i]
+ mut raw_val := ''
+ if attr.contains('=') {
+ pieces := attr.split('=')
+ attr = pieces[0]
+ raw_val = pieces[1]
+ }
+ lower_attr := attr.to_lower()
+ val := parse_cookie_value(raw_val, false) or {
+ c.unparsed << parts[i]
+ continue
+ }
+ match lower_attr {
+ 'samesite' {
+ lower_val := val.to_lower()
+ match lower_val {
+ 'lax' { c.same_site = .same_site_lax_mode }
+ 'strict' { c.same_site = .same_site_strict_mode }
+ 'none' { c.same_site = .same_site_none_mode }
+ else { c.same_site = .same_site_default_mode }
+ }
+ }
+ 'secure' {
+ c.secure = true
+ continue
+ }
+ 'httponly' {
+ c.http_only = true
+ continue
+ }
+ 'domain' {
+ c.domain = val
+ continue
+ }
+ 'max-age' {
+ mut secs := val.int()
+ if secs != 0 && val[0] != `0` {
+ break
+ }
+ if secs <= 0 {
+ secs = -1
+ }
+ c.max_age = secs
+ continue
+ }
+ // TODO: Fix this once time works better
+ // 'expires' {
+ // c.raw_expires = val
+ // mut exptime := time.parse_iso(val)
+ // if exptime.year == 0 {
+ // exptime = time.parse_iso('Mon, 02-Jan-2006 15:04:05 MST')
+ // }
+ // c.expires = exptime
+ // continue
+ // }
+ 'path' {
+ c.path = val
+ continue
+ }
+ else {
+ c.unparsed << parts[i]
+ }
+ }
+ }
+ return c
+}
diff --git a/v_windows/v/vlib/net/http/cookie_test.v b/v_windows/v/vlib/net/http/cookie_test.v
new file mode 100644
index 0000000..6a0c0cd
--- /dev/null
+++ b/v_windows/v/vlib/net/http/cookie_test.v
@@ -0,0 +1,468 @@
+import net.http
+
+struct SetCookieTestCase {
+ cookie &http.Cookie
+ raw string
+}
+
+struct ReadSetCookiesTestCase {
+ header map[string][]string
+ cookies []&http.Cookie
+}
+
+struct AddCookieTestCase {
+ cookie []&http.Cookie
+ raw string
+}
+
+const (
+ write_set_cookie_tests = [
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-1'
+ value: 'v1'
+ }
+ raw: 'cookie-1=v1'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-2'
+ value: 'two'
+ max_age: 3600
+ }
+ raw: 'cookie-2=two; Max-Age=3600'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-3'
+ value: 'three'
+ domain: '.example.com'
+ }
+ raw: 'cookie-3=three; domain=example.com'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-4'
+ value: 'four'
+ path: '/restricted/'
+ }
+ raw: 'cookie-4=four; path=/restricted/'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-5'
+ value: 'five'
+ domain: 'wrong;bad.abc'
+ }
+ raw: 'cookie-5=five'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-6'
+ value: 'six'
+ domain: 'bad-.abc'
+ }
+ raw: 'cookie-6=six'
+ },
+ // SetCookieTestCase{
+ // cookie: &http.Cookie{name: 'cookie-7', value: 'seven', domain: '127.0.0.1'},
+ // raw: 'cookie-7=seven; domain=127.0.0.1'
+ // },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-8'
+ value: 'eight'
+ domain: '::1'
+ }
+ raw: 'cookie-8=eight'
+ },
+ // {
+ // cookie: &http.Cookie{name: 'cookie-9', value: 'expiring', expires: time.unix(1257894000, 0)},
+ // 'cookie-9=expiring; Expires=Tue, 10 Nov 2009 23:00:00 GMT',
+ // },
+		// According to RFC 6265 Section 5.1.1.5, the year cannot be less than 1601
+ // SetCookieTestCase{
+ // cookie: &http.Cookie{name: 'cookie-10', value: 'expiring-1601', expires: time.parse('Mon, 01 Jan 1601 01:01:01 GMT')},
+ // raw: 'cookie-10=expiring-1601; Expires=Mon, 01 Jan 1601 01:01:01 GMT'
+ // },
+ // SetCookieTestCase{
+ // cookie: &http.Cookie{name: 'cookie-11', value: 'invalid-expiry', expires: time.parse('Mon, 01 Jan 1600 01:01:01 GMT')},
+ // raw: 'cookie-11=invalid-expiry'
+ // },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-12'
+ value: 'samesite-default'
+ same_site: .same_site_default_mode
+ }
+ raw: 'cookie-12=samesite-default; SameSite'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-13'
+ value: 'samesite-lax'
+ same_site: .same_site_lax_mode
+ }
+ raw: 'cookie-13=samesite-lax; SameSite=Lax'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-14'
+ value: 'samesite-strict'
+ same_site: .same_site_strict_mode
+ }
+ raw: 'cookie-14=samesite-strict; SameSite=Strict'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'cookie-15'
+ value: 'samesite-none'
+ same_site: .same_site_none_mode
+ }
+ raw: 'cookie-15=samesite-none; SameSite=None'
+ },
+ // The 'special' cookies have values containing commas or spaces which
+ // are disallowed by RFC 6265 but are common in the wild.
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-1'
+ value: 'a z'
+ }
+ raw: 'special-1=a z'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-2'
+ value: ' z'
+ }
+ raw: 'special-2=" z"'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-3'
+ value: 'a '
+ }
+ raw: 'special-3="a "'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-4'
+ value: ' '
+ }
+ raw: 'special-4=" "'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-5'
+ value: 'a,z'
+ }
+ raw: 'special-5=a,z'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-6'
+ value: ',z'
+ }
+ raw: 'special-6=",z"'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-7'
+ value: 'a,'
+ }
+ raw: 'special-7="a,"'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'special-8'
+ value: ','
+ }
+ raw: 'special-8=","'
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'empty-value'
+ value: ''
+ }
+ raw: 'empty-value='
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: ''
+ }
+ raw: ''
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: '\t'
+ }
+ raw: ''
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: '\r'
+ }
+ raw: ''
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'a\nb'
+ value: 'v'
+ }
+ raw: ''
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'a\nb'
+ value: 'v'
+ }
+ raw: ''
+ },
+ SetCookieTestCase{
+ cookie: &http.Cookie{
+ name: 'a\rb'
+ value: 'v'
+ }
+ raw: ''
+ },
+ ]
+ add_cookies_tests = [
+ AddCookieTestCase{
+ cookie: []
+ raw: ''
+ },
+ AddCookieTestCase{
+ cookie: [&http.Cookie{
+ name: 'cookie-1'
+ value: 'v1'
+ }]
+ raw: 'cookie-1=v1'
+ },
+ AddCookieTestCase{
+ cookie: [&http.Cookie{
+ name: 'cookie-1'
+ value: 'v1'
+ },
+ &http.Cookie{
+ name: 'cookie-2'
+ value: 'v2'
+ },
+ &http.Cookie{
+ name: 'cookie-3'
+ value: 'v3'
+ },
+ ]
+ raw: 'cookie-1=v1; cookie-2=v2; cookie-3=v3'
+ },
+ ]
+ read_set_cookies_tests = [
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['Cookie-1=v1']
+ }
+ cookies: [&http.Cookie{
+ name: 'Cookie-1'
+ value: 'v1'
+ raw: 'Cookie-1=v1'
+ }]
+ },
+ // ReadSetCookiesTestCase{
+ // header: {"Set-Cookie": ["NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"]},
+ // cookies: [&http.Cookie{
+ // name: "NID",
+ // value: "99=YsDT5i3E-CXax-",
+ // path: "/",
+ // domain: ".google.ch",
+ // http_only: true,
+ // expires: time.parse_iso('Wed, 23-Nov-2011 01:05:03 GMT'),
+ // raw_expires: "Wed, 23-Nov-2011 01:05:03 GMT",
+ // raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"
+ // }]
+ // },
+ // ReadSetCookiesTestCase{
+ // header: {"Set-Cookie": [".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"]},
+ // cookies: [&http.Cookie{
+ // name: ".ASPXAUTH",
+ // value: "7E3AA",
+ // path: "/",
+ // expires: time.parse_iso('Wed, 07-Mar-2012 14:25:06 GMT'),
+ // raw_expires: "Wed, 07-Mar-2012 14:25:06 GMT",
+ // http_only: true,
+ // raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"
+ // }]
+ // },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['ASP.NET_SessionId=foo; path=/; HttpOnly']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'ASP.NET_SessionId'
+ value: 'foo'
+ path: '/'
+ http_only: true
+ raw: 'ASP.NET_SessionId=foo; path=/; HttpOnly'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['samesitedefault=foo; SameSite']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'samesitedefault'
+ value: 'foo'
+ same_site: .same_site_default_mode
+ raw: 'samesitedefault=foo; SameSite'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['samesitelax=foo; SameSite=Lax']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'samesitelax'
+ value: 'foo'
+ same_site: .same_site_lax_mode
+ raw: 'samesitelax=foo; SameSite=Lax'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['samesitestrict=foo; SameSite=Strict']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'samesitestrict'
+ value: 'foo'
+ same_site: .same_site_strict_mode
+ raw: 'samesitestrict=foo; SameSite=Strict'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['samesitenone=foo; SameSite=None']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'samesitenone'
+ value: 'foo'
+ same_site: .same_site_none_mode
+ raw: 'samesitenone=foo; SameSite=None'
+ },
+ ]
+ },
+ // Make sure we can properly read back the Set-Cookie headers we create
+ // for values containing spaces or commas:
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['special-1=a z']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'special-1'
+ value: 'a z'
+ raw: 'special-1=a z'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['special-2=" z"']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'special-2'
+ value: ' z'
+ raw: 'special-2=" z"'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['special-3="a "']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'special-3'
+ value: 'a '
+ raw: 'special-3="a "'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['special-4=" "']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'special-4'
+ value: ' '
+ raw: 'special-4=" "'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['special-5=a,z']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'special-5'
+ value: 'a,z'
+ raw: 'special-5=a,z'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['special-6=",z"']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'special-6'
+ value: ',z'
+ raw: 'special-6=",z"'
+ },
+ ]
+ },
+ ReadSetCookiesTestCase{
+ header: {
+ 'Set-Cookie': ['special-7=","']
+ }
+ cookies: [
+ &http.Cookie{
+ name: 'special-7'
+ value: ','
+				raw: 'special-7=","'
+ },
+ ]
+ }
+ // TODO(bradfitz): users have reported seeing this in the
+ // wild, but do browsers handle it? RFC 6265 just says "don't
+ // do that" (section 3) and then never mentions header folding
+ // again.
+ // Header{"Set-Cookie": ["ASP.NET_SessionId=foo; path=/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"]},
+ ]
+)
+
+fn test_write_set_cookies() {
+ for _, tt in write_set_cookie_tests {
+ assert tt.cookie.str() == tt.raw
+ }
+}
+
+fn test_read_set_cookies() {
+ for _, tt in read_set_cookies_tests {
+ h := tt.header['Set-Cookie'][0]
+ c := http.read_set_cookies(tt.header)
+ println(h)
+ println(c[0].str())
+ assert c[0].str() == h
+ }
+}
diff --git a/v_windows/v/vlib/net/http/download.v b/v_windows/v/vlib/net/http/download.v
new file mode 100644
index 0000000..455c1e0
--- /dev/null
+++ b/v_windows/v/vlib/net/http/download.v
@@ -0,0 +1,18 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import os
+
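+// download_file fetches `url` with http.get and writes the response body to
+// the file `out`; it returns an error if the response status is not 200 OK.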
+pub fn download_file(url string, out string) ? {
+ $if debug_http ? {
+ println('download file url=$url out=$out')
+ }
+ s := get(url) or { return err }
+ if s.status() != .ok {
+ return error('received http code $s.status_code')
+ }
+ os.write_file(out, s.text) ?
+ // download_file_with_progress(url, out, empty, empty)
+}
diff --git a/v_windows/v/vlib/net/http/download_nix.c.v b/v_windows/v/vlib/net/http/download_nix.c.v
new file mode 100644
index 0000000..724a256
--- /dev/null
+++ b/v_windows/v/vlib/net/http/download_nix.c.v
@@ -0,0 +1,52 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+type DownloadFn = fn (written int)
+
+/*
+struct DownloadStruct {
+mut:
+ stream voidptr
+ written int
+ cb DownloadFn
+}
+*/
+fn download_cb(ptr voidptr, size size_t, nmemb size_t, userp voidptr) {
+ /*
+ mut data := &DownloadStruct(userp)
+ written := C.fwrite(ptr, size, nmemb, data.stream)
+ data.written += written
+ data.cb(data.written)
+ //#data->cb(data->written); // TODO
+ return written
+ */
+}
+
+pub fn download_file_with_progress(url string, out string, cb DownloadFn, cb_finished fn ()) {
+ /*
+ curl := C.curl_easy_init()
+ if isnil(curl) {
+ return
+ }
+ cout := out.str
+ fp := C.fopen(cout, 'wb')
+ C.curl_easy_setopt(curl, CURLOPT_URL, url.str)
+ C.curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, download_cb)
+ data := &DownloadStruct {
+ stream:fp
+ cb: cb
+ }
+ C.curl_easy_setopt(curl, CURLOPT_WRITEDATA, data)
+ mut d := 0.0
+ C.curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d)
+ C.curl_easy_perform(curl)
+ C.curl_easy_cleanup(curl)
+ C.fclose(fp)
+ cb_finished()
+ */
+}
+
+fn empty() {
+}
diff --git a/v_windows/v/vlib/net/http/download_windows.c.v b/v_windows/v/vlib/net/http/download_windows.c.v
new file mode 100644
index 0000000..422b6da
--- /dev/null
+++ b/v_windows/v/vlib/net/http/download_windows.c.v
@@ -0,0 +1,29 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+
+module http
+
+#flag -l urlmon
+
+#include <urlmon.h>
+
+fn download_file_with_progress(url string, out string, cb voidptr, cb_finished voidptr) {
+}
+
+/*
+pub fn download_file(url, out string) {
+ C.URLDownloadToFile(0, url.to_wide(), out.to_wide(), 0, 0)
+ /*
+ if (res == S_OK) {
+ println('Download Ok')
+ # } else if(res == E_OUTOFMEMORY) {
+ println('Buffer length invalid, or insufficient memory')
+ # } else if(res == INET_E_DOWNLOAD_FAILURE) {
+ println('URL is invalid')
+ # } else {
+ # printf("Download error: %d\n", res);
+ # }
+ */
+}
+*/
diff --git a/v_windows/v/vlib/net/http/header.v b/v_windows/v/vlib/net/http/header.v
new file mode 100644
index 0000000..c05bdbc
--- /dev/null
+++ b/v_windows/v/vlib/net/http/header.v
@@ -0,0 +1,698 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import strings
+
+// CommonHeader is an enum of the most common HTTP headers
+pub enum CommonHeader {
+ accept
+ accept_ch
+ accept_charset
+ accept_ch_lifetime
+ accept_encoding
+ accept_language
+ accept_patch
+ accept_post
+ accept_ranges
+ access_control_allow_credentials
+ access_control_allow_headers
+ access_control_allow_methods
+ access_control_allow_origin
+ access_control_expose_headers
+ access_control_max_age
+ access_control_request_headers
+ access_control_request_method
+ age
+ allow
+ alt_svc
+ authorization
+ cache_control
+ clear_site_data
+ connection
+ content_disposition
+ content_encoding
+ content_language
+ content_length
+ content_location
+ content_range
+ content_security_policy
+ content_security_policy_report_only
+ content_type
+ cookie
+ cross_origin_embedder_policy
+ cross_origin_opener_policy
+ cross_origin_resource_policy
+ date
+ device_memory
+ digest
+ dnt
+ early_data
+ etag
+ expect
+ expect_ct
+ expires
+ feature_policy
+ forwarded
+ from
+ host
+ if_match
+ if_modified_since
+ if_none_match
+ if_range
+ if_unmodified_since
+ index
+ keep_alive
+ large_allocation
+ last_modified
+ link
+ location
+ nel
+ origin
+ pragma
+ proxy_authenticate
+ proxy_authorization
+ range
+ referer
+ referrer_policy
+ retry_after
+ save_data
+ sec_fetch_dest
+ sec_fetch_mode
+ sec_fetch_site
+ sec_fetch_user
+ sec_websocket_accept
+ server
+ server_timing
+ set_cookie
+ sourcemap
+ strict_transport_security
+ te
+ timing_allow_origin
+ tk
+ trailer
+ transfer_encoding
+ upgrade
+ upgrade_insecure_requests
+ user_agent
+ vary
+ via
+ want_digest
+ warning
+ www_authenticate
+ x_content_type_options
+ x_dns_prefetch_control
+ x_forwarded_for
+ x_forwarded_host
+ x_forwarded_proto
+ x_frame_options
+ x_xss_protection
+}
+
+pub fn (h CommonHeader) str() string {
+ return match h {
+ .accept { 'Accept' }
+ .accept_ch { 'Accept-CH' }
+ .accept_charset { 'Accept-Charset' }
+ .accept_ch_lifetime { 'Accept-CH-Lifetime' }
+ .accept_encoding { 'Accept-Encoding' }
+ .accept_language { 'Accept-Language' }
+ .accept_patch { 'Accept-Patch' }
+ .accept_post { 'Accept-Post' }
+ .accept_ranges { 'Accept-Ranges' }
+ .access_control_allow_credentials { 'Access-Control-Allow-Credentials' }
+ .access_control_allow_headers { 'Access-Control-Allow-Headers' }
+ .access_control_allow_methods { 'Access-Control-Allow-Methods' }
+ .access_control_allow_origin { 'Access-Control-Allow-Origin' }
+ .access_control_expose_headers { 'Access-Control-Expose-Headers' }
+ .access_control_max_age { 'Access-Control-Max-Age' }
+ .access_control_request_headers { 'Access-Control-Request-Headers' }
+ .access_control_request_method { 'Access-Control-Request-Method' }
+ .age { 'Age' }
+ .allow { 'Allow' }
+ .alt_svc { 'Alt-Svc' }
+ .authorization { 'Authorization' }
+ .cache_control { 'Cache-Control' }
+ .clear_site_data { 'Clear-Site-Data' }
+ .connection { 'Connection' }
+ .content_disposition { 'Content-Disposition' }
+ .content_encoding { 'Content-Encoding' }
+ .content_language { 'Content-Language' }
+ .content_length { 'Content-Length' }
+ .content_location { 'Content-Location' }
+ .content_range { 'Content-Range' }
+ .content_security_policy { 'Content-Security-Policy' }
+ .content_security_policy_report_only { 'Content-Security-Policy-Report-Only' }
+ .content_type { 'Content-Type' }
+ .cookie { 'Cookie' }
+ .cross_origin_embedder_policy { 'Cross-Origin-Embedder-Policy' }
+ .cross_origin_opener_policy { 'Cross-Origin-Opener-Policy' }
+ .cross_origin_resource_policy { 'Cross-Origin-Resource-Policy' }
+ .date { 'Date' }
+ .device_memory { 'Device-Memory' }
+ .digest { 'Digest' }
+ .dnt { 'DNT' }
+ .early_data { 'Early-Data' }
+ .etag { 'ETag' }
+ .expect { 'Expect' }
+ .expect_ct { 'Expect-CT' }
+ .expires { 'Expires' }
+ .feature_policy { 'Feature-Policy' }
+ .forwarded { 'Forwarded' }
+ .from { 'From' }
+ .host { 'Host' }
+ .if_match { 'If-Match' }
+ .if_modified_since { 'If-Modified-Since' }
+ .if_none_match { 'If-None-Match' }
+ .if_range { 'If-Range' }
+ .if_unmodified_since { 'If-Unmodified-Since' }
+ .index { 'Index' }
+ .keep_alive { 'Keep-Alive' }
+ .large_allocation { 'Large-Allocation' }
+ .last_modified { 'Last-Modified' }
+ .link { 'Link' }
+ .location { 'Location' }
+ .nel { 'NEL' }
+ .origin { 'Origin' }
+ .pragma { 'Pragma' }
+ .proxy_authenticate { 'Proxy-Authenticate' }
+ .proxy_authorization { 'Proxy-Authorization' }
+ .range { 'Range' }
+ .referer { 'Referer' }
+ .referrer_policy { 'Referrer-Policy' }
+ .retry_after { 'Retry-After' }
+ .save_data { 'Save-Data' }
+ .sec_fetch_dest { 'Sec-Fetch-Dest' }
+ .sec_fetch_mode { 'Sec-Fetch-Mode' }
+ .sec_fetch_site { 'Sec-Fetch-Site' }
+ .sec_fetch_user { 'Sec-Fetch-User' }
+ .sec_websocket_accept { 'Sec-WebSocket-Accept' }
+ .server { 'Server' }
+ .server_timing { 'Server-Timing' }
+ .set_cookie { 'Set-Cookie' }
+ .sourcemap { 'SourceMap' }
+ .strict_transport_security { 'Strict-Transport-Security' }
+ .te { 'TE' }
+ .timing_allow_origin { 'Timing-Allow-Origin' }
+ .tk { 'Tk' }
+ .trailer { 'Trailer' }
+ .transfer_encoding { 'Transfer-Encoding' }
+ .upgrade { 'Upgrade' }
+ .upgrade_insecure_requests { 'Upgrade-Insecure-Requests' }
+ .user_agent { 'User-Agent' }
+ .vary { 'Vary' }
+ .via { 'Via' }
+ .want_digest { 'Want-Digest' }
+ .warning { 'Warning' }
+ .www_authenticate { 'WWW-Authenticate' }
+ .x_content_type_options { 'X-Content-Type-Options' }
+ .x_dns_prefetch_control { 'X-DNS-Prefetch-Control' }
+ .x_forwarded_for { 'X-Forwarded-For' }
+ .x_forwarded_host { 'X-Forwarded-Host' }
+ .x_forwarded_proto { 'X-Forwarded-Proto' }
+ .x_frame_options { 'X-Frame-Options' }
+ .x_xss_protection { 'X-XSS-Protection' }
+ }
+}
+
+const common_header_map = {
+ 'accept': CommonHeader.accept
+ 'accept-ch': .accept_ch
+ 'accept-charset': .accept_charset
+ 'accept-ch-lifetime': .accept_ch_lifetime
+ 'accept-encoding': .accept_encoding
+ 'accept-language': .accept_language
+ 'accept-patch': .accept_patch
+ 'accept-post': .accept_post
+ 'accept-ranges': .accept_ranges
+ 'access-control-allow-credentials': .access_control_allow_credentials
+ 'access-control-allow-headers': .access_control_allow_headers
+ 'access-control-allow-methods': .access_control_allow_methods
+ 'access-control-allow-origin': .access_control_allow_origin
+ 'access-control-expose-headers': .access_control_expose_headers
+ 'access-control-max-age': .access_control_max_age
+ 'access-control-request-headers': .access_control_request_headers
+ 'access-control-request-method': .access_control_request_method
+ 'age': .age
+ 'allow': .allow
+ 'alt-svc': .alt_svc
+ 'authorization': .authorization
+ 'cache-control': .cache_control
+ 'clear-site-data': .clear_site_data
+ 'connection': .connection
+ 'content-disposition': .content_disposition
+ 'content-encoding': .content_encoding
+ 'content-language': .content_language
+ 'content-length': .content_length
+ 'content-location': .content_location
+ 'content-range': .content_range
+ 'content-security-policy': .content_security_policy
+ 'content-security-policy-report-only': .content_security_policy_report_only
+ 'content-type': .content_type
+ 'cookie': .cookie
+ 'cross-origin-embedder-policy': .cross_origin_embedder_policy
+ 'cross-origin-opener-policy': .cross_origin_opener_policy
+ 'cross-origin-resource-policy': .cross_origin_resource_policy
+ 'date': .date
+ 'device-memory': .device_memory
+ 'digest': .digest
+ 'dnt': .dnt
+ 'early-data': .early_data
+ 'etag': .etag
+ 'expect': .expect
+ 'expect-ct': .expect_ct
+ 'expires': .expires
+ 'feature-policy': .feature_policy
+ 'forwarded': .forwarded
+ 'from': .from
+ 'host': .host
+ 'if-match': .if_match
+ 'if-modified-since': .if_modified_since
+ 'if-none-match': .if_none_match
+ 'if-range': .if_range
+ 'if-unmodified-since': .if_unmodified_since
+ 'index': .index
+ 'keep-alive': .keep_alive
+ 'large-allocation': .large_allocation
+ 'last-modified': .last_modified
+ 'link': .link
+ 'location': .location
+ 'nel': .nel
+ 'origin': .origin
+ 'pragma': .pragma
+ 'proxy-authenticate': .proxy_authenticate
+ 'proxy-authorization': .proxy_authorization
+ 'range': .range
+ 'referer': .referer
+ 'referrer-policy': .referrer_policy
+ 'retry-after': .retry_after
+ 'save-data': .save_data
+ 'sec-fetch-dest': .sec_fetch_dest
+ 'sec-fetch-mode': .sec_fetch_mode
+ 'sec-fetch-site': .sec_fetch_site
+ 'sec-fetch-user': .sec_fetch_user
+ 'sec-websocket-accept': .sec_websocket_accept
+ 'server': .server
+ 'server-timing': .server_timing
+ 'set-cookie': .set_cookie
+ 'sourcemap': .sourcemap
+ 'strict-transport-security': .strict_transport_security
+ 'te': .te
+ 'timing-allow-origin': .timing_allow_origin
+ 'tk': .tk
+ 'trailer': .trailer
+ 'transfer-encoding': .transfer_encoding
+ 'upgrade': .upgrade
+ 'upgrade-insecure-requests': .upgrade_insecure_requests
+ 'user-agent': .user_agent
+ 'vary': .vary
+ 'via': .via
+ 'want-digest': .want_digest
+ 'warning': .warning
+ 'www-authenticate': .www_authenticate
+ 'x-content-type-options': .x_content_type_options
+ 'x-dns-prefetch-control': .x_dns_prefetch_control
+ 'x-forwarded-for': .x_forwarded_for
+ 'x-forwarded-host': .x_forwarded_host
+ 'x-forwarded-proto': .x_forwarded_proto
+ 'x-frame-options': .x_frame_options
+ 'x-xss-protection': .x_xss_protection
+}
+
+// Header represents the key-value pairs in an HTTP header
+[noinit]
+pub struct Header {
+mut:
+ data map[string][]string
+ // map of lowercase header keys to their original keys
+ // in order of appearance
+ keys map[string][]string
+}
+
+pub fn (mut h Header) free() {
+ unsafe {
+ h.data.free()
+ h.keys.free()
+ }
+}
+
+pub struct HeaderConfig {
+ key CommonHeader
+ value string
+}
+
+// new_header creates a new Header object.
+pub fn new_header(kvs ...HeaderConfig) Header {
+ mut h := Header{
+ data: map[string][]string{}
+ }
+ for kv in kvs {
+ h.add(kv.key, kv.value)
+ }
+ return h
+}
+
+// new_header_from_map creates a Header from key value pairs
+pub fn new_header_from_map(kvs map[CommonHeader]string) Header {
+ mut h := new_header()
+ h.add_map(kvs)
+ return h
+}
+
+// new_custom_header_from_map creates a Header from string key value pairs
+pub fn new_custom_header_from_map(kvs map[string]string) ?Header {
+ mut h := new_header()
+ h.add_custom_map(kvs) ?
+ return h
+}
+
+// add appends a value to the header key.
+pub fn (mut h Header) add(key CommonHeader, value string) {
+ k := key.str()
+ h.data[k] << value
+ h.add_key(k)
+}
+
+// add_custom appends a value to a custom header key. This function will
+// return an error if the key contains invalid header characters.
+pub fn (mut h Header) add_custom(key string, value string) ? {
+ is_valid(key) ?
+ h.data[key] << value
+ h.add_key(key)
+}
+
+// add_map appends the value for each header key.
+pub fn (mut h Header) add_map(kvs map[CommonHeader]string) {
+ for k, v in kvs {
+ h.add(k, v)
+ }
+}
+
+// add_custom_map appends the value for each custom header key.
+pub fn (mut h Header) add_custom_map(kvs map[string]string) ? {
+ for k, v in kvs {
+ h.add_custom(k, v) ?
+ }
+}
+
+// set sets the key-value pair. This function will clear any other values
+// that exist for the CommonHeader.
+pub fn (mut h Header) set(key CommonHeader, value string) {
+ k := key.str()
+ h.data[k] = [value]
+ h.add_key(k)
+}
+
+// set_custom sets the key-value pair for a custom header key. This
+// function will clear any other values that exist for the header. This
+// function will return an error if the key contains invalid header
+// characters.
+pub fn (mut h Header) set_custom(key string, value string) ? {
+ is_valid(key) ?
+ h.data[key] = [value]
+ h.add_key(key)
+}
+
+// delete deletes all values for a key.
+pub fn (mut h Header) delete(key CommonHeader) {
+ h.delete_custom(key.str())
+}
+
+// delete_custom deletes all values for a custom header key.
+pub fn (mut h Header) delete_custom(key string) {
+ h.data.delete(key)
+
+ // remove key from keys metadata
+ kl := key.to_lower()
+ if kl in h.keys {
+ h.keys[kl] = h.keys[kl].filter(it != key)
+ }
+}
+
+pub struct HeaderCoerceConfig {
+ canonicalize bool
+}
+
+// coerce coerces data in the Header by joining keys that match
+// case-insensitively into one entry.
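+//
+// For example (see header_test.v), after add_custom('Hello', ...),
+// add_custom('hello', ...) and add_custom('HELLO', ...), coerce() keeps a single
+// 'Hello' key (the first occurrence) that holds all three values;
+// coerce(canonicalize: true) would use the canonical form of the key instead.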
+pub fn (mut h Header) coerce(flags ...HeaderCoerceConfig) {
+ canon := flags.any(it.canonicalize)
+
+ for kl, data_keys in h.keys {
+ master_key := if canon { canonicalize(kl) } else { data_keys[0] }
+
+ // save master data
+ master_data := h.data[master_key]
+ h.data.delete(master_key)
+
+ for key in data_keys {
+ if key == master_key {
+ h.data[master_key] << master_data
+ continue
+ }
+ h.data[master_key] << h.data[key]
+ h.data.delete(key)
+ }
+ h.keys[kl] = [master_key]
+ }
+}
+
+// contains returns whether the header key exists in the map.
+pub fn (h Header) contains(key CommonHeader) bool {
+ return h.contains_custom(key.str())
+}
+
+pub struct HeaderQueryConfig {
+ exact bool
+}
+
+// contains_custom returns whether the custom header key exists in the map.
+pub fn (h Header) contains_custom(key string, flags ...HeaderQueryConfig) bool {
+ if flags.any(it.exact) {
+ return key in h.data
+ }
+ return key.to_lower() in h.keys
+}
+
+// get gets the first value for the CommonHeader, or none if the key
+// does not exist.
+pub fn (h Header) get(key CommonHeader) ?string {
+ return h.get_custom(key.str())
+}
+
+// get_custom gets the first value for the custom header, or none if
+// the key does not exist.
+pub fn (h Header) get_custom(key string, flags ...HeaderQueryConfig) ?string {
+ mut data_key := key
+ if !flags.any(it.exact) {
+ // get the first key from key metadata
+ k := key.to_lower()
+ if h.keys[k].len == 0 {
+ return none
+ }
+ data_key = h.keys[k][0]
+ }
+ if h.data[data_key].len == 0 {
+ return none
+ }
+ return h.data[data_key][0]
+}
+
+// starting_with returns the first header key that starts with `key`,
+// or none if no such key exists.
+pub fn (h Header) starting_with(key string) ?string {
+ for k, _ in h.data {
+ if k.starts_with(key) {
+ return k
+ }
+ }
+ return none
+}
+
+// values gets all values for the CommonHeader.
+pub fn (h Header) values(key CommonHeader) []string {
+ return h.custom_values(key.str())
+}
+
+// custom_values gets all values for the custom header.
+pub fn (h Header) custom_values(key string, flags ...HeaderQueryConfig) []string {
+ if flags.any(it.exact) {
+ return h.data[key]
+ }
+ // case insensitive lookup
+ mut values := []string{cap: 10}
+ for k in h.keys[key.to_lower()] {
+ values << h.data[k]
+ }
+ return values
+}
+
+// keys gets all header keys as strings
+pub fn (h Header) keys() []string {
+ return h.data.keys()
+}
+
+pub struct HeaderRenderConfig {
+ version Version
+ coerce bool
+ canonicalize bool
+}
+
+// render renders the Header into a string for use in sending HTTP
+// requests. All header lines will end in `\r\n`
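+// With version: .v2_0 keys are rendered in lowercase (HTTP/2 requires lowercase
+// field names); otherwise the stored or canonicalized key casing is used.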
+[manualfree]
+pub fn (h Header) render(flags HeaderRenderConfig) string {
+ // estimate ~48 bytes per header
+ mut sb := strings.new_builder(h.data.len * 48)
+ if flags.coerce {
+ for kl, data_keys in h.keys {
+ key := if flags.version == .v2_0 {
+ kl
+ } else if flags.canonicalize {
+ canonicalize(kl)
+ } else {
+ data_keys[0]
+ }
+ for k in data_keys {
+ for v in h.data[k] {
+ sb.write_string(key)
+ sb.write_string(': ')
+ sb.write_string(v)
+ sb.write_string('\r\n')
+ }
+ }
+ }
+ } else {
+ for k, vs in h.data {
+ key := if flags.version == .v2_0 {
+ k.to_lower()
+ } else if flags.canonicalize {
+ canonicalize(k.to_lower())
+ } else {
+ k
+ }
+ for v in vs {
+ sb.write_string(key)
+ sb.write_string(': ')
+ sb.write_string(v)
+ sb.write_string('\r\n')
+ }
+ }
+ }
+ res := sb.str()
+ unsafe { sb.free() }
+ return res
+}
+
+// join combines two Header structs into a new Header struct
+pub fn (h Header) join(other Header) Header {
+ mut combined := Header{
+ data: h.data.clone()
+ keys: h.keys.clone()
+ }
+ for k in other.keys() {
+ for v in other.custom_values(k, exact: true) {
+ combined.add_custom(k, v) or {
+ // panic because this should never fail
+ panic('unexpected error: $err')
+ }
+ }
+ }
+ return combined
+}
+
+// canonicalize canonicalizes an HTTP header key
+// Common headers are determined by the common_header_map
+// Custom headers are capitalized on the first letter and any letter after a '-'
+// NOTE: Assumes sl is lowercase, since the caller usually already has the lowercase key
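+// For example, canonicalize('dnt') returns 'DNT' via common_header_map, while a
+// custom key like 'x-custom-key' (illustrative) becomes 'X-Custom-Key'.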
+fn canonicalize(sl string) string {
+ // check if we have a common header
+ if sl in http.common_header_map {
+ return http.common_header_map[sl].str()
+ }
+ return sl.split('-').map(it.capitalize()).join('-')
+}
+
+// add_key adds a key to the keys map if it is not already present.
+fn (mut h Header) add_key(key string) {
+ kl := key.to_lower()
+ if !h.keys[kl].contains(key) {
+ h.keys[kl] << key
+ }
+}
+
+// Custom error struct for invalid header tokens
+struct HeaderKeyError {
+ msg string
+ code int
+ header string
+ invalid_char byte
+}
+
+// is_valid checks if the header token contains all valid bytes
+fn is_valid(header string) ? {
+ for _, c in header {
+ if int(c) >= 128 || !is_token(c) {
+ return IError(HeaderKeyError{
+ msg: "Invalid header key: '$header'"
+ code: 1
+ header: header
+ invalid_char: c
+ })
+ }
+ }
+ if header.len == 0 {
+ return IError(HeaderKeyError{
+ msg: "Invalid header key: '$header'"
+ code: 2
+ header: header
+ invalid_char: 0
+ })
+ }
+}
+
+// is_token checks if the byte is valid for a header token
+fn is_token(b byte) bool {
+ return match b {
+ 33, 35...39, 42, 43, 45, 46, 48...57, 65...90, 94...122, 124, 126 { true }
+ else { false }
+ }
+}
+
+// str returns the headers string as seen in HTTP/1.1 requests.
+// Key order is not guaranteed.
+pub fn (h Header) str() string {
+ return h.render(version: .v1_1)
+}
+
+// parse_headers parses a newline delimited string into a Header struct
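+// Folded continuation lines (starting with a space or tab) are appended to the
+// previous header value: e.g. the lines 'Foo: bar' and ' baz' (illustrative)
+// parse as a single 'Foo' header with value 'bar baz'.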
+fn parse_headers(s string) ?Header {
+ mut h := new_header()
+ mut last_key := ''
+ mut last_value := ''
+ for line in s.split_into_lines() {
+ if line.len == 0 {
+ break
+ }
+ // handle header fold
+ if line[0] == ` ` || line[0] == `\t` {
+ last_value += ' ${line.trim(' \t')}'
+ continue
+ } else if last_key != '' {
+ h.add_custom(last_key, last_value) ?
+ }
+ last_key, last_value = parse_header(line) ?
+ }
+ h.add_custom(last_key, last_value) ?
+ return h
+}
+
+fn parse_header(s string) ?(string, string) {
+ if !s.contains(':') {
+ return error('missing colon in header')
+ }
+ words := s.split_nth(':', 2)
+ // TODO: parse quoted text according to the RFC
+ return words[0], words[1].trim(' \t')
+}
diff --git a/v_windows/v/vlib/net/http/header_test.v b/v_windows/v/vlib/net/http/header_test.v
new file mode 100644
index 0000000..4f5f2ce
--- /dev/null
+++ b/v_windows/v/vlib/net/http/header_test.v
@@ -0,0 +1,387 @@
+module http
+
+fn test_header_new() {
+ h := new_header(HeaderConfig{ key: .accept, value: 'nothing' },
+ key: .expires
+ value: 'yesterday'
+ )
+ assert h.contains(.accept)
+ assert h.contains(.expires)
+ accept := h.get(.accept) or { '' }
+ expires := h.get(.expires) or { '' }
+ assert accept == 'nothing'
+ assert expires == 'yesterday'
+}
+
+fn test_header_invalid_key() {
+ mut h := new_header()
+ h.add_custom('space is invalid', ':(') or { return }
+ panic('should have returned')
+}
+
+fn test_header_adds_multiple() {
+ mut h := new_header()
+ h.add(.accept, 'one')
+ h.add(.accept, 'two')
+
+ assert h.values(.accept) == ['one', 'two']
+}
+
+fn test_header_get() ? {
+ mut h := new_header(key: .dnt, value: 'one')
+ h.add_custom('dnt', 'two') ?
+ dnt := h.get_custom('dnt') or { '' }
+ exact := h.get_custom('dnt', exact: true) or { '' }
+ assert dnt == 'one'
+ assert exact == 'two'
+}
+
+fn test_header_set() ? {
+ mut h := new_header(HeaderConfig{ key: .dnt, value: 'one' },
+ key: .dnt
+ value: 'two'
+ )
+ assert h.values(.dnt) == ['one', 'two']
+ h.set_custom('DNT', 'three') ?
+ assert h.values(.dnt) == ['three']
+}
+
+fn test_header_delete() {
+ mut h := new_header(HeaderConfig{ key: .dnt, value: 'one' },
+ key: .dnt
+ value: 'two'
+ )
+ assert h.values(.dnt) == ['one', 'two']
+ h.delete(.dnt)
+ assert h.values(.dnt) == []
+}
+
+fn test_header_delete_not_existing() {
+ mut h := new_header()
+ assert h.data.len == 0
+ assert h.keys.len == 0
+ h.delete(.dnt)
+ assert h.data.len == 0
+ assert h.keys.len == 0
+}
+
+fn test_custom_header() ? {
+ mut h := new_header()
+ h.add_custom('AbC', 'dEf') ?
+ h.add_custom('aBc', 'GhI') ?
+ assert h.custom_values('AbC', exact: true) == ['dEf']
+ assert h.custom_values('aBc', exact: true) == ['GhI']
+ assert h.custom_values('ABC') == ['dEf', 'GhI']
+ assert h.custom_values('abc') == ['dEf', 'GhI']
+ assert h.keys() == ['AbC', 'aBc']
+ h.delete_custom('AbC')
+ h.delete_custom('aBc')
+
+ h.add_custom('abc', 'def') ?
+ assert h.custom_values('abc') == ['def']
+ assert h.custom_values('ABC') == ['def']
+ assert h.keys() == ['abc']
+ h.delete_custom('abc')
+
+ h.add_custom('accEPT', '*/*') ?
+ assert h.custom_values('ACCept') == ['*/*']
+ assert h.values(.accept) == ['*/*']
+ assert h.keys() == ['accEPT']
+}
+
+fn test_contains_custom() ? {
+ mut h := new_header()
+ h.add_custom('Hello', 'world') ?
+ assert h.contains_custom('hello')
+ assert h.contains_custom('HELLO')
+ assert h.contains_custom('Hello', exact: true)
+ assert h.contains_custom('hello', exact: true) == false
+ assert h.contains_custom('HELLO', exact: true) == false
+}
+
+fn test_get_custom() ? {
+ mut h := new_header()
+ h.add_custom('Hello', 'world') ?
+ assert h.get_custom('hello') ? == 'world'
+ assert h.get_custom('HELLO') ? == 'world'
+ assert h.get_custom('Hello', exact: true) ? == 'world'
+ if _ := h.get_custom('hello', exact: true) {
+ // should be none
+ assert false
+ }
+ if _ := h.get_custom('HELLO', exact: true) {
+ // should be none
+ assert false
+ }
+}
+
+fn test_starting_with() ? {
+ mut h := new_header()
+ h.add_custom('Hello-1', 'world') ?
+ h.add_custom('Hello-21', 'world') ?
+ assert h.starting_with('Hello-') ? == 'Hello-1'
+ assert h.starting_with('Hello-2') ? == 'Hello-21'
+}
+
+fn test_custom_values() ? {
+ mut h := new_header()
+ h.add_custom('Hello', 'world') ?
+ assert h.custom_values('hello') == ['world']
+ assert h.custom_values('HELLO') == ['world']
+ assert h.custom_values('Hello', exact: true) == ['world']
+ assert h.custom_values('hello', exact: true) == []
+ assert h.custom_values('HELLO', exact: true) == []
+}
+
+fn test_coerce() ? {
+ mut h := new_header()
+ h.add_custom('accept', 'foo') ?
+ h.add(.accept, 'bar')
+ assert h.values(.accept) == ['foo', 'bar']
+ assert h.keys().len == 2
+
+ h.coerce()
+ assert h.values(.accept) == ['foo', 'bar']
+ assert h.keys() == ['accept'] // takes the first occurrence
+}
+
+fn test_coerce_canonicalize() ? {
+ mut h := new_header()
+ h.add_custom('accept', 'foo') ?
+ h.add(.accept, 'bar')
+ assert h.values(.accept) == ['foo', 'bar']
+ assert h.keys().len == 2
+
+ h.coerce(canonicalize: true)
+ assert h.values(.accept) == ['foo', 'bar']
+ assert h.keys() == ['Accept'] // canonicalize header
+}
+
+fn test_coerce_custom() ? {
+ mut h := new_header()
+ h.add_custom('Hello', 'foo') ?
+ h.add_custom('hello', 'bar') ?
+ h.add_custom('HELLO', 'baz') ?
+ assert h.custom_values('hello') == ['foo', 'bar', 'baz']
+ assert h.keys().len == 3
+
+ h.coerce()
+ assert h.custom_values('hello') == ['foo', 'bar', 'baz']
+ assert h.keys() == ['Hello'] // takes the first occurrence
+}
+
+fn test_coerce_canonicalize_custom() ? {
+ mut h := new_header()
+ h.add_custom('foo-BAR', 'foo') ?
+ h.add_custom('FOO-bar', 'bar') ?
+ assert h.custom_values('foo-bar') == ['foo', 'bar']
+ assert h.keys().len == 2
+
+ h.coerce(canonicalize: true)
+ assert h.custom_values('foo-bar') == ['foo', 'bar']
+ assert h.keys() == ['Foo-Bar'] // capitalizes the header
+}
+
+fn test_render_version() ? {
+ mut h := new_header()
+ h.add_custom('accept', 'foo') ?
+ h.add_custom('Accept', 'bar') ?
+ h.add(.accept, 'baz')
+
+ s1_0 := h.render(version: .v1_0)
+ assert s1_0.contains('accept: foo\r\n')
+ assert s1_0.contains('Accept: bar\r\n')
+ assert s1_0.contains('Accept: baz\r\n')
+
+ s1_1 := h.render(version: .v1_1)
+ assert s1_1.contains('accept: foo\r\n')
+ assert s1_1.contains('Accept: bar\r\n')
+ assert s1_1.contains('Accept: baz\r\n')
+
+ s2_0 := h.render(version: .v2_0)
+ assert s2_0.contains('accept: foo\r\n')
+ assert s2_0.contains('accept: bar\r\n')
+ assert s2_0.contains('accept: baz\r\n')
+}
+
+fn test_render_coerce() ? {
+ mut h := new_header()
+ h.add_custom('accept', 'foo') ?
+ h.add_custom('Accept', 'bar') ?
+ h.add(.accept, 'baz')
+ h.add(.host, 'host')
+
+ s1_0 := h.render(version: .v1_1, coerce: true)
+ assert s1_0.contains('accept: foo\r\n')
+ assert s1_0.contains('accept: bar\r\n')
+ assert s1_0.contains('accept: baz\r\n')
+ assert s1_0.contains('Host: host\r\n')
+
+ s1_1 := h.render(version: .v1_1, coerce: true)
+ assert s1_1.contains('accept: foo\r\n')
+ assert s1_1.contains('accept: bar\r\n')
+ assert s1_1.contains('accept: baz\r\n')
+ assert s1_1.contains('Host: host\r\n')
+
+ s2_0 := h.render(version: .v2_0, coerce: true)
+ assert s2_0.contains('accept: foo\r\n')
+ assert s2_0.contains('accept: bar\r\n')
+ assert s2_0.contains('accept: baz\r\n')
+ assert s2_0.contains('host: host\r\n')
+}
+
+fn test_render_canonicalize() ? {
+ mut h := new_header()
+ h.add_custom('accept', 'foo') ?
+ h.add_custom('Accept', 'bar') ?
+ h.add(.accept, 'baz')
+ h.add(.host, 'host')
+
+ s1_0 := h.render(version: .v1_1, canonicalize: true)
+ assert s1_0.contains('Accept: foo\r\n')
+ assert s1_0.contains('Accept: bar\r\n')
+ assert s1_0.contains('Accept: baz\r\n')
+ assert s1_0.contains('Host: host\r\n')
+
+ s1_1 := h.render(version: .v1_1, canonicalize: true)
+ assert s1_1.contains('Accept: foo\r\n')
+ assert s1_1.contains('Accept: bar\r\n')
+ assert s1_1.contains('Accept: baz\r\n')
+ assert s1_1.contains('Host: host\r\n')
+
+ s2_0 := h.render(version: .v2_0, canonicalize: true)
+ assert s2_0.contains('accept: foo\r\n')
+ assert s2_0.contains('accept: bar\r\n')
+ assert s2_0.contains('accept: baz\r\n')
+ assert s2_0.contains('host: host\r\n')
+}
+
+fn test_render_coerce_canonicalize() ? {
+ mut h := new_header()
+ h.add_custom('accept', 'foo') ?
+ h.add_custom('Accept', 'bar') ?
+ h.add(.accept, 'baz')
+ h.add(.host, 'host')
+
+ s1_0 := h.render(version: .v1_1, coerce: true, canonicalize: true)
+ assert s1_0.contains('Accept: foo\r\n')
+ assert s1_0.contains('Accept: bar\r\n')
+ assert s1_0.contains('Accept: baz\r\n')
+ assert s1_0.contains('Host: host\r\n')
+
+ s1_1 := h.render(version: .v1_1, coerce: true, canonicalize: true)
+ assert s1_1.contains('Accept: foo\r\n')
+ assert s1_1.contains('Accept: bar\r\n')
+ assert s1_1.contains('Accept: baz\r\n')
+ assert s1_1.contains('Host: host\r\n')
+
+ s2_0 := h.render(version: .v2_0, coerce: true, canonicalize: true)
+ assert s2_0.contains('accept: foo\r\n')
+ assert s2_0.contains('accept: bar\r\n')
+ assert s2_0.contains('accept: baz\r\n')
+ assert s2_0.contains('host: host\r\n')
+}
+
+fn test_str() ? {
+ mut h := new_header()
+ h.add(.accept, 'text/html')
+ h.add_custom('Accept', 'image/jpeg') ?
+ h.add_custom('X-custom', 'Hello') ?
+
+ // key order is not guaranteed
+ assert h.str() == 'Accept: text/html\r\nAccept: image/jpeg\r\nX-custom: Hello\r\n'
+ || h.str() == 'X-custom: Hello\r\nAccept: text/html\r\nAccept: image/jpeg\r\n'
+}
+
+fn test_header_from_map() ? {
+ h := new_header_from_map({
+ CommonHeader.accept: 'nothing'
+ CommonHeader.expires: 'yesterday'
+ })
+ assert h.contains(.accept)
+ assert h.contains(.expires)
+ assert h.get(.accept) or { '' } == 'nothing'
+ assert h.get(.expires) or { '' } == 'yesterday'
+}
+
+fn test_custom_header_from_map() ? {
+ h := new_custom_header_from_map({
+ 'Server': 'VWeb'
+ 'foo': 'bar'
+ }) ?
+ assert h.contains_custom('server')
+ assert h.contains_custom('foo')
+ assert h.get_custom('server') or { '' } == 'VWeb'
+ assert h.get_custom('foo') or { '' } == 'bar'
+}
+
+fn test_header_join() ? {
+ h1 := new_header_from_map({
+ CommonHeader.accept: 'nothing'
+ CommonHeader.expires: 'yesterday'
+ })
+ h2 := new_custom_header_from_map({
+ 'Server': 'VWeb'
+ 'foo': 'bar'
+ }) ?
+ h3 := h1.join(h2)
+ // h1 is unchanged
+ assert h1.contains(.accept)
+ assert h1.contains(.expires)
+ assert !h1.contains_custom('Server')
+ assert !h1.contains_custom('foo')
+ // h2 is unchanged
+ assert !h2.contains(.accept)
+ assert !h2.contains(.expires)
+ assert h2.contains_custom('Server')
+ assert h2.contains_custom('foo')
+ // h3 has all four headers
+ assert h3.contains(.accept)
+ assert h3.contains(.expires)
+ assert h3.contains_custom('Server')
+ assert h3.contains_custom('foo')
+}
+
+fn parse_headers_test(s string, expected map[string]string) ? {
+ assert parse_headers(s) ? == new_custom_header_from_map(expected) ?
+}
+
+fn test_parse_headers() ? {
+ parse_headers_test('foo: bar', {
+ 'foo': 'bar'
+ }) ?
+ parse_headers_test('foo: \t bar', {
+ 'foo': 'bar'
+ }) ?
+ parse_headers_test('foo: bar\r\n\tbaz', {
+ 'foo': 'bar baz'
+ }) ?
+ parse_headers_test('foo: bar \r\n\tbaz\r\n buzz', {
+ 'foo': 'bar baz buzz'
+ }) ?
+ parse_headers_test('foo: bar\r\nbar:baz', {
+ 'foo': 'bar'
+ 'bar': 'baz'
+ }) ?
+ parse_headers_test('foo: bar\r\nbar:baz\r\n', {
+ 'foo': 'bar'
+ 'bar': 'baz'
+ }) ?
+ parse_headers_test('foo: bar\r\nbar:baz\r\n\r\n', {
+ 'foo': 'bar'
+ 'bar': 'baz'
+ }) ?
+ assert parse_headers('foo: bar\r\nfoo:baz') ?.custom_values('foo') == ['bar', 'baz']
+
+ if x := parse_headers(' oops: oh no') {
+ return error('should have errored, but got $x')
+ }
+}
+
+fn test_set_cookie() {
+ // multiple Set-Cookie headers should be sent when rendered
+ mut h := new_header()
+ h.add(.set_cookie, 'foo')
+ h.add(.set_cookie, 'bar')
+ assert h.render() == 'Set-Cookie: foo\r\nSet-Cookie: bar\r\n'
+}
diff --git a/v_windows/v/vlib/net/http/http.v b/v_windows/v/vlib/net/http/http.v
new file mode 100644
index 0000000..7bdc5e2
--- /dev/null
+++ b/v_windows/v/vlib/net/http/http.v
@@ -0,0 +1,186 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import net.urllib
+
+const (
+ max_redirects = 4
+ content_type_default = 'text/plain'
+ bufsize = 1536
+)
+
+// FetchConfig holds configurations of fetch
+pub struct FetchConfig {
+pub mut:
+ url string
+ method Method
+ header Header
+ data string
+ params map[string]string
+ cookies map[string]string
+ user_agent string = 'v.http'
+ verbose bool
+}
+
+pub fn new_request(method Method, url_ string, data string) ?Request {
+ url := if method == .get { url_ + '?' + data } else { url_ }
+ // println('new req() method=$method url="$url" dta="$data"')
+ return Request{
+ method: method
+ url: url
+ data: data
+ /*
+ headers: {
+ 'Accept-Encoding': 'compress'
+ }
+ */
+ }
+}
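+
+// A minimal usage sketch (the URL and query string below are illustrative only):
+// for GET requests the data string is appended to the URL as a query string.
+//
+//   req := http.new_request(.get, 'https://example.com/search', 'q=vlang') or { panic(err) }
+//   assert req.url == 'https://example.com/search?q=vlang'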
+
+// get sends a GET HTTP request to the URL
+pub fn get(url string) ?Response {
+ return fetch(method: .get, url: url)
+}
+
+// post sends a POST HTTP request to the URL with the given string data
+pub fn post(url string, data string) ?Response {
+ return fetch(
+ method: .post
+ url: url
+ data: data
+ header: new_header(key: .content_type, value: http.content_type_default)
+ )
+}
+
+// post_json sends a POST HTTP request to the URL with the given JSON data
+pub fn post_json(url string, data string) ?Response {
+ return fetch(
+ method: .post
+ url: url
+ data: data
+ header: new_header(key: .content_type, value: 'application/json')
+ )
+}
+
+// post_form sends a POST HTTP request to the URL with X-WWW-FORM-URLENCODED data
+pub fn post_form(url string, data map[string]string) ?Response {
+ return fetch(
+ method: .post
+ url: url
+ header: new_header(key: .content_type, value: 'application/x-www-form-urlencoded')
+ data: url_encode_form_data(data)
+ )
+}
+
+// put sends a PUT HTTP request to the URL with the given string data
+pub fn put(url string, data string) ?Response {
+ return fetch(
+ method: .put
+ url: url
+ data: data
+ header: new_header(key: .content_type, value: http.content_type_default)
+ )
+}
+
+// patch sends a PATCH HTTP request to the URL with the given string data
+pub fn patch(url string, data string) ?Response {
+ return fetch(
+ method: .patch
+ url: url
+ data: data
+ header: new_header(key: .content_type, value: http.content_type_default)
+ )
+}
+
+// head sends a HEAD HTTP request to the URL
+pub fn head(url string) ?Response {
+ return fetch(method: .head, url: url)
+}
+
+// delete sends a DELETE HTTP request to the URL
+pub fn delete(url string) ?Response {
+ return fetch(method: .delete, url: url)
+}
+
+// fetch sends an HTTP request to the URL with the given method and configurations
+pub fn fetch(config FetchConfig) ?Response {
+ if config.url == '' {
+ return error('http.fetch: empty url')
+ }
+ url := build_url_from_fetch(config) or { return error('http.fetch: invalid url $config.url') }
+ req := Request{
+ method: config.method
+ url: url
+ data: config.data
+ header: config.header
+ cookies: config.cookies
+ user_agent: config.user_agent
+ user_ptr: 0
+ verbose: config.verbose
+ }
+ res := req.do() ?
+ return res
+}
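+
+// A minimal fetch sketch (URL and parameters are illustrative only); params are
+// appended to the query string by build_url_from_fetch below.
+//
+//   resp := http.fetch(
+//       url: 'https://example.com/api'
+//       method: .get
+//       params: {
+//           'q': 'vlang'
+//       }
+//   ) or { panic(err) }
+//   println(resp.status())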
+
+// get_text sends a GET HTTP request to the URL and returns the text content of the response
+pub fn get_text(url string) string {
+ resp := fetch(url: url, method: .get) or { return '' }
+ return resp.text
+}
+
+// url_encode_form_data converts the given map of form fields into a URL-encoded string
+pub fn url_encode_form_data(data map[string]string) string {
+ mut pieces := []string{}
+ for key_, value_ in data {
+ key := urllib.query_escape(key_)
+ value := urllib.query_escape(value_)
+ pieces << '$key=$value'
+ }
+ return pieces.join('&')
+}
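+
+// For example (illustrative values), url_encode_form_data({'foo': 'a&b', 'bar': 'baz'})
+// should return 'foo=a%26b&bar=baz'; keys keep their map insertion order.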
+
+[deprecated: 'use fetch()']
+fn fetch_with_method(method Method, _config FetchConfig) ?Response {
+ mut config := _config
+ config.method = method
+ return fetch(config)
+}
+
+fn build_url_from_fetch(config FetchConfig) ?string {
+ mut url := urllib.parse(config.url) ?
+ if config.params.len == 0 {
+ return url.str()
+ }
+ mut pieces := []string{cap: config.params.len}
+ for key, val in config.params {
+ pieces << '$key=$val'
+ }
+ mut query := pieces.join('&')
+ if url.raw_query.len > 1 {
+ query = url.raw_query + '&' + query
+ }
+ url.raw_query = query
+ return url.str()
+}
+
+// unescape_url is deprecated, use urllib.query_unescape() instead
+pub fn unescape_url(s string) string {
+ panic('http.unescape_url() was replaced with urllib.query_unescape()')
+}
+
+// escape_url is deprecated, use urllib.query_escape() instead
+pub fn escape_url(s string) string {
+ panic('http.escape_url() was replaced with urllib.query_escape()')
+}
+
+// unescape is deprecated, use urllib.query_unescape() instead
+pub fn unescape(s string) string {
+ panic('http.unescape() was replaced with http.unescape_url()')
+}
+
+// escape is deprecated, use urllib.query_escape() instead
+pub fn escape(s string) string {
+ panic('http.escape() was replaced with http.escape_url()')
+}
diff --git a/v_windows/v/vlib/net/http/http_httpbin_test.v b/v_windows/v/vlib/net/http/http_httpbin_test.v
new file mode 100644
index 0000000..a3ddccc
--- /dev/null
+++ b/v_windows/v/vlib/net/http/http_httpbin_test.v
@@ -0,0 +1,95 @@
+module http
+
+// internal tests have access to *everything in the module*
+import json
+
+struct HttpbinResponseBody {
+ args map[string]string
+ data string
+ files map[string]string
+ form map[string]string
+ headers map[string]string
+ json map[string]string
+ origin string
+ url string
+}
+
+fn http_fetch_mock(_methods []string, _config FetchConfig) ?[]Response {
+ url := 'https://httpbin.org/'
+ methods := if _methods.len == 0 { ['GET', 'POST', 'PATCH', 'PUT', 'DELETE'] } else { _methods }
+ mut config := _config
+ mut result := []Response{}
+ // Note: httpbin doesn't support head
+ for method in methods {
+ lmethod := method.to_lower()
+ config.method = method_from_str(method)
+ res := fetch(FetchConfig{ ...config, url: url + lmethod }) ?
+ // TODO
+ // body := json.decode(HttpbinResponseBody,res.text)?
+ result << res
+ }
+ return result
+}
+
+fn test_http_fetch_bare() {
+ $if !network ? {
+ return
+ }
+ responses := http_fetch_mock([], FetchConfig{}) or { panic(err) }
+ for response in responses {
+ assert response.status() == .ok
+ }
+}
+
+fn test_http_fetch_with_data() {
+ $if !network ? {
+ return
+ }
+ responses := http_fetch_mock(['POST', 'PUT', 'PATCH', 'DELETE'],
+ data: 'hello world'
+ ) or { panic(err) }
+ for response in responses {
+ payload := json.decode(HttpbinResponseBody, response.text) or { panic(err) }
+ assert payload.data == 'hello world'
+ }
+}
+
+fn test_http_fetch_with_params() {
+ $if !network ? {
+ return
+ }
+ responses := http_fetch_mock([],
+ params: {
+ 'a': 'b'
+ 'c': 'd'
+ }
+ ) or { panic(err) }
+ for response in responses {
+ // payload := json.decode(HttpbinResponseBody,response.text) or {
+ // panic(err)
+ // }
+ assert response.status() == .ok
+ // TODO
+ // assert payload.args['a'] == 'b'
+ // assert payload.args['c'] == 'd'
+ }
+}
+
+fn test_http_fetch_with_headers() ? {
+ $if !network ? {
+ return
+ }
+ mut header := new_header()
+ header.add_custom('Test-Header', 'hello world') ?
+ responses := http_fetch_mock([],
+ header: header
+ ) or { panic(err) }
+ for response in responses {
+ // payload := json.decode(HttpbinResponseBody,response.text) or {
+ // panic(err)
+ // }
+ assert response.status() == .ok
+ // TODO
+ // assert payload.headers['Test-Header'] == 'hello world'
+ }
+}
diff --git a/v_windows/v/vlib/net/http/http_test.v b/v_windows/v/vlib/net/http/http_test.v
new file mode 100644
index 0000000..8b68073
--- /dev/null
+++ b/v_windows/v/vlib/net/http/http_test.v
@@ -0,0 +1,56 @@
+import net.http
+
+fn test_http_get() {
+ $if !network ? {
+ return
+ }
+ assert http.get_text('https://vlang.io/version') == '0.1.5'
+ println('http ok')
+}
+
+fn test_http_get_from_vlang_utc_now() {
+ $if !network ? {
+ return
+ }
+ urls := ['http://vlang.io/utc_now', 'https://vlang.io/utc_now']
+ for url in urls {
+ println('Test getting current time from $url by http.get')
+ res := http.get(url) or { panic(err) }
+ assert res.status() == .ok
+ assert res.text.len > 0
+ assert res.text.int() > 1566403696
+ println('Current time is: $res.text.int()')
+ }
+}
+
+fn test_public_servers() {
+ $if !network ? {
+ return
+ }
+ urls := [
+ 'http://github.com/robots.txt',
+ 'http://google.com/robots.txt',
+ 'https://github.com/robots.txt',
+ 'https://google.com/robots.txt',
+ // 'http://yahoo.com/robots.txt',
+ // 'https://yahoo.com/robots.txt',
+ ]
+ for url in urls {
+ println('Testing http.get on public url: $url ')
+ res := http.get(url) or { panic(err) }
+ assert res.status() == .ok
+ assert res.text.len > 0
+ }
+}
+
+fn test_relative_redirects() {
+ $if !network ? {
+ return
+ } $else {
+ return
+ } // tempfix periodic: httpbin relative redirects are broken
+ res := http.get('https://httpbin.org/relative-redirect/3?abc=xyz') or { panic(err) }
+ assert res.status() == .ok
+ assert res.text.len > 0
+ assert res.text.contains('"abc": "xyz"')
+}
diff --git a/v_windows/v/vlib/net/http/method.v b/v_windows/v/vlib/net/http/method.v
new file mode 100644
index 0000000..91c93e1
--- /dev/null
+++ b/v_windows/v/vlib/net/http/method.v
@@ -0,0 +1,48 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+// The methods listed here are some of the most used ones, ordered by
+// commonality. A comprehensive list is available at:
+// https://www.iana.org/assignments/http-methods/http-methods.xhtml
+pub enum Method {
+ get
+ post
+ put
+ head
+ delete
+ options
+ trace
+ connect
+ patch
+}
+
+pub fn (m Method) str() string {
+ return match m {
+ .get { 'GET' }
+ .post { 'POST' }
+ .put { 'PUT' }
+ .head { 'HEAD' }
+ .delete { 'DELETE' }
+ .options { 'OPTIONS' }
+ .trace { 'TRACE' }
+ .connect { 'CONNECT' }
+ .patch { 'PATCH' }
+ }
+}
+
+pub fn method_from_str(m string) Method {
+ return match m {
+ 'GET' { Method.get }
+ 'POST' { Method.post }
+ 'PUT' { Method.put }
+ 'HEAD' { Method.head }
+ 'DELETE' { Method.delete }
+ 'OPTIONS' { Method.options }
+ 'TRACE' { Method.trace }
+ 'CONNECT' { Method.connect }
+ 'PATCH' { Method.patch }
+ else { Method.get } // should we default to GET?
+ }
+}
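+
+// Usage sketch: method_from_str('POST') == .post, while an unrecognized string
+// such as 'FOO' falls back to .get via the else branch above.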
diff --git a/v_windows/v/vlib/net/http/request.v b/v_windows/v/vlib/net/http/request.v
new file mode 100644
index 0000000..4664659
--- /dev/null
+++ b/v_windows/v/vlib/net/http/request.v
@@ -0,0 +1,324 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import io
+import net
+import net.urllib
+import strings
+import time
+
+// Request holds information about an HTTP request (either received by
+// a server or to be sent by a client)
+pub struct Request {
+pub mut:
+ version Version = .v1_1
+ method Method
+ header Header
+ cookies map[string]string
+ data string
+ url string
+ user_agent string = 'v.http'
+ verbose bool
+ user_ptr voidptr
+ // NOT implemented for ssl connections
+ // time = -1 for no timeout
+ read_timeout i64 = 30 * time.second
+ write_timeout i64 = 30 * time.second
+}
+
+fn (mut req Request) free() {
+ unsafe { req.header.free() }
+}
+
+// add_header adds the key and value of an HTTP request header
+// To add a custom header, use add_custom_header
+pub fn (mut req Request) add_header(key CommonHeader, val string) {
+ req.header.add(key, val)
+}
+
+// add_custom_header adds the key and value of an HTTP request header
+// This method may fail if the key contains characters that are not permitted
+pub fn (mut req Request) add_custom_header(key string, val string) ? {
+ return req.header.add_custom(key, val)
+}
+
+// do sends the HTTP request and returns `http.Response` as soon as the response is received
+pub fn (req &Request) do() ?Response {
+ mut url := urllib.parse(req.url) or { return error('http.Request.do: invalid url $req.url') }
+ mut rurl := url
+ mut resp := Response{}
+ mut no_redirects := 0
+ for {
+ if no_redirects == max_redirects {
+ return error('http.request.do: maximum number of redirects reached ($max_redirects)')
+ }
+ qresp := req.method_and_url_to_response(req.method, rurl) ?
+ resp = qresp
+ if resp.status() !in [.moved_permanently, .found, .see_other, .temporary_redirect,
+ .permanent_redirect,
+ ] {
+ break
+ }
+ // follow any redirects
+ mut redirect_url := resp.header.get(.location) or { '' }
+ if redirect_url.len > 0 && redirect_url[0] == `/` {
+ url.set_path(redirect_url) or {
+ return error('http.request.do: invalid path in redirect: "$redirect_url"')
+ }
+ redirect_url = url.str()
+ }
+ qrurl := urllib.parse(redirect_url) or {
+ return error('http.request.do: invalid URL in redirect "$redirect_url"')
+ }
+ rurl = qrurl
+ no_redirects++
+ }
+ return resp
+}
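+
+// A minimal client sketch using do() directly (the URL is illustrative only):
+//
+//   req := http.Request{
+//       method: .get
+//       url: 'http://example.com/'
+//   }
+//   resp := req.do() or { panic(err) }
+//   println(resp.status_code)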
+
+fn (req &Request) method_and_url_to_response(method Method, url urllib.URL) ?Response {
+ host_name := url.hostname()
+ scheme := url.scheme
+ p := url.escaped_path().trim_left('/')
+ path := if url.query().len > 0 { '/$p?$url.query().encode()' } else { '/$p' }
+ mut nport := url.port().int()
+ if nport == 0 {
+ if scheme == 'http' {
+ nport = 80
+ }
+ if scheme == 'https' {
+ nport = 443
+ }
+ }
+ // println('fetch $method, $scheme, $host_name, $nport, $path ')
+ if scheme == 'https' {
+ // println('ssl_do( $nport, $method, $host_name, $path )')
+ res := req.ssl_do(nport, method, host_name, path) ?
+ return res
+ } else if scheme == 'http' {
+ // println('http_do( $nport, $method, $host_name, $path )')
+ res := req.http_do('$host_name:$nport', method, path) ?
+ return res
+ }
+ return error('http.request.method_and_url_to_response: unsupported scheme: "$scheme"')
+}
+
+fn (req &Request) build_request_headers(method Method, host_name string, path string) string {
+ ua := req.user_agent
+ mut uheaders := []string{}
+ if !req.header.contains(.host) {
+ uheaders << 'Host: $host_name\r\n'
+ }
+ if !req.header.contains(.user_agent) {
+ uheaders << 'User-Agent: $ua\r\n'
+ }
+ if req.data.len > 0 && !req.header.contains(.content_length) {
+ uheaders << 'Content-Length: $req.data.len\r\n'
+ }
+ for key in req.header.keys() {
+ if key == CommonHeader.cookie.str() {
+ continue
+ }
+ val := req.header.custom_values(key).join('; ')
+ uheaders << '$key: $val\r\n'
+ }
+ uheaders << req.build_request_cookies_header()
+ version := if req.version == .unknown { Version.v1_1 } else { req.version }
+ return '$method $path $version\r\n' + uheaders.join('') + 'Connection: close\r\n\r\n' + req.data
+}
+
+fn (req &Request) build_request_cookies_header() string {
+ if req.cookies.keys().len < 1 {
+ return ''
+ }
+ mut cookie := []string{}
+ for key, val in req.cookies {
+ cookie << '$key=$val'
+ }
+ cookie << req.header.values(.cookie)
+ return 'Cookie: ' + cookie.join('; ') + '\r\n'
+}
+
+fn (req &Request) http_do(host string, method Method, path string) ?Response {
+ host_name, _ := net.split_address(host) ?
+ s := req.build_request_headers(method, host_name, path)
+ mut client := net.dial_tcp(host) ?
+ client.set_read_timeout(req.read_timeout)
+ client.set_write_timeout(req.write_timeout)
+ // TODO this really needs to be exposed somehow
+ client.write(s.bytes()) ?
+ $if trace_http_request ? {
+ eprintln('> $s')
+ }
+ mut bytes := io.read_all(reader: client) ?
+ client.close() ?
+ response_text := bytes.bytestr()
+ $if trace_http_response ? {
+ eprintln('< $response_text')
+ }
+ return parse_response(response_text)
+}
+
+// referer returns 'Referer' header value of the given request
+pub fn (req &Request) referer() string {
+ return req.header.get(.referer) or { '' }
+}
+
+// Parse a raw HTTP request into a Request object
+pub fn parse_request(mut reader io.BufferedReader) ?Request {
+ // request line
+ mut line := reader.read_line() ?
+ method, target, version := parse_request_line(line) ?
+
+ // headers
+ mut header := new_header()
+ line = reader.read_line() ?
+ for line != '' {
+ key, value := parse_header(line) ?
+ header.add_custom(key, value) ?
+ line = reader.read_line() ?
+ }
+ header.coerce(canonicalize: true)
+
+ // body
+ mut body := []byte{}
+ if length := header.get(.content_length) {
+ n := length.int()
+ if n > 0 {
+ body = []byte{len: n}
+ mut count := 0
+ for count < body.len {
+ count += reader.read(mut body[count..]) or { break }
+ }
+ }
+ }
+
+ return Request{
+ method: method
+ url: target.str()
+ header: header
+ data: body.bytestr()
+ version: version
+ }
+}
+
+fn parse_request_line(s string) ?(Method, urllib.URL, Version) {
+ words := s.split(' ')
+ if words.len != 3 {
+ return error('malformed request line')
+ }
+ method := method_from_str(words[0])
+ target := urllib.parse(words[1]) ?
+ version := version_from_str(words[2])
+ if version == .unknown {
+ return error('unsupported version')
+ }
+
+ return method, target, version
+}
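+
+// For example, parse_request_line('GET /index.html HTTP/1.1') yields
+// (.get, a URL whose str() is '/index.html', .v1_1); a line that does not have
+// exactly three tokens, or that names an unknown version, returns an error.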
+
+// Parse URL encoded key=value&key=value forms
+fn parse_form(body string) map[string]string {
+ words := body.split('&')
+ mut form := map[string]string{}
+ for word in words {
+ kv := word.split_nth('=', 2)
+ if kv.len != 2 {
+ continue
+ }
+ key := urllib.query_unescape(kv[0]) or { continue }
+ val := urllib.query_unescape(kv[1]) or { continue }
+ form[key] = val
+ }
+ return form
+ // TODO: also parse multipart/form-data and application/json bodies
+}
+
+struct FileData {
+pub:
+ filename string
+ content_type string
+ data string
+}
+
+struct UnexpectedExtraAttributeError {
+ msg string
+ code int
+}
+
+struct MultiplePathAttributesError {
+ msg string = 'Expected at most one path attribute'
+ code int
+}
+
+fn parse_multipart_form(body string, boundary string) (map[string]string, map[string][]FileData) {
+ sections := body.split(boundary)
+ fields := sections[1..sections.len - 1]
+ mut form := map[string]string{}
+ mut files := map[string][]FileData{}
+
+ for field in fields {
+ // TODO: do not split into lines; do same parsing for HTTP body
+ lines := field.split_into_lines()[1..]
+ disposition := parse_disposition(lines[0])
+ // Grab everything between the double quotes
+ name := disposition['name'] or { continue }
+ // Parse files
+ // TODO: filename*
+ if 'filename' in disposition {
+ filename := disposition['filename']
+ // Parse Content-Type header
+ if lines.len == 1 || !lines[1].to_lower().starts_with('content-type:') {
+ continue
+ }
+ mut ct := lines[1].split_nth(':', 2)[1]
+ ct = ct.trim_left(' \t')
+ data := lines_to_string(field.len, lines, 3, lines.len - 1)
+ files[name] << FileData{
+ filename: filename
+ content_type: ct
+ data: data
+ }
+ continue
+ }
+ data := lines_to_string(field.len, lines, 2, lines.len - 1)
+ form[name] = data
+ }
+ return form, files
+}
+
+// Parse the Content-Disposition header of a multipart form
+// Returns a map of the key="value" pairs
+// Example: parse_disposition('Content-Disposition: form-data; name="a"; filename="b"') == {'name': 'a', 'filename': 'b'}
+fn parse_disposition(line string) map[string]string {
+ mut data := map[string]string{}
+ for word in line.split(';') {
+ kv := word.split_nth('=', 2)
+ if kv.len != 2 {
+ continue
+ }
+ key, value := kv[0].to_lower().trim_left(' \t'), kv[1]
+ if value.starts_with('"') && value.ends_with('"') {
+ data[key] = value[1..value.len - 1]
+ } else {
+ data[key] = value
+ }
+ }
+ return data
+}
+
+[manualfree]
+fn lines_to_string(len int, lines []string, start int, end int) string {
+ mut sb := strings.new_builder(len)
+ for i in start .. end {
+ sb.writeln(lines[i])
+ }
+ sb.cut_last(1) // last newline
+ res := sb.str()
+ unsafe { sb.free() }
+ return res
+}
diff --git a/v_windows/v/vlib/net/http/request_test.v b/v_windows/v/vlib/net/http/request_test.v
new file mode 100644
index 0000000..3950ad8
--- /dev/null
+++ b/v_windows/v/vlib/net/http/request_test.v
@@ -0,0 +1,138 @@
+module http
+
+import io
+
+struct StringReader {
+ text string
+mut:
+ place int
+}
+
+fn (mut s StringReader) read(mut buf []byte) ?int {
+ if s.place >= s.text.len {
+ return none
+ }
+ max_bytes := 100
+ end := if s.place + max_bytes >= s.text.len { s.text.len } else { s.place + max_bytes }
+ n := copy(buf, s.text[s.place..end].bytes())
+ s.place += n
+ return n
+}
+
+fn reader(s string) &io.BufferedReader {
+ return io.new_buffered_reader(
+ reader: &StringReader{
+ text: s
+ }
+ )
+}
+
+fn test_parse_request_not_http() {
+ mut reader__ := reader('hello')
+ parse_request(mut reader__) or { return }
+ panic('should not have parsed')
+}
+
+fn test_parse_request_no_headers() {
+ mut reader_ := reader('GET / HTTP/1.1\r\n\r\n')
+ req := parse_request(mut reader_) or { panic('did not parse: $err') }
+ assert req.method == .get
+ assert req.url == '/'
+ assert req.version == .v1_1
+}
+
+fn test_parse_request_two_headers() {
+ mut reader_ := reader('GET / HTTP/1.1\r\nTest1: a\r\nTest2: B\r\n\r\n')
+ req := parse_request(mut reader_) or { panic('did not parse: $err') }
+ assert req.header.custom_values('Test1') == ['a']
+ assert req.header.custom_values('Test2') == ['B']
+}
+
+fn test_parse_request_two_header_values() {
+ mut reader_ := reader('GET / HTTP/1.1\r\nTest1: a; b\r\nTest2: c\r\nTest2: d\r\n\r\n')
+ req := parse_request(mut reader_) or { panic('did not parse: $err') }
+ assert req.header.custom_values('Test1') == ['a; b']
+ assert req.header.custom_values('Test2') == ['c', 'd']
+}
+
+fn test_parse_request_body() {
+ mut reader_ := reader('GET / HTTP/1.1\r\nTest1: a\r\nTest2: b\r\nContent-Length: 4\r\n\r\nbodyabc')
+ req := parse_request(mut reader_) or { panic('did not parse: $err') }
+ assert req.data == 'body'
+}
+
+fn test_parse_request_line() {
+ method, target, version := parse_request_line('GET /target HTTP/1.1') or {
+ panic('did not parse: $err')
+ }
+ assert method == .get
+ assert target.str() == '/target'
+ assert version == .v1_1
+}
+
+fn test_parse_form() {
+ assert parse_form('foo=bar&bar=baz') == {
+ 'foo': 'bar'
+ 'bar': 'baz'
+ }
+ assert parse_form('foo=bar=&bar=baz') == {
+ 'foo': 'bar='
+ 'bar': 'baz'
+ }
+ assert parse_form('foo=bar%3D&bar=baz') == {
+ 'foo': 'bar='
+ 'bar': 'baz'
+ }
+ assert parse_form('foo=b%26ar&bar=baz') == {
+ 'foo': 'b&ar'
+ 'bar': 'baz'
+ }
+ assert parse_form('a=b& c=d') == {
+ 'a': 'b'
+ ' c': 'd'
+ }
+ assert parse_form('a=b&c= d ') == {
+ 'a': 'b'
+ 'c': ' d '
+ }
+}
+
+fn test_parse_multipart_form() {
+ boundary := '6844a625b1f0b299'
+ names := ['foo', 'fooz']
+ file := 'bar.v'
+ ct := 'application/octet-stream'
+ contents := ['baz', 'buzz']
+ data := "--------------------------$boundary
+Content-Disposition: form-data; name=\"${names[0]}\"; filename=\"$file\"
+Content-Type: $ct
+
+${contents[0]}
+--------------------------$boundary
+Content-Disposition: form-data; name=\"${names[1]}\"
+
+${contents[1]}
+--------------------------$boundary--
+"
+ form, files := parse_multipart_form(data, boundary)
+ assert files == {
+ names[0]: [FileData{
+ filename: file
+ content_type: ct
+ data: contents[0]
+ }]
+ }
+
+ assert form == {
+ names[1]: contents[1]
+ }
+}
+
+fn test_parse_large_body() ? {
+ body := 'A'.repeat(101) // greater than max_bytes
+ req := 'GET / HTTP/1.1\r\nContent-Length: $body.len\r\n\r\n$body'
+ mut reader_ := reader(req)
+ result := parse_request(mut reader_) ?
+ assert result.data.len == body.len
+ assert result.data == body
+}
diff --git a/v_windows/v/vlib/net/http/response.v b/v_windows/v/vlib/net/http/response.v
new file mode 100644
index 0000000..caa8228
--- /dev/null
+++ b/v_windows/v/vlib/net/http/response.v
@@ -0,0 +1,152 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import net.http.chunked
+import strconv
+
+// Response represents the result of the request
+pub struct Response {
+pub mut:
+ text string
+ header Header
+ status_code int
+ status_msg string
+ http_version string
+}
+
+fn (mut resp Response) free() {
+ unsafe { resp.header.free() }
+}
+
+// Formats resp to bytes suitable for HTTP response transmission
+pub fn (resp Response) bytes() []byte {
+ // TODO: build []byte directly; this uses two allocations
+ return resp.bytestr().bytes()
+}
+
+// Formats resp to a string suitable for HTTP response transmission
+pub fn (resp Response) bytestr() string {
+ return ('HTTP/$resp.http_version $resp.status_code $resp.status_msg\r\n' + '${resp.header.render(
+ version: resp.version()
+ )}\r\n' + '$resp.text')
+}
+
+// Parse a raw HTTP response into a Response object
+pub fn parse_response(resp string) ?Response {
+ version, status_code, status_msg := parse_status_line(resp.all_before('\n')) ?
+ // Build resp header map and separate the body
+ start_idx, end_idx := find_headers_range(resp) ?
+ header := parse_headers(resp.substr(start_idx, end_idx)) ?
+ mut text := resp.substr(end_idx, resp.len)
+ if header.get(.transfer_encoding) or { '' } == 'chunked' {
+ text = chunked.decode(text)
+ }
+ return Response{
+ http_version: version
+ status_code: status_code
+ status_msg: status_msg
+ header: header
+ text: text
+ }
+}
+
+// parse_status_line parses the first HTTP response line into the HTTP
+// version, status code, and reason phrase
+fn parse_status_line(line string) ?(string, int, string) {
+ if line.len < 5 || line[..5].to_lower() != 'http/' {
+ return error('response does not start with HTTP/')
+ }
+ data := line.split_nth(' ', 3)
+ if data.len != 3 {
+ return error('expected at least 3 tokens')
+ }
+ version := data[0].substr(5, data[0].len)
+ // validate version is 1*DIGIT "." 1*DIGIT
+ digits := version.split_nth('.', 3)
+ if digits.len != 2 {
+ return error('HTTP version malformed')
+ }
+ for digit in digits {
+ strconv.atoi(digit) or { return error('HTTP version must contain only integers') }
+ }
+ return version, strconv.atoi(data[1]) ?, data[2]
+}
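+
+// For example, parse_status_line('HTTP/1.1 404 Not Found') returns
+// ('1.1', 404, 'Not Found'); a line that does not start with 'HTTP/' or whose
+// version is not <digits>.<digits> returns an error.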
+
+// cookies parses the Set-Cookie headers into Cookie objects
+pub fn (r Response) cookies() []Cookie {
+ mut cookies := []Cookie{}
+ for cookie in r.header.values(.set_cookie) {
+ cookies << parse_cookie(cookie) or { continue }
+ }
+ return cookies
+}
+
+// status parses the status_code into a Status struct
+pub fn (r Response) status() Status {
+ return status_from_int(r.status_code)
+}
+
+// set_status sets the status_code and status_msg of the response
+pub fn (mut r Response) set_status(s Status) {
+ r.status_code = s.int()
+ r.status_msg = s.str()
+}
+
+// version parses the http_version string of the response into a Version value
+pub fn (r Response) version() Version {
+ return version_from_str('HTTP/$r.http_version')
+}
+
+// set_version sets the http_version string of the response
+pub fn (mut r Response) set_version(v Version) {
+ if v == .unknown {
+ r.http_version = ''
+ return
+ }
+ maj, min := v.protos()
+ r.http_version = '${maj}.$min'
+}
+
+pub struct ResponseConfig {
+ version Version = .v1_1
+ status Status = .ok
+ header Header
+ text string
+}
+
+// new_response creates a Response object from the configuration. This
+// function will add a Content-Length header if text is not empty.
+pub fn new_response(conf ResponseConfig) Response {
+ mut resp := Response{
+ text: conf.text
+ header: conf.header
+ }
+ if conf.text.len > 0 && !resp.header.contains(.content_length) {
+ resp.header.add(.content_length, conf.text.len.str())
+ }
+ resp.set_status(conf.status)
+ resp.set_version(conf.version)
+ return resp
+}
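+
+// For example, new_response(status: .ok, text: 'hi') renders (via bytestr()) as
+// 'HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi'.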
+
+// find_headers_range returns the start (inclusive) and end (exclusive)
+// index of the headers in the string, including the trailing newlines. This
+// helper function expects the first line in `data` to be the HTTP status line
+// (HTTP/1.1 200 OK).
+fn find_headers_range(data string) ?(int, int) {
+ start_idx := data.index('\n') or { return error('no start index found') } + 1
+ mut count := 0
+ for i := start_idx; i < data.len; i++ {
+ if data[i] == `\n` {
+ count++
+ } else if data[i] != `\r` {
+ count = 0
+ }
+ if count == 2 {
+ return start_idx, i + 1
+ }
+ }
+ return error('no end index found')
+}
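+
+// For example, for 'HTTP/1.1 200 OK\r\nFoo: bar\r\n\r\nbody' the returned range
+// covers 'Foo: bar\r\n\r\n': it starts right after the status line and ends just
+// past the blank line that separates the headers from the body.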
diff --git a/v_windows/v/vlib/net/http/response_test.v b/v_windows/v/vlib/net/http/response_test.v
new file mode 100644
index 0000000..bf2fba3
--- /dev/null
+++ b/v_windows/v/vlib/net/http/response_test.v
@@ -0,0 +1,36 @@
+module http
+
+fn test_response_bytestr() ? {
+ {
+ resp := new_response(
+ status: .ok
+ text: 'Foo'
+ )
+ assert resp.bytestr() == 'HTTP/1.1 200 OK\r\n' + 'Content-Length: 3\r\n' + '\r\n' + 'Foo'
+ }
+ {
+ resp := new_response(
+ status: .found
+ text: 'Foo'
+ header: new_header(key: .location, value: '/')
+ )
+ lines := resp.bytestr().split_into_lines()
+ assert lines[0] == 'HTTP/1.1 302 Found'
+ // header order is not guaranteed
+ check_headers(['Location: /', 'Content-Length: 3'], lines[1..3]) ?
+ assert lines[3] == ''
+ assert lines[4] == 'Foo'
+ }
+}
+
+// check_headers is a helper function for asserting all expected headers
+// are found because rendered header order is not guaranteed. The check
+// is O(n^2) which is fine for small lists.
+fn check_headers(expected []string, found []string) ? {
+ assert expected.len == found.len
+ for header in expected {
+ if !found.contains(header) {
+ return error('expected header "$header" not in $found')
+ }
+ }
+}
diff --git a/v_windows/v/vlib/net/http/server.v b/v_windows/v/vlib/net/http/server.v
new file mode 100644
index 0000000..7a9660d
--- /dev/null
+++ b/v_windows/v/vlib/net/http/server.v
@@ -0,0 +1,123 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+import io
+import net
+import time
+
+// ServerStatus is the current status of the server.
+// .running means that the server is active and serving.
+// .stopped means that the server is not active but still listening.
+// .closed means that the server is completely inactive.
+pub enum ServerStatus {
+ running
+ stopped
+ closed
+}
+
+interface Handler {
+ handle(Request) Response
+}
+
+pub struct Server {
+mut:
+ state ServerStatus = .closed
+ listener net.TcpListener
+pub mut:
+ port int = 8080
+ handler Handler = DebugHandler{}
+ read_timeout time.Duration = 30 * time.second
+ write_timeout time.Duration = 30 * time.second
+ accept_timeout time.Duration = 30 * time.second
+}
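+
+// A minimal configuration sketch (MyHandler is a hypothetical Handler implementation):
+//
+//   mut server := http.Server{
+//       port: 8080
+//       handler: MyHandler{}
+//       accept_timeout: 1 * time.second
+//   }
+//   server.listen_and_serve() ?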
+
+pub fn (mut s Server) listen_and_serve() ? {
+ if s.handler is DebugHandler {
+ eprintln('Server handler not set, using debug handler')
+ }
+ s.listener = net.listen_tcp(.ip6, ':$s.port') ?
+ s.listener.set_accept_timeout(s.accept_timeout)
+ eprintln('Listening on :$s.port')
+ s.state = .running
+ for {
+ // break if we have a stop signal
+ if s.state != .running {
+ break
+ }
+ mut conn := s.listener.accept() or {
+ if err.msg != 'net: op timed out' {
+ eprintln('accept() failed: $err; skipping')
+ }
+ continue
+ }
+ conn.set_read_timeout(s.read_timeout)
+ conn.set_write_timeout(s.write_timeout)
+ // TODO: make concurrent
+ s.parse_and_respond(mut conn)
+ }
+ if s.state == .stopped {
+ s.close()
+ }
+}
+
+// stop signals the server that it should not respond anymore
+[inline]
+pub fn (mut s Server) stop() {
+ s.state = .stopped
+}
+
+// close immediately closes the port and signals the server that it has been closed
+[inline]
+pub fn (mut s Server) close() {
+ s.state = .closed
+ s.listener.close() or { return }
+}
+
+[inline]
+pub fn (s &Server) status() ServerStatus {
+ return s.state
+}
+
+fn (s &Server) parse_and_respond(mut conn net.TcpConn) {
+ defer {
+ conn.close() or { eprintln('close() failed: $err') }
+ }
+
+ mut reader := io.new_buffered_reader(reader: conn)
+ defer {
+ reader.free()
+ }
+ req := parse_request(mut reader) or {
+ $if debug {
+ // only show in debug mode to prevent abuse
+ eprintln('error parsing request: $err')
+ }
+ return
+ }
+ mut resp := s.handler.handle(req)
+ if resp.version() == .unknown {
+ resp.set_version(req.version)
+ }
+ conn.write(resp.bytes()) or { eprintln('error sending response: $err') }
+}
+
+// DebugHandler implements the Handler interface by echoing the request
+// in the response
+struct DebugHandler {}
+
+fn (d DebugHandler) handle(req Request) Response {
+ $if debug {
+ eprintln('[$time.now()] $req.method $req.url\n\r$req.header\n\r$req.data - 200 OK')
+ } $else {
+ eprintln('[$time.now()] $req.method $req.url - 200')
+ }
+ mut r := Response{
+ text: req.data
+ header: req.header
+ }
+ r.set_status(.ok)
+ r.set_version(req.version)
+ return r
+}
diff --git a/v_windows/v/vlib/net/http/server_test.v b/v_windows/v/vlib/net/http/server_test.v
new file mode 100644
index 0000000..790da30
--- /dev/null
+++ b/v_windows/v/vlib/net/http/server_test.v
@@ -0,0 +1,90 @@
+import net.http
+import time
+
+fn test_server_stop() ? {
+ mut server := &http.Server{
+ accept_timeout: 1 * time.second
+ }
+ t := go server.listen_and_serve()
+ time.sleep(250 * time.millisecond)
+ mut watch := time.new_stopwatch()
+ server.stop()
+ assert server.status() == .stopped
+ assert watch.elapsed() < 100 * time.millisecond
+ t.wait() ?
+ assert watch.elapsed() < 999 * time.millisecond
+}
+
+fn test_server_close() ? {
+ mut server := &http.Server{
+ accept_timeout: 1 * time.second
+ handler: MyHttpHandler{}
+ }
+ t := go server.listen_and_serve()
+ time.sleep(250 * time.millisecond)
+ mut watch := time.new_stopwatch()
+ server.close()
+ assert server.status() == .closed
+ assert watch.elapsed() < 100 * time.millisecond
+ t.wait() ?
+ assert watch.elapsed() < 999 * time.millisecond
+}
+
+struct MyHttpHandler {
+mut:
+ counter int
+ oks int
+ not_founds int
+}
+
+fn (mut handler MyHttpHandler) handle(req http.Request) http.Response {
+ handler.counter++
+ // eprintln('$time.now() | counter: $handler.counter | $req.method $req.url\n$req.header\n$req.data - 200 OK\n')
+ mut r := http.Response{
+ text: req.data + ', $req.url'
+ header: req.header
+ }
+ match req.url.all_before('?') {
+ '/endpoint', '/another/endpoint' {
+ r.set_status(.ok)
+ handler.oks++
+ }
+ else {
+ r.set_status(.not_found)
+ handler.not_founds++
+ }
+ }
+ r.set_version(req.version)
+ return r
+}
+
+const cport = 8198
+
+fn test_server_custom_handler() ? {
+ mut handler := MyHttpHandler{}
+ mut server := &http.Server{
+ accept_timeout: 1 * time.second
+ handler: handler
+ port: cport
+ }
+ t := go server.listen_and_serve()
+ for server.status() != .running {
+ time.sleep(10 * time.millisecond)
+ }
+ x := http.fetch(url: 'http://localhost:$cport/endpoint?abc=xyz', data: 'my data') ?
+ assert x.text == 'my data, /endpoint?abc=xyz'
+ assert x.status_code == 200
+ assert x.http_version == '1.1'
+ y := http.fetch(url: 'http://localhost:$cport/another/endpoint', data: 'abcde') ?
+ assert y.text == 'abcde, /another/endpoint'
+ assert y.status_code == 200
+ assert y.status() == .ok
+ assert y.http_version == '1.1'
+ //
+ http.fetch(url: 'http://localhost:$cport/something/else') ?
+ server.stop()
+ t.wait() ?
+ assert handler.counter == 3
+ assert handler.oks == 2
+ assert handler.not_founds == 1
+}
diff --git a/v_windows/v/vlib/net/http/status.v b/v_windows/v/vlib/net/http/status.v
new file mode 100644
index 0000000..f4bc9ee
--- /dev/null
+++ b/v_windows/v/vlib/net/http/status.v
@@ -0,0 +1,255 @@
+// Copyright (c) 2020 Justin E. Jones. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+// The status codes listed here are based on the comprehensive list,
+// available at:
+// https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
+pub enum Status {
+ unknown = -1
+ unassigned = 0
+ cont = 100
+ switching_protocols = 101
+ processing = 102
+ checkpoint_draft = 103
+ ok = 200
+ created = 201
+ accepted = 202
+ non_authoritative_information = 203
+ no_content = 204
+ reset_content = 205
+ partial_content = 206
+ multi_status = 207
+ already_reported = 208
+ im_used = 226
+ multiple_choices = 300
+ moved_permanently = 301
+ found = 302
+ see_other = 303
+ not_modified = 304
+ use_proxy = 305
+ switch_proxy = 306
+ temporary_redirect = 307
+ permanent_redirect = 308
+ bad_request = 400
+ unauthorized = 401
+ payment_required = 402
+ forbidden = 403
+ not_found = 404
+ method_not_allowed = 405
+ not_acceptable = 406
+ proxy_authentication_required = 407
+ request_timeout = 408
+ conflict = 409
+ gone = 410
+ length_required = 411
+ precondition_failed = 412
+ request_entity_too_large = 413
+ request_uri_too_long = 414
+ unsupported_media_type = 415
+ requested_range_not_satisfiable = 416
+ expectation_failed = 417
+ im_a_teapot = 418
+ misdirected_request = 421
+ unprocessable_entity = 422
+ locked = 423
+ failed_dependency = 424
+ unordered_collection = 425
+ upgrade_required = 426
+ precondition_required = 428
+ too_many_requests = 429
+ request_header_fields_too_large = 431
+ unavailable_for_legal_reasons = 451
+ client_closed_request = 499
+ internal_server_error = 500
+ not_implemented = 501
+ bad_gateway = 502
+ service_unavailable = 503
+ gateway_timeout = 504
+ http_version_not_supported = 505
+ variant_also_negotiates = 506
+ insufficient_storage = 507
+ loop_detected = 508
+ bandwidth_limit_exceeded = 509
+ not_extended = 510
+ network_authentication_required = 511
+}
+
+pub fn status_from_int(code int) Status {
+ return match code {
+ 100 { Status.cont }
+ 101 { Status.switching_protocols }
+ 102 { Status.processing }
+ 103 { Status.checkpoint_draft }
+ 104...199 { Status.unassigned }
+ 200 { Status.ok }
+ 201 { Status.created }
+ 202 { Status.accepted }
+ 203 { Status.non_authoritative_information }
+ 204 { Status.no_content }
+ 205 { Status.reset_content }
+ 206 { Status.partial_content }
+ 207 { Status.multi_status }
+ 208 { Status.already_reported }
+ 209...225 { Status.unassigned }
+ 226 { Status.im_used }
+ 227...299 { Status.unassigned }
+ 300 { Status.multiple_choices }
+ 301 { Status.moved_permanently }
+ 302 { Status.found }
+ 303 { Status.see_other }
+ 304 { Status.not_modified }
+ 305 { Status.use_proxy }
+ 306 { Status.switch_proxy }
+ 307 { Status.temporary_redirect }
+ 308 { Status.permanent_redirect }
+ 309...399 { Status.unassigned }
+ 400 { Status.bad_request }
+ 401 { Status.unauthorized }
+ 402 { Status.payment_required }
+ 403 { Status.forbidden }
+ 404 { Status.not_found }
+ 405 { Status.method_not_allowed }
+ 406 { Status.not_acceptable }
+ 407 { Status.proxy_authentication_required }
+ 408 { Status.request_timeout }
+ 409 { Status.conflict }
+ 410 { Status.gone }
+ 411 { Status.length_required }
+ 412 { Status.precondition_failed }
+ 413 { Status.request_entity_too_large }
+ 414 { Status.request_uri_too_long }
+ 415 { Status.unsupported_media_type }
+ 416 { Status.requested_range_not_satisfiable }
+ 417 { Status.expectation_failed }
+ 418 { Status.im_a_teapot }
+ 419...420 { Status.unassigned }
+ 421 { Status.misdirected_request }
+ 422 { Status.unprocessable_entity }
+ 423 { Status.locked }
+ 424 { Status.failed_dependency }
+ 425 { Status.unordered_collection }
+ 426 { Status.upgrade_required }
+ 428 { Status.precondition_required }
+ 429 { Status.too_many_requests }
+ 431 { Status.request_header_fields_too_large }
+ 432...450 { Status.unassigned }
+ 451 { Status.unavailable_for_legal_reasons }
+ 452...499 { Status.unassigned }
+ 500 { Status.internal_server_error }
+ 501 { Status.not_implemented }
+ 502 { Status.bad_gateway }
+ 503 { Status.service_unavailable }
+ 504 { Status.gateway_timeout }
+ 505 { Status.http_version_not_supported }
+ 506 { Status.variant_also_negotiates }
+ 507 { Status.insufficient_storage }
+ 508 { Status.loop_detected }
+ 509 { Status.bandwidth_limit_exceeded }
+ 510 { Status.not_extended }
+ 511 { Status.network_authentication_required }
+ 512...599 { Status.unassigned }
+ else { Status.unknown }
+ }
+}
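+
+// For example, status_from_int(200) == .ok, status_from_int(299) == .unassigned
+// (a gap in the IANA registry), and status_from_int(700) == .unknown.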
+
+pub fn (code Status) str() string {
+ return match code {
+ .cont { 'Continue' }
+ .switching_protocols { 'Switching Protocols' }
+ .processing { 'Processing' }
+ .checkpoint_draft { 'Checkpoint Draft' }
+ .ok { 'OK' }
+ .created { 'Created' }
+ .accepted { 'Accepted' }
+ .non_authoritative_information { 'Non Authoritative Information' }
+ .no_content { 'No Content' }
+ .reset_content { 'Reset Content' }
+ .partial_content { 'Partial Content' }
+ .multi_status { 'Multi Status' }
+ .already_reported { 'Already Reported' }
+ .im_used { 'IM Used' }
+ .multiple_choices { 'Multiple Choices' }
+ .moved_permanently { 'Moved Permanently' }
+ .found { 'Found' }
+ .see_other { 'See Other' }
+ .not_modified { 'Not Modified' }
+ .use_proxy { 'Use Proxy' }
+ .switch_proxy { 'Switch Proxy' }
+ .temporary_redirect { 'Temporary Redirect' }
+ .permanent_redirect { 'Permanent Redirect' }
+ .bad_request { 'Bad Request' }
+ .unauthorized { 'Unauthorized' }
+ .payment_required { 'Payment Required' }
+ .forbidden { 'Forbidden' }
+ .not_found { 'Not Found' }
+ .method_not_allowed { 'Method Not Allowed' }
+ .not_acceptable { 'Not Acceptable' }
+ .proxy_authentication_required { 'Proxy Authentication Required' }
+ .request_timeout { 'Request Timeout' }
+ .conflict { 'Conflict' }
+ .gone { 'Gone' }
+ .length_required { 'Length Required' }
+ .precondition_failed { 'Precondition Failed' }
+ .request_entity_too_large { 'Request Entity Too Large' }
+ .request_uri_too_long { 'Request URI Too Long' }
+ .unsupported_media_type { 'Unsupported Media Type' }
+ .requested_range_not_satisfiable { 'Requested Range Not Satisfiable' }
+ .expectation_failed { 'Expectation Failed' }
+ .im_a_teapot { 'Im a teapot' }
+ .misdirected_request { 'Misdirected Request' }
+ .unprocessable_entity { 'Unprocessable Entity' }
+ .locked { 'Locked' }
+ .failed_dependency { 'Failed Dependency' }
+ .unordered_collection { 'Unordered Collection' }
+ .upgrade_required { 'Upgrade Required' }
+ .precondition_required { 'Precondition Required' }
+ .too_many_requests { 'Too Many Requests' }
+ .request_header_fields_too_large { 'Request Header Fields Too Large' }
+ .unavailable_for_legal_reasons { 'Unavailable For Legal Reasons' }
+ .internal_server_error { 'Internal Server Error' }
+ .not_implemented { 'Not Implemented' }
+ .bad_gateway { 'Bad Gateway' }
+ .service_unavailable { 'Service Unavailable' }
+ .gateway_timeout { 'Gateway Timeout' }
+ .http_version_not_supported { 'HTTP Version Not Supported' }
+ .variant_also_negotiates { 'Variant Also Negotiates' }
+ .insufficient_storage { 'Insufficient Storage' }
+ .loop_detected { 'Loop Detected' }
+ .bandwidth_limit_exceeded { 'Bandwidth Limit Exceeded' }
+ .not_extended { 'Not Extended' }
+ .network_authentication_required { 'Network Authentication Required' }
+ .unassigned { 'Unassigned' }
+ else { 'Unknown' }
+ }
+}
+
+// int converts an assigned and known Status to its integral equivalent.
+// If a Status is unknown or unassigned, this method returns zero.
+pub fn (code Status) int() int {
+ if code in [.unknown, .unassigned] {
+ return 0
+ }
+ return int(code)
+}
+
+// is_valid returns true if the status code is assigned and known
+pub fn (code Status) is_valid() bool {
+ number := code.int()
+ return number >= 100 && number < 600
+}
+
+// is_error will return true if the status code represents either a client or
+// a server error; otherwise will return false
+pub fn (code Status) is_error() bool {
+ number := code.int()
+ return number >= 400 && number < 600
+}
+
+// is_success will return true if the status code represents either an
+// informational, success, or redirection response; otherwise will return false
+pub fn (code Status) is_success() bool {
+ number := code.int()
+ return number >= 100 && number < 400
+}
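+
+// For example: Status.ok.is_success() == true, Status.not_found.is_error() == true,
+// and Status.unassigned.is_valid() == false (its int() value is 0).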
diff --git a/v_windows/v/vlib/net/http/status_test.v b/v_windows/v/vlib/net/http/status_test.v
new file mode 100644
index 0000000..154aec3
--- /dev/null
+++ b/v_windows/v/vlib/net/http/status_test.v
@@ -0,0 +1,49 @@
+module http
+
+fn test_str() {
+ code := Status.bad_gateway
+ actual := code.str()
+ assert actual == 'Bad Gateway'
+}
+
+fn test_int() {
+ code := Status.see_other
+ actual := code.int()
+ assert actual == 303
+}
+
+fn test_is_valid() {
+ code := Status.gateway_timeout
+ actual := code.is_valid()
+ assert actual == true
+}
+
+fn test_is_valid_negative() {
+ code := Status.unassigned
+ actual := code.is_valid()
+ assert actual == false
+}
+
+fn test_is_error() {
+ code := Status.too_many_requests
+ actual := code.is_error()
+ assert actual == true
+}
+
+fn test_is_error_negative() {
+ code := Status.cont
+ actual := code.is_error()
+ assert actual == false
+}
+
+fn test_is_success() {
+ code := Status.accepted
+ actual := code.is_success()
+ assert actual == true
+}
+
+fn test_is_success_negative() {
+ code := Status.forbidden
+ actual := code.is_success()
+ assert actual == false
+}
diff --git a/v_windows/v/vlib/net/http/version.v b/v_windows/v/vlib/net/http/version.v
new file mode 100644
index 0000000..f4388a3
--- /dev/null
+++ b/v_windows/v/vlib/net/http/version.v
@@ -0,0 +1,40 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module http
+
+// The versions listed here are the most common ones.
+pub enum Version {
+ unknown
+ v1_1
+ v2_0
+ v1_0
+}
+
+pub fn (v Version) str() string {
+ return match v {
+ .v1_1 { 'HTTP/1.1' }
+ .v2_0 { 'HTTP/2.0' }
+ .v1_0 { 'HTTP/1.0' }
+ .unknown { 'unknown' }
+ }
+}
+
+pub fn version_from_str(v string) Version {
+ return match v.to_lower() {
+ 'http/1.1' { Version.v1_1 }
+ 'http/2.0' { Version.v2_0 }
+ 'http/1.0' { Version.v1_0 }
+ else { Version.unknown }
+ }
+}
+
+// protos returns the version major and minor numbers
+pub fn (v Version) protos() (int, int) {
+ match v {
+ .v1_1 { return 1, 1 }
+ .v2_0 { return 2, 0 }
+ .v1_0 { return 1, 0 }
+ .unknown { return 0, 0 }
+ }
+}
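+
+// For example, Version.v1_1.protos() returns (1, 1), which Response.set_version
+// uses to build the '1.1' http_version string.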
diff --git a/v_windows/v/vlib/net/ipv6_v6only.h b/v_windows/v/vlib/net/ipv6_v6only.h
new file mode 100644
index 0000000..79393df
--- /dev/null
+++ b/v_windows/v/vlib/net/ipv6_v6only.h
@@ -0,0 +1,5 @@
+#if !defined(IPV6_V6ONLY)
+
+#define IPV6_V6ONLY 27
+
+#endif
diff --git a/v_windows/v/vlib/net/net_nix.c.v b/v_windows/v/vlib/net/net_nix.c.v
new file mode 100644
index 0000000..a9fa531
--- /dev/null
+++ b/v_windows/v/vlib/net/net_nix.c.v
@@ -0,0 +1,26 @@
+module net
+
+#include <unistd.h>
+#include <sys/select.h>
+// inet.h is needed for inet_ntop on macos
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#flag solaris -lsocket
+
+fn error_code() int {
+ return C.errno
+}
+
+fn init() {
+}
+
+pub const (
+ msg_nosignal = 0x4000
+)
+
+const (
+ error_ewouldblock = C.EWOULDBLOCK
+)
diff --git a/v_windows/v/vlib/net/net_windows.c.v b/v_windows/v/vlib/net/net_windows.c.v
new file mode 100644
index 0000000..337176f
--- /dev/null
+++ b/v_windows/v/vlib/net/net_windows.c.v
@@ -0,0 +1,780 @@
+module net
+
+// WsaError lists the socket error codes that WSA reports via WSAGetLastError
+pub enum WsaError {
+ //
+ // MessageId: WSAEINTR
+ //
+ // MessageText:
+ //
+ // A blocking operation was interrupted by a call to WSACancelBlockingCall.
+ //
+ wsaeintr = 10004
+ //
+ // MessageId: WSAEBADF
+ //
+ // MessageText:
+ //
+ // The file handle supplied is not valid.
+ //
+ wsaebadf = 10009
+ //
+ // MessageId: WSAEACCES
+ //
+ // MessageText:
+ //
+ // An attempt was made to access a socket in a way forbidden by its access permissions.
+ //
+ wsaeacces = 10013
+ //
+ // MessageId: WSAEFAULT
+ //
+ // MessageText:
+ //
+ // The system detected an invalid pointer address in attempting to use a pointer argument in a call.
+ //
+ wsaefault = 10014
+ //
+ // MessageId: WSAEINVAL
+ //
+ // MessageText:
+ //
+ // An invalid argument was supplied.
+ //
+ wsaeinval = 10022
+ //
+ // MessageId: WSAEMFILE
+ //
+ // MessageText:
+ //
+ // Too many open sockets.
+ //
+ wsaemfile = 10024
+ //
+ // MessageId: WSAEWOULDBLOCK
+ //
+ // MessageText:
+ //
+ // A non-blocking socket operation could not be completed immediately.
+ //
+ wsaewouldblock = 10035
+ //
+ // MessageId: WSAEINPROGRESS
+ //
+ // MessageText:
+ //
+ // A blocking operation is currently executing.
+ //
+ wsaeinprogress = 10036
+ //
+ // MessageId: WSAEALREADY
+ //
+ // MessageText:
+ //
+ // An operation was attempted on a non-blocking socket that already had an operation in progress.
+ //
+ wsaealready = 10037
+ //
+ // MessageId: WSAENOTSOCK
+ //
+ // MessageText:
+ //
+ // An operation was attempted on something that is not a socket.
+ //
+ wsaenotsock = 10038
+ //
+ // MessageId: WSAEDESTADDRREQ
+ //
+ // MessageText:
+ //
+ // A required address was omitted from an operation on a socket.
+ //
+ wsaedestaddrreq = 10039
+ //
+ // MessageId: WSAEMSGSIZE
+ //
+ // MessageText:
+ //
+ // A message sent on a datagram socket was larger than the internal message buffer or some other network limit, or the buffer used to receive a datagram into was smaller than the datagram itself.
+ //
+ wsaemsgsize = 10040
+ //
+ // MessageId: WSAEPROTOTYPE
+ //
+ // MessageText:
+ //
+ // A protocol was specified in the socket function call that does not support the semantics of the socket type requested.
+ //
+ wsaeprototype = 10041
+ //
+ // MessageId: WSAENOPROTOOPT
+ //
+ // MessageText:
+ //
+ // An unknown, invalid, or unsupported option or level was specified in a getsockopt or setsockopt call.
+ //
+ wsaenoprotoopt = 10042
+ //
+ // MessageId: WSAEPROTONOSUPPORT
+ //
+ // MessageText:
+ //
+ // The requested protocol has not been configured into the system, or no implementation for it exists.
+ //
+ wsaeprotonosupport = 10043
+ //
+ // MessageId: WSAESOCKTNOSUPPORT
+ //
+ // MessageText:
+ //
+ // The support for the specified socket type does not exist in this address family.
+ //
+ wsaesocktnosupport = 10044
+ //
+ // MessageId: WSAEOPNOTSUPP
+ //
+ // MessageText:
+ //
+ // The attempted operation is not supported for the type of object referenced.
+ //
+ wsaeopnotsupp = 10045
+ //
+ // MessageId: WSAEPFNOSUPPORT
+ //
+ // MessageText:
+ //
+ // The protocol family has not been configured into the system or no implementation for it exists.
+ //
+ wsaepfnosupport = 10046
+ //
+ // MessageId: WSAEAFNOSUPPORT
+ //
+ // MessageText:
+ //
+ // An address incompatible with the requested protocol was used.
+ //
+ wsaeafnosupport = 10047
+ //
+ // MessageId: WSAEADDRINUSE
+ //
+ // MessageText:
+ //
+ // Only one usage of each socket address (protocol/network address/port) is normally permitted.
+ //
+ wsaeaddrinuse = 10048
+ //
+ // MessageId: WSAEADDRNOTAVAIL
+ //
+ // MessageText:
+ //
+ // The requested address is not valid in its context.
+ //
+ wsaeaddrnotavail = 10049
+ //
+ // MessageId: WSAENETDOWN
+ //
+ // MessageText:
+ //
+ // A socket operation encountered a dead network.
+ //
+ wsaenetdown = 10050
+ //
+ // MessageId: WSAENETUNREACH
+ //
+ // MessageText:
+ //
+ // A socket operation was attempted to an unreachable network.
+ //
+ wsaenetunreach = 10051
+ //
+ // MessageId: WSAENETRESET
+ //
+ // MessageText:
+ //
+ // The connection has been broken due to keep-alive activity detecting a failure while the operation was in progress.
+ //
+ wsaenetreset = 10052
+ //
+ // MessageId: WSAECONNABORTED
+ //
+ // MessageText:
+ //
+ // An established connection was aborted by the software in your host machine.
+ //
+ wsaeconnaborted = 10053
+ //
+ // MessageId: WSAECONNRESET
+ //
+ // MessageText:
+ //
+ // An existing connection was forcibly closed by the remote host.
+ //
+ wsaeconnreset = 10054
+ //
+ // MessageId: WSAENOBUFS
+ //
+ // MessageText:
+ //
+ // An operation on a socket could not be performed because the system lacked sufficient buffer space or because a queue was full.
+ //
+ wsaenobufs = 10055
+ //
+ // MessageId: WSAEISCONN
+ //
+ // MessageText:
+ //
+ // A connect request was made on an already connected socket.
+ //
+ wsaeisconn = 10056
+ //
+ // MessageId: WSAENOTCONN
+ //
+ // MessageText:
+ //
+ // A request to send or receive data was disallowed because the socket is not connected and (when sending on a datagram socket using a sendto call) no address was supplied.
+ //
+ wsaenotconn = 10057
+ //
+ // MessageId: WSAESHUTDOWN
+ //
+ // MessageText:
+ //
+ // A request to send or receive data was disallowed because the socket had already been shut down in that direction with a previous shutdown call.
+ //
+ wsaeshutdown = 10058
+ //
+ // MessageId: WSAETOOMANYREFS
+ //
+ // MessageText:
+ //
+ // Too many references to some kernel object.
+ //
+ wsaetoomanyrefs = 10059
+ //
+ // MessageId: WSAETIMEDOUT
+ //
+ // MessageText:
+ //
+ // A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond.
+ //
+ wsaetimedout = 10060
+ //
+ // MessageId: WSAECONNREFUSED
+ //
+ // MessageText:
+ //
+ // No connection could be made because the target machine actively refused it.
+ //
+ wsaeconnrefused = 10061
+ //
+ // MessageId: WSAELOOP
+ //
+ // MessageText:
+ //
+ // Cannot translate name.
+ //
+ wsaeloop = 10062
+ //
+ // MessageId: WSAENAMETOOLONG
+ //
+ // MessageText:
+ //
+ // Name component or name was too long.
+ //
+ wsaenametoolong = 10063
+ //
+ // MessageId: WSAEHOSTDOWN
+ //
+ // MessageText:
+ //
+ // A socket operation failed because the destination host was down.
+ //
+ wsaehostdown = 10064
+ //
+ // MessageId: WSAEHOSTUNREACH
+ //
+ // MessageText:
+ //
+ // A socket operation was attempted to an unreachable host.
+ //
+ wsaehostunreach = 10065
+ //
+ // MessageId: WSAENOTEMPTY
+ //
+ // MessageText:
+ //
+ // Cannot remove a directory that is not empty.
+ //
+ wsaenotempty = 10066
+ //
+ // MessageId: WSAEPROCLIM
+ //
+ // MessageText:
+ //
+ // A Windows Sockets implementation may have a limit on the number of applications that may use it simultaneously.
+ //
+ wsaeproclim = 10067
+ //
+ // MessageId: WSAEUSERS
+ //
+ // MessageText:
+ //
+ // Ran out of quota.
+ //
+ wsaeusers = 10068
+ //
+ // MessageId: WSAEDQUOT
+ //
+ // MessageText:
+ //
+ // Ran out of disk quota.
+ //
+ wsaedquot = 10069
+ //
+ // MessageId: WSAESTALE
+ //
+ // MessageText:
+ //
+ // File handle reference is no longer available.
+ //
+ wsaestale = 10070
+ //
+ // MessageId: WSAEREMOTE
+ //
+ // MessageText:
+ //
+ // Item is not available locally.
+ //
+ wsaeremote = 10071
+ //
+ // MessageId: WSASYSNOTREADY
+ //
+ // MessageText:
+ //
+ // WSAStartup cannot function at this time because the underlying system it uses to provide network services is currently unavailable.
+ //
+ wsasysnotready = 10091
+ //
+ // MessageId: WSAVERNOTSUPPORTED
+ //
+ // MessageText:
+ //
+ // The Windows Sockets version requested is not supported.
+ //
+ wsavernotsupported = 10092
+ //
+ // MessageId: WSANOTINITIALISED
+ //
+ // MessageText:
+ //
+ // Either the application has not called WSAStartup, or WSAStartup failed.
+ //
+ wsanotinitialised = 10093
+ //
+ // MessageId: WSAEDISCON
+ //
+ // MessageText:
+ //
+ // Returned by WSARecv or WSARecvFrom to indicate the remote party has initiated a graceful shutdown sequence.
+ //
+ wsaediscon = 10101
+ //
+ // MessageId: WSAENOMORE
+ //
+ // MessageText:
+ //
+ // No more results can be returned by WSALookupServiceNext.
+ //
+ wsaenomore = 10102
+ //
+ // MessageId: WSAECANCELLED
+ //
+ // MessageText:
+ //
+ // A call to WSALookupServiceEnd was made while this call was still processing. The call has been canceled.
+ //
+ wsaecancelled = 10103
+ //
+ // MessageId: WSAEINVALIDPROCTABLE
+ //
+ // MessageText:
+ //
+ // The procedure call table is invalid.
+ //
+ wsaeinvalidproctable = 10104
+ //
+ // MessageId: WSAEINVALIDPROVIDER
+ //
+ // MessageText:
+ //
+ // The requested service provider is invalid.
+ //
+ wsaeinvalidprovider = 10105
+ //
+ // MessageId: WSAEPROVIDERFAILEDINIT
+ //
+ // MessageText:
+ //
+ // The requested service provider could not be loaded or initialized.
+ //
+ wsaeproviderfailedinit = 10106
+ //
+ // MessageId: WSASYSCALLFAILURE
+ //
+ // MessageText:
+ //
+ // A system call has failed.
+ //
+ wsasyscallfailure = 10107
+ //
+ // MessageId: WSASERVICE_NOT_FOUND
+ //
+ // MessageText:
+ //
+ // No such service is known. The service cannot be found in the specified name space.
+ //
+ wsaservice_not_found = 10108
+ //
+ // MessageId: WSATYPE_NOT_FOUND
+ //
+ // MessageText:
+ //
+ // The specified class was not found.
+ //
+ wsatype_not_found = 10109
+ //
+ // MessageId: WSA_E_NO_MORE
+ //
+ // MessageText:
+ //
+ // No more results can be returned by WSALookupServiceNext.
+ //
+ wsa_e_no_more = 10110
+ //
+ // MessageId: WSA_E_CANCELLED
+ //
+ // MessageText:
+ //
+ // A call to WSALookupServiceEnd was made while this call was still processing. The call has been canceled.
+ //
+ wsa_e_cancelled = 10111
+ //
+ // MessageId: WSAEREFUSED
+ //
+ // MessageText:
+ //
+ // A database query failed because it was actively refused.
+ //
+ wsaerefused = 10112
+ //
+ // MessageId: WSAHOST_NOT_FOUND
+ //
+ // MessageText:
+ //
+ // No such host is known.
+ //
+ wsahost_not_found = 11001
+ //
+ // MessageId: WSATRY_AGAIN
+ //
+ // MessageText:
+ //
+ // This is usually a temporary error during hostname resolution and means that the local server did not receive a response from an authoritative server.
+ //
+ wsatry_again = 11002
+ //
+ // MessageId: WSANO_RECOVERY
+ //
+ // MessageText:
+ //
+ // A non-recoverable error occurred during a database lookup.
+ //
+ wsano_recovery = 11003
+ //
+ // MessageId: WSANO_DATA
+ //
+ // MessageText:
+ //
+ // The requested name is valid, but no data of the requested type was found.
+ //
+ wsano_data = 11004
+ //
+ // MessageId: WSA_QOS_RECEIVERS
+ //
+ // MessageText:
+ //
+ // At least one reserve has arrived.
+ //
+ wsa_qos_receivers = 11005
+ //
+ // MessageId: WSA_QOS_SENDERS
+ //
+ // MessageText:
+ //
+ // At least one path has arrived.
+ //
+ wsa_qos_senders = 11006
+ //
+ // MessageId: WSA_QOS_NO_SENDERS
+ //
+ // MessageText:
+ //
+ // There are no senders.
+ //
+ wsa_qos_no_senders = 11007
+ //
+ // MessageId: WSA_QOS_NO_RECEIVERS
+ //
+ // MessageText:
+ //
+ // There are no receivers.
+ //
+ wsa_qos_no_receivers = 11008
+ //
+ // MessageId: WSA_QOS_REQUEST_CONFIRMED
+ //
+ // MessageText:
+ //
+ // Reserve has been confirmed.
+ //
+ wsa_qos_request_confirmed = 11009
+ //
+ // MessageId: WSA_QOS_ADMISSION_FAILURE
+ //
+ // MessageText:
+ //
+ // Error due to lack of resources.
+ //
+ wsa_qos_admission_failure = 11010
+ //
+ // MessageId: WSA_QOS_POLICY_FAILURE
+ //
+ // MessageText:
+ //
+ // Rejected for administrative reasons - bad credentials.
+ //
+ wsa_qos_policy_failure = 11011
+ //
+ // MessageId: WSA_QOS_BAD_STYLE
+ //
+ // MessageText:
+ //
+ // Unknown or conflicting style.
+ //
+ wsa_qos_bad_style = 11012
+ //
+ // MessageId: WSA_QOS_BAD_OBJECT
+ //
+ // MessageText:
+ //
+ // Problem with some part of the filterspec or providerspecific buffer in general.
+ //
+ wsa_qos_bad_object = 11013
+ //
+ // MessageId: WSA_QOS_TRAFFIC_CTRL_ERROR
+ //
+ // MessageText:
+ //
+ // Problem with some part of the flowspec.
+ //
+ wsa_qos_traffic_ctrl_error = 11014
+ //
+ // MessageId: WSA_QOS_GENERIC_ERROR
+ //
+ // MessageText:
+ //
+ // General QOS error.
+ //
+ wsa_qos_generic_error = 11015
+ //
+ // MessageId: WSA_QOS_ESERVICETYPE
+ //
+ // MessageText:
+ //
+ // An invalid or unrecognized service type was found in the flowspec.
+ //
+ wsa_qos_eservicetype = 11016
+ //
+ // MessageId: WSA_QOS_EFLOWSPEC
+ //
+ // MessageText:
+ //
+ // An invalid or inconsistent flowspec was found in the QOS structure.
+ //
+ wsa_qos_eflowspec = 11017
+ //
+ // MessageId: WSA_QOS_EPROVSPECBUF
+ //
+ // MessageText:
+ //
+ // Invalid QOS provider-specific buffer.
+ //
+ wsa_qos_eprovspecbuf = 11018
+ //
+ // MessageId: WSA_QOS_EFILTERSTYLE
+ //
+ // MessageText:
+ //
+ // An invalid QOS filter style was used.
+ //
+ wsa_qos_efilterstyle = 11019
+ //
+ // MessageId: WSA_QOS_EFILTERTYPE
+ //
+ // MessageText:
+ //
+ // An invalid QOS filter type was used.
+ //
+ wsa_qos_efiltertype = 11020
+ //
+ // MessageId: WSA_QOS_EFILTERCOUNT
+ //
+ // MessageText:
+ //
+ // An incorrect number of QOS FILTERSPECs were specified in the FLOWDESCRIPTOR.
+ //
+ wsa_qos_efiltercount = 11021
+ //
+ // MessageId: WSA_QOS_EOBJLENGTH
+ //
+ // MessageText:
+ //
+ // An object with an invalid ObjectLength field was specified in the QOS provider-specific buffer.
+ //
+ wsa_qos_eobjlength = 11022
+ //
+ // MessageId: WSA_QOS_EFLOWCOUNT
+ //
+ // MessageText:
+ //
+ // An incorrect number of flow descriptors was specified in the QOS structure.
+ //
+ wsa_qos_eflowcount = 11023
+ //
+ // MessageId: WSA_QOS_EUNKOWNPSOBJ
+ //
+ // MessageText:
+ //
+ // An unrecognized object was found in the QOS provider-specific buffer.
+ //
+ wsa_qos_eunkownpsobj = 11024
+ //
+ // MessageId: WSA_QOS_EPOLICYOBJ
+ //
+ // MessageText:
+ //
+ // An invalid policy object was found in the QOS provider-specific buffer.
+ //
+ wsa_qos_epolicyobj = 11025
+ //
+ // MessageId: WSA_QOS_EFLOWDESC
+ //
+ // MessageText:
+ //
+ // An invalid QOS flow descriptor was found in the flow descriptor list.
+ //
+ wsa_qos_eflowdesc = 11026
+ //
+ // MessageId: WSA_QOS_EPSFLOWSPEC
+ //
+ // MessageText:
+ //
+ // An invalid or inconsistent flowspec was found in the QOS provider specific buffer.
+ //
+ wsa_qos_epsflowspec = 11027
+ //
+ // MessageId: WSA_QOS_EPSFILTERSPEC
+ //
+ // MessageText:
+ //
+ // An invalid FILTERSPEC was found in the QOS provider-specific buffer.
+ //
+ wsa_qos_epsfilterspec = 11028
+ //
+ // MessageId: WSA_QOS_ESDMODEOBJ
+ //
+ // MessageText:
+ //
+ // An invalid shape discard mode object was found in the QOS provider specific buffer.
+ //
+ wsa_qos_esdmodeobj = 11029
+ //
+ // MessageId: WSA_QOS_ESHAPERATEOBJ
+ //
+ // MessageText:
+ //
+ // An invalid shaping rate object was found in the QOS provider-specific buffer.
+ //
+ wsa_qos_eshaperateobj = 11030
+ //
+ // MessageId: WSA_QOS_RESERVED_PETYPE
+ //
+ // MessageText:
+ //
+ // A reserved policy element was found in the QOS provider-specific buffer.
+ //
+ wsa_qos_reserved_petype = 11031
+ //
+ // MessageId: WSA_SECURE_HOST_NOT_FOUND
+ //
+ // MessageText:
+ //
+ // No such host is known securely.
+ //
+ wsa_secure_host_not_found = 11032
+ //
+ // MessageId: WSA_IPSEC_NAME_POLICY_ERROR
+ //
+ // MessageText:
+ //
+ // Name based IPSEC policy could not be added.
+ //
+ wsa_ipsec_name_policy_error = 11033
+}
+
+// wsa_error casts an int to its WsaError value
+pub fn wsa_error(code int) WsaError {
+ return WsaError(code)
+}
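+
+// A minimal usage sketch (assuming a preceding socket call just failed):
+// the raw code from WSAGetLastError can be mapped onto the enum above and
+// compared against specific values.
+//
+//   code := error_code()
+//   if wsa_error(code) == .wsaewouldblock {
+//     // the socket is non-blocking and not ready yet; retry later
+//   }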
+
+const (
+ error_ewouldblock = WsaError.wsaewouldblock
+)
+
+// Link to Winsock library
+#flag -lws2_32
+#include <winsock2.h>
+#include <ws2tcpip.h>
+
+// Constants that windows needs
+const (
+ fionbio = C.FIONBIO
+ msg_nosignal = 0
+ wsa_v22 = 0x202 // C.MAKEWORD(2, 2)
+)
+
+// error_code returns the last socket error
+fn error_code() int {
+ return C.WSAGetLastError()
+}
+
+struct C.WSAData {
+mut:
+ wVersion u16
+ wHighVersion u16
+ szDescription [257]byte
+ szSystemStatus [129]byte
+ iMaxSockets u16
+ iMaxUdpDg u16
+ lpVendorInfo &byte
+}
+
+fn init() {
+ mut wsadata := C.WSAData{
+ lpVendorInfo: 0
+ }
+ res := C.WSAStartup(net.wsa_v22, &wsadata)
+ if res != 0 {
+ panic('socket: WSAStartup failed')
+ }
+}
diff --git a/v_windows/v/vlib/net/openssl/c.v b/v_windows/v/vlib/net/openssl/c.v
new file mode 100644
index 0000000..dedba2a
--- /dev/null
+++ b/v_windows/v/vlib/net/openssl/c.v
@@ -0,0 +1,120 @@
+module openssl
+
+// On Linux, prefer a locally built openssl, because it is
+// much more likely to be newer than the system
+// openssl from libssl-dev. If there is no local openssl,
+// the next flag is harmless, since it will still use the
+// (older) system openssl.
+#flag linux -I/usr/local/include/openssl -L/usr/local/lib
+#flag windows -l libssl -l libcrypto
+#flag -lssl -lcrypto
+#flag linux -ldl -lpthread
+// MacPorts
+#flag darwin -I/opt/local/include
+#flag darwin -L/opt/local/lib
+// Brew
+#flag darwin -I/usr/local/opt/openssl/include
+#flag darwin -L/usr/local/opt/openssl/lib
+// Brew arm64
+#flag darwin -I /opt/homebrew/opt/openssl/include
+#flag darwin -L /opt/homebrew/opt/openssl/lib
+//
+#include <openssl/rand.h> # Please install OpenSSL development headers
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+
+pub struct C.SSL {
+}
+
+pub struct SSL_CTX {
+}
+
+pub struct SSL {
+}
+
+pub struct SSL_METHOD {
+}
+
+pub struct OPENSSL_INIT_SETTINGS {
+}
+
+fn C.BIO_new_ssl_connect(ctx &C.SSL_CTX) &C.BIO
+
+fn C.BIO_set_conn_hostname(b &C.BIO, name &char) int
+
+// there are actually 2 macros for BIO_get_ssl
+// fn C.BIO_get_ssl(bp &C.BIO, ssl charptr, c int)
+// fn C.BIO_get_ssl(bp &C.BIO, sslp charptr)
+fn C.BIO_get_ssl(bp &C.BIO, vargs ...voidptr)
+
+fn C.BIO_do_connect(b &C.BIO) int
+
+fn C.BIO_do_handshake(b &C.BIO) int
+
+fn C.BIO_puts(b &C.BIO, buf &char)
+
+fn C.BIO_read(b &C.BIO, buf voidptr, len int) int
+
+fn C.BIO_free_all(a &C.BIO)
+
+fn C.SSL_CTX_new(method &C.SSL_METHOD) &C.SSL_CTX
+
+fn C.SSL_CTX_set_options(ctx &C.SSL_CTX, options int)
+
+fn C.SSL_CTX_set_verify_depth(s &C.SSL_CTX, depth int)
+
+fn C.SSL_CTX_load_verify_locations(ctx &C.SSL_CTX, ca_file &char, ca_path &char) int
+
+fn C.SSL_CTX_free(ctx &C.SSL_CTX)
+
+fn C.SSL_new(&C.SSL_CTX) &C.SSL
+
+fn C.SSL_set_fd(ssl &C.SSL, fd int) int
+
+fn C.SSL_connect(&C.SSL) int
+
+fn C.SSL_set_cipher_list(ctx &SSL, str &char) int
+
+fn C.SSL_get_peer_certificate(ssl &SSL) &C.X509
+
+fn C.ERR_clear_error()
+
+fn C.SSL_get_error(ssl &C.SSL, ret int) int
+
+fn C.SSL_get_verify_result(ssl &SSL) int
+
+fn C.SSL_set_tlsext_host_name(s &SSL, name &char) int
+
+fn C.SSL_shutdown(&C.SSL) int
+
+fn C.SSL_free(&C.SSL)
+
+fn C.SSL_write(ssl &C.SSL, buf voidptr, buflen int) int
+
+fn C.SSL_read(ssl &C.SSL, buf voidptr, buflen int) int
+
+fn C.SSL_load_error_strings()
+
+fn C.SSL_library_init() int
+
+fn C.SSLv23_client_method() &C.SSL_METHOD
+
+fn C.TLS_method() voidptr
+
+fn C.TLSv1_2_method() voidptr
+
+fn C.OPENSSL_init_ssl(opts u64, settings &OPENSSL_INIT_SETTINGS) int
+
+fn init() {
+ $if ssl_pre_1_1_version ? {
+ // OPENSSL_VERSION_NUMBER < 0x10100000L
+ C.SSL_load_error_strings()
+ C.SSL_library_init()
+ } $else {
+ C.OPENSSL_init_ssl(C.OPENSSL_INIT_LOAD_SSL_STRINGS, 0)
+ }
+}
+
+pub const (
+ is_used = 1
+)
diff --git a/v_windows/v/vlib/net/openssl/openssl.v b/v_windows/v/vlib/net/openssl/openssl.v
new file mode 100644
index 0000000..ffcabf5
--- /dev/null
+++ b/v_windows/v/vlib/net/openssl/openssl.v
@@ -0,0 +1,32 @@
+module openssl
+
+// ssl_error returns the non-error ssl code, or an error if it is unrecoverable and we should panic
+pub fn ssl_error(ret int, ssl voidptr) ?SSLError {
+ res := C.SSL_get_error(ssl, ret)
+ match SSLError(res) {
+ .ssl_error_syscall {
+ return error_with_code('unrecoverable syscall ($res)', res)
+ }
+ .ssl_error_ssl {
+ return error_with_code('unrecoverable ssl protocol error ($res)', res)
+ }
+ else {
+ return SSLError(res)
+ }
+ }
+}
+
+pub enum SSLError {
+ ssl_error_none = 0 // SSL_ERROR_NONE
+ ssl_error_ssl = 1 // SSL_ERROR_SSL
+ ssl_error_want_read = 2 // SSL_ERROR_WANT_READ
+ ssl_error_want_write = 3 // SSL_ERROR_WANT_WRITE
+ ssl_error_want_x509_lookup = 4 // SSL_ERROR_WANT_X509_LOOKUP
+ ssl_error_syscall = 5 // SSL_ERROR_SYSCALL
+ ssl_error_zero_return = 6 // SSL_ERROR_ZERO_RETURN
+ ssl_error_want_connect = 7 // SSL_ERROR_WANT_CONNECT
+ ssl_error_want_accept = 8 // SSL_ERROR_WANT_ACCEPT
+ ssl_error_want_async = 9 // SSL_ERROR_WANT_ASYNC
+ ssl_error_want_async_job = 10 // SSL_ERROR_WANT_ASYNC_JOB
+ ssl_error_want_early = 11 // SSL_ERROR_WANT_EARLY
+}
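+
+// A minimal sketch of how a caller typically reacts to the value returned
+// by ssl_error; `ret` and `ssl` here stand for the return value and handle
+// of a preceding SSL_read/SSL_write call (see ssl_connection.v):
+//
+//   err_res := ssl_error(ret, ssl) ?
+//   if err_res == .ssl_error_want_read || err_res == .ssl_error_want_write {
+//     // wait until the underlying socket is ready, then retry the call
+//   } else if err_res == .ssl_error_zero_return {
+//     // the peer closed the TLS connection cleanly
+//   }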
diff --git a/v_windows/v/vlib/net/openssl/ssl_connection.v b/v_windows/v/vlib/net/openssl/ssl_connection.v
new file mode 100644
index 0000000..58f47f6
--- /dev/null
+++ b/v_windows/v/vlib/net/openssl/ssl_connection.v
@@ -0,0 +1,268 @@
+module openssl
+
+import net
+import time
+
+// SSLConn is the current connection
+pub struct SSLConn {
+mut:
+ sslctx &C.SSL_CTX
+ ssl &C.SSL
+ handle int
+ duration time.Duration
+}
+
+// new_ssl_conn creates a new SSLConn instance
+pub fn new_ssl_conn() &SSLConn {
+ return &SSLConn{
+ sslctx: 0
+ ssl: 0
+ handle: 0
+ }
+}
+
+// Select operation
+enum Select {
+ read
+ write
+ except
+}
+
+// shutdown closes the ssl connection and cleans up its resources
+pub fn (mut s SSLConn) shutdown() ? {
+ if s.ssl != 0 {
+ mut res := 0
+ for {
+ res = C.SSL_shutdown(voidptr(s.ssl))
+ if res < 0 {
+ err_res := ssl_error(res, s.ssl) or {
+ break // We break to free rest of resources
+ }
+ if err_res == .ssl_error_want_read {
+ for {
+ ready := @select(s.handle, .read, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ } else if err_res == .ssl_error_want_write {
+ for {
+ ready := @select(s.handle, .write, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ } else {
+ unsafe { C.SSL_free(voidptr(s.ssl)) }
+ if s.sslctx != 0 {
+ C.SSL_CTX_free(s.sslctx)
+ }
+ return error('unexpected ssl error $err_res')
+ }
+ } else if res == 0 {
+ continue
+ } else if res == 1 {
+ break
+ }
+ }
+ C.SSL_free(voidptr(s.ssl))
+ }
+ if s.sslctx != 0 {
+ C.SSL_CTX_free(s.sslctx)
+ }
+}
+
+// connect to server using open ssl
+pub fn (mut s SSLConn) connect(mut tcp_conn net.TcpConn, hostname string) ? {
+ s.handle = tcp_conn.sock.handle
+ s.duration = tcp_conn.read_timeout()
+
+ s.sslctx = unsafe { C.SSL_CTX_new(C.SSLv23_client_method()) }
+ if s.sslctx == 0 {
+ return error("Couldn't get ssl context")
+ }
+
+ // TODO: Fix option to enable/disable checks for valid
+ // certificates to allow both secure and self signed
+ // for now the checks are not done at all to comply
+ // to current autobahn tests
+
+ // C.SSL_CTX_set_verify_depth(s.sslctx, 4)
+ // flags := C.SSL_OP_NO_SSLv2 | C.SSL_OP_NO_SSLv3 | C.SSL_OP_NO_COMPRESSION
+ // C.SSL_CTX_set_options(s.sslctx, flags)
+ // mut res := C.SSL_CTX_load_verify_locations(s.sslctx, 'random-org-chain.pem', 0)
+
+ s.ssl = unsafe { &C.SSL(C.SSL_new(s.sslctx)) }
+ if s.ssl == 0 {
+ return error("Couldn't create OpenSSL instance.")
+ }
+
+ // preferred_ciphers := 'HIGH:!aNULL:!kRSA:!PSK:!SRP:!MD5:!RC4'
+ // mut res := C.SSL_set_cipher_list(s.ssl, preferred_ciphers.str)
+ // if res != 1 {
+ // println('http: openssl: cipher failed')
+ // }
+
+ mut res := C.SSL_set_tlsext_host_name(voidptr(s.ssl), voidptr(hostname.str))
+ if res != 1 {
+ return error('cannot set host name')
+ }
+
+ if C.SSL_set_fd(voidptr(s.ssl), tcp_conn.sock.handle) != 1 {
+ return error("Couldn't assign ssl to socket.")
+ }
+ for {
+ res = C.SSL_connect(voidptr(s.ssl))
+ if res != 1 {
+ err_res := ssl_error(res, s.ssl) ?
+ if err_res == .ssl_error_want_read {
+ for {
+ ready := @select(s.handle, .read, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ } else if err_res == .ssl_error_want_write {
+ for {
+ ready := @select(s.handle, .write, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ }
+ return error('Could not connect using SSL. ($err_res)')
+ }
+ break
+ }
+}
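+
+// A minimal client sketch for the type above; the host and port are
+// placeholders, and error handling is elided:
+//
+//   mut tcp := net.dial_tcp('example.com:443') ?
+//   mut ssl := new_ssl_conn()
+//   ssl.connect(mut tcp, 'example.com') ?
+//   ssl.write('GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'.bytes()) ?
+//   mut buf := []byte{len: 4096}
+//   n := ssl.read_into(mut buf) ?
+//   ssl.shutdown() ?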
+
+pub fn (mut s SSLConn) socket_read_into_ptr(buf_ptr &byte, len int) ?int {
+ mut res := 0
+ for {
+ res = C.SSL_read(voidptr(s.ssl), buf_ptr, len)
+ if res < 0 {
+ err_res := ssl_error(res, s.ssl) ?
+ if err_res == .ssl_error_want_read {
+ for {
+ ready := @select(s.handle, .read, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ } else if err_res == .ssl_error_want_write {
+ for {
+ ready := @select(s.handle, .write, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ } else if err_res == .ssl_error_zero_return {
+ return 0
+ }
+ return error('Could not read using SSL. ($err_res)')
+ }
+ break
+ }
+ return res
+}
+
+pub fn (mut s SSLConn) read_into(mut buffer []byte) ?int {
+ res := s.socket_read_into_ptr(&byte(buffer.data), buffer.len) ?
+ return res
+}
+
+// write writes the given bytes to the SSL connection and returns the number of bytes written
+pub fn (mut s SSLConn) write(bytes []byte) ?int {
+ unsafe {
+ mut ptr_base := &byte(bytes.data)
+ mut total_sent := 0
+ for total_sent < bytes.len {
+ ptr := ptr_base + total_sent
+ remaining := bytes.len - total_sent
+ mut sent := C.SSL_write(voidptr(s.ssl), ptr, remaining)
+ if sent <= 0 {
+ err_res := ssl_error(sent, s.ssl) ?
+ if err_res == .ssl_error_want_read {
+ for {
+ ready := @select(s.handle, .read, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ } else if err_res == .ssl_error_want_write {
+ for {
+ ready := @select(s.handle, .write, s.duration) ?
+ if ready {
+ break
+ }
+ }
+ continue
+ } else if err_res == .ssl_error_zero_return {
+ return error('ssl write on closed connection') // Todo error_with_code close
+ }
+ return error_with_code('Could not write using SSL. ($err_res)', int(err_res))
+ }
+ total_sent += sent
+ }
+ return total_sent
+ }
+}
+
+/*
+This is basically a copy of Emily's socket implementation of select.
+ It has to be consolidated into the common net lib features
+ when merging this into V.
+*/
+// [typedef]
+// pub struct C.fd_set {
+// }
+
+// Select waits for an io operation (specified by parameter `test`) to be available
+fn @select(handle int, test Select, timeout time.Duration) ?bool {
+ set := C.fd_set{}
+
+ C.FD_ZERO(&set)
+ C.FD_SET(handle, &set)
+
+ seconds := timeout.milliseconds() / 1000
+ microseconds := (timeout - (seconds * time.second)) / time.microsecond
+ mut tt := C.timeval{
+ tv_sec: u64(seconds)
+ tv_usec: u64(microseconds)
+ }
+
+ mut timeval_timeout := &tt
+
+ // infinite timeout is signaled by passing null as the timeout to
+ // select
+ if timeout == net.infinite_timeout {
+ timeval_timeout = &C.timeval(0)
+ }
+
+ match test {
+ .read {
+ net.socket_error(C.@select(handle + 1, &set, C.NULL, C.NULL, timeval_timeout)) ?
+ }
+ .write {
+ net.socket_error(C.@select(handle + 1, C.NULL, &set, C.NULL, timeval_timeout)) ?
+ }
+ .except {
+ net.socket_error(C.@select(handle + 1, C.NULL, C.NULL, &set, timeval_timeout)) ?
+ }
+ }
+
+ return C.FD_ISSET(handle, &set)
+}
diff --git a/v_windows/v/vlib/net/smtp/smtp.v b/v_windows/v/vlib/net/smtp/smtp.v
new file mode 100644
index 0000000..50c537c
--- /dev/null
+++ b/v_windows/v/vlib/net/smtp/smtp.v
@@ -0,0 +1,190 @@
+module smtp
+
+/*
+*
+* smtp module
+* Created by: nedimf (07/2020)
+*/
+import net
+import encoding.base64
+import strings
+import time
+import io
+
+const (
+ recv_size = 128
+)
+
+enum ReplyCode {
+ ready = 220
+ close = 221
+ auth_ok = 235
+ action_ok = 250
+ mail_start = 354
+}
+
+pub enum BodyType {
+ text
+ html
+}
+
+pub struct Client {
+mut:
+ conn net.TcpConn
+ reader io.BufferedReader
+pub:
+ server string
+ port int = 25
+ username string
+ password string
+ from string
+pub mut:
+ is_open bool
+}
+
+pub struct Mail {
+ from string
+ to string
+ cc string
+ bcc string
+ date time.Time = time.now()
+ subject string
+ body_type BodyType
+ body string
+}
+
+// new_client returns a new SMTP client and connects to the configured server
+pub fn new_client(config Client) ?&Client {
+ mut c := &Client{
+ ...config
+ }
+ c.reconnect() ?
+ return c
+}
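+
+// A minimal usage sketch; the server, credentials and addresses below are
+// placeholders and error handling is elided:
+//
+//   mut client := new_client(
+//     server: 'smtp.example.com'
+//     username: 'user@example.com'
+//     password: 'secret'
+//     from: 'user@example.com'
+//   ) ?
+//   client.send(to: 'dev@example.com', subject: 'Hi', body: 'Hello') ?
+//   client.quit() ?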
+
+// reconnect reconnects to the SMTP server if the connection was closed
+pub fn (mut c Client) reconnect() ? {
+ if c.is_open {
+ return error('Already connected to server')
+ }
+
+ conn := net.dial_tcp('$c.server:$c.port') or { return error('Connecting to server failed') }
+ c.conn = conn
+
+ c.reader = io.new_buffered_reader(reader: c.conn)
+
+ c.expect_reply(.ready) or { return error('Received invalid response from server') }
+ c.send_ehlo() or { return error('Sending EHLO packet failed') }
+ c.send_auth() or { return error('Authenticating to server failed') }
+ c.is_open = true
+}
+
+// send sends an email
+pub fn (mut c Client) send(config Mail) ? {
+ if !c.is_open {
+ return error('Disconnected from server')
+ }
+ from := if config.from != '' { config.from } else { c.from }
+ c.send_mailfrom(from) or { return error('Sending mailfrom failed') }
+ c.send_mailto(config.to) or { return error('Sending mailto failed') }
+ c.send_data() or { return error('Sending mail data failed') }
+ c.send_body(Mail{
+ ...config
+ from: from
+ }) or { return error('Sending mail body failed') }
+}
+
+// quit closes the connection to the server
+pub fn (mut c Client) quit() ? {
+ c.send_str('QUIT\r\n') ?
+ c.expect_reply(.close) ?
+ c.conn.close() ?
+ c.is_open = false
+}
+
+// expect_reply checks if the SMTP server replied with the expected reply code
+fn (mut c Client) expect_reply(expected ReplyCode) ? {
+ bytes := io.read_all(reader: c.conn) ?
+
+ str := bytes.bytestr().trim_space()
+ $if smtp_debug ? {
+ eprintln('\n\n[RECV]')
+ eprint(str)
+ }
+
+ if str.len >= 3 {
+ status := str[..3].int()
+ if ReplyCode(status) != expected {
+ return error('Received unexpected status code $status, expecting $expected')
+ }
+ } else {
+ return error('Received unexpected SMTP data: $str')
+ }
+}
+
+[inline]
+fn (mut c Client) send_str(s string) ? {
+ $if smtp_debug ? {
+ eprintln('\n\n[SEND START]')
+ eprint(s.trim_space())
+ eprintln('\n[SEND END]')
+ }
+ c.conn.write(s.bytes()) ?
+}
+
+[inline]
+fn (mut c Client) send_ehlo() ? {
+ c.send_str('EHLO $c.server\r\n') ?
+ c.expect_reply(.action_ok) ?
+}
+
+[inline]
+fn (mut c Client) send_auth() ? {
+ if c.username.len == 0 {
+ return
+ }
+ mut sb := strings.new_builder(100)
+ sb.write_b(0)
+ sb.write_string(c.username)
+ sb.write_b(0)
+ sb.write_string(c.password)
+ a := sb.str()
+ auth := 'AUTH PLAIN ${base64.encode_str(a)}\r\n'
+ c.send_str(auth) ?
+ c.expect_reply(.auth_ok) ?
+}
+
+fn (mut c Client) send_mailfrom(from string) ? {
+ c.send_str('MAIL FROM: <$from>\r\n') ?
+ c.expect_reply(.action_ok) ?
+}
+
+fn (mut c Client) send_mailto(to string) ? {
+ c.send_str('RCPT TO: <$to>\r\n') ?
+ c.expect_reply(.action_ok) ?
+}
+
+fn (mut c Client) send_data() ? {
+ c.send_str('DATA\r\n') ?
+ c.expect_reply(.mail_start) ?
+}
+
+fn (mut c Client) send_body(cfg Mail) ? {
+ is_html := cfg.body_type == .html
+ date := cfg.date.utc_string().trim_right(' UTC') // TODO
+ mut sb := strings.new_builder(200)
+ sb.write_string('From: $cfg.from\r\n')
+ sb.write_string('To: <$cfg.to>\r\n')
+ sb.write_string('Cc: <$cfg.cc>\r\n')
+ sb.write_string('Bcc: <$cfg.bcc>\r\n')
+ sb.write_string('Date: $date\r\n')
+ sb.write_string('Subject: $cfg.subject\r\n')
+ if is_html {
+ sb.write_string('Content-Type: text/html; charset=ISO-8859-1')
+ }
+ sb.write_string('\r\n\r\n')
+ sb.write_string(cfg.body)
+ sb.write_string('\r\n.\r\n')
+ c.send_str(sb.str()) ?
+ c.expect_reply(.action_ok) ?
+}
diff --git a/v_windows/v/vlib/net/smtp/smtp_test.v b/v_windows/v/vlib/net/smtp/smtp_test.v
new file mode 100644
index 0000000..d975e57
--- /dev/null
+++ b/v_windows/v/vlib/net/smtp/smtp_test.v
@@ -0,0 +1,89 @@
+import os
+import net.smtp
+import time
+
+// Used to test that a function call returns an error
+fn fn_errors(mut c smtp.Client, m smtp.Mail) bool {
+ c.send(m) or { return true }
+ return false
+}
+
+/*
+*
+* smtp_test
+* Created by: nedimf (07/2020)
+*/
+fn test_smtp() {
+ $if !network ? {
+ return
+ }
+
+ client_cfg := smtp.Client{
+ server: 'smtp.mailtrap.io'
+ from: 'dev@vlang.io'
+ username: os.getenv('VSMTP_TEST_USER')
+ password: os.getenv('VSMTP_TEST_PASS')
+ }
+ if client_cfg.username == '' && client_cfg.password == '' {
+ eprintln('Please set VSMTP_TEST_USER and VSMTP_TEST_PASS before running this test')
+ exit(0)
+ }
+ send_cfg := smtp.Mail{
+ to: 'dev@vlang.io'
+ subject: 'Hello from V2'
+ body: 'Plain text'
+ }
+
+ mut client := smtp.new_client(client_cfg) or {
+ assert false
+ return
+ }
+ assert true
+ client.send(send_cfg) or {
+ assert false
+ return
+ }
+ assert true
+ client.send(smtp.Mail{
+ ...send_cfg
+ from: 'alexander@vlang.io'
+ }) or {
+ assert false
+ return
+ }
+ client.send(smtp.Mail{
+ ...send_cfg
+ cc: 'alexander@vlang.io,joe@vlang.io'
+ bcc: 'spytheman@vlang.io'
+ }) or {
+ assert false
+ return
+ }
+ client.send(smtp.Mail{
+ ...send_cfg
+ date: time.now().add_days(1000)
+ }) or {
+ assert false
+ return
+ }
+ assert true
+ client.quit() or {
+ assert false
+ return
+ }
+ assert true
+ // This call should return an error, since the connection is closed
+ if !fn_errors(mut client, send_cfg) {
+ assert false
+ return
+ }
+ client.reconnect() or {
+ assert false
+ return
+ }
+ client.send(send_cfg) or {
+ assert false
+ return
+ }
+ assert true
+}
diff --git a/v_windows/v/vlib/net/socket_options.c.v b/v_windows/v/vlib/net/socket_options.c.v
new file mode 100644
index 0000000..4e3240f
--- /dev/null
+++ b/v_windows/v/vlib/net/socket_options.c.v
@@ -0,0 +1,50 @@
+module net
+
+pub enum SocketOption {
+ // TODO: SO_ACCEPT_CONN is not here because windows doesn't support it
+ // and there is no easy way to define it
+ broadcast = C.SO_BROADCAST
+ debug = C.SO_DEBUG
+ dont_route = C.SO_DONTROUTE
+ error = C.SO_ERROR
+ keep_alive = C.SO_KEEPALIVE
+ linger = C.SO_LINGER
+ oob_inline = C.SO_OOBINLINE
+ reuse_addr = C.SO_REUSEADDR
+ recieve_buf_size = C.SO_RCVBUF
+ recieve_low_size = C.SO_RCVLOWAT
+ recieve_timeout = C.SO_RCVTIMEO
+ send_buf_size = C.SO_SNDBUF
+ send_low_size = C.SO_SNDLOWAT
+ send_timeout = C.SO_SNDTIMEO
+ socket_type = C.SO_TYPE
+ ipv6_only = C.IPV6_V6ONLY
+}
+
+const (
+ opts_bool = [SocketOption.broadcast, .debug, .dont_route, .error, .keep_alive, .oob_inline]
+ opts_int = [
+ .recieve_buf_size,
+ .recieve_low_size,
+ .recieve_timeout,
+ .send_buf_size,
+ .send_low_size,
+ .send_timeout,
+ ]
+
+ opts_can_set = [
+ SocketOption.broadcast,
+ .debug,
+ .dont_route,
+ .keep_alive,
+ .linger,
+ .oob_inline,
+ .recieve_buf_size,
+ .recieve_low_size,
+ .recieve_timeout,
+ .send_buf_size,
+ .send_low_size,
+ .send_timeout,
+ .ipv6_only,
+ ]
+)
diff --git a/v_windows/v/vlib/net/tcp.v b/v_windows/v/vlib/net/tcp.v
new file mode 100644
index 0000000..2d96194
--- /dev/null
+++ b/v_windows/v/vlib/net/tcp.v
@@ -0,0 +1,420 @@
+module net
+
+import time
+
+const (
+ tcp_default_read_timeout = 30 * time.second
+ tcp_default_write_timeout = 30 * time.second
+)
+
+[heap]
+pub struct TcpConn {
+pub mut:
+ sock TcpSocket
+mut:
+ write_deadline time.Time
+ read_deadline time.Time
+ read_timeout time.Duration
+ write_timeout time.Duration
+ is_blocking bool
+}
+
+pub fn dial_tcp(address string) ?&TcpConn {
+ addrs := resolve_addrs_fuzzy(address, .tcp) ?
+
+ // Very simple dialer
+ for addr in addrs {
+ mut s := new_tcp_socket(addr.family()) ?
+ s.connect(addr) or {
+ // Connection failed
+ s.close() or { continue }
+ continue
+ }
+
+ return &TcpConn{
+ sock: s
+ read_timeout: net.tcp_default_read_timeout
+ write_timeout: net.tcp_default_write_timeout
+ }
+ }
+ // failed
+ return error('dial_tcp failed')
+}
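+
+// A minimal client sketch for dial_tcp (the address is a placeholder and
+// error handling is elided), mirroring the usage in tcp_test.v:
+//
+//   mut conn := dial_tcp('localhost:8080') ?
+//   conn.write_string('ping') ?
+//   mut buf := []byte{len: 1024}
+//   n := conn.read(mut buf) ?
+//   println(buf[..n].bytestr())
+//   conn.close() ?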
+
+pub fn (mut c TcpConn) close() ? {
+ $if trace_tcp ? {
+ eprintln(' TcpConn.close | c.sock.handle: ${c.sock.handle:6}')
+ }
+ c.sock.close() ?
+}
+
+// write_ptr blocks and attempts to write all data
+pub fn (mut c TcpConn) write_ptr(b &byte, len int) ?int {
+ $if trace_tcp ? {
+ eprintln(
+ '>>> TcpConn.write_ptr | c.sock.handle: $c.sock.handle | b: ${ptr_str(b)} len: $len |\n' +
+ unsafe { b.vstring_with_len(len) })
+ }
+ $if trace_tcp_data_write ? {
+ eprintln('>>> TcpConn.write_ptr | data.len: ${len:6} | data: ' +
+ unsafe { b.vstring_with_len(len) })
+ }
+ unsafe {
+ mut ptr_base := &byte(b)
+ mut total_sent := 0
+ for total_sent < len {
+ ptr := ptr_base + total_sent
+ remaining := len - total_sent
+ mut sent := C.send(c.sock.handle, ptr, remaining, msg_nosignal)
+ $if trace_tcp_data_write ? {
+ eprintln('>>> TcpConn.write_ptr | data chunk, total_sent: ${total_sent:6}, remaining: ${remaining:6}, sent: ${sent:6}, ptr: ${ptr_str(ptr)}')
+ }
+ if sent < 0 {
+ code := error_code()
+ if code == int(error_ewouldblock) {
+ c.wait_for_write() ?
+ continue
+ } else {
+ wrap_error(code) ?
+ }
+ }
+ total_sent += sent
+ }
+ return total_sent
+ }
+}
+
+// write blocks and attempts to write all data
+pub fn (mut c TcpConn) write(bytes []byte) ?int {
+ return c.write_ptr(bytes.data, bytes.len)
+}
+
+// write_string blocks and attempts to write all data
+pub fn (mut c TcpConn) write_string(s string) ?int {
+ return c.write_ptr(s.str, s.len)
+}
+
+pub fn (mut c TcpConn) read_ptr(buf_ptr &byte, len int) ?int {
+ mut res := wrap_read_result(C.recv(c.sock.handle, voidptr(buf_ptr), len, 0)) ?
+ $if trace_tcp ? {
+ eprintln('<<< TcpConn.read_ptr | c.sock.handle: $c.sock.handle | buf_ptr: ${ptr_str(buf_ptr)} len: $len | res: $res')
+ }
+ if res > 0 {
+ $if trace_tcp_data_read ? {
+ eprintln('<<< TcpConn.read_ptr | 1 data.len: ${res:6} | data: ' +
+ unsafe { buf_ptr.vstring_with_len(res) })
+ }
+ return res
+ }
+ code := error_code()
+ if code == int(error_ewouldblock) {
+ c.wait_for_read() ?
+ res = wrap_read_result(C.recv(c.sock.handle, voidptr(buf_ptr), len, 0)) ?
+ $if trace_tcp ? {
+ eprintln('<<< TcpConn.read_ptr | c.sock.handle: $c.sock.handle | buf_ptr: ${ptr_str(buf_ptr)} len: $len | res: $res')
+ }
+ $if trace_tcp_data_read ? {
+ if res > 0 {
+ eprintln('<<< TcpConn.read_ptr | 2 data.len: ${res:6} | data: ' +
+ unsafe { buf_ptr.vstring_with_len(res) })
+ }
+ }
+ return socket_error(res)
+ } else {
+ wrap_error(code) ?
+ }
+ return none
+}
+
+pub fn (mut c TcpConn) read(mut buf []byte) ?int {
+ return c.read_ptr(buf.data, buf.len)
+}
+
+pub fn (mut c TcpConn) read_deadline() ?time.Time {
+ if c.read_deadline.unix == 0 {
+ return c.read_deadline
+ }
+ return none
+}
+
+pub fn (mut c TcpConn) set_read_deadline(deadline time.Time) {
+ c.read_deadline = deadline
+}
+
+pub fn (mut c TcpConn) write_deadline() ?time.Time {
+ if c.write_deadline.unix == 0 {
+ return c.write_deadline
+ }
+ return none
+}
+
+pub fn (mut c TcpConn) set_write_deadline(deadline time.Time) {
+ c.write_deadline = deadline
+}
+
+pub fn (c &TcpConn) read_timeout() time.Duration {
+ return c.read_timeout
+}
+
+pub fn (mut c TcpConn) set_read_timeout(t time.Duration) {
+ c.read_timeout = t
+}
+
+pub fn (c &TcpConn) write_timeout() time.Duration {
+ return c.write_timeout
+}
+
+pub fn (mut c TcpConn) set_write_timeout(t time.Duration) {
+ c.write_timeout = t
+}
+
+[inline]
+pub fn (mut c TcpConn) wait_for_read() ? {
+ return wait_for_read(c.sock.handle, c.read_deadline, c.read_timeout)
+}
+
+[inline]
+pub fn (mut c TcpConn) wait_for_write() ? {
+ return wait_for_write(c.sock.handle, c.write_deadline, c.write_timeout)
+}
+
+pub fn (c &TcpConn) peer_addr() ?Addr {
+ mut addr := Addr{
+ addr: AddrData{
+ Ip6: Ip6{}
+ }
+ }
+ mut size := sizeof(Addr)
+ socket_error(C.getpeername(c.sock.handle, voidptr(&addr), &size)) ?
+ return addr
+}
+
+pub fn (c &TcpConn) peer_ip() ?string {
+ return c.peer_addr() ?.str()
+}
+
+pub fn (c &TcpConn) addr() ?Addr {
+ return c.sock.address()
+}
+
+pub fn (c TcpConn) str() string {
+ s := c.sock.str().replace('\n', ' ').replace(' ', ' ')
+ return 'TcpConn{ write_deadline: $c.write_deadline, read_deadline: $c.read_deadline, read_timeout: $c.read_timeout, write_timeout: $c.write_timeout, sock: $s }'
+}
+
+pub struct TcpListener {
+pub mut:
+ sock TcpSocket
+mut:
+ accept_timeout time.Duration
+ accept_deadline time.Time
+}
+
+pub fn listen_tcp(family AddrFamily, saddr string) ?&TcpListener {
+ s := new_tcp_socket(family) ?
+
+ addrs := resolve_addrs(saddr, family, .tcp) ?
+
+ // TODO(logic to pick here)
+ addr := addrs[0]
+
+ // cast to the correct type
+ alen := addr.len()
+ bindres := C.bind(s.handle, voidptr(&addr), alen)
+ socket_error(bindres) ?
+ socket_error(C.listen(s.handle, 128)) ?
+ return &TcpListener{
+ sock: s
+ accept_deadline: no_deadline
+ accept_timeout: infinite_timeout
+ }
+}
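+
+// A minimal server sketch for listen_tcp (the port is a placeholder and
+// error handling is elided): accept connections in a loop and answer each.
+//
+//   mut listener := listen_tcp(.ip6, ':8080') ?
+//   for {
+//     mut conn := listener.accept() or { continue }
+//     conn.write_string('hello\r\n') or {}
+//     conn.close() or {}
+//   }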
+
+pub fn (mut l TcpListener) accept() ?&TcpConn {
+ $if trace_tcp ? {
+ eprintln(' TcpListener.accept | l.sock.handle: ${l.sock.handle:6}')
+ }
+ addr := Addr{
+ addr: AddrData{
+ Ip6: Ip6{}
+ }
+ }
+ size := sizeof(Addr)
+ mut new_handle := C.accept(l.sock.handle, voidptr(&addr), &size)
+ if new_handle <= 0 {
+ l.wait_for_accept() ?
+ new_handle = C.accept(l.sock.handle, voidptr(&addr), &size)
+ if new_handle == -1 || new_handle == 0 {
+ return error('accept failed')
+ }
+ }
+ new_sock := tcp_socket_from_handle(new_handle) ?
+ $if trace_tcp ? {
+ eprintln(' TcpListener.accept | << new_sock.handle: ${new_sock.handle:6}')
+ }
+ return &TcpConn{
+ sock: new_sock
+ read_timeout: net.tcp_default_read_timeout
+ write_timeout: net.tcp_default_write_timeout
+ }
+}
+
+pub fn (c &TcpListener) accept_deadline() ?time.Time {
+ if c.accept_deadline.unix != 0 {
+ return c.accept_deadline
+ }
+ return error('invalid deadline')
+}
+
+pub fn (mut c TcpListener) set_accept_deadline(deadline time.Time) {
+ c.accept_deadline = deadline
+}
+
+pub fn (c &TcpListener) accept_timeout() time.Duration {
+ return c.accept_timeout
+}
+
+pub fn (mut c TcpListener) set_accept_timeout(t time.Duration) {
+ c.accept_timeout = t
+}
+
+pub fn (mut c TcpListener) wait_for_accept() ? {
+ return wait_for_read(c.sock.handle, c.accept_deadline, c.accept_timeout)
+}
+
+pub fn (mut c TcpListener) close() ? {
+ c.sock.close() ?
+}
+
+pub fn (c &TcpListener) addr() ?Addr {
+ return c.sock.address()
+}
+
+struct TcpSocket {
+pub:
+ handle int
+}
+
+fn new_tcp_socket(family AddrFamily) ?TcpSocket {
+ handle := socket_error(C.socket(family, SocketType.tcp, 0)) ?
+ mut s := TcpSocket{
+ handle: handle
+ }
+ $if trace_tcp ? {
+ eprintln(' new_tcp_socket | s.handle: ${s.handle:6}')
+ }
+
+ // TODO(emily):
+ // we shouldn't be using ioctlsocket in the 21st century
+ // use the non-blocking socket option instead please :)
+
+ // TODO(emily):
+ // Move this to its own function on the socket
+ s.set_option_int(.reuse_addr, 1) ?
+
+ $if !net_blocking_sockets ? {
+ $if windows {
+ t := u32(1) // true
+ socket_error(C.ioctlsocket(handle, fionbio, &t)) ?
+ } $else {
+ socket_error(C.fcntl(handle, C.F_SETFL, C.fcntl(handle, C.F_GETFL) | C.O_NONBLOCK)) ?
+ }
+ }
+ return s
+}
+
+fn tcp_socket_from_handle(sockfd int) ?TcpSocket {
+ mut s := TcpSocket{
+ handle: sockfd
+ }
+ $if trace_tcp ? {
+ eprintln(' tcp_socket_from_handle | s.handle: ${s.handle:6}')
+ }
+ // s.set_option_bool(.reuse_addr, true)?
+ s.set_option_int(.reuse_addr, 1) ?
+ s.set_dualstack(true) or {
+ // Not ipv6, we don't care
+ }
+ $if !net_blocking_sockets ? {
+ $if windows {
+ t := u32(1) // true
+ socket_error(C.ioctlsocket(sockfd, fionbio, &t)) ?
+ } $else {
+ socket_error(C.fcntl(sockfd, C.F_SETFL, C.fcntl(sockfd, C.F_GETFL) | C.O_NONBLOCK)) ?
+ }
+ }
+ return s
+}
+
+pub fn (mut s TcpSocket) set_option_bool(opt SocketOption, value bool) ? {
+ // TODO reenable when this `in` operation works again
+ // if opt !in opts_can_set {
+ // return err_option_not_settable
+ // }
+ // if opt !in opts_bool {
+ // return err_option_wrong_type
+ // }
+ x := int(value)
+ socket_error(C.setsockopt(s.handle, C.SOL_SOCKET, int(opt), &x, sizeof(int))) ?
+}
+
+pub fn (mut s TcpSocket) set_dualstack(on bool) ? {
+ x := int(!on)
+ socket_error(C.setsockopt(s.handle, C.IPPROTO_IPV6, int(SocketOption.ipv6_only), &x,
+ sizeof(int))) ?
+}
+
+pub fn (mut s TcpSocket) set_option_int(opt SocketOption, value int) ? {
+ socket_error(C.setsockopt(s.handle, C.SOL_SOCKET, int(opt), &value, sizeof(int))) ?
+}
+
+fn (mut s TcpSocket) close() ? {
+ return shutdown(s.handle)
+}
+
+fn (mut s TcpSocket) @select(test Select, timeout time.Duration) ?bool {
+ return @select(s.handle, test, timeout)
+}
+
+const (
+ connect_timeout = 5 * time.second
+)
+
+fn (mut s TcpSocket) connect(a Addr) ? {
+ res := C.connect(s.handle, voidptr(&a), a.len())
+ if res == 0 {
+ return
+ }
+
+ // The socket is nonblocking and the connection cannot be completed
+ // immediately. (UNIX domain sockets failed with EAGAIN instead.)
+ // It is possible to select(2) or poll(2) for completion by selecting
+ // the socket for writing. After select(2) indicates writability,
+ // use getsockopt(2) to read the SO_ERROR option at level SOL_SOCKET to
+ // determine whether connect() completed successfully (SO_ERROR is zero) or
+ // unsuccessfully (SO_ERROR is one of the usual error codes listed here,
+ // ex‐ plaining the reason for the failure).
+ write_result := s.@select(.write, net.connect_timeout) ?
+ if write_result {
+ err := 0
+ len := sizeof(err)
+ socket_error(C.getsockopt(s.handle, C.SOL_SOCKET, C.SO_ERROR, &err, &len)) ?
+
+ if err != 0 {
+ return wrap_error(err)
+ }
+ // Succeeded
+ return
+ }
+
+ // Get the error
+ socket_error(C.connect(s.handle, voidptr(&a), a.len())) ?
+
+ // otherwise we timed out
+ return err_connect_timed_out
+}
+
+// address gets the address of a socket
+pub fn (s &TcpSocket) address() ?Addr {
+ return addr_from_socket_handle(s.handle)
+}
diff --git a/v_windows/v/vlib/net/tcp_read_line.v b/v_windows/v/vlib/net/tcp_read_line.v
new file mode 100644
index 0000000..ec4a18e
--- /dev/null
+++ b/v_windows/v/vlib/net/tcp_read_line.v
@@ -0,0 +1,90 @@
+module net
+
+const (
+ crlf = '\r\n'
+ msg_peek = 0x02
+ max_read = 400
+)
+
+// get_blocking returns whether the connection is in a blocking state,
+// that is, calls to .read_line, C.recv etc. will block until new
+// data arrives, instead of returning immediately.
+pub fn (mut con TcpConn) get_blocking() bool {
+ // flags := C.fcntl(con.sock.handle, C.F_GETFL, 0)
+ // return 0 == flags & C.O_NONBLOCK
+ return con.is_blocking
+}
+
+// set_blocking will change the state of the connection to either blocking,
+// when state is true, or non-blocking (false).
+// The default for `net` tcp connections is the non-blocking mode.
+// Calling .read_line will set the connection to blocking mode.
+pub fn (mut con TcpConn) set_blocking(state bool) ? {
+ con.is_blocking = state
+ $if windows {
+ mut t := u32(0)
+ if !con.is_blocking {
+ t = 1
+ }
+ socket_error(C.ioctlsocket(con.sock.handle, fionbio, &t)) ?
+ } $else {
+ mut flags := C.fcntl(con.sock.handle, C.F_GETFL, 0)
+ if state {
+ flags &= ~C.O_NONBLOCK
+ } else {
+ flags |= C.O_NONBLOCK
+ }
+ socket_error(C.fcntl(con.sock.handle, C.F_SETFL, flags)) ?
+ }
+}
+
+// read_line is a *simple*, *non customizable*, blocking line reader.
+// It will *always* return a line, ending with CRLF, or just '', on EOF.
+// NB: if you want more control over the buffer, please use a buffered IO
+// reader instead: `io.new_buffered_reader({reader: io.make_reader(con)})`
+pub fn (mut con TcpConn) read_line() string {
+ mut buf := [net.max_read]byte{} // where C.recv will store the network data
+ mut res := '' // The final result, including the ending \n.
+ if !con.is_blocking {
+ con.set_blocking(true) or {}
+ }
+ for {
+ mut line := '' // The current line. Can be a partial without \n in it.
+ n := C.recv(con.sock.handle, &buf[0], net.max_read - 1, net.msg_peek | msg_nosignal)
+ if n == -1 {
+ return res
+ }
+ if n == 0 {
+ return res
+ }
+ buf[n] = `\0`
+ mut eol_idx := -1
+ for i in 0 .. n {
+ if int(buf[i]) == `\n` {
+ eol_idx = i
+ // Ensure that tos_clone(buf) later,
+ // will return *only* the first line (including \n),
+ // and ignore the rest
+ buf[i + 1] = `\0`
+ break
+ }
+ }
+ line = unsafe { tos_clone(&buf[0]) }
+ if eol_idx > 0 {
+ // At this point, we are sure that recv returned valid data,
+ // that contains *at least* one line.
+ // Ensure that the block till the first \n (including it)
+ // is removed from the socket's receive queue, so that it does
+ // not get read again.
+ C.recv(con.sock.handle, &buf[0], eol_idx + 1, msg_nosignal)
+ res += line
+ break
+ }
+ // recv returned a buffer without a \n in it.
+ C.recv(con.sock.handle, &buf[0], n, msg_nosignal)
+ res += line
+ res += net.crlf
+ break
+ }
+ return res
+}
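+
+// A minimal sketch of the blocking line reader above (the address is a
+// placeholder); read_line switches the connection to blocking mode itself:
+//
+//   mut conn := dial_tcp('localhost:8080') ?
+//   line := conn.read_line()
+//   if line == '' {
+//     // EOF (or a read error): no line could be read
+//   }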
diff --git a/v_windows/v/vlib/net/tcp_simple_client_server_test.v b/v_windows/v/vlib/net/tcp_simple_client_server_test.v
new file mode 100644
index 0000000..317933f
--- /dev/null
+++ b/v_windows/v/vlib/net/tcp_simple_client_server_test.v
@@ -0,0 +1,150 @@
+import io
+import net
+import strings
+
+const (
+ server_port = ':22443'
+)
+
+fn accept(mut server net.TcpListener, c chan &net.TcpConn) {
+ c <- server.accept() or { panic(err) }
+}
+
+fn setup() (&net.TcpListener, &net.TcpConn, &net.TcpConn) {
+ mut server := net.listen_tcp(.ip6, server_port) or { panic(err) }
+
+ c := chan &net.TcpConn{}
+ go accept(mut server, c)
+ mut client := net.dial_tcp('localhost$server_port') or { panic(err) }
+
+ socket := <-c
+
+ $if debug_peer_ip ? {
+ eprintln('$server.addr()\n$client.peer_addr(), $client.addr()\n$socket.peer_addr(), $socket.addr()')
+ }
+ assert true
+ return server, client, socket
+}
+
+fn cleanup(mut server net.TcpListener, mut client net.TcpConn, mut socket net.TcpConn) {
+ server.close() or {}
+ client.close() or {}
+ socket.close() or {}
+}
+
+fn test_socket() {
+ mut server, mut client, mut socket := setup()
+ defer {
+ cleanup(mut server, mut client, mut socket)
+ }
+ message := 'Hello World'
+ socket.write_string(message) or {
+ assert false
+ return
+ }
+ assert true
+ $if debug {
+ println('message send: $message')
+ }
+ $if debug {
+ println('send socket: $socket.sock.handle')
+ }
+ mut buf := []byte{len: 1024}
+ nbytes := client.read(mut buf) or {
+ assert false
+ return
+ }
+ received := buf[0..nbytes].bytestr()
+ $if debug {
+ println('message received: $received')
+ }
+ $if debug {
+ println('client: $client.sock.handle')
+ }
+ assert message == received
+}
+
+fn test_socket_write_and_read() {
+ mut server, mut client, mut socket := setup()
+ defer {
+ cleanup(mut server, mut client, mut socket)
+ }
+ message1 := 'a message 1'
+ socket.write_string(message1) or { assert false }
+ mut rbuf := []byte{len: message1.len}
+ client.read(mut rbuf) or {
+ assert false
+ return
+ }
+ line := rbuf.bytestr()
+ assert line == message1
+}
+
+fn test_socket_read_line() {
+ mut server, mut client, mut socket := setup()
+ mut reader := io.new_buffered_reader(reader: client)
+ defer {
+ cleanup(mut server, mut client, mut socket)
+ }
+ message1, message2 := 'message1', 'message2'
+ message := '$message1\n$message2\n'
+ socket.write_string(message) or { assert false }
+ assert true
+ //
+ line1 := reader.read_line() or {
+ // println(reader.buf)
+ assert false
+ return
+ }
+ line2 := reader.read_line() or {
+ // println(reader.buf)
+ assert false
+ return
+ }
+ assert line1 == message1
+ assert line2 == message2
+}
+
+fn test_socket_write_fail_without_panic() {
+ mut server, mut client, mut socket := setup()
+ defer {
+ cleanup(mut server, mut client, mut socket)
+ }
+ message2 := 'a message 2'
+ // ensure that socket.write (i.e. done on the server side)
+ // continues to work, even when the client side has been disconnected
+ // this test is important for a stable long standing server
+ client.close() or {}
+ $if solaris {
+ return
+ }
+ // TODO: fix segfaulting on Solaris
+ for i := 0; i < 3; i++ {
+ socket.write_string(message2) or {
+ println('write to a socket without a recipient should produce an option fail: $err | $message2')
+ assert true
+ }
+ }
+}
+
+fn test_socket_read_line_long_line_without_eol() {
+ mut server, mut client, mut socket := setup()
+ mut reader := io.new_buffered_reader(reader: client)
+ defer {
+ cleanup(mut server, mut client, mut socket)
+ }
+ message := strings.repeat_string('123', 400)
+ socket.write_string(message) or {
+ assert false
+ return
+ }
+ socket.write_string('\n') or {
+ assert false
+ return
+ }
+ line := reader.read_line() or {
+ assert false
+ return
+ }
+ assert line == message
+}
diff --git a/v_windows/v/vlib/net/tcp_test.v b/v_windows/v/vlib/net/tcp_test.v
new file mode 100644
index 0000000..cbd2aa4
--- /dev/null
+++ b/v_windows/v/vlib/net/tcp_test.v
@@ -0,0 +1,100 @@
+import net
+import os
+
+const (
+ test_port = 45123
+)
+
+fn handle_conn(mut c net.TcpConn) {
+ for {
+ mut buf := []byte{len: 100, init: 0}
+ read := c.read(mut buf) or {
+ println('Server: connection dropped')
+ return
+ }
+ c.write(buf[..read]) or {
+ println('Server: connection dropped')
+ return
+ }
+ }
+}
+
+fn one_shot_echo_server(mut l net.TcpListener, ch_started chan int) ? {
+ eprintln('> one_shot_echo_server')
+ ch_started <- 1
+ mut new_conn := l.accept() or { return error('could not accept') }
+ eprintln(' > new_conn: $new_conn')
+ handle_conn(mut new_conn)
+ new_conn.close() or {}
+}
+
+fn echo(address string) ? {
+ mut c := net.dial_tcp(address) ?
+ defer {
+ c.close() or {}
+ }
+
+ println('local: ' + c.addr() ?.str())
+ println(' peer: ' + c.peer_addr() ?.str())
+
+ data := 'Hello from vlib/net!'
+ c.write_string(data) ?
+ mut buf := []byte{len: 4096}
+ read := c.read(mut buf) ?
+ assert read == data.len
+ for i := 0; i < read; i++ {
+ assert buf[i] == data[i]
+ }
+ println('Got "$buf.bytestr()"')
+}
+
+fn test_tcp_ip6() {
+ eprintln('\n>>> ${@FN}')
+ address := 'localhost:$test_port'
+ mut l := net.listen_tcp(.ip6, ':$test_port') or { panic(err) }
+ dump(l)
+ start_echo_server(mut l)
+ echo(address) or { panic(err) }
+ l.close() or {}
+ // ensure there is at least one new socket created before the next test
+ l = net.listen_tcp(.ip6, ':${test_port + 1}') or { panic(err) }
+}
+
+fn start_echo_server(mut l net.TcpListener) {
+ ch_server_started := chan int{}
+ go one_shot_echo_server(mut l, ch_server_started)
+ _ := <-ch_server_started
+}
+
+fn test_tcp_ip() {
+ eprintln('\n>>> ${@FN}')
+ address := 'localhost:$test_port'
+ mut l := net.listen_tcp(.ip, address) or { panic(err) }
+ dump(l)
+ start_echo_server(mut l)
+ echo(address) or { panic(err) }
+ l.close() or {}
+}
+
+fn test_tcp_unix() {
+ eprintln('\n>>> ${@FN}')
+ // TODO(emily):
+ // whilst windows supposedly supports unix sockets
+ // this doesn't work (wsaeopnotsupp at the call to bind())
+ $if !windows {
+ address := os.real_path('tcp-test.sock')
+ // address := 'tcp-test.sock'
+ println('$address')
+
+ mut l := net.listen_tcp(.unix, address) or { panic(err) }
+ start_echo_server(mut l)
+ echo(address) or { panic(err) }
+ l.close() or {}
+
+ os.rm(address) or { panic('failed to remove socket file') }
+ }
+}
+
+fn testsuite_end() {
+ eprintln('\ndone')
+}
diff --git a/v_windows/v/vlib/net/udp.v b/v_windows/v/vlib/net/udp.v
new file mode 100644
index 0000000..2fac9f3
--- /dev/null
+++ b/v_windows/v/vlib/net/udp.v
@@ -0,0 +1,289 @@
+module net
+
+import time
+
+const (
+ udp_default_read_timeout = time.second / 10
+ udp_default_write_timeout = time.second / 10
+)
+
+struct UdpSocket {
+ handle int
+ l Addr
+ // TODO(emily): replace with option again
+ // when i figure out how to coerce it properly
+mut:
+ has_r bool
+ r Addr
+}
+
+pub struct UdpConn {
+pub mut:
+ sock UdpSocket
+mut:
+ write_deadline time.Time
+ read_deadline time.Time
+ read_timeout time.Duration
+ write_timeout time.Duration
+}
+
+pub fn dial_udp(raddr string) ?&UdpConn {
+ addrs := resolve_addrs_fuzzy(raddr, .udp) ?
+
+ for addr in addrs {
+ // create a local socket for this
+ // bind to any port (or file) (we don't care in this
+ // case because we only care about the remote)
+ if sock := new_udp_socket_for_remote(addr) {
+ return &UdpConn{
+ sock: sock
+ read_timeout: net.udp_default_read_timeout
+ write_timeout: net.udp_default_write_timeout
+ }
+ }
+ }
+
+ return none
+}
+
+// pub fn dial_udp(laddr string, raddr string) ?&UdpConn {
+// local := resolve_addr(laddr, .inet, .udp) ?
+
+// sbase := new_udp_socket() ?
+
+// sock := UdpSocket{
+// handle: sbase.handle
+// l: local
+// r: resolve_wrapper(raddr)
+// }
+// }
+
+pub fn (mut c UdpConn) write_ptr(b &byte, len int) ?int {
+ remote := c.sock.remote() or { return err_no_udp_remote }
+ return c.write_to_ptr(remote, b, len)
+}
+
+pub fn (mut c UdpConn) write(buf []byte) ?int {
+ return c.write_ptr(buf.data, buf.len)
+}
+
+pub fn (mut c UdpConn) write_string(s string) ?int {
+ return c.write_ptr(s.str, s.len)
+}
+
+pub fn (mut c UdpConn) write_to_ptr(addr Addr, b &byte, len int) ?int {
+ res := C.sendto(c.sock.handle, b, len, 0, voidptr(&addr), addr.len())
+ if res >= 0 {
+ return res
+ }
+ code := error_code()
+ if code == int(error_ewouldblock) {
+ c.wait_for_write() ?
+ socket_error(C.sendto(c.sock.handle, b, len, 0, voidptr(&addr), addr.len())) ?
+ } else {
+ wrap_error(code) ?
+ }
+ return none
+}
+
+// write_to blocks and writes the buf to the remote addr specified
+pub fn (mut c UdpConn) write_to(addr Addr, buf []byte) ?int {
+ return c.write_to_ptr(addr, buf.data, buf.len)
+}
+
+// write_to_string blocks and writes the buf to the remote addr specified
+pub fn (mut c UdpConn) write_to_string(addr Addr, s string) ?int {
+ return c.write_to_ptr(addr, s.str, s.len)
+}
+
+// read reads from the socket into buf up to buf.len returning the number of bytes read
+pub fn (mut c UdpConn) read(mut buf []byte) ?(int, Addr) {
+ mut addr := Addr{
+ addr: AddrData{
+ Ip6: Ip6{}
+ }
+ }
+ len := sizeof(Addr)
+ mut res := wrap_read_result(C.recvfrom(c.sock.handle, voidptr(buf.data), buf.len,
+ 0, voidptr(&addr), &len)) ?
+ if res > 0 {
+ return res, addr
+ }
+ code := error_code()
+ if code == int(error_ewouldblock) {
+ c.wait_for_read() ?
+ // same setup as in tcp
+ res = wrap_read_result(C.recvfrom(c.sock.handle, voidptr(buf.data), buf.len, 0,
+ voidptr(&addr), &len)) ?
+ res2 := socket_error(res) ?
+ return res2, addr
+ } else {
+ wrap_error(code) ?
+ }
+ return none
+}
+
+pub fn (c &UdpConn) read_deadline() ?time.Time {
+ if c.read_deadline.unix == 0 {
+ return c.read_deadline
+ }
+ return none
+}
+
+pub fn (mut c UdpConn) set_read_deadline(deadline time.Time) {
+ c.read_deadline = deadline
+}
+
+pub fn (c &UdpConn) write_deadline() ?time.Time {
+ if c.write_deadline.unix == 0 {
+ return c.write_deadline
+ }
+ return none
+}
+
+pub fn (mut c UdpConn) set_write_deadline(deadline time.Time) {
+ c.write_deadline = deadline
+}
+
+pub fn (c &UdpConn) read_timeout() time.Duration {
+ return c.read_timeout
+}
+
+pub fn (mut c UdpConn) set_read_timeout(t time.Duration) {
+ c.read_timeout = t
+}
+
+pub fn (c &UdpConn) write_timeout() time.Duration {
+ return c.write_timeout
+}
+
+pub fn (mut c UdpConn) set_write_timeout(t time.Duration) {
+ c.write_timeout = t
+}
+
+[inline]
+pub fn (mut c UdpConn) wait_for_read() ? {
+ return wait_for_read(c.sock.handle, c.read_deadline, c.read_timeout)
+}
+
+[inline]
+pub fn (mut c UdpConn) wait_for_write() ? {
+ return wait_for_write(c.sock.handle, c.write_deadline, c.write_timeout)
+}
+
+pub fn (c &UdpConn) str() string {
+ // TODO
+ return 'UdpConn'
+}
+
+pub fn (mut c UdpConn) close() ? {
+ return c.sock.close()
+}
+
+pub fn listen_udp(laddr string) ?&UdpConn {
+ addrs := resolve_addrs_fuzzy(laddr, .udp) ?
+ // TODO(emily):
+ // here we are binding to the first address
+ // and that is probably not ideal
+ addr := addrs[0]
+ return &UdpConn{
+ sock: new_udp_socket(addr) ?
+ read_timeout: net.udp_default_read_timeout
+ write_timeout: net.udp_default_write_timeout
+ }
+}
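+
+// A minimal echo sketch combining listen_udp and dial_udp (the port is a
+// placeholder and error handling is elided), similar to udp_test.v:
+//
+//   mut server := listen_udp(':9999') ?
+//   mut client := dial_udp('localhost:9999') ?
+//   client.write_string('ping') ?
+//   mut buf := []byte{len: 100}
+//   n, from := server.read(mut buf) ?
+//   server.write_to(from, buf[..n]) ?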
+
+fn new_udp_socket(local_addr Addr) ?&UdpSocket {
+ family := local_addr.family()
+
+ sockfd := socket_error(C.socket(family, SocketType.udp, 0)) ?
+ mut s := &UdpSocket{
+ handle: sockfd
+ l: local_addr
+ r: Addr{
+ addr: AddrData{
+ Ip6: Ip6{}
+ }
+ }
+ }
+
+ s.set_option_bool(.reuse_addr, true) ?
+
+ if family == .ip6 {
+ s.set_dualstack(true) ?
+ }
+
+ $if !net_blocking_sockets ? {
+ // NOTE: refer to comments in tcp.v
+ $if windows {
+ t := u32(1) // true
+ socket_error(C.ioctlsocket(sockfd, fionbio, &t)) ?
+ } $else {
+ socket_error(C.fcntl(sockfd, C.F_SETFD, C.O_NONBLOCK)) ?
+ }
+ }
+
+ // cast to the correct type
+ socket_error(C.bind(s.handle, voidptr(&local_addr), local_addr.len())) ?
+ return s
+}
+
+fn new_udp_socket_for_remote(raddr Addr) ?&UdpSocket {
+ // Invent a suitable local address for this remote addr
+ // Appease compiler
+ mut addr := Addr{
+ addr: AddrData{
+ Ip6: Ip6{}
+ }
+ }
+ match raddr.family() {
+ .ip, .ip6 {
+ // Use ip6 dualstack
+ addr = new_ip6(0, addr_ip6_any)
+ }
+ .unix {
+ addr = temp_unix() ?
+ }
+ else {
+ panic('Invalid family')
+ }
+ }
+ mut sock := new_udp_socket(addr) ?
+ sock.has_r = true
+ sock.r = raddr
+
+ return sock
+}
+
+pub fn (mut s UdpSocket) set_option_bool(opt SocketOption, value bool) ? {
+ // TODO reenable when this `in` operation works again
+ // if opt !in opts_can_set {
+ // return err_option_not_settable
+ // }
+ // if opt !in opts_bool {
+ // return err_option_wrong_type
+ // }
+ x := int(value)
+ socket_error(C.setsockopt(s.handle, C.SOL_SOCKET, int(opt), &x, sizeof(int))) ?
+}
+
+pub fn (mut s UdpSocket) set_dualstack(on bool) ? {
+ x := int(!on)
+ socket_error(C.setsockopt(s.handle, C.IPPROTO_IPV6, int(SocketOption.ipv6_only), &x,
+ sizeof(int))) ?
+}
+
+fn (mut s UdpSocket) close() ? {
+ return shutdown(s.handle)
+}
+
+fn (mut s UdpSocket) @select(test Select, timeout time.Duration) ?bool {
+ return @select(s.handle, test, timeout)
+}
+
+fn (s &UdpSocket) remote() ?Addr {
+ if s.has_r {
+ return s.r
+ }
+ return none
+}
diff --git a/v_windows/v/vlib/net/udp_test.v b/v_windows/v/vlib/net/udp_test.v
new file mode 100644
index 0000000..83675a2
--- /dev/null
+++ b/v_windows/v/vlib/net/udp_test.v
@@ -0,0 +1,67 @@
+import net
+
+fn echo_server(mut c net.UdpConn) {
+ for {
+ mut buf := []byte{len: 100, init: 0}
+ read, addr := c.read(mut buf) or { continue }
+
+ println('Server got addr $addr')
+
+ c.write_to(addr, buf[..read]) or {
+ println('Server: connection dropped')
+ return
+ }
+ }
+}
+
+const (
+ local_addr = ':40003'
+ remote_addr = 'localhost:40003'
+)
+
+fn echo() ? {
+ mut c := net.dial_udp(remote_addr) ?
+ defer {
+ c.close() or {}
+ }
+ data := 'Hello from vlib/net!'
+
+ c.write_string(data) ?
+
+ mut buf := []byte{len: 100, init: 0}
+ read, addr := c.read(mut buf) ?
+
+ assert read == data.len
+ println('Got address $addr')
+ // Can't test this here because loopback addresses
+ // are mapped to other addresses
+ // assert addr.str() == '127.0.0.1:30001'
+
+ for i := 0; i < read; i++ {
+ assert buf[i] == data[i]
+ }
+
+ println('Got "$buf.bytestr()"')
+
+ c.close() ?
+}
+
+fn test_udp() {
+ mut l := net.listen_udp(local_addr) or {
+ println(err)
+ assert false
+ panic('')
+ }
+
+ go echo_server(mut l)
+ echo() or {
+ println(err)
+ assert false
+ }
+
+ l.close() or {}
+}
+
+fn main() {
+ test_udp()
+}
diff --git a/v_windows/v/vlib/net/unix/aasocket.c.v b/v_windows/v/vlib/net/unix/aasocket.c.v
new file mode 100644
index 0000000..7f762a5
--- /dev/null
+++ b/v_windows/v/vlib/net/unix/aasocket.c.v
@@ -0,0 +1,104 @@
+module unix
+
+#include <sys/un.h>
+
+// Select represents a select operation
+enum Select {
+ read
+ write
+ except
+}
+
+// SocketType are the available sockets
+// enum SocketType {
+// dgram = C.SOCK_DGRAM
+// stream = C.SOCK_STREAM
+// seqpacket = C.SOCK_SEQPACKET
+// }
+
+struct C.sockaddr {
+ sa_family u16
+}
+
+const max_sun_path = 104
+
+// 104 for macos, 108 for linux => use the minimum
+
+struct C.sockaddr_un {
+mut:
+ // sun_len byte // only on macos
+ sun_family int
+ sun_path [104]char // on linux that is 108
+}
+
+struct C.addrinfo {
+mut:
+ ai_family int
+ ai_socktype int
+ ai_flags int
+ ai_protocol int
+ ai_addrlen int
+ ai_addr voidptr
+ ai_canonname voidptr
+ ai_next voidptr
+}
+
+struct C.sockaddr_storage {
+}
+
+// fn C.socket() int
+
+// fn C.setsockopt() int
+
+// fn C.htonl() int
+
+// fn C.htons() int
+
+// fn C.bind() int
+
+// fn C.listen() int
+
+// fn C.accept() int
+
+// fn C.getaddrinfo() int
+
+// fn C.connect() int
+
+// fn C.send() int
+
+// fn C.sendto() int
+
+// fn C.recv() int
+
+// fn C.recvfrom() int
+
+// fn C.shutdown() int
+
+// fn C.ntohs() int
+
+// fn C.getpeername() int
+
+// fn C.inet_ntop(af int, src voidptr, dst charptr, dst_size int) charptr
+
+fn C.WSAAddressToStringA() int
+
+// fn C.getsockname() int
+
+// defined in builtin
+// fn C.read() int
+// fn C.close() int
+
+fn C.ioctlsocket() int
+
+// fn C.fcntl() int
+
+// fn C.@select() int
+
+// fn C.FD_ZERO()
+
+// fn C.FD_SET()
+
+// fn C.FD_ISSET() bool
+
+[typedef]
+struct C.fd_set {}
diff --git a/v_windows/v/vlib/net/unix/common.v b/v_windows/v/vlib/net/unix/common.v
new file mode 100644
index 0000000..75e591f
--- /dev/null
+++ b/v_windows/v/vlib/net/unix/common.v
@@ -0,0 +1,128 @@
+module unix
+
+import time
+import net
+
+const (
+ error_ewouldblock = C.EWOULDBLOCK
+)
+
+fn C.SUN_LEN(ptr &C.sockaddr_un) int
+
+fn C.strncpy(&char, &char, int)
+
+// shutdown shuts down a socket and closes it
+fn shutdown(handle int) ? {
+ $if windows {
+ C.shutdown(handle, C.SD_BOTH)
+ net.socket_error(C.closesocket(handle)) ?
+ } $else {
+ C.shutdown(handle, C.SHUT_RDWR)
+ net.socket_error(C.close(handle)) ?
+ }
+}
+
+// Select waits for an io operation (specified by parameter `test`) to be available
+fn @select(handle int, test Select, timeout time.Duration) ?bool {
+ set := C.fd_set{}
+
+ C.FD_ZERO(&set)
+ C.FD_SET(handle, &set)
+
+ seconds := timeout / time.second
+ microseconds := time.Duration(timeout - (seconds * time.second)).microseconds()
+
+ mut tt := C.timeval{
+ tv_sec: u64(seconds)
+ tv_usec: u64(microseconds)
+ }
+
+ mut timeval_timeout := &tt
+
+ // infinite timeout is signaled by passing null as the timeout to
+ // select
+ if timeout == unix.infinite_timeout {
+ timeval_timeout = &C.timeval(0)
+ }
+
+ match test {
+ .read {
+ net.socket_error(C.@select(handle + 1, &set, C.NULL, C.NULL, timeval_timeout)) ?
+ }
+ .write {
+ net.socket_error(C.@select(handle + 1, C.NULL, &set, C.NULL, timeval_timeout)) ?
+ }
+ .except {
+ net.socket_error(C.@select(handle + 1, C.NULL, C.NULL, &set, timeval_timeout)) ?
+ }
+ }
+
+ return C.FD_ISSET(handle, &set)
+}
+
+// wait_for_common wraps the common wait code
+fn wait_for_common(handle int, deadline time.Time, timeout time.Duration, test Select) ? {
+ if deadline.unix == 0 {
+ // do not accept negative timeout
+ if timeout < 0 {
+ return net.err_timed_out
+ }
+ ready := @select(handle, test, timeout) ?
+ if ready {
+ return
+ }
+ return net.err_timed_out
+ }
+ // Convert the deadline into a timeout
+ // and use that
+ d_timeout := deadline.unix - time.now().unix
+ if d_timeout < 0 {
+ // deadline is in the past so this has already
+ // timed out
+ return net.err_timed_out
+ }
+
+ ready := @select(handle, test, d_timeout) ?
+ if ready {
+ return
+ }
+ return net.err_timed_out
+}
+
+// wait_for_write waits for a write io operation to be available
+fn wait_for_write(handle int, deadline time.Time, timeout time.Duration) ? {
+ return wait_for_common(handle, deadline, timeout, .write)
+}
+
+// wait_for_read waits for a read io operation to be available
+fn wait_for_read(handle int, deadline time.Time, timeout time.Duration) ? {
+ return wait_for_common(handle, deadline, timeout, .read)
+}
+
+// no_deadline should be given to functions when no deadline is wanted (i.e. all functions
+// return instantly)
+const (
+ no_deadline = time.Time{
+ unix: 0
+ }
+)
+
+// no_timeout should be given to functions when no timeout is wanted (i.e. all functions
+// return instantly)
+const (
+ no_timeout = time.Duration(0)
+)
+
+// infinite_timeout should be given to functions when an infinite_timeout is wanted (i.e. functions
+// only ever return with data)
+const (
+ infinite_timeout = time.infinite
+)
+
+[inline]
+fn wrap_read_result(result int) ?int {
+ if result != 0 {
+ return result
+ }
+ return none
+}
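+
+// Illustrative sketch, not part of the original file: wrap_read_result maps a
+// zero result from C.recv to `none`, so the read functions in this module
+// (see stream_nix.v) retry roughly along these lines; `handle`, `deadline` and
+// `timeout` are placeholders.
+//
+// mut res := wrap_read_result(C.recv(handle, voidptr(buf.data), buf.len, 0)) ?
+// if res > 0 {
+//     return res
+// }
+// if error_code() == int(error_ewouldblock) {
+//     wait_for_read(handle, deadline, timeout) ?
+//     res = wrap_read_result(C.recv(handle, voidptr(buf.data), buf.len, 0)) ?
+// }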
diff --git a/v_windows/v/vlib/net/unix/stream_nix.v b/v_windows/v/vlib/net/unix/stream_nix.v
new file mode 100644
index 0000000..e73acb7
--- /dev/null
+++ b/v_windows/v/vlib/net/unix/stream_nix.v
@@ -0,0 +1,288 @@
+module unix
+
+import time
+import os
+import net
+
+const (
+ unix_default_read_timeout = 30 * time.second
+ unix_default_write_timeout = 30 * time.second
+ connect_timeout = 5 * time.second
+ msg_nosignal = 0x4000
+)
+
+struct StreamSocket {
+pub:
+ handle int
+mut:
+ path string
+}
+
+struct StreamConn {
+pub mut:
+ sock StreamSocket
+mut:
+ write_deadline time.Time
+ read_deadline time.Time
+ read_timeout time.Duration
+ write_timeout time.Duration
+}
+
+struct StreamListener {
+pub mut:
+ sock StreamSocket
+mut:
+ accept_timeout time.Duration
+ accept_deadline time.Time
+}
+
+fn error_code() int {
+ return C.errno
+}
+
+fn new_stream_socket() ?StreamSocket {
+ sockfd := net.socket_error(C.socket(net.AddrFamily.unix, net.SocketType.tcp, 0)) ?
+ mut s := StreamSocket{
+ handle: sockfd
+ }
+ return s
+}
+
+fn (mut s StreamSocket) close() ? {
+ return shutdown(s.handle)
+}
+
+fn (mut s StreamSocket) @select(test Select, timeout time.Duration) ?bool {
+ return @select(s.handle, test, timeout)
+}
+
+fn (mut s StreamSocket) connect(a string) ? {
+ if a.len >= max_sun_path {
+ return error('Socket path too long! Max length: ${max_sun_path - 1} chars.')
+ }
+ mut addr := C.sockaddr_un{}
+ unsafe { C.memset(&addr, 0, sizeof(C.sockaddr_un)) }
+ addr.sun_family = C.AF_UNIX
+ unsafe { C.strncpy(&addr.sun_path[0], &char(a.str), max_sun_path) }
+ size := C.SUN_LEN(&addr)
+ res := C.connect(s.handle, voidptr(&addr), size)
+ // if res != 1 {
+ // return none
+ //}
+ if res == 0 {
+ return
+ }
+ _ := error_code()
+ write_result := s.@select(.write, unix.connect_timeout) ?
+ if write_result {
+ // succeeded
+ return
+ }
+ except_result := s.@select(.except, unix.connect_timeout) ?
+ if except_result {
+ return net.err_connect_failed
+ }
+ // otherwise we timed out
+ return net.err_connect_timed_out
+}
+
+pub fn listen_stream(sock string) ?&StreamListener {
+ if sock.len >= max_sun_path {
+ return error('Socket path too long! Max length: ${max_sun_path - 1} chars.')
+ }
+ mut s := new_stream_socket() ?
+ s.path = sock
+ mut addr := C.sockaddr_un{}
+ unsafe { C.memset(&addr, 0, sizeof(C.sockaddr_un)) }
+ addr.sun_family = C.AF_UNIX
+ unsafe { C.strncpy(&addr.sun_path[0], &char(sock.str), max_sun_path) }
+ size := C.SUN_LEN(&addr)
+ if os.exists(sock) {
+ os.rm(sock) ?
+ }
+ net.socket_error(C.bind(s.handle, voidptr(&addr), size)) ?
+ os.chmod(sock, 0o777) ?
+ net.socket_error(C.listen(s.handle, 128)) ?
+ return &StreamListener{
+ sock: s
+ }
+}
+
+pub fn connect_stream(path string) ?&StreamConn {
+ mut s := new_stream_socket() ?
+ s.connect(path) ?
+ return &StreamConn{
+ sock: s
+ read_timeout: unix.unix_default_read_timeout
+ write_timeout: unix.unix_default_write_timeout
+ }
+}
+
+pub fn (mut l StreamListener) accept() ?&StreamConn {
+ mut new_handle := C.accept(l.sock.handle, 0, 0)
+ if new_handle <= 0 {
+ l.wait_for_accept() ?
+ new_handle = C.accept(l.sock.handle, 0, 0)
+ if new_handle == -1 || new_handle == 0 {
+ return error('accept failed')
+ }
+ }
+ new_sock := StreamSocket{
+ handle: new_handle
+ }
+ return &StreamConn{
+ sock: new_sock
+ read_timeout: unix.unix_default_read_timeout
+ write_timeout: unix.unix_default_write_timeout
+ }
+}
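+
+// Illustrative usage sketch, not part of the original file, as seen from code
+// that imports net.unix; the socket path is an arbitrary example and
+// echo_server is a hypothetical user-defined handler that calls accept in a
+// loop (compare unix_test.v later in this module).
+//
+// mut l := unix.listen_stream('/tmp/example.sock') ?
+// go echo_server(mut l)
+// mut c := unix.connect_stream('/tmp/example.sock') ?
+// c.write_string('ping') ?
+// mut buf := []byte{len: 128}
+// n := c.read(mut buf) ?
+// println(buf[..n].bytestr())
+// c.close() ?
+// l.close() ?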
+
+pub fn (c &StreamListener) accept_deadline() ?time.Time {
+ if c.accept_deadline.unix != 0 {
+ return c.accept_deadline
+ }
+ return error('no deadline')
+}
+
+pub fn (mut c StreamListener) set_accept_deadline(deadline time.Time) {
+ c.accept_deadline = deadline
+}
+
+pub fn (c &StreamListener) accept_timeout() time.Duration {
+ return c.accept_timeout
+}
+
+pub fn (mut c StreamListener) set_accept_timeout(t time.Duration) {
+ c.accept_timeout = t
+}
+
+pub fn (mut c StreamListener) wait_for_accept() ? {
+ return wait_for_read(c.sock.handle, c.accept_deadline, c.accept_timeout)
+}
+
+pub fn (mut c StreamListener) close() ? {
+ os.rm(c.sock.path) ?
+ c.sock.close() ?
+}
+
+pub fn (mut c StreamConn) close() ? {
+ c.sock.close() ?
+}
+
+// write_ptr blocks and attempts to write all data
+pub fn (mut c StreamConn) write_ptr(b &byte, len int) ?int {
+ $if trace_unix ? {
+ eprintln(
+ '>>> StreamConn.write_ptr | c.sock.handle: $c.sock.handle | b: ${ptr_str(b)} len: $len |\n' +
+ unsafe { b.vstring_with_len(len) })
+ }
+ unsafe {
+ mut ptr_base := &byte(b)
+ mut total_sent := 0
+ for total_sent < len {
+ ptr := ptr_base + total_sent
+ remaining := len - total_sent
+ mut sent := C.send(c.sock.handle, ptr, remaining, unix.msg_nosignal)
+ if sent < 0 {
+ code := error_code()
+ if code == int(error_ewouldblock) {
+ c.wait_for_write() ?
+ continue
+ } else {
+ net.wrap_error(code) ?
+ }
+ }
+ total_sent += sent
+ }
+ return total_sent
+ }
+}
+
+// write blocks and attempts to write all data
+pub fn (mut c StreamConn) write(bytes []byte) ?int {
+ return c.write_ptr(bytes.data, bytes.len)
+}
+
+// write_string blocks and attempts to write all data
+pub fn (mut c StreamConn) write_string(s string) ?int {
+ return c.write_ptr(s.str, s.len)
+}
+
+pub fn (mut c StreamConn) read_ptr(buf_ptr &byte, len int) ?int {
+ mut res := wrap_read_result(C.recv(c.sock.handle, voidptr(buf_ptr), len, 0)) ?
+ $if trace_unix ? {
+ eprintln('<<< StreamConn.read_ptr | c.sock.handle: $c.sock.handle | buf_ptr: ${ptr_str(buf_ptr)} len: $len | res: $res')
+ }
+ if res > 0 {
+ return res
+ }
+ code := error_code()
+ if code == int(error_ewouldblock) {
+ c.wait_for_read() ?
+ res = wrap_read_result(C.recv(c.sock.handle, voidptr(buf_ptr), len, 0)) ?
+ $if trace_unix ? {
+ eprintln('<<< StreamConn.read_ptr | c.sock.handle: $c.sock.handle | buf_ptr: ${ptr_str(buf_ptr)} len: $len | res: $res')
+ }
+ return net.socket_error(res)
+ } else {
+ net.wrap_error(code) ?
+ }
+ return net.socket_error(code)
+}
+
+pub fn (mut c StreamConn) read(mut buf []byte) ?int {
+ return c.read_ptr(buf.data, buf.len)
+}
+
+pub fn (mut c StreamConn) read_deadline() ?time.Time {
+ if c.read_deadline.unix == 0 {
+ return c.read_deadline
+ }
+ return none
+}
+
+pub fn (mut c StreamConn) set_read_deadline(deadline time.Time) {
+ c.read_deadline = deadline
+}
+
+pub fn (mut c StreamConn) write_deadline() ?time.Time {
+ if c.write_deadline.unix == 0 {
+ return c.write_deadline
+ }
+ return none
+}
+
+pub fn (mut c StreamConn) set_write_deadline(deadline time.Time) {
+ c.write_deadline = deadline
+}
+
+pub fn (c &StreamConn) read_timeout() time.Duration {
+ return c.read_timeout
+}
+
+pub fn (mut c StreamConn) set_read_timeout(t time.Duration) {
+ c.read_timeout = t
+}
+
+pub fn (c &StreamConn) write_timeout() time.Duration {
+ return c.write_timeout
+}
+
+pub fn (mut c StreamConn) set_write_timeout(t time.Duration) {
+ c.write_timeout = t
+}
+
+[inline]
+pub fn (mut c StreamConn) wait_for_read() ? {
+ return wait_for_read(c.sock.handle, c.read_deadline, c.read_timeout)
+}
+
+[inline]
+pub fn (mut c StreamConn) wait_for_write() ? {
+ return wait_for_write(c.sock.handle, c.write_deadline, c.write_timeout)
+}
+
+pub fn (c StreamConn) str() string {
+ s := c.sock.str().replace('\n', ' ').replace(' ', ' ')
+ return 'StreamConn{ write_deadline: $c.write_deadline, read_deadline: $c.read_deadline, read_timeout: $c.read_timeout, write_timeout: $c.write_timeout, sock: $s }'
+}
diff --git a/v_windows/v/vlib/net/unix/unix_test.v b/v_windows/v/vlib/net/unix/unix_test.v
new file mode 100644
index 0000000..cabcd52
--- /dev/null
+++ b/v_windows/v/vlib/net/unix/unix_test.v
@@ -0,0 +1,50 @@
+import os
+import net.unix
+
+const test_port = os.join_path(os.temp_dir(), 'unix_domain_socket')
+
+fn handle_conn(mut c unix.StreamConn) {
+ for {
+ mut buf := []byte{len: 100, init: 0}
+ read := c.read(mut buf) or {
+ println('Server: connection dropped')
+ return
+ }
+ c.write(buf[..read]) or {
+ println('Server: connection dropped')
+ return
+ }
+ }
+}
+
+fn echo_server(mut l unix.StreamListener) ? {
+ for {
+ mut new_conn := l.accept() or { continue }
+ go handle_conn(mut new_conn)
+ }
+}
+
+fn echo() ? {
+ mut c := unix.connect_stream(test_port) ?
+ defer {
+ c.close() or {}
+ }
+ data := 'Hello from vlib/net!'
+ c.write_string(data) ?
+ mut buf := []byte{len: 4096}
+ read := c.read(mut buf) ?
+ assert read == data.len
+ for i := 0; i < read; i++ {
+ assert buf[i] == data[i]
+ }
+ println('Got "$buf.bytestr()"')
+ return
+}
+
+fn test_tcp() {
+ assert os.exists(test_port) == false
+ mut l := unix.listen_stream(test_port) or { panic(err) }
+ go echo_server(mut l)
+ echo() or { panic(err) }
+ l.close() or {}
+}
diff --git a/v_windows/v/vlib/net/urllib/urllib.v b/v_windows/v/vlib/net/urllib/urllib.v
new file mode 100644
index 0000000..3b02ef6
--- /dev/null
+++ b/v_windows/v/vlib/net/urllib/urllib.v
@@ -0,0 +1,1095 @@
+// urllib parses URLs and implements query escaping.
+// See RFC 3986. This module generally follows RFC 3986, except where
+// it deviates for compatibility reasons.
+// Based off: https://github.com/golang/go/blob/master/src/net/url/url.go
+// Last commit: https://github.com/golang/go/commit/fe2ed5054176935d4adcf13e891715ccf2ee3cce
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+module urllib
+
+import strings
+
+enum EncodingMode {
+ encode_path
+ encode_path_segment
+ encode_host
+ encode_zone
+ encode_user_password
+ encode_query_component
+ encode_fragment
+}
+
+const (
+ err_msg_escape = 'unescape: invalid URL escape'
+ err_msg_parse = 'parse: failed parsing url'
+)
+
+fn error_msg(message string, val string) string {
+ mut msg := 'net.urllib.$message'
+ if val != '' {
+ msg = '$msg ($val)'
+ }
+ return msg
+}
+
+// Return true if the specified character should be escaped when
+// appearing in a URL string, according to RFC 3986.
+//
+// Please be informed that for now should_escape does not check all
+// reserved characters correctly. See golang.org/issue/5684.
+fn should_escape(c byte, mode EncodingMode) bool {
+ // §2.3 Unreserved characters (alphanum)
+ if (`a` <= c && c <= `z`) || (`A` <= c && c <= `Z`) || (`0` <= c && c <= `9`) {
+ return false
+ }
+ if mode == .encode_host || mode == .encode_zone {
+ // §3.2.2 host allows
+ // sub-delims = `!` / `$` / `&` / ``` / `(` / `)` / `*` / `+` / `,` / `;` / `=`
+ // as part of reg-name.
+ // We add : because we include :port as part of host.
+ // We add [ ] because we include [ipv6]:port as part of host.
+ // We add < > because they`re the only characters left that
+ // we could possibly allow, and parse will reject them if we
+ // escape them (because hosts can`t use %-encoding for
+ // ASCII bytes).
+ if c in [`!`, `$`, `&`, `\\`, `(`, `)`, `*`, `+`, `,`, `;`, `=`, `:`, `[`, `]`, `<`, `>`,
+ `"`,
+ ] {
+ return false
+ }
+ }
+ match c {
+ `-`, `_`, `.`, `~` {
+ // §2.3 Unreserved characters (mark)
+ return false
+ }
+ `$`, `&`, `+`, `,`, `/`, `:`, `;`, `=`, `?`, `@` {
+ // §2.2 Reserved characters (reserved)
+ // Different sections of the URL allow a few of
+ // the reserved characters to appear unescaped.
+ match mode {
+ .encode_path {
+ // §3.3
+ // The RFC allows : @ & = + $ but saves / ; , for assigning
+ // meaning to individual path segments. This package
+ // only manipulates the path as a whole, so we allow those
+ // last three as well. That leaves only ? to escape.
+ return c == `?`
+ }
+ .encode_path_segment {
+ // §3.3
+ // The RFC allows : @ & = + $ but saves / ; , for assigning
+ // meaning to individual path segments.
+ return c == `/` || c == `;` || c == `,` || c == `?`
+ }
+ .encode_user_password {
+ // §3.2.1
+ // The RFC allows `;`, `:`, `&`, `=`, `+`, `$`, and `,` in
+ // userinfo, so we must escape only `@`, `/`, and `?`.
+ // The parsing of userinfo treats `:` as special so we must escape
+ // that too.
+ return c == `@` || c == `/` || c == `?` || c == `:`
+ }
+ .encode_query_component {
+ // §3.4
+ // The RFC reserves (so we must escape) everything.
+ return true
+ }
+ .encode_fragment {
+ // §4.1
+ // The RFC text is silent but the grammar allows
+ // everything, so escape nothing.
+ return false
+ }
+ else {}
+ }
+ }
+ else {}
+ }
+ if mode == .encode_fragment {
+ // RFC 3986 §2.2 allows not escaping sub-delims. A subset of sub-delims are
+ // included in reserved from RFC 2396 §2.2. The remaining sub-delims do not
+ // need to be escaped. To minimize potential breakage, we apply two restrictions:
+ // (1) we always escape sub-delims outside of the fragment, and (2) we always
+ // escape single quote to avoid breaking callers that had previously assumed that
+ // single quotes would be escaped. See issue #19917.
+ match c {
+ `!`, `(`, `)`, `*` { return false }
+ else {}
+ }
+ }
+ // Everything else must be escaped.
+ return true
+}
+
+// query_unescape does the inverse transformation of query_escape,
+// converting each 3-byte encoded substring of the form '%AB' into the
+// hex-decoded byte 0xAB.
+// It returns an error if any % is not followed by two hexadecimal
+// digits.
+pub fn query_unescape(s string) ?string {
+ return unescape(s, .encode_query_component)
+}
+
+// path_unescape does the inverse transformation of path_escape,
+// converting each 3-byte encoded substring of the form '%AB' into the
+// hex-decoded byte 0xAB. It returns an error if any % is not followed
+// by two hexadecimal digits.
+//
+// path_unescape is identical to query_unescape except that it does not
+// unescape '+' to ' ' (space).
+pub fn path_unescape(s string) ?string {
+ return unescape(s, .encode_path_segment)
+}
+
+// unescape unescapes a string; the mode specifies
+// which section of the URL string is being unescaped.
+fn unescape(s_ string, mode EncodingMode) ?string {
+ mut s := s_
+ // Count %, check that they're well-formed.
+ mut n := 0
+ mut has_plus := false
+ for i := 0; i < s.len; {
+ x := s[i]
+ match x {
+ `%` {
+ if s == '' {
+ break
+ }
+ n++
+ if i + 2 >= s.len || !ishex(s[i + 1]) || !ishex(s[i + 2]) {
+ if mode == .encode_query_component && i + 1 < s.len {
+ s = s[..i] + '%25' + s[(i + 1)..]
+ i += 4 // skip the %25 and the next character
+ continue
+ }
+ s = s[i..]
+ if s.len > 3 {
+ s = s[..3]
+ }
+ return error(error_msg(urllib.err_msg_escape, s))
+ }
+ // Per https://tools.ietf.org/html/rfc3986#page-21
+ // in the host component %-encoding can only be used
+ // for non-ASCII bytes.
+ // But https://tools.ietf.org/html/rfc6874#section-2
+ // introduces %25 being allowed to escape a percent sign
+ // in IPv6 scoped-address literals. Yay.
+ if i + 3 >= s.len && mode == .encode_host && unhex(s[i + 1]) < 8
+ && s[i..i + 3] != '%25' {
+ return error(error_msg(urllib.err_msg_escape, s[i..i + 3]))
+ }
+ if mode == .encode_zone {
+ // RFC 6874 says basically 'anything goes' for zone identifiers
+ // and that even non-ASCII can be redundantly escaped,
+ // but it seems prudent to restrict %-escaped bytes here to those
+ // that are valid host name bytes in their unescaped form.
+ // That is, you can use escaping in the zone identifier but not
+ // to introduce bytes you couldn't just write directly.
+ // But Windows puts spaces here! Yay.
+ if i + 3 >= s.len {
+ return error(error_msg('unescape: invalid escape sequence', ''))
+ }
+ v := ((unhex(s[i + 1]) << byte(4)) | unhex(s[i + 2]))
+ if s[i..i + 3] != '%25' && v != ` ` && should_escape(v, .encode_host) {
+ error(error_msg(urllib.err_msg_escape, s[i..i + 3]))
+ }
+ }
+ i += 3
+ }
+ `+` {
+ has_plus = mode == .encode_query_component
+ i++
+ }
+ else {
+ if (mode == .encode_host || mode == .encode_zone) && s[i] < 0x80
+ && should_escape(s[i], mode) {
+ error(error_msg('unescape: invalid character in host name', s[i..i + 1]))
+ }
+ i++
+ }
+ }
+ }
+ if n == 0 && !has_plus {
+ return s
+ }
+ if s.len < 2 * n {
+ return error(error_msg('unescape: invalid escape sequence', ''))
+ }
+ mut t := strings.new_builder(s.len - 2 * n)
+ for i := 0; i < s.len; i++ {
+ x := s[i]
+ match x {
+ `%` {
+ if i + 2 >= s.len {
+ return error(error_msg('unescape: invalid escape sequence', ''))
+ }
+ t.write_string(((unhex(s[i + 1]) << byte(4)) | unhex(s[i + 2])).ascii_str())
+ i += 2
+ }
+ `+` {
+ if mode == .encode_query_component {
+ t.write_string(' ')
+ } else {
+ t.write_string('+')
+ }
+ }
+ else {
+ t.write_string(s[i].ascii_str())
+ }
+ }
+ }
+ return t.str()
+}
+
+// query_escape escapes the string so it can be safely placed
+// inside a URL query.
+pub fn query_escape(s string) string {
+ return escape(s, .encode_query_component)
+}
+
+// path_escape escapes the string so it can be safely placed inside a URL path segment,
+// replacing special characters (including /) with %XX sequences as needed.
+pub fn path_escape(s string) string {
+ return escape(s, .encode_path_segment)
+}
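+
+// A few illustrative examples, not part of the original file, of the
+// escape/unescape round trip as seen from code importing net.urllib:
+//
+// assert urllib.query_escape('a b&c') == 'a+b%26c'
+// assert urllib.path_escape('a/b c') == 'a%2Fb%20c'
+// s := urllib.query_unescape('a+b%26c') or { panic(err) }
+// assert s == 'a b&c'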
+
+fn escape(s string, mode EncodingMode) string {
+ mut space_count := 0
+ mut hex_count := 0
+ mut c := byte(0)
+ for i in 0 .. s.len {
+ c = s[i]
+ if should_escape(c, mode) {
+ if c == ` ` && mode == .encode_query_component {
+ space_count++
+ } else {
+ hex_count++
+ }
+ }
+ }
+ if space_count == 0 && hex_count == 0 {
+ return s
+ }
+ buf := []byte{len: (64)}
+ mut t := []byte{}
+ required := s.len + 2 * hex_count
+ if required <= buf.len {
+ t = buf[..required]
+ } else {
+ t = []byte{len: required}
+ }
+ if hex_count == 0 {
+ copy(t, s.bytes())
+ for i in 0 .. s.len {
+ if s[i] == ` ` {
+ t[i] = `+`
+ }
+ }
+ return t.bytestr()
+ }
+ upperhex := '0123456789ABCDEF'
+ mut j := 0
+ for i in 0 .. s.len {
+ c1 := s[i]
+ if c1 == ` ` && mode == .encode_query_component {
+ t[j] = `+`
+ j++
+ } else if should_escape(c1, mode) {
+ t[j] = `%`
+ t[j + 1] = upperhex[c1 >> 4]
+ t[j + 2] = upperhex[c1 & 15]
+ j += 3
+ } else {
+ t[j] = s[i]
+ j++
+ }
+ }
+ return t.bytestr()
+}
+
+// A URL represents a parsed URL (technically, a URI reference).
+//
+// The general form represented is:
+//
+// [scheme:][//[userinfo@]host][/]path[?query][#fragment]
+//
+// URLs that do not start with a slash after the scheme are interpreted as:
+//
+// scheme:opaque[?query][#fragment]
+//
+// Note that the path field is stored in decoded form: /%47%6f%2f becomes /Go/.
+// A consequence is that it is impossible to tell which slashes in the path were
+// slashes in the raw URL and which were %2f. This distinction is rarely important,
+// but when it is, the code should use raw_path, an optional field which only gets
+// set if the default encoding is different from path.
+//
+// URL's String method uses the escaped_path method to obtain the path. See the
+// escaped_path method for more details.
+pub struct URL {
+pub mut:
+ scheme string
+ opaque string // encoded opaque data
+ user &Userinfo // username and password information
+ host string // host or host:port
+ path string // path (relative paths may omit leading slash)
+ raw_path string // encoded path hint (see escaped_path method)
+ force_query bool // append a query ('?') even if raw_query is empty
+ raw_query string // encoded query values, without '?'
+ fragment string // fragment for references, without '#'
+}
+
+// user returns a Userinfo containing the provided username
+// and no password set.
+pub fn user(username string) &Userinfo {
+ return &Userinfo{
+ username: username
+ password: ''
+ password_set: false
+ }
+}
+
+// user_password returns a Userinfo containing the provided username
+// and password.
+//
+// This functionality should only be used with legacy web sites.
+// RFC 2396 warns that interpreting Userinfo this way
+// ``is NOT RECOMMENDED, because the passing of authentication
+// information in clear text (such as URI) has proven to be a
+// security risk in almost every case where it has been used.''
+fn user_password(username string, password string) &Userinfo {
+ return &Userinfo{username, password, true}
+}
+
+// The Userinfo type is an immutable encapsulation of username and
+// password details for a URL. An existing Userinfo value is guaranteed
+// to have a username set (potentially empty, as allowed by RFC 2396),
+// and optionally a password.
+struct Userinfo {
+pub:
+ username string
+ password string
+ password_set bool
+}
+
+fn (u &Userinfo) empty() bool {
+ return isnil(u) || (u.username == '' && u.password == '')
+}
+
+// string returns the encoded userinfo information in the standard form
+// of 'username[:password]'.
+fn (u &Userinfo) str() string {
+ if u.empty() {
+ return ''
+ }
+ mut s := escape(u.username, .encode_user_password)
+ if u.password_set {
+ s += ':' + escape(u.password, .encode_user_password)
+ }
+ return s
+}
+
+// Maybe rawurl is of the form scheme:path.
+// (scheme must be [a-zA-Z][a-zA-Z0-9+-.]*)
+// If so, return [scheme, path]; else return ['', rawurl]
+fn split_by_scheme(rawurl string) ?[]string {
+ for i in 0 .. rawurl.len {
+ c := rawurl[i]
+ if (`a` <= c && c <= `z`) || (`A` <= c && c <= `Z`) {
+ // do nothing
+ } else if (`0` <= c && c <= `9`) || (c == `+` || c == `-` || c == `.`) {
+ if i == 0 {
+ return ['', rawurl]
+ }
+ } else if c == `:` {
+ if i == 0 {
+ return error(error_msg('split_by_scheme: missing protocol scheme', ''))
+ }
+ return [rawurl[..i], rawurl[i + 1..]]
+ } else {
+ // we have encountered an invalid character,
+ // so there is no valid scheme
+ return ['', rawurl]
+ }
+ }
+ return ['', rawurl]
+}
+
+fn get_scheme(rawurl string) ?string {
+ split := split_by_scheme(rawurl) or { return err.msg }
+ return split[0]
+}
+
+// split slices s into two substrings separated by the first occurrence of
+// sep. If cutc is true then sep is excluded from the second substring.
+// If sep does not occur in s then s and the empty string are returned.
+fn split(s string, sep byte, cutc bool) (string, string) {
+ i := s.index_byte(sep)
+ if i < 0 {
+ return s, ''
+ }
+ if cutc {
+ return s[..i], s[i + 1..]
+ }
+ return s[..i], s[i..]
+}
+
+// parse parses rawurl into a URL structure.
+//
+// The rawurl may be relative (a path, without a host) or absolute
+// (starting with a scheme). Trying to parse a hostname and path
+// without a scheme is invalid but may not necessarily return an
+// error, due to parsing ambiguities.
+pub fn parse(rawurl string) ?URL {
+ // Cut off #frag
+ u, frag := split(rawurl, `#`, true)
+ mut url := parse_url(u, false) or { return error(error_msg(urllib.err_msg_parse, u)) }
+ if frag == '' {
+ return url
+ }
+ f := unescape(frag, .encode_fragment) or { return error(error_msg(urllib.err_msg_parse,
+ u)) }
+ url.fragment = f
+ return url
+}
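+
+// Illustrative usage sketch, not part of the original file, as seen from code
+// importing net.urllib; the URL is an arbitrary example value:
+//
+// u := urllib.parse('https://example.com:8443/docs?lang=v#intro') ?
+// assert u.scheme == 'https'
+// assert u.hostname() == 'example.com'
+// assert u.port() == '8443'
+// assert u.path == '/docs'
+// assert u.raw_query == 'lang=v'
+// assert u.fragment == 'intro'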
+
+// parse_request_uri parses rawurl into a URL structure. It assumes that
+// rawurl was received in an HTTP request, so the rawurl is interpreted
+// only as an absolute URI or an absolute path.
+// The string rawurl is assumed not to have a #fragment suffix.
+// (Web browsers strip #fragment before sending the URL to a web server.)
+fn parse_request_uri(rawurl string) ?URL {
+ return parse_url(rawurl, true)
+}
+
+// parse_url parses a URL from a string in one of two contexts. If
+// via_request is true, the URL is assumed to have arrived via an HTTP request,
+// in which case only absolute URLs or path-absolute relative URLs are allowed.
+// If via_request is false, all forms of relative URLs are allowed.
+[manualfree]
+fn parse_url(rawurl string, via_request bool) ?URL {
+ if string_contains_ctl_byte(rawurl) {
+ return error(error_msg('parse_url: invalid control character in URL', rawurl))
+ }
+ if rawurl == '' && via_request {
+ return error(error_msg('parse_url: empty URL', rawurl))
+ }
+ mut url := URL{
+ user: 0
+ }
+ if rawurl == '*' {
+ url.path = '*'
+ return url
+ }
+ // Split off possible leading 'http:', 'mailto:', etc.
+ // Cannot contain escaped characters.
+ p := split_by_scheme(rawurl) ?
+ url.scheme = p[0]
+ mut rest := p[1]
+ url.scheme = url.scheme.to_lower()
+ // if rest.ends_with('?') && strings.count(rest, '?') == 1 {
+ if rest.ends_with('?') && !rest[..1].contains('?') {
+ url.force_query = true
+ rest = rest[..rest.len - 1]
+ } else {
+ r, raw_query := split(rest, `?`, true)
+ rest = r
+ url.raw_query = raw_query
+ }
+ if !rest.starts_with('/') {
+ if url.scheme != '' {
+ // We consider rootless paths per RFC 3986 as opaque.
+ url.opaque = rest
+ return url
+ }
+ if via_request {
+ return error(error_msg('parse_url: invalid URI for request', ''))
+ }
+ // Avoid confusion with malformed schemes, like cache_object:foo/bar.
+ // See golang.org/issue/16822.
+ //
+ // RFC 3986, §3.3:
+ // In addition, a URI reference (Section 4.1) may be a relative-path reference,
+ // in which case the first path segment cannot contain a colon (':') character.
+ colon := rest.index(':') or { return error('there should be a : in the URL') }
+ slash := rest.index('/') or { return error('there should be a / in the URL') }
+ if colon >= 0 && (slash < 0 || colon < slash) {
+ // First path segment has colon. Not allowed in relative URL.
+ return error(error_msg('parse_url: first path segment in URL cannot contain colon',
+ ''))
+ }
+ }
+ if ((url.scheme != '' || !via_request) && !rest.starts_with('///')) && rest.starts_with('//') {
+ authority, r := split(rest[2..], `/`, false)
+ rest = r
+ a := parse_authority(authority) ?
+ url.user = a.user
+ url.host = a.host
+ }
+ // Set path and, optionally, raw_path.
+ // raw_path is a hint of the encoding of path. We don't want to set it if
+ // the default escaping of path is equivalent, to help make sure that people
+ // don't rely on it in general.
+ url.set_path(rest) ?
+ return url
+}
+
+struct ParseAuthorityRes {
+ user &Userinfo
+ host string
+}
+
+fn parse_authority(authority string) ?ParseAuthorityRes {
+ i := authority.last_index('@') or { -1 }
+ mut host := ''
+ mut zuser := user('')
+ if i < 0 {
+ h := parse_host(authority) ?
+ host = h
+ } else {
+ h := parse_host(authority[i + 1..]) ?
+ host = h
+ }
+ if i < 0 {
+ return ParseAuthorityRes{
+ host: host
+ user: zuser
+ }
+ }
+ mut userinfo := authority[..i]
+ if !valid_userinfo(userinfo) {
+ return error(error_msg('parse_authority: invalid userinfo', ''))
+ }
+ if !userinfo.contains(':') {
+ u := unescape(userinfo, .encode_user_password) ?
+ userinfo = u
+ zuser = user(userinfo)
+ } else {
+ mut username, mut password := split(userinfo, `:`, true)
+ u := unescape(username, .encode_user_password) ?
+ username = u
+ p := unescape(password, .encode_user_password) ?
+ password = p
+ zuser = user_password(username, password)
+ }
+ return ParseAuthorityRes{
+ user: zuser
+ host: host
+ }
+}
+
+// parse_host parses host as an authority without user
+// information. That is, as host[:port].
+fn parse_host(host string) ?string {
+ if host.starts_with('[') {
+ // parse an IP-Literal in RFC 3986 and RFC 6874.
+ // E.g., '[fe80::1]', '[fe80::1%25en0]', '[fe80::1]:80'.
+ mut i := host.last_index(']') or {
+ return error(error_msg("parse_host: missing ']' in host", ''))
+ }
+ mut colon_port := host[i + 1..]
+ if !valid_optional_port(colon_port) {
+ return error(error_msg('parse_host: invalid port $colon_port after host ',
+ ''))
+ }
+ // RFC 6874 defines that %25 (%-encoded percent) introduces
+ // the zone identifier, and the zone identifier can use basically
+ // any %-encoding it likes. That's different from the host, which
+ // can only %-encode non-ASCII bytes.
+ // We do impose some restrictions on the zone, to avoid stupidity
+ // like newlines.
+ if zone := host[..i].index('%25') {
+ host1 := unescape(host[..zone], .encode_host) or { return err.msg }
+ host2 := unescape(host[zone..i], .encode_zone) or { return err.msg }
+ host3 := unescape(host[i..], .encode_host) or { return err.msg }
+ return host1 + host2 + host3
+ }
+ if idx := host.last_index(':') {
+ colon_port = host[idx..]
+ if !valid_optional_port(colon_port) {
+ return error(error_msg('parse_host: invalid port $colon_port after host ',
+ ''))
+ }
+ }
+ }
+ h := unescape(host, .encode_host) or { return err.msg }
+ return h
+ // host = h
+ // return host
+}
+
+// set_path sets the path and raw_path fields of the URL based on the provided
+// escaped path p. It maintains the invariant that raw_path is only specified
+// when it differs from the default encoding of the path.
+// For example:
+// - set_path('/foo/bar') will set path='/foo/bar' and raw_path=''
+// - set_path('/foo%2fbar') will set path='/foo/bar' and raw_path='/foo%2fbar'
+// set_path will return an error only if the provided path contains an invalid
+// escaping.
+pub fn (mut u URL) set_path(p string) ?bool {
+ path := unescape(p, .encode_path) ?
+ u.path = path
+ escp := escape(path, .encode_path)
+ if p == escp {
+ // Default encoding is fine.
+ u.raw_path = ''
+ } else {
+ u.raw_path = p
+ }
+ return true
+}
+
+// escaped_path returns the escaped form of u.path.
+// In general there are multiple possible escaped forms of any path.
+// escaped_path returns u.raw_path when it is a valid escaping of u.path.
+// Otherwise escaped_path ignores u.raw_path and computes an escaped
+// form on its own.
+// The String and request_uri methods use escaped_path to construct
+// their results.
+// In general, code should call escaped_path instead of
+// reading u.raw_path directly.
+pub fn (u &URL) escaped_path() string {
+ if u.raw_path != '' && valid_encoded_path(u.raw_path) {
+ unescape(u.raw_path, .encode_path) or { return '' }
+ return u.raw_path
+ }
+ if u.path == '*' {
+ return '*' // don't escape (Issue 11202)
+ }
+ return escape(u.path, .encode_path)
+}
+
+// valid_encoded_path reports whether s is a valid encoded path.
+// It must not contain any bytes that require escaping during path encoding.
+fn valid_encoded_path(s string) bool {
+ for i in 0 .. s.len {
+ // RFC 3986, Appendix A.
+ // pchar = unreserved / pct-encoded / sub-delims / ':' / '@'.
+ // should_escape is not quite compliant with the RFC,
+ // so we check the sub-delims ourselves and let
+ // should_escape handle the others.
+ x := s[i]
+ match x {
+ `!`, `$`, `&`, `\\`, `(`, `)`, `*`, `+`, `,`, `;`, `=`, `:`, `@` {
+ // ok
+ }
+ `[`, `]` {
+ // ok - not specified in RFC 3986 but left alone by modern browsers
+ }
+ `%` {
+ // ok - percent encoded, will decode
+ }
+ else {
+ if should_escape(s[i], .encode_path) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// valid_optional_port reports whether port is either an empty string
+// or matches /^:\d*$/
+fn valid_optional_port(port string) bool {
+ if port == '' {
+ return true
+ }
+ if port[0] != `:` {
+ return false
+ }
+ for b in port[1..] {
+ if b < `0` || b > `9` {
+ return false
+ }
+ }
+ return true
+}
+
+// str reassembles the URL into a valid URL string.
+// The general form of the result is one of:
+//
+// scheme:opaque?query#fragment
+// scheme://userinfo@host/path?query#fragment
+//
+// If u.opaque is non-empty, String uses the first form;
+// otherwise it uses the second form.
+// Any non-ASCII characters in host are escaped.
+// To obtain the path, String uses u.escaped_path().
+//
+// In the second form, the following rules apply:
+// - if u.scheme is empty, scheme: is omitted.
+// - if u.user is nil, userinfo@ is omitted.
+// - if u.host is empty, host/ is omitted.
+// - if u.scheme and u.host are empty and u.user is nil,
+// the entire scheme://userinfo@host/ is omitted.
+// - if u.host is non-empty and u.path begins with a /,
+// the form host/path does not add its own /.
+// - if u.raw_query is empty, ?query is omitted.
+// - if u.fragment is empty, #fragment is omitted.
+pub fn (u URL) str() string {
+ mut buf := strings.new_builder(200)
+ if u.scheme != '' {
+ buf.write_string(u.scheme)
+ buf.write_string(':')
+ }
+ if u.opaque != '' {
+ buf.write_string(u.opaque)
+ } else {
+ if u.scheme != '' || u.host != '' || !u.user.empty() {
+ if u.host != '' || u.path != '' || !u.user.empty() {
+ buf.write_string('//')
+ }
+ if !u.user.empty() {
+ buf.write_string(u.user.str())
+ buf.write_string('@')
+ }
+ if u.host != '' {
+ buf.write_string(escape(u.host, .encode_host))
+ }
+ }
+ path := u.escaped_path()
+ if path != '' && path[0] != `/` && u.host != '' {
+ buf.write_string('/')
+ }
+ if buf.len == 0 {
+ // RFC 3986 §4.2
+ // A path segment that contains a colon character (e.g., 'this:that')
+ // cannot be used as the first segment of a relative-path reference, as
+ // it would be mistaken for a scheme name. Such a segment must be
+ // preceded by a dot-segment (e.g., './this:that') to make a relative-
+ // path reference.
+ i := path.index_byte(`:`)
+ if i > -1 {
+ // TODO remove this when autofree handles tmp
+ // expressions like this
+ if i > -1 && path[..i].index_byte(`/`) == -1 {
+ buf.write_string('./')
+ }
+ }
+ }
+ buf.write_string(path)
+ }
+ if u.force_query || u.raw_query != '' {
+ buf.write_string('?')
+ buf.write_string(u.raw_query)
+ }
+ if u.fragment != '' {
+ buf.write_string('#')
+ buf.write_string(escape(u.fragment, .encode_fragment))
+ }
+ return buf.str()
+}
+
+// Values maps a string key to a list of values.
+// It is typically used for query parameters and form values.
+// Unlike in the http.Header map, the keys in a Values map
+// are case-sensitive.
+// parse_query parses the URL-encoded query string and returns
+// a map listing the values specified for each key.
+// parse_query always returns a non-nil map containing all the
+// valid query parameters found; err describes the first decoding error
+// encountered, if any.
+//
+// Query is expected to be a list of key=value settings separated by
+// ampersands or semicolons. A setting without an equals sign is
+// interpreted as a key set to an empty value.
+pub fn parse_query(query string) ?Values {
+ mut m := new_values()
+ parse_query_values(mut m, query) ?
+ return m
+}
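+
+// Illustrative usage sketch, not part of the original file; get and get_all are
+// defined on Values in values.v in this module:
+//
+// q := urllib.parse_query('lang=v&lang=go&empty') ?
+// assert q.get('lang') == 'v'
+// assert q.get_all('lang') == ['v', 'go']
+// assert q.get('empty') == ''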
+
+// parse_query_silent is the same as parse_query
+// but any errors are silently ignored
+fn parse_query_silent(query string) Values {
+ mut m := new_values()
+ parse_query_values(mut m, query) or {}
+ return m
+}
+
+fn parse_query_values(mut m Values, query string) ?bool {
+ mut had_error := false
+ mut q := query
+ for q != '' {
+ mut key := q
+ mut i := key.index_any('&;')
+ if i >= 0 {
+ q = key[i + 1..]
+ key = key[..i]
+ } else {
+ q = ''
+ }
+ if key == '' {
+ continue
+ }
+ mut value := ''
+ if idx := key.index('=') {
+ i = idx
+ value = key[i + 1..]
+ key = key[..i]
+ }
+ k := query_unescape(key) or {
+ had_error = true
+ continue
+ }
+ key = k
+ v := query_unescape(value) or {
+ had_error = true
+ continue
+ }
+ value = v
+ m.add(key, value)
+ }
+ if had_error {
+ return error(error_msg('parse_query_values: failed parsing query string', ''))
+ }
+ return true
+}
+
+// encode encodes the values into ``URL encoded'' form
+// ('bar=baz&foo=quux') sorted by key.
+pub fn (v Values) encode() string {
+ if v.len == 0 {
+ return ''
+ }
+ mut buf := strings.new_builder(200)
+ mut keys := []string{}
+ for k, _ in v.data {
+ keys << k
+ }
+ keys.sort()
+ for k in keys {
+ vs := v.data[k]
+ key_kscaped := query_escape(k)
+ for _, val in vs.data {
+ if buf.len > 0 {
+ buf.write_string('&')
+ }
+ buf.write_string(key_kscaped)
+ buf.write_string('=')
+ buf.write_string(query_escape(val))
+ }
+ }
+ return buf.str()
+}
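+
+// Illustrative usage sketch, not part of the original file; new_values and add
+// are defined in values.v in this module:
+//
+// mut v := urllib.new_values()
+// v.add('q', 'v lang')
+// v.add('page', '2')
+// assert v.encode() == 'page=2&q=v+lang'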
+
+// resolve_path applies special path segments from refs and applies
+// them to base, per RFC 3986.
+fn resolve_path(base string, ref string) string {
+ mut full := ''
+ if ref == '' {
+ full = base
+ } else if ref[0] != `/` {
+ i := base.last_index('/') or { -1 }
+ full = base[..i + 1] + ref
+ } else {
+ full = ref
+ }
+ if full == '' {
+ return ''
+ }
+ mut dst := []string{}
+ src := full.split('/')
+ for _, elem in src {
+ match elem {
+ '.' {
+ // drop
+ }
+ '..' {
+ if dst.len > 0 {
+ dst = dst[..dst.len - 1]
+ }
+ }
+ else {
+ dst << elem
+ }
+ }
+ }
+ last := src[src.len - 1]
+ if last == '.' || last == '..' {
+ // Add final slash to the joined path.
+ dst << ''
+ }
+ return '/' + dst.join('/').trim_left('/')
+}
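+
+// For example (illustrative, not part of the original file):
+// resolve_path('/a/b/c', 'd') returns '/a/b/d'
+// resolve_path('/a/b/c', '../d') returns '/a/d'
+// resolve_path('/a/b/c', '/x') returns '/x'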
+
+// is_abs reports whether the URL is absolute.
+// Absolute means that it has a non-empty scheme.
+pub fn (u &URL) is_abs() bool {
+ return u.scheme != ''
+}
+
+// parse parses a URL in the context of the receiver. The provided URL
+// may be relative or absolute. parse returns nil, err on parse
+// failure, otherwise its return value is the same as resolve_reference.
+pub fn (u &URL) parse(ref string) ?URL {
+ refurl := parse(ref) ?
+ return u.resolve_reference(refurl)
+}
+
+// resolve_reference resolves a URI reference to an absolute URI from
+// an absolute base URI u, per RFC 3986 Section 5.2. The URI reference
+// may be relative or absolute. resolve_reference always returns a new
+// URL instance, even if the returned URL is identical to either the
+// base or reference. If ref is an absolute URL, then resolve_reference
+// ignores base and returns a copy of ref.
+pub fn (u &URL) resolve_reference(ref &URL) ?URL {
+ mut url := *ref
+ if ref.scheme == '' {
+ url.scheme = u.scheme
+ }
+ if ref.scheme != '' || ref.host != '' || !ref.user.empty() {
+ // The 'absoluteURI' or 'net_path' cases.
+ // We can ignore the error from set_path since we know we provided a
+ // validly-escaped path.
+ url.set_path(resolve_path(ref.escaped_path(), '')) ?
+ return url
+ }
+ if ref.opaque != '' {
+ url.user = user('')
+ url.host = ''
+ url.path = ''
+ return url
+ }
+ if ref.path == '' && ref.raw_query == '' {
+ url.raw_query = u.raw_query
+ if ref.fragment == '' {
+ url.fragment = u.fragment
+ }
+ }
+ // The 'abs_path' or 'rel_path' cases.
+ url.host = u.host
+ url.user = u.user
+ url.set_path(resolve_path(u.escaped_path(), ref.escaped_path())) ?
+ return url
+}
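+
+// Illustrative usage sketch, not part of the original file; the URLs are
+// arbitrary example values:
+//
+// base := urllib.parse('https://example.com/dir/page.html') ?
+// ref := urllib.parse('/other.html') ?
+// abs := base.resolve_reference(ref) ?
+// assert abs.str() == 'https://example.com/other.html'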
+
+// query parses raw_query and returns the corresponding values.
+// It silently discards malformed value pairs.
+// To check for errors, use parse_query.
+pub fn (u &URL) query() Values {
+ v := parse_query_silent(u.raw_query)
+ return v
+}
+
+// request_uri returns the encoded path?query or opaque?query
+// string that would be used in an HTTP request for u.
+pub fn (u &URL) request_uri() string {
+ mut result := u.opaque
+ if result == '' {
+ result = u.escaped_path()
+ if result == '' {
+ result = '/'
+ }
+ } else {
+ if result.starts_with('//') {
+ result = u.scheme + ':' + result
+ }
+ }
+ if u.force_query || u.raw_query != '' {
+ result += '?' + u.raw_query
+ }
+ return result
+}
+
+// hostname returns u.host, stripping any valid port number if present.
+//
+// If the result is enclosed in square brackets, as literal IPv6 addresses are,
+// the square brackets are removed from the result.
+pub fn (u &URL) hostname() string {
+ host, _ := split_host_port(u.host)
+ return host
+}
+
+// port returns the port part of u.host, without the leading colon.
+// If u.host doesn't contain a port, port returns an empty string.
+pub fn (u &URL) port() string {
+ _, port := split_host_port(u.host)
+ return port
+}
+
+// split_host_port separates host and port. If the port is not valid, it returns
+// the entire input as host, and it doesn't check the validity of the host.
+// Per RFC 3986, it requires ports to be numeric.
+fn split_host_port(hostport string) (string, string) {
+ mut host := hostport
+ mut port := ''
+ colon := host.last_index_byte(`:`)
+ if colon != -1 {
+ if valid_optional_port(host[colon..]) {
+ port = host[colon + 1..]
+ host = host[..colon]
+ }
+ }
+ if host.starts_with('[') && host.ends_with(']') {
+ host = host[1..host.len - 1]
+ }
+ return host, port
+}
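+
+// For example (illustrative, not part of the original file):
+// split_host_port('example.com:8080') returns ('example.com', '8080')
+// split_host_port('[::1]:8080') returns ('::1', '8080')
+// split_host_port('example.com') returns ('example.com', '')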
+
+// valid_userinfo reports whether s is a valid userinfo string per RFC 3986
+// Section 3.2.1:
+// userinfo = *( unreserved / pct-encoded / sub-delims / ':' )
+// unreserved = ALPHA / DIGIT / '-' / '.' / '_' / '~'
+// sub-delims = '!' / '$' / '&' / ''' / '(' / ')'
+// / '*' / '+' / ',' / ';' / '='
+//
+// It doesn't validate pct-encoded. The caller does that via fn unescape.
+pub fn valid_userinfo(s string) bool {
+ for r in s {
+ if `A` <= r && r <= `Z` {
+ continue
+ }
+ if `a` <= r && r <= `z` {
+ continue
+ }
+ if `0` <= r && r <= `9` {
+ continue
+ }
+ match r {
+ `-`, `.`, `_`, `:`, `~`, `!`, `$`, `&`, `\\`, `(`, `)`, `*`, `+`, `,`, `;`, `=`, `%`,
+ `@` {
+ continue
+ }
+ else {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// string_contains_ctl_byte reports whether s contains any ASCII control character.
+fn string_contains_ctl_byte(s string) bool {
+ for i in 0 .. s.len {
+ b := s[i]
+ if b < ` ` || b == 0x7f {
+ return true
+ }
+ }
+ return false
+}
+
+pub fn ishex(c byte) bool {
+ if `0` <= c && c <= `9` {
+ return true
+ } else if `a` <= c && c <= `f` {
+ return true
+ } else if `A` <= c && c <= `F` {
+ return true
+ }
+ return false
+}
+
+fn unhex(c byte) byte {
+ if `0` <= c && c <= `9` {
+ return c - `0`
+ } else if `a` <= c && c <= `f` {
+ return c - `a` + 10
+ } else if `A` <= c && c <= `F` {
+ return c - `A` + 10
+ }
+ return 0
+}
diff --git a/v_windows/v/vlib/net/urllib/urllib_test.v b/v_windows/v/vlib/net/urllib/urllib_test.v
new file mode 100644
index 0000000..0870c81
--- /dev/null
+++ b/v_windows/v/vlib/net/urllib/urllib_test.v
@@ -0,0 +1,51 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+import net.urllib
+
+fn test_net_urllib() {
+ test_query := 'Hellö Wörld@vlang'
+ assert urllib.query_escape(test_query) == 'Hell%C3%B6+W%C3%B6rld%40vlang'
+
+ test_url := 'https://joe:pass@www.mydomain.com:8080/som/url?param1=test1&param2=test2&foo=bar#testfragment'
+ u := urllib.parse(test_url) or {
+ assert false
+ return
+ }
+ assert u.scheme == 'https' && u.hostname() == 'www.mydomain.com' && u.port() == '8080'
+ && u.path == '/som/url' && u.fragment == 'testfragment' && u.user.username == 'joe'
+ && u.user.password == 'pass'
+}
+
+fn test_str() {
+ url := urllib.parse('https://en.wikipedia.org/wiki/Brazil_(1985_film)') or {
+ panic('unable to parse URL')
+ }
+ assert url.str() == 'https://en.wikipedia.org/wiki/Brazil_(1985_film)'
+}
+
+fn test_escape_unescape() {
+ original := 'те ст: т\\%'
+ escaped := urllib.query_escape(original)
+ assert escaped == '%D1%82%D0%B5+%D1%81%D1%82%3A+%D1%82%5C%25'
+ unescaped := urllib.query_unescape(escaped) or {
+ assert false
+ return
+ }
+ assert unescaped == original
+}
+
+fn test_parse_query() ? {
+ q1 := urllib.parse_query('format=%22%25l%3A+%25c+%25t%22') ?
+ q2 := urllib.parse_query('format="%l:+%c+%t"') ?
+ // dump(q1)
+ // dump(q2)
+ assert q1.data['format'].data == ['"%l: %c %t"']
+ assert q2.data['format'].data == ['"%l: %c %t"']
+}
+
+fn test_parse_missing_host() ? {
+ // issue #10311
+ url := urllib.parse('http:///') ?
+ assert url.str() == 'http://///'
+}
diff --git a/v_windows/v/vlib/net/urllib/values.v b/v_windows/v/vlib/net/urllib/values.v
new file mode 100644
index 0000000..ee5c329
--- /dev/null
+++ b/v_windows/v/vlib/net/urllib/values.v
@@ -0,0 +1,87 @@
+// Copyright (c) 2019-2021 Alexander Medvednikov. All rights reserved.
+// Use of this source code is governed by an MIT license
+// that can be found in the LICENSE file.
+module urllib
+
+struct Value {
+pub mut:
+ data []string
+}
+
+struct Values {
+pub mut:
+ data map[string]Value
+ len int
+}
+
+// new_values returns a new Values struct for creating
+// urlencoded query string parameters. It can also be used to
+// post form data with application/x-www-form-urlencoded.
+// values.encode() will return the encoded data
+pub fn new_values() Values {
+ return Values{
+ data: map[string]Value{}
+ }
+}
+
+// Currently you will need to use all()[key].data.
+// Once map[string][]string is implemented,
+// this will be fixed.
+pub fn (v &Value) all() []string {
+ return v.data
+}
+
+// get gets the first value associated with the given key.
+// If there are no values associated with the key, get returns
+// an empty string.
+pub fn (v &Values) get(key string) string {
+ if v.data.len == 0 {
+ return ''
+ }
+ vs := v.data[key]
+ if vs.data.len == 0 {
+ return ''
+ }
+ return vs.data[0]
+}
+
+// get_all gets all the values associated with the given key.
+// If there are no values associated with the key, get_all returns
+// an empty []string.
+pub fn (v &Values) get_all(key string) []string {
+ if v.data.len == 0 {
+ return []
+ }
+ vs := v.data[key]
+ if vs.data.len == 0 {
+ return []
+ }
+ return vs.data
+}
+
+// set sets the key to value. It replaces any existing
+// values.
+pub fn (mut v Values) set(key string, value string) {
+ mut a := v.data[key]
+ a.data = [value]
+ v.data[key] = a
+ v.len = v.data.len
+}
+
+// add adds the value to key. It appends to any existing
+// values associated with key.
+pub fn (mut v Values) add(key string, value string) {
+ mut a := v.data[key]
+ if a.data.len == 0 {
+ a.data = []
+ }
+ a.data << value
+ v.data[key] = a
+ v.len = v.data.len
+}
+
+// del deletes the values associated with key.
+pub fn (mut v Values) del(key string) {
+ v.data.delete(key)
+ v.len = v.data.len
+}
diff --git a/v_windows/v/vlib/net/util.v b/v_windows/v/vlib/net/util.v
new file mode 100644
index 0000000..33d7cec
--- /dev/null
+++ b/v_windows/v/vlib/net/util.v
@@ -0,0 +1,27 @@
+module net
+
+const (
+ socket_max_port = u16(0xFFFF)
+)
+
+// validate_port checks whether a port is valid
+// and returns the port or an error
+pub fn validate_port(port int) ?u16 {
+ if port <= net.socket_max_port {
+ return u16(port)
+ } else {
+ return err_port_out_of_range
+ }
+}
+
+// split_address splits an address into its host name and its port
+pub fn split_address(addr string) ?(string, u16) {
+ port := addr.all_after_last(':').int()
+ address := addr.all_before_last(':')
+
+ // TODO(emily): Maybe do some more checking here
+ // to validate ipv6 address sanity?
+
+ p := validate_port(port) ?
+ return address, p
+}
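+
+// Illustrative usage sketch, not part of the original file, as seen from code
+// importing net; the address is an arbitrary example value:
+//
+// host, port := net.split_address('localhost:8080') ?
+// assert host == 'localhost'
+// assert port == u16(8080)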
diff --git a/v_windows/v/vlib/net/websocket/events.v b/v_windows/v/vlib/net/websocket/events.v
new file mode 100644
index 0000000..a442daf
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/events.v
@@ -0,0 +1,227 @@
+module websocket
+
+// MessageEventHandler represents a callback on a new message
+struct MessageEventHandler {
+ handler SocketMessageFn // callback function
+ handler2 SocketMessageFn2 // callback function with reference
+ is_ref bool // true if has a reference object
+ ref voidptr // referenced object
+}
+
+// ErrorEventHandler represents a callback on error
+struct ErrorEventHandler {
+ handler SocketErrorFn // callback function
+ handler2 SocketErrorFn2 // callback function with reference
+ is_ref bool // true if has a reference object
+ ref voidptr // referenced object
+}
+
+// OpenEventHandler represents a callback when connection is opened
+struct OpenEventHandler {
+ handler SocketOpenFn // callback function
+ handler2 SocketOpenFn2 // callback function with reference
+ is_ref bool // true if has a reference object
+ ref voidptr // referenced object
+}
+
+// CloseEventHandler represents a callback on a closing event
+struct CloseEventHandler {
+ handler SocketCloseFn // callback function
+ handler2 SocketCloseFn2 // callback function with reference
+ is_ref bool // true if has a reference object
+ ref voidptr // referenced object
+}
+
+pub type AcceptClientFn = fn (mut c ServerClient) ?bool
+
+pub type SocketMessageFn = fn (mut c Client, msg &Message) ?
+
+pub type SocketMessageFn2 = fn (mut c Client, msg &Message, v voidptr) ?
+
+pub type SocketErrorFn = fn (mut c Client, err string) ?
+
+pub type SocketErrorFn2 = fn (mut c Client, err string, v voidptr) ?
+
+pub type SocketOpenFn = fn (mut c Client) ?
+
+pub type SocketOpenFn2 = fn (mut c Client, v voidptr) ?
+
+pub type SocketCloseFn = fn (mut c Client, code int, reason string) ?
+
+pub type SocketCloseFn2 = fn (mut c Client, code int, reason string, v voidptr) ?
+
+// on_connect registers a callback when client connects to the server
+pub fn (mut s Server) on_connect(fun AcceptClientFn) ? {
+ if s.accept_client_callbacks.len > 0 {
+ return error('only one callback can be registered for accept client')
+ }
+ s.accept_client_callbacks << fun
+}
+
+// on_message registers a callback on new messages
+pub fn (mut s Server) on_message(fun SocketMessageFn) {
+ s.message_callbacks << MessageEventHandler{
+ handler: fun
+ }
+}
+
+// on_message_ref registers a callback on new messages and provides a reference object
+pub fn (mut s Server) on_message_ref(fun SocketMessageFn2, ref voidptr) {
+ s.message_callbacks << MessageEventHandler{
+ handler2: fun
+ ref: ref
+ is_ref: true
+ }
+}
+
+// on_close registers a callback on closed socket
+pub fn (mut s Server) on_close(fun SocketCloseFn) {
+ s.close_callbacks << CloseEventHandler{
+ handler: fun
+ }
+}
+
+// on_close_ref registers a callback on closed socket and provides a reference object
+pub fn (mut s Server) on_close_ref(fun SocketCloseFn2, ref voidptr) {
+ s.close_callbacks << CloseEventHandler{
+ handler2: fun
+ ref: ref
+ is_ref: true
+ }
+}
+
+// on_message registers a callback on new messages
+pub fn (mut ws Client) on_message(fun SocketMessageFn) {
+ ws.message_callbacks << MessageEventHandler{
+ handler: fun
+ }
+}
+
+// on_message_ref registers a callback on new messages and provides a reference object
+pub fn (mut ws Client) on_message_ref(fun SocketMessageFn2, ref voidptr) {
+ ws.message_callbacks << MessageEventHandler{
+ handler2: fun
+ ref: ref
+ is_ref: true
+ }
+}
+
+// on_error registers a callback on errors
+pub fn (mut ws Client) on_error(fun SocketErrorFn) {
+ ws.error_callbacks << ErrorEventHandler{
+ handler: fun
+ }
+}
+
+// on_error_ref registers a callback on errors and provides a reference object
+pub fn (mut ws Client) on_error_ref(fun SocketErrorFn2, ref voidptr) {
+ ws.error_callbacks << ErrorEventHandler{
+ handler2: fun
+ ref: ref
+ is_ref: true
+ }
+}
+
+// on_open registers a callback that is run when the websocket is successfully opened
+pub fn (mut ws Client) on_open(fun SocketOpenFn) {
+ ws.open_callbacks << OpenEventHandler{
+ handler: fun
+ }
+}
+
+// on_open_ref registers a callback that is run when the websocket is successfully opened
+// and provides a reference object
+pub fn (mut ws Client) on_open_ref(fun SocketOpenFn2, ref voidptr) {
+ ws.open_callbacks << OpenEventHandler{
+ handler2: fun
+ ref: ref
+ is_ref: true
+ }
+}
+
+// on_close registers a callback on closed socket
+pub fn (mut ws Client) on_close(fun SocketCloseFn) {
+ ws.close_callbacks << CloseEventHandler{
+ handler: fun
+ }
+}
+
+// on_close_ref registers a callback on closed socket and provides a reference object
+pub fn (mut ws Client) on_close_ref(fun SocketCloseFn2, ref voidptr) {
+ ws.close_callbacks << CloseEventHandler{
+ handler2: fun
+ ref: ref
+ is_ref: true
+ }
+}
+
+// send_connect_event invokes the on_connect callback
+fn (mut s Server) send_connect_event(mut c ServerClient) ?bool {
+ if s.accept_client_callbacks.len == 0 {
+ // if no callback is registered, all clients are accepted
+ return true
+ }
+ fun := s.accept_client_callbacks[0]
+ res := fun(mut c) ?
+ return res
+}
+
+// send_message_event invokes the on_message callback
+fn (mut ws Client) send_message_event(msg &Message) {
+ ws.debug_log('sending on_message event')
+ for ev_handler in ws.message_callbacks {
+ if !ev_handler.is_ref {
+ ev_handler.handler(ws, msg) or { ws.logger.error('send_message_event error: $err') }
+ } else {
+ ev_handler.handler2(ws, msg, ev_handler.ref) or {
+ ws.logger.error('send_message_event error: $err')
+ }
+ }
+ }
+}
+
+// send_error_event invokes the on_error callback
+fn (mut ws Client) send_error_event(error string) {
+ ws.debug_log('sending on_error event')
+ for ev_handler in ws.error_callbacks {
+ if !ev_handler.is_ref {
+ ev_handler.handler(mut ws, error) or {
+ ws.logger.error('send_error_event error: $error, err: $err')
+ }
+ } else {
+ ev_handler.handler2(mut ws, error, ev_handler.ref) or {
+ ws.logger.error('send_error_event error: $error, err: $err')
+ }
+ }
+ }
+}
+
+// send_close_event invokes the on_close callback
+fn (mut ws Client) send_close_event(code int, reason string) {
+ ws.debug_log('sending on_close event')
+ for ev_handler in ws.close_callbacks {
+ if !ev_handler.is_ref {
+ ev_handler.handler(mut ws, code, reason) or {
+ ws.logger.error('send_close_event error: $err')
+ }
+ } else {
+ ev_handler.handler2(mut ws, code, reason, ev_handler.ref) or {
+ ws.logger.error('send_close_event error: $err')
+ }
+ }
+ }
+}
+
+// send_open_event invokes the on_open callback
+fn (mut ws Client) send_open_event() {
+ ws.debug_log('sending on_open event')
+ for ev_handler in ws.open_callbacks {
+ if !ev_handler.is_ref {
+ ev_handler.handler(mut ws) or { ws.logger.error('send_open_event error: $err') }
+ } else {
+ ev_handler.handler2(mut ws, ev_handler.ref) or {
+ ws.logger.error('send_open_event error: $err')
+ }
+ }
+ }
+}
diff --git a/v_windows/v/vlib/net/websocket/handshake.v b/v_windows/v/vlib/net/websocket/handshake.v
new file mode 100644
index 0000000..9f3ab00
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/handshake.v
@@ -0,0 +1,185 @@
+[manualfree]
+module websocket
+
+import encoding.base64
+import strings
+
+// handshake manages the websocket handshake process
+fn (mut ws Client) handshake() ? {
+ nonce := get_nonce(ws.nonce_size)
+ seckey := base64.encode_str(nonce)
+ mut sb := strings.new_builder(1024)
+ defer {
+ unsafe { sb.free() }
+ }
+ sb.write_string('GET ')
+ sb.write_string(ws.uri.resource)
+ sb.write_string(ws.uri.querystring)
+ sb.write_string(' HTTP/1.1\r\nHost: ')
+ sb.write_string(ws.uri.hostname)
+ sb.write_string(':')
+ sb.write_string(ws.uri.port)
+ sb.write_string('\r\nUpgrade: websocket\r\nConnection: Upgrade\r\n')
+ sb.write_string('Sec-WebSocket-Key: ')
+ sb.write_string(seckey)
+ sb.write_string('\r\nSec-WebSocket-Version: 13')
+ for key in ws.header.keys() {
+ val := ws.header.custom_values(key).join(',')
+ sb.write_string('\r\n$key:$val')
+ }
+ sb.write_string('\r\n\r\n')
+ handshake := sb.str()
+ defer {
+ unsafe { handshake.free() }
+ }
+ handshake_bytes := handshake.bytes()
+ ws.debug_log('sending handshake: $handshake')
+ ws.socket_write(handshake_bytes) ?
+ ws.read_handshake(seckey) ?
+ unsafe { handshake_bytes.free() }
+}
+
+// handle_server_handshake manages the websocket server handshake process
+fn (mut s Server) handle_server_handshake(mut c Client) ?(string, &ServerClient) {
+ msg := c.read_handshake_str() ?
+ handshake_response, client := s.parse_client_handshake(msg, mut c) ?
+ unsafe { msg.free() }
+ return handshake_response, client
+}
+
+// parse_client_handshake parses the client handshake request and builds the server response
+fn (mut s Server) parse_client_handshake(client_handshake string, mut c Client) ?(string, &ServerClient) {
+ s.logger.debug('server-> client handshake:\n$client_handshake')
+ lines := client_handshake.split_into_lines()
+ get_tokens := lines[0].split(' ')
+ if get_tokens.len < 3 {
+ return error_with_code('unexpected get operation, $get_tokens', 1)
+ }
+ if get_tokens[0].trim_space() != 'GET' {
+ return error_with_code("unexpected request '${get_tokens[0]}', expected 'GET'",
+ 2)
+ }
+ if get_tokens[2].trim_space() != 'HTTP/1.1' {
+ return error_with_code("unexpected request $get_tokens, expected 'HTTP/1.1'",
+ 3)
+ }
+ mut seckey := ''
+ mut flags := []Flag{}
+ mut key := ''
+ for i in 1 .. lines.len {
+ if lines[i].len <= 0 || lines[i] == '\r\n' {
+ continue
+ }
+ keys := lines[i].split(':')
+ match keys[0] {
+ 'Upgrade', 'upgrade' {
+ flags << .has_upgrade
+ }
+ 'Connection', 'connection' {
+ flags << .has_connection
+ }
+ 'Sec-WebSocket-Key', 'sec-websocket-key' {
+ key = keys[1].trim_space()
+ s.logger.debug('server-> got key: $key')
+ seckey = create_key_challenge_response(key) ?
+ s.logger.debug('server-> challenge: $seckey, response: ${keys[1]}')
+ flags << .has_accept
+ }
+ else {
+ // we ignore other headers like protocol for now
+ }
+ }
+ unsafe { keys.free() }
+ }
+ if flags.len < 3 {
+ return error_with_code('invalid client handshake, $client_handshake', 4)
+ }
+ server_handshake := 'HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: $seckey\r\n\r\n'
+ server_client := &ServerClient{
+ resource_name: get_tokens[1]
+ client_key: key
+ client: unsafe { c }
+ server: unsafe { s }
+ }
+ unsafe {
+ lines.free()
+ flags.free()
+ get_tokens.free()
+ seckey.free()
+ key.free()
+ }
+ return server_handshake, server_client
+}
+
+// read_handshake_str reads the raw handshake message from the socket and returns it as a string
+fn (mut ws Client) read_handshake_str() ?string {
+ mut total_bytes_read := 0
+ mut msg := [1024]byte{}
+ mut buffer := [1]byte{}
+ for total_bytes_read < 1024 {
+ bytes_read := ws.socket_read_ptr(&buffer[0], 1) ?
+ if bytes_read == 0 {
+ return error_with_code('unexpected no response from handshake', 5)
+ }
+ msg[total_bytes_read] = buffer[0]
+ total_bytes_read++
+ if total_bytes_read > 5 && msg[total_bytes_read - 1] == `\n`
+ && msg[total_bytes_read - 2] == `\r` && msg[total_bytes_read - 3] == `\n`
+ && msg[total_bytes_read - 4] == `\r` {
+ break
+ }
+ }
+ res := msg[..total_bytes_read].bytestr()
+ return res
+}
+
+// read_handshake reads the handshake result and checks that it is valid
+fn (mut ws Client) read_handshake(seckey string) ? {
+ mut msg := ws.read_handshake_str() ?
+ ws.check_handshake_response(msg, seckey) ?
+ unsafe { msg.free() }
+}
+
+// check_handshake_response validates the handshake response against the security key
+// that was sent by the websocket client
+fn (mut ws Client) check_handshake_response(handshake_response string, seckey string) ? {
+ ws.debug_log('handshake response:\n$handshake_response')
+ lines := handshake_response.split_into_lines()
+ header := lines[0]
+ if !header.starts_with('HTTP/1.1 101') && !header.starts_with('HTTP/1.0 101') {
+ return error_with_code('handshake_handler: invalid HTTP status response code, $header',
+ 6)
+ }
+ for i in 1 .. lines.len {
+ if lines[i].len <= 0 || lines[i] == '\r\n' {
+ continue
+ }
+ keys := lines[i].split(':')
+ match keys[0] {
+ 'Upgrade', 'upgrade' {
+ ws.flags << .has_upgrade
+ }
+ 'Connection', 'connection' {
+ ws.flags << .has_connection
+ }
+ 'Sec-WebSocket-Accept', 'sec-websocket-accept' {
+ ws.debug_log('seckey: $seckey')
+ challenge := create_key_challenge_response(seckey) ?
+ ws.debug_log('challenge: $challenge, response: ${keys[1]}')
+ if keys[1].trim_space() != challenge {
+ return error_with_code('handshake_handler: Sec-WebSocket-Accept header does not match computed sha1/base64 response.',
+ 7)
+ }
+ ws.flags << .has_accept
+ unsafe { challenge.free() }
+ }
+ else {}
+ }
+ unsafe { keys.free() }
+ }
+ unsafe { lines.free() }
+ if ws.flags.len < 3 {
+ ws.close(1002, 'invalid websocket HTTP headers') ?
+ return error_with_code('invalid websocket HTTP headers', 8)
+ }
+}
diff --git a/v_windows/v/vlib/net/websocket/io.v b/v_windows/v/vlib/net/websocket/io.v
new file mode 100644
index 0000000..5408a4e
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/io.v
@@ -0,0 +1,100 @@
+module websocket
+
+import net
+import time
+
+// socket_read reads from socket into the provided buffer
+fn (mut ws Client) socket_read(mut buffer []byte) ?int {
+ lock {
+ if ws.state in [.closed, .closing] || ws.conn.sock.handle <= 1 {
+ return error('socket_read: trying to read a closed socket')
+ }
+ if ws.is_ssl {
+ r := ws.ssl_conn.read_into(mut buffer) ?
+ return r
+ } else {
+ for {
+ r := ws.conn.read(mut buffer) or {
+ if err.code == net.err_timed_out_code {
+ continue
+ }
+ return err
+ }
+ return r
+ }
+ }
+ }
+ return none
+}
+
+// socket_read_ptr reads up to len bytes from the socket into the provided byte pointer
+fn (mut ws Client) socket_read_ptr(buf_ptr &byte, len int) ?int {
+ lock {
+ if ws.state in [.closed, .closing] || ws.conn.sock.handle <= 1 {
+ return error('socket_read_ptr: trying to read a closed socket')
+ }
+ if ws.is_ssl {
+ r := ws.ssl_conn.socket_read_into_ptr(buf_ptr, len) ?
+ return r
+ } else {
+ for {
+ r := ws.conn.read_ptr(buf_ptr, len) or {
+ if err.code == net.err_timed_out_code {
+ continue
+ }
+ return err
+ }
+ return r
+ }
+ }
+ }
+ return none
+}
+
+// socket_write writes the provided byte array to the socket
+fn (mut ws Client) socket_write(bytes []byte) ?int {
+ lock {
+ if ws.state == .closed || ws.conn.sock.handle <= 1 {
+ ws.debug_log('socket_write: socket already closed')
+ return error('socket_write: trying to write on a closed socket')
+ }
+ if ws.is_ssl {
+ return ws.ssl_conn.write(bytes)
+ } else {
+ for {
+ n := ws.conn.write(bytes) or {
+ if err.code == net.err_timed_out_code {
+ continue
+ }
+ return err
+ }
+ return n
+ }
+ panic('reached unreachable code')
+ }
+ }
+}
+
+// shutdown_socket shuts down the socket properly when the connection is closed
+fn (mut ws Client) shutdown_socket() ? {
+ ws.debug_log('shutting down socket')
+ if ws.is_ssl {
+ ws.ssl_conn.shutdown() ?
+ } else {
+ ws.conn.close() ?
+ }
+}
+
+// dial_socket connects the TCP socket and initializes the default options
+fn (mut ws Client) dial_socket() ?&net.TcpConn {
+ tcp_address := '$ws.uri.hostname:$ws.uri.port'
+ mut t := net.dial_tcp(tcp_address) ?
+ optval := int(1)
+ t.sock.set_option_int(.keep_alive, optval) ?
+ t.set_read_timeout(30 * time.second)
+ t.set_write_timeout(30 * time.second)
+ if ws.is_ssl {
+ ws.ssl_conn.connect(mut t, ws.uri.hostname) ?
+ }
+ return t
+}
diff --git a/v_windows/v/vlib/net/websocket/message.v b/v_windows/v/vlib/net/websocket/message.v
new file mode 100644
index 0000000..4c57232
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/message.v
@@ -0,0 +1,295 @@
+module websocket
+
+import encoding.utf8
+
+const (
+ header_len_offset = 2 // offset for the length part of the websocket header
+ buffer_size = 256 // default buffer size
+ extended_payload16_end_byte = 4 // header length with 16-bit extended payload
+ extended_payload64_end_byte = 10 // header length with 64-bit extended payload
+)
+
+// Fragment represents a websocket data fragment
+struct Fragment {
+ data []byte // payload data included in the fragment
+ opcode OPCode // interpretation of the payload data
+}
+
+// Frame represents a data frame header
+struct Frame {
+mut:
+ // length of the websocket header part
+ header_len int = 2
+ // size of total frame
+ frame_size int = 2
+ fin bool // true if final fragment of message
+ rsv1 bool // reserved for future use in websocket RFC
+ rsv2 bool // reserved for future use in websocket RFC
+ rsv3 bool // reserved for future use in websocket RFC
+ opcode OPCode // interpretation of the payload data
+ has_mask bool // true if the payload data is masked
+ payload_len int // payload length
+ masking_key [4]byte // all frames from client to server are masked with this key
+}
+
+const (
+ invalid_close_codes = [999, 1004, 1005, 1006, 1014, 1015, 1016, 1100, 2000, 2999, 5000, 65536]
+)
+
+// validate_frame validates the frame rules from RFC 6455
+pub fn (mut ws Client) validate_frame(frame &Frame) ? {
+ if frame.rsv1 || frame.rsv2 || frame.rsv3 {
+ ws.close(1002, 'rsv cannot be other than 0, not negotiated') ?
+ return error('rsv cannot be other than 0, not negotiated')
+ }
+ if (int(frame.opcode) >= 3 && int(frame.opcode) <= 7)
+ || (int(frame.opcode) >= 11 && int(frame.opcode) <= 15) {
+ ws.close(1002, 'use of reserved opcode') ?
+ return error('use of reserved opcode')
+ }
+ if frame.has_mask && !ws.is_server {
+ // server should never send masked frames
+ // to client, close connection
+ ws.close(1002, 'client got masked frame') ?
+ return error('server sent masked frame to client')
+ }
+ if is_control_frame(frame.opcode) {
+ if !frame.fin {
+ ws.close(1002, 'control message must not be fragmented') ?
+ return error('unexpected control frame with no fin')
+ }
+ if frame.payload_len > 125 {
+ ws.close(1002, 'control frames must not exceed 125 bytes') ?
+ return error('unexpected control frame payload length')
+ }
+ }
+ if frame.fin == false && ws.fragments.len == 0 && frame.opcode == .continuation {
+ err_msg := 'unexpected continuation, there are no frames to continue, $frame'
+ ws.close(1002, err_msg) ?
+ return error(err_msg)
+ }
+}
+
+// is_control_frame returns true if the frame is a control frame
+fn is_control_frame(opcode OPCode) bool {
+ return opcode !in [.text_frame, .binary_frame, .continuation]
+}
+
+// is_data_frame returns true if the frame is a data frame
+fn is_data_frame(opcode OPCode) bool {
+ return opcode in [.text_frame, .binary_frame]
+}
+
+// read_payload reads the message payload from the socket
+fn (mut ws Client) read_payload(frame &Frame) ?[]byte {
+ if frame.payload_len == 0 {
+ return []byte{}
+ }
+ mut buffer := []byte{cap: frame.payload_len}
+ mut read_buf := [1]byte{}
+ mut bytes_read := 0
+ for bytes_read < frame.payload_len {
+ len := ws.socket_read_ptr(&read_buf[0], 1) ?
+ if len != 1 {
+ return error('expected to read 1 byte, got zero')
+ }
+ bytes_read += len
+ buffer << read_buf[0]
+ }
+ if bytes_read != frame.payload_len {
+ return error('failed to read payload')
+ }
+ if frame.has_mask {
+ for i in 0 .. frame.payload_len {
+ buffer[i] ^= frame.masking_key[i % 4] & 0xff
+ }
+ }
+ return buffer
+}
+
+// validate_utf_8 validates payload for valid utf8 encoding
+// - a future implementation needs to support fail-fast utf8 errors for strict autobahn conformance
+fn (mut ws Client) validate_utf_8(opcode OPCode, payload []byte) ? {
+ if opcode in [.text_frame, .close] && !utf8.validate(payload.data, payload.len) {
+ ws.logger.error('malformed utf8 payload, payload len: ($payload.len)')
+ ws.send_error_event('Received malformed utf8.')
+ ws.close(1007, 'malformed utf8 payload') ?
+ return error('malformed utf8 payload')
+ }
+}
+
+// read_next_message reads 1 to n frames to compose a message
+pub fn (mut ws Client) read_next_message() ?Message {
+ for {
+ frame := ws.parse_frame_header() ?
+ ws.validate_frame(&frame) ?
+ frame_payload := ws.read_payload(&frame) ?
+ if is_control_frame(frame.opcode) {
+ // control frames can be interjected between other frames
+ // and need to be returned immediately
+ msg := Message{
+ opcode: OPCode(frame.opcode)
+ payload: frame_payload.clone()
+ }
+ unsafe { frame_payload.free() }
+ return msg
+ }
+ // if the message is fragmented we just add it to the fragments
+ // a fragment is allowed to have zero size payload
+ if !frame.fin {
+ ws.fragments << &Fragment{
+ data: frame_payload.clone()
+ opcode: frame.opcode
+ }
+ unsafe { frame_payload.free() }
+ continue
+ }
+ if ws.fragments.len == 0 {
+ ws.validate_utf_8(frame.opcode, frame_payload) or {
+ ws.logger.error('UTF8 validation error: $err, len of payload($frame_payload.len)')
+ ws.send_error_event('UTF8 validation error: $err, len of payload($frame_payload.len)')
+ return err
+ }
+ msg := Message{
+ opcode: OPCode(frame.opcode)
+ payload: frame_payload.clone()
+ }
+ unsafe { frame_payload.free() }
+ return msg
+ }
+ defer {
+ ws.fragments = []
+ }
+ if is_data_frame(frame.opcode) {
+ ws.close(0, '') ?
+ return error('Unexpected frame opcode')
+ }
+ payload := ws.payload_from_fragments(frame_payload) ?
+ opcode := ws.opcode_from_fragments()
+ ws.validate_utf_8(opcode, payload) ?
+ msg := Message{
+ opcode: opcode
+ payload: payload.clone()
+ }
+ unsafe {
+ frame_payload.free()
+ payload.free()
+ }
+ return msg
+ }
+ return none
+}
+
+// payload_from_fragments returns the whole payload from a fragmented message
+fn (ws Client) payload_from_fragments(fin_payload []byte) ?[]byte {
+ mut total_size := 0
+ for f in ws.fragments {
+ if f.data.len > 0 {
+ total_size += f.data.len
+ }
+ }
+ total_size += fin_payload.len
+ if total_size == 0 {
+ return []byte{}
+ }
+ mut total_buffer := []byte{cap: total_size}
+ for f in ws.fragments {
+ if f.data.len > 0 {
+ total_buffer << f.data
+ }
+ }
+ total_buffer << fin_payload
+ return total_buffer
+}
+
+// opcode_from_fragments returns the opcode for the message, taken from the first fragment sent
+fn (ws Client) opcode_from_fragments() OPCode {
+ return OPCode(ws.fragments[0].opcode)
+}
+
+// parse_frame_header parses the header of the next incoming frame
+pub fn (mut ws Client) parse_frame_header() ?Frame {
+ mut buffer := [256]byte{}
+ mut bytes_read := 0
+ mut frame := Frame{}
+ mut rbuff := [1]byte{}
+ mut mask_end_byte := 0
+ for ws.state == .open {
+ read_bytes := ws.socket_read_ptr(&rbuff[0], 1) ?
+ if read_bytes == 0 {
+ // this is probably a timeout or close
+ continue
+ }
+ buffer[bytes_read] = rbuff[0]
+ bytes_read++
+ // parses the first two header bytes to get basic frame information
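+ // byte 0: FIN (0x80), RSV1-3 (0x40/0x20/0x10) and the opcode
+ // byte 1: MASK bit (0x80) and the 7-bit payload length; 126 and 127 signal extended lengths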
+ if bytes_read == u64(websocket.header_len_offset) {
+ frame.fin = (buffer[0] & 0x80) == 0x80
+ frame.rsv1 = (buffer[0] & 0x40) == 0x40
+ frame.rsv2 = (buffer[0] & 0x20) == 0x20
+ frame.rsv3 = (buffer[0] & 0x10) == 0x10
+ frame.opcode = OPCode(int(buffer[0] & 0x7F))
+ frame.has_mask = (buffer[1] & 0x80) == 0x80
+ frame.payload_len = buffer[1] & 0x7F
+ // if the frame has a mask, set the byte position where the mask ends
+ if frame.has_mask {
+ mask_end_byte = if frame.payload_len < 126 {
+ websocket.header_len_offset + 4
+ } else if frame.payload_len == 126 {
+ websocket.header_len_offset + 6
+ } else if frame.payload_len == 127 {
+ websocket.header_len_offset + 12
+ } else {
+ 0
+ } // impossible
+ }
+ frame.payload_len = frame.payload_len
+ frame.frame_size = frame.header_len + frame.payload_len
+ if !frame.has_mask && frame.payload_len < 126 {
+ break
+ }
+ }
+ if frame.payload_len == 126 && bytes_read == u64(websocket.extended_payload16_end_byte) {
+ frame.header_len += 2
+ frame.payload_len = 0
+ frame.payload_len |= buffer[2] << 8
+ frame.payload_len |= buffer[3]
+ frame.frame_size = frame.header_len + frame.payload_len
+ if !frame.has_mask {
+ break
+ }
+ }
+ if frame.payload_len == 127 && bytes_read == u64(websocket.extended_payload64_end_byte) {
+ frame.header_len += 8
+ // these shift operations need 64-bit operands on clang with the -prod flag
+ mut payload_len := u64(0)
+ payload_len |= u64(buffer[2]) << 56
+ payload_len |= u64(buffer[3]) << 48
+ payload_len |= u64(buffer[4]) << 40
+ payload_len |= u64(buffer[5]) << 32
+ payload_len |= u64(buffer[6]) << 24
+ payload_len |= u64(buffer[7]) << 16
+ payload_len |= u64(buffer[8]) << 8
+ payload_len |= u64(buffer[9])
+ frame.payload_len = int(payload_len)
+ if !frame.has_mask {
+ break
+ }
+ }
+ if frame.has_mask && bytes_read == mask_end_byte {
+ frame.masking_key[0] = buffer[mask_end_byte - 4]
+ frame.masking_key[1] = buffer[mask_end_byte - 3]
+ frame.masking_key[2] = buffer[mask_end_byte - 2]
+ frame.masking_key[3] = buffer[mask_end_byte - 1]
+ break
+ }
+ }
+ return frame
+}
+
+// unmask_sequence unmasks the given buffer using the frame masking key
+fn (f Frame) unmask_sequence(mut buffer []byte) {
+ for i in 0 .. buffer.len {
+ buffer[i] ^= f.masking_key[i % 4] & 0xff
+ }
+}
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/README.md b/v_windows/v/vlib/net/websocket/tests/autobahn/README.md
new file mode 100644
index 0000000..40724ee
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/README.md
@@ -0,0 +1,20 @@
+# Autobahn tests
+
+These are the automatic Autobahn tests that run on build.
+The performance tests are skipped due to timeouts in GitHub Actions.
+
+## Run it locally
+
+### Test the client
+
+This is how to test the client:
+
+1. Run the docker Autobahn test suite by running `docker-compose up`
+2. From the `local_run` folder, compile and run `autobahn_client.v` to test plain ws (no TLS) and
+`autobahn_client_wss.v` to run the TLS tests
+3. Open `http://localhost:8080` to browse the client test results for non-TLS, and `https://localhost:8081`
+if you ran the wss tests (it uses a local certificate, so you will get a trust error; just accept it and continue)
+
+### Test the server
+
+Todo: add information here \ No newline at end of file
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client.v b/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client.v
new file mode 100644
index 0000000..c65fdab
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client.v
@@ -0,0 +1,33 @@
+// use this program to test the websocket client against the autobahn test suite
+module main
+
+import net.websocket
+
+fn main() {
+ for i in 1 .. 304 {
+ println('\ncase: $i')
+ handle_case(i) or { println('error should be ok: $err') }
+ }
+ // update the reports
+ uri := 'ws://autobahn_server:9001/updateReports?agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn handle_case(case_nr int) ? {
+ uri := 'ws://autobahn_server:9001/runCase?case=$case_nr&agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.on_message(on_message)
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn on_message(mut ws websocket.Client, msg &websocket.Message) ? {
+ // the autobahn tests expect the same message to be sent back
+ if msg.opcode == .pong {
+ // only text and binary messages are echoed back to autobahn; pongs are ignored
+ return
+ }
+ ws.write(msg.payload, msg.opcode) or { panic(err) }
+}
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client_wss.v b/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client_wss.v
new file mode 100644
index 0000000..c7a3c25
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_client_wss.v
@@ -0,0 +1,35 @@
+// use this program to test the websocket client against the autobahn test suite
+module main
+
+import net.websocket
+
+fn main() {
+ for i in 1 .. 304 {
+ println('\ncase: $i')
+ handle_case(i) or { println('error should be ok: $err') }
+ }
+ // update the reports
+ // uri := 'wss://localhost:9002/updateReports?agent=v-client'
+ uri := 'wss://autobahn_server_wss:9002/updateReports?agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn handle_case(case_nr int) ? {
+ uri := 'wss://autobahn_server_wss:9002/runCase?case=$case_nr&agent=v-client'
+ // uri := 'wss://localhost:9002/runCase?case=$case_nr&agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.on_message(on_message)
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn on_message(mut ws websocket.Client, msg &websocket.Message) ? {
+ // the autobahn tests expect the same message to be sent back
+ if msg.opcode == .pong {
+ // only text and binary messages are echoed back to autobahn; pongs are ignored
+ return
+ }
+ ws.write(msg.payload, msg.opcode) or { panic(err) }
+}
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_server.v b/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_server.v
new file mode 100644
index 0000000..0493ca9
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/autobahn_server.v
@@ -0,0 +1,27 @@
+// use this to test websocket server to the autobahn test
+module main
+
+import net.websocket
+
+fn main() {
+ mut s := websocket.new_server(.ip6, 9002, '/')
+ s.on_message(on_message)
+ s.listen() or { panic(err) }
+}
+
+fn handle_case(case_nr int) ? {
+ uri := 'ws://localhost:9002/runCase?case=$case_nr&agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.on_message(on_message)
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn on_message(mut ws websocket.Client, msg &websocket.Message) ? {
+ // the autobahn tests expect the same message to be sent back
+ if msg.opcode == .pong {
+ // only text and binary messages are echoed back to autobahn; pongs are ignored
+ return
+ }
+ ws.write(msg.payload, msg.opcode) or { panic(err) }
+}
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/docker-compose.yml b/v_windows/v/vlib/net/websocket/tests/autobahn/docker-compose.yml
new file mode 100644
index 0000000..30b58ec
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/docker-compose.yml
@@ -0,0 +1,21 @@
+version: '3'
+services:
+ server:
+ container_name: autobahn_server
+ build: fuzzing_server
+
+ ports:
+ - "9001:9001"
+ - "8080:8080"
+ server_wss:
+ container_name: autobahn_server_wss
+ build: fuzzing_server_wss
+
+ ports:
+ - "9002:9002"
+ - "8081:8080"
+ client:
+ container_name: autobahn_client
+ build:
+ dockerfile: vlib/net/websocket/tests/autobahn/ws_test/Dockerfile
+ context: ../../../../../
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/Dockerfile b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/Dockerfile
new file mode 100644
index 0000000..ca5201b
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/Dockerfile
@@ -0,0 +1,5 @@
+FROM crossbario/autobahn-testsuite
+COPY check_results.py /check_results.py
+RUN chmod +x /check_results.py
+
+COPY config /config
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/check_results.py b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/check_results.py
new file mode 100644
index 0000000..9275c3c
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/check_results.py
@@ -0,0 +1,46 @@
+import json
+
+nr_of_client_errs = 0
+nr_of_client_tests = 0
+
+nr_of_server_errs = 0
+nr_of_server_tests = 0
+
+with open("/reports/clients/index.json") as f:
+ data = json.load(f)
+
+ for i in data["v-client"]:
+ # Count errors
+ if (
+ data["v-client"][i]["behavior"] == "FAILED"
+ or data["v-client"][i]["behaviorClose"] == "FAILED"
+ ):
+ nr_of_client_errs = nr_of_client_errs + 1
+
+ nr_of_client_tests = nr_of_client_tests + 1
+
+with open("/reports/servers/index.json") as f:
+ data = json.load(f)
+
+ for i in data["AutobahnServer"]:
+ if (
+ data["AutobahnServer"][i]["behavior"] == "FAILED"
+ or data["AutobahnServer"][i]["behaviorClose"] == "FAILED"
+ ):
+ nr_of_server_errs = nr_of_server_errs + 1
+
+ nr_of_server_tests = nr_of_server_tests + 1
+
+if nr_of_client_errs > 0 or nr_of_server_errs > 0:
+ print(
+ "FAILED AUTOBAHN TESTS, CLIENT ERRORS {0}(of {1}), SERVER ERRORS {2}(of {3})".format(
+ nr_of_client_errs, nr_of_client_tests, nr_of_server_errs, nr_of_server_tests
+ )
+ )
+ exit(1)
+
+print(
+ "TEST SUCCESS!, CLIENT TESTS({0}), SERVER TESTS ({1})".format(
+ nr_of_client_tests, nr_of_server_tests
+ )
+)
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingclient.json b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingclient.json
new file mode 100644
index 0000000..b5efbb8
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingclient.json
@@ -0,0 +1,22 @@
+{
+ "options": {
+ "failByDrop": false
+ },
+ "outdir": "./reports/servers",
+ "servers": [
+ {
+ "agent": "AutobahnServer",
+ "url": "ws://autobahn_client:9002"
+ }
+ ],
+ "cases": [
+ "*"
+ ],
+ "exclude-cases": [
+ "9.*",
+ "11.*",
+ "12.*",
+ "13.*"
+ ],
+ "exclude-agent-cases": {}
+} \ No newline at end of file
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingserver.json b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingserver.json
new file mode 100644
index 0000000..3b044a1
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server/config/fuzzingserver.json
@@ -0,0 +1,14 @@
+{
+ "url": "ws://127.0.0.1:9001",
+ "outdir": "./reports/clients",
+ "cases": [
+ "*"
+ ],
+ "exclude-cases": [
+ "9.*",
+ "11.*",
+ "12.*",
+ "13.*"
+ ],
+ "exclude-agent-cases": {}
+} \ No newline at end of file
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/Dockerfile b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/Dockerfile
new file mode 100644
index 0000000..67114c4
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/Dockerfile
@@ -0,0 +1,9 @@
+FROM crossbario/autobahn-testsuite
+COPY check_results.py /check_results.py
+RUN chmod +x /check_results.py
+
+COPY config /config
+RUN chmod +rx /config/server.crt
+RUN chmod +rx /config/server.key
+
+EXPOSE 9002 9002 \ No newline at end of file
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/check_results.py b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/check_results.py
new file mode 100644
index 0000000..d75904c
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/check_results.py
@@ -0,0 +1,35 @@
+import json
+
+nr_of_client_errs = 0
+nr_of_client_tests = 0
+
+nr_of_server_errs = 0
+nr_of_server_tests = 0
+
+with open("/reports/clients/index.json") as f:
+ data = json.load(f)
+
+ for i in data["v-client"]:
+ # Count errors
+ if (
+ data["v-client"][i]["behavior"] == "FAILED"
+ or data["v-client"][i]["behaviorClose"] == "FAILED"
+ ):
+ nr_of_client_errs = nr_of_client_errs + 1
+
+ nr_of_client_tests = nr_of_client_tests + 1
+
+
+if nr_of_client_errs > 0 or nr_of_server_errs > 0:
+ print(
+ "FAILED AUTOBAHN TESTS, CLIENT ERRORS {0}(of {1}), SERVER ERRORS {2}(of {3})".format(
+ nr_of_client_errs, nr_of_client_tests, nr_of_server_errs, nr_of_server_tests
+ )
+ )
+ exit(1)
+
+print(
+ "TEST SUCCESS!, CLIENT TESTS({0}), SERVER TESTS ({1})".format(
+ nr_of_client_tests, nr_of_server_tests
+ )
+)
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/fuzzingserver.json b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/fuzzingserver.json
new file mode 100644
index 0000000..494dfff
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/fuzzingserver.json
@@ -0,0 +1,16 @@
+{
+ "url": "wss://127.0.0.1:9002",
+ "outdir": "./reports/clients",
+ "key": "/config/server.key",
+ "cert": "/config/server.crt",
+ "cases": [
+ "*"
+ ],
+ "exclude-cases": [
+ "9.*",
+ "11.*",
+ "12.*",
+ "13.*"
+ ],
+ "exclude-agent-cases": {}
+} \ No newline at end of file
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.crt b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.crt
new file mode 100644
index 0000000..d4071d1
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDETCCAfkCFAtFKlcdB3jhD+AXPul81dwmZcs/MA0GCSqGSIb3DQEBCwUAMEUx
+CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
+cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjAxMTIxMDgyNjQ5WhcNMzAxMTE5MDgy
+NjQ5WjBFMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UE
+CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEAnbysLfcIr9+wpoJjb5r728j2e07agedOzh8VLuGnHqmKOUPN
+f8Ik707kEoBcFY7UM2A9G/1RMIysGp8eleQLMtNdeYc3KlKHBGFrOM3i4gCd7G44
+lERuKP1PKzRQ6RdVNUXn51XjfxjHWo7kHCEVvZowxvzxLxhwbSwmEmgzcQ1T6vj6
+Cdop87sdq00F+eOCfTdy+cl+R65sbImVdfY4EQ0QWAVdF3X6njLjpdmteppggbEa
+ECv3R3qNIV7/rflIPm1efbqp7R1ugvjLPJZ1u12ovtqkgsWbnEyzST8hbEEjsOTJ
+/cPkH2DaLdh7fMgfcVmqnYXd9T+gpsNGv98DjwIDAQABMA0GCSqGSIb3DQEBCwUA
+A4IBAQBG9GxUOjcrFd1ept9AOTzbxvIUvBiqIEzrL2/+3T1yPPAWQzOmBfZhIVVm
+EZeeU3xcvd7+AmX+2FPCAD+evjSHjKY048X1YksQS7mYChSgeJiknoJi3mAEAyw6
+oYGVkETksZLQfXtWTjgljbIQrwTA1s+EW0jvmvaJnWD3/8nFqmfly2/kxVsTcGEa
+wJGEUS53Cq6y6lLZ+ojjjj1iVCQ94U6L/0xPB9hgXOyL2+iQj+n38ruatnUNF77C
+UKS7N9BFF42eqVY83Xab0m25s93m8Z7J/63qu0eeA8p5t7+8lbGvOYpwReknLRMf
+pJfgSEWqWfSaetihbJl2Fmzg2SeJ
+-----END CERTIFICATE-----
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.csr b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.csr
new file mode 100644
index 0000000..6013ea9
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.csr
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICijCCAXICAQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx
+ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAJ28rC33CK/fsKaCY2+a+9vI9ntO2oHnTs4fFS7h
+px6pijlDzX/CJO9O5BKAXBWO1DNgPRv9UTCMrBqfHpXkCzLTXXmHNypShwRhazjN
+4uIAnexuOJREbij9Tys0UOkXVTVF5+dV438Yx1qO5BwhFb2aMMb88S8YcG0sJhJo
+M3ENU+r4+gnaKfO7HatNBfnjgn03cvnJfkeubGyJlXX2OBENEFgFXRd1+p4y46XZ
+rXqaYIGxGhAr90d6jSFe/635SD5tXn26qe0dboL4yzyWdbtdqL7apILFm5xMs0k/
+IWxBI7Dkyf3D5B9g2i3Ye3zIH3FZqp2F3fU/oKbDRr/fA48CAwEAAaAAMA0GCSqG
+SIb3DQEBCwUAA4IBAQARfNhaiioyJPZZ8Hkf9UPbi85djYLDYCC9EqBPHpYpGh15
+WdRsTModg/X5DeGwtWwRyGSP2ROMWa1NB5RHZ9buIgCIOeszhAvXVaQMlHmpNhSD
+/hWKGGpAEq12TKHxgi9eTOE2u9MhoJf1G6iGffVsHc8r52THvGqKBp3Bi8G1Pl6L
+2J1f5qX42K1DEnCx0gGnQkydO6E4UnMbsaDSFSODQwg5LpzSYoYUfpYHstMpqAqL
+rcEt869YKjemKuTCzHODWxfqlvVr9GctNjKG2WtoqnX+10x3tw/9lsNRKUelCQxb
+E56eujAoQdMxQ4OjwSnc/gbpWa5gXKYjpgAfx2kY
+-----END CERTIFICATE REQUEST-----
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.key b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.key
new file mode 100644
index 0000000..05c9d77
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAnbysLfcIr9+wpoJjb5r728j2e07agedOzh8VLuGnHqmKOUPN
+f8Ik707kEoBcFY7UM2A9G/1RMIysGp8eleQLMtNdeYc3KlKHBGFrOM3i4gCd7G44
+lERuKP1PKzRQ6RdVNUXn51XjfxjHWo7kHCEVvZowxvzxLxhwbSwmEmgzcQ1T6vj6
+Cdop87sdq00F+eOCfTdy+cl+R65sbImVdfY4EQ0QWAVdF3X6njLjpdmteppggbEa
+ECv3R3qNIV7/rflIPm1efbqp7R1ugvjLPJZ1u12ovtqkgsWbnEyzST8hbEEjsOTJ
+/cPkH2DaLdh7fMgfcVmqnYXd9T+gpsNGv98DjwIDAQABAoIBAE+IFfiHGiYzT0pl
+a+WV62+CAGVj+OCO1Dkxiui8dhsLuNnuyeqk5SKUUILTnZpxDaVp3OYD76/e/dfe
+avmApfTWhccE2lfIjLM0u29EwCTb0sSnPnfjmPep4QUTt8gPL7NQsAEAWVh4Eewj
+J/jW5bNXz0hFuQXZ+LXTEM8vIuDY4M0RX/jhEcCVr3QH8Sp/6JEeRY2Mbn5Z6LZ+
+BVuu8e4sCpamWOOWfoIQq3e3TbATFSNP9vzPLKvxwwAw9g5dAKPn3dvem8ofzaaF
+MeJ6T485mnx0naBrI+1qHLb3QcRpSZp6uEOp/4uvkCFm9S3dBGIwOGwHcybWFfFr
+StPfccECgYEAzN2f1BcvL3rt4970lG/MGNeLMpF7h7aWca0DzUNY5sCh+kvENHrD
+U4nH5EHoqxB1c036LKBhsrrrk5F/eQ8M+QEqpKUfqAYUrfy+HRAAeTYbhLkCysrL
++X/mlqYeyzMHj4Pjy5rqoy2TnJFnfIZYwYOL/OfA9IPwGpW2rxVSk1cCgYEAxRul
+9j0Ii3Te08TprfriDpAFQyLH54vqVwe8mkox3cdOyYvUNHdEmDNh3/7dadxVKsIx
+gIkPdGcizOw4elLKWnNFQN3+dCc3LN/zhsop0a6Ow2IatWQ8qOSqNYtD2DGj0w3j
+cJ/BZfacpr/OkAv0kjanYw4+ZSIH/r3Vjdli5okCgYBXltni4Ba4giJ7rrN7U2E7
+rcxBzpm2KIaiC4r4k7bK0clvLj2xAlvIt7vTB6rmmJ7esZQoyFl9BRX7fdW2eIzf
+WXRV+JNUT2VADjNqUZEiQdP6Ju/erF4RSnHYLyYzUpoE7irSvmVbZv0Zj8FjKD2C
+Xy/W7W8+G7roYuI8cS1g+QKBgQCDoHwK3SU4o9ouB0CZ64FMgkbRV4exi9D5P3Rm
+gIeed/uYQiV6x+7pyN5ijDtl9zp0rGwMTvsgG8O0n0b0AReaoYGs2NKU1J9W+1MQ
+Py8AFJbHyVrWqVKM4u77hL3QwQ2K4qpwym6HXdGs1UfnD+TKQ28yig+Gz9wQ9MqI
+yJPwKQKBgQCmZxhmX1SUe3DVnVulMHDLUldbRbFns0VZLiSDhY+hjOAEmnvEdEHp
+6L8/gvdTqUPF/VZQSQiZlii1oTIapQClI2oLfHcGytSorB+bpL7PxAKABp0pA6BS
+JkXzEiV1h5anbxiwid5ZICt6QGQvGvBF7b1VSb+8p9WglLBWZo36pw==
+-----END RSA PRIVATE KEY-----
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.pem b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.pem
new file mode 100644
index 0000000..d4071d1
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/fuzzing_server_wss/config/server.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDETCCAfkCFAtFKlcdB3jhD+AXPul81dwmZcs/MA0GCSqGSIb3DQEBCwUAMEUx
+CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
+cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjAxMTIxMDgyNjQ5WhcNMzAxMTE5MDgy
+NjQ5WjBFMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UE
+CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOC
+AQ8AMIIBCgKCAQEAnbysLfcIr9+wpoJjb5r728j2e07agedOzh8VLuGnHqmKOUPN
+f8Ik707kEoBcFY7UM2A9G/1RMIysGp8eleQLMtNdeYc3KlKHBGFrOM3i4gCd7G44
+lERuKP1PKzRQ6RdVNUXn51XjfxjHWo7kHCEVvZowxvzxLxhwbSwmEmgzcQ1T6vj6
+Cdop87sdq00F+eOCfTdy+cl+R65sbImVdfY4EQ0QWAVdF3X6njLjpdmteppggbEa
+ECv3R3qNIV7/rflIPm1efbqp7R1ugvjLPJZ1u12ovtqkgsWbnEyzST8hbEEjsOTJ
+/cPkH2DaLdh7fMgfcVmqnYXd9T+gpsNGv98DjwIDAQABMA0GCSqGSIb3DQEBCwUA
+A4IBAQBG9GxUOjcrFd1ept9AOTzbxvIUvBiqIEzrL2/+3T1yPPAWQzOmBfZhIVVm
+EZeeU3xcvd7+AmX+2FPCAD+evjSHjKY048X1YksQS7mYChSgeJiknoJi3mAEAyw6
+oYGVkETksZLQfXtWTjgljbIQrwTA1s+EW0jvmvaJnWD3/8nFqmfly2/kxVsTcGEa
+wJGEUS53Cq6y6lLZ+ojjjj1iVCQ94U6L/0xPB9hgXOyL2+iQj+n38ruatnUNF77C
+UKS7N9BFF42eqVY83Xab0m25s93m8Z7J/63qu0eeA8p5t7+8lbGvOYpwReknLRMf
+pJfgSEWqWfSaetihbJl2Fmzg2SeJ
+-----END CERTIFICATE-----
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/Dockerfile b/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/Dockerfile
new file mode 100644
index 0000000..ee39644
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/Dockerfile
@@ -0,0 +1,12 @@
+# Use this as docker builder with https://github.com/nektos/act
+# build with: docker build tests/autobahn/. -t myimage
+# use in act: act -P ubuntu-latest=myimage
+
+FROM node:12.6-buster-slim
+
+COPY config/fuzzingserver.json /config/fuzzingserver.json
+RUN chmod +775 /config/fuzzingserver.json
+RUN apt-get update && \
+ apt-get install -y \
+ docker \
+ docker-compose \ No newline at end of file
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client.v b/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client.v
new file mode 100644
index 0000000..ef5b281
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client.v
@@ -0,0 +1,33 @@
+// use this program to test the websocket client against the autobahn test suite
+module main
+
+import net.websocket
+
+fn main() {
+ for i in 1 .. 304 {
+ println('\ncase: $i')
+ handle_case(i) or { println('error should be ok: $err') }
+ }
+ // update the reports
+ uri := 'ws://localhost:9001/updateReports?agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn handle_case(case_nr int) ? {
+ uri := 'ws://localhost:9001/runCase?case=$case_nr&agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.on_message(on_message)
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn on_message(mut ws websocket.Client, msg &websocket.Message) ? {
+ // autobahn tests expects to send same message back
+ if msg.opcode == .pong {
+ // We just wanna pass text and binary message back to autobahn
+ return
+ }
+ ws.write(msg.payload, msg.opcode) or { panic(err) }
+}
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client_wss.v b/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client_wss.v
new file mode 100644
index 0000000..c7a3c25
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/local_run/autobahn_client_wss.v
@@ -0,0 +1,35 @@
+// use this program to test the websocket client against the autobahn test suite
+module main
+
+import net.websocket
+
+fn main() {
+ for i in 1 .. 304 {
+ println('\ncase: $i')
+ handle_case(i) or { println('error should be ok: $err') }
+ }
+ // update the reports
+ // uri := 'wss://localhost:9002/updateReports?agent=v-client'
+ uri := 'wss://autobahn_server_wss:9002/updateReports?agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn handle_case(case_nr int) ? {
+ uri := 'wss://autobahn_server_wss:9002/runCase?case=$case_nr&agent=v-client'
+ // uri := 'wss://localhost:9002/runCase?case=$case_nr&agent=v-client'
+ mut ws := websocket.new_client(uri) ?
+ ws.on_message(on_message)
+ ws.connect() ?
+ ws.listen() ?
+}
+
+fn on_message(mut ws websocket.Client, msg &websocket.Message) ? {
+ // autobahn tests expects to send same message back
+ if msg.opcode == .pong {
+ // We just wanna pass text and binary message back to autobahn
+ return
+ }
+ ws.write(msg.payload, msg.opcode) or { panic(err) }
+}
diff --git a/v_windows/v/vlib/net/websocket/tests/autobahn/ws_test/Dockerfile b/v_windows/v/vlib/net/websocket/tests/autobahn/ws_test/Dockerfile
new file mode 100644
index 0000000..b57cffd
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/tests/autobahn/ws_test/Dockerfile
@@ -0,0 +1,12 @@
+FROM thevlang/vlang:buster-build
+
+
+COPY ./ /src/
+
+WORKDIR /src
+
+RUN make CC=clang
+
+RUN /src/v /src/vlib/net/websocket/tests/autobahn/autobahn_server.v
+RUN chmod +x /src/vlib/net/websocket/tests/autobahn/autobahn_server
+ENTRYPOINT [ "/src/vlib/net/websocket/tests/autobahn/autobahn_server" ]
diff --git a/v_windows/v/vlib/net/websocket/uri.v b/v_windows/v/vlib/net/websocket/uri.v
new file mode 100644
index 0000000..7d388e1
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/uri.v
@@ -0,0 +1,16 @@
+module websocket
+
+// Uri represents a URI for websocket connections
+struct Uri {
+mut:
+ url string // url to the websocket endpoint
+ hostname string // hostname of the websocket endpoint
+ port string // port of the websocket endpoint
+ resource string // resource of the websocket endpoint
+ querystring string // query string of the websocket endpoint
+}
+
+// str returns the string representation of the Uri
+pub fn (u Uri) str() string {
+ return u.url
+}
diff --git a/v_windows/v/vlib/net/websocket/utils.v b/v_windows/v/vlib/net/websocket/utils.v
new file mode 100644
index 0000000..4e48359
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/utils.v
@@ -0,0 +1,54 @@
+module websocket
+
+import rand
+import crypto.sha1
+import encoding.base64
+
+// htonl64 converts a payload length to the 8 header length bytes in network byte order (big-endian)
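+// example: htonl64(258) returns [0, 0, 0, 0, 0, 0, 1, 2] (most significant byte first)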
+fn htonl64(payload_len u64) []byte {
+ mut ret := []byte{len: 8}
+ ret[0] = byte(((payload_len & (u64(0xff) << 56)) >> 56) & 0xff)
+ ret[1] = byte(((payload_len & (u64(0xff) << 48)) >> 48) & 0xff)
+ ret[2] = byte(((payload_len & (u64(0xff) << 40)) >> 40) & 0xff)
+ ret[3] = byte(((payload_len & (u64(0xff) << 32)) >> 32) & 0xff)
+ ret[4] = byte(((payload_len & (u64(0xff) << 24)) >> 24) & 0xff)
+ ret[5] = byte(((payload_len & (u64(0xff) << 16)) >> 16) & 0xff)
+ ret[6] = byte(((payload_len & (u64(0xff) << 8)) >> 8) & 0xff)
+ ret[7] = byte(((payload_len & (u64(0xff) << 0)) >> 0) & 0xff)
+ return ret
+}
+
+// create_masking_key returns a new masking key to use when masking websocket messages
+fn create_masking_key() []byte {
+ // fill all four key bytes with random values instead of copying
+ // 4 bytes from a single-byte variable
+ mut buf := []byte{len: 4}
+ for i in 0 .. 4 {
+ buf[i] = byte(rand.intn(255))
+ }
+ return buf
+}
+
+// create_key_challenge_response creates a key challenge response from the security key
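+// example (from RFC 6455): a key of 'dGhlIHNhbXBsZSBub25jZQ==' yields 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='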
+fn create_key_challenge_response(seckey string) ?string {
+ if seckey.len == 0 {
+ return error('unexpected seckey length zero')
+ }
+ guid := '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+ sha1buf := seckey + guid
+ shabytes := sha1buf.bytes()
+ hash := sha1.sum(shabytes)
+ b64 := base64.encode(hash)
+ unsafe {
+ hash.free()
+ shabytes.free()
+ }
+ return b64
+}
+
+// get_nonce creates a random nonce string used in the handshake process
+fn get_nonce(nonce_size int) string {
+ mut nonce := []byte{len: nonce_size, cap: nonce_size}
+ alphanum := '0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz'
+ for i in 0 .. nonce_size {
+ nonce[i] = alphanum[rand.intn(alphanum.len)]
+ }
+ return unsafe { tos(nonce.data, nonce.len) }.clone()
+}
diff --git a/v_windows/v/vlib/net/websocket/websocket_client.v b/v_windows/v/vlib/net/websocket/websocket_client.v
new file mode 100644
index 0000000..48f8c5c
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/websocket_client.v
@@ -0,0 +1,488 @@
+// websocket module implements a websocket client and a websocket server
+// attribution: @thecoderr the author of original websocket client
+[manualfree]
+module websocket
+
+import net
+import net.http
+import net.openssl
+import net.urllib
+import time
+import log
+import rand
+
+const (
+ empty_bytearr = []byte{} // used as empty response to avoid allocation
+)
+
+// Client represents websocket client
+pub struct Client {
+ is_server bool
+mut:
+ ssl_conn &openssl.SSLConn // secure connection used when wss is used
+ flags []Flag // flags used in handshake
+ fragments []Fragment // current fragments
+ message_callbacks []MessageEventHandler // all callbacks on_message
+ error_callbacks []ErrorEventHandler // all callbacks on_error
+ open_callbacks []OpenEventHandler // all callbacks on_open
+ close_callbacks []CloseEventHandler // all callbacks on_close
+pub:
+ is_ssl bool // true if secure socket is used
+ uri Uri // uri of current connection
+ id string // unique id of client
+pub mut:
+ header http.Header // headers that will be passed when connecting
+ conn &net.TcpConn // underlying TCP socket connection
+ nonce_size int = 16 // size of the nonce used for the handshake key
+ panic_on_callback bool // set to true if callbacks are allowed to panic
+ state State // current state of connection
+ logger &log.Log // logger used to log messages
+ resource_name string // name of current resource
+ last_pong_ut i64 // last time in unix time we got a pong message
+}
+
+// Flag represents different types of headers in websocket handshake
+enum Flag {
+ has_accept // the handshake contains the Sec-WebSocket-Key/Accept header
+ has_connection
+ has_upgrade
+}
+
+// State represents the state of the websocket connection.
+pub enum State {
+ connecting = 0
+ open
+ closing
+ closed
+}
+
+// Message represents a whole message combined from 1 to n frames
+pub struct Message {
+pub:
+ opcode OPCode // websocket frame type of this message
+ payload []byte // payload of the message
+}
+
+// OPCode represents the supported websocket frame types
+pub enum OPCode {
+ continuation = 0x00
+ text_frame = 0x01
+ binary_frame = 0x02
+ close = 0x08
+ ping = 0x09
+ pong = 0x0A
+}
+
+// new_client instantiates a new websocket client
+pub fn new_client(address string) ?&Client {
+ uri := parse_uri(address) ?
+ return &Client{
+ conn: 0
+ is_server: false
+ ssl_conn: openssl.new_ssl_conn()
+ is_ssl: address.starts_with('wss')
+ logger: &log.Log{
+ level: .info
+ }
+ uri: uri
+ state: .closed
+ id: rand.uuid_v4()
+ header: http.new_header()
+ }
+}
+
+// connect connects to remote websocket server
+pub fn (mut ws Client) connect() ? {
+ ws.assert_not_connected() ?
+ ws.set_state(.connecting)
+ ws.logger.info('connecting to host $ws.uri')
+ ws.conn = ws.dial_socket() ?
+ // Todo: make these settings configurable
+ ws.conn.set_read_timeout(time.second * 30)
+ ws.conn.set_write_timeout(time.second * 30)
+ ws.handshake() ?
+ ws.set_state(.open)
+ ws.logger.info('successfully connected to host $ws.uri')
+ ws.send_open_event()
+}
+
+// listen listens and processes incoming messages
+pub fn (mut ws Client) listen() ? {
+ mut log := 'Starting client listener, server($ws.is_server)...'
+ ws.logger.info(log)
+ unsafe { log.free() }
+ defer {
+ ws.logger.info('Quit client listener, server($ws.is_server)...')
+ if ws.state == .open {
+ ws.close(1000, 'closed by client') or {}
+ }
+ }
+ for ws.state == .open {
+ msg := ws.read_next_message() or {
+ if ws.state in [.closed, .closing] {
+ return
+ }
+ ws.debug_log('failed to read next message: $err')
+ ws.send_error_event('failed to read next message: $err')
+ return err
+ }
+ if ws.state in [.closed, .closing] {
+ return
+ }
+ ws.debug_log('got message: $msg.opcode')
+ match msg.opcode {
+ .text_frame {
+ log = 'read: text'
+ ws.debug_log(log)
+ unsafe { log.free() }
+ ws.send_message_event(msg)
+ unsafe { msg.free() }
+ }
+ .binary_frame {
+ ws.debug_log('read: binary')
+ ws.send_message_event(msg)
+ unsafe { msg.free() }
+ }
+ .ping {
+ ws.debug_log('read: ping, sending pong')
+ ws.send_control_frame(.pong, 'PONG', msg.payload) or {
+ ws.logger.error('error in message callback sending PONG: $err')
+ ws.send_error_event('error in message callback sending PONG: $err')
+ if ws.panic_on_callback {
+ panic(err)
+ }
+ continue
+ }
+ if msg.payload.len > 0 {
+ unsafe { msg.free() }
+ }
+ }
+ .pong {
+ ws.debug_log('read: pong')
+ ws.last_pong_ut = time.now().unix
+ ws.send_message_event(msg)
+ if msg.payload.len > 0 {
+ unsafe { msg.free() }
+ }
+ }
+ .close {
+ log = 'read: close'
+ ws.debug_log(log)
+ unsafe { log.free() }
+ defer {
+ ws.manage_clean_close()
+ }
+ if msg.payload.len > 0 {
+ if msg.payload.len == 1 {
+ ws.close(1002, 'close payload cannot be 1 byte') ?
+ return error('close payload cannot be 1 byte')
+ }
+ code := (int(msg.payload[0]) << 8) + int(msg.payload[1])
+ if code in invalid_close_codes {
+ ws.close(1002, 'invalid close code: $code') ?
+ return error('invalid close code: $code')
+ }
+ reason := if msg.payload.len > 2 { msg.payload[2..] } else { []byte{} }
+ if reason.len > 0 {
+ ws.validate_utf_8(.close, reason) ?
+ }
+ if ws.state !in [.closing, .closed] {
+ // sending close back according to spec
+ ws.debug_log('close with reason, code: $code, reason: $reason')
+ r := reason.bytestr()
+ ws.close(code, r) ?
+ }
+ unsafe { msg.free() }
+ } else {
+ if ws.state !in [.closing, .closed] {
+ ws.debug_log('close with reason, no code')
+ // sending close back according to spec
+ ws.close(1000, 'normal') ?
+ }
+ unsafe { msg.free() }
+ }
+ return
+ }
+ .continuation {
+ ws.logger.error('unexpected opcode continuation, nothing to continue')
+ ws.send_error_event('unexpected opcode continuation, nothing to continue')
+ ws.close(1002, 'nothing to continue') ?
+ return error('unexpected opcode continuation, nothing to continue')
+ }
+ }
+ }
+}
+
+// manage_clean_close closes connection in a clean websocket way
+fn (mut ws Client) manage_clean_close() {
+ ws.send_close_event(1000, 'closed by client')
+}
+
+// ping sends a ping message to the server
+pub fn (mut ws Client) ping() ? {
+ ws.send_control_frame(.ping, 'PING', []) ?
+}
+
+// pong sends a pong message to the server
+pub fn (mut ws Client) pong() ? {
+ ws.send_control_frame(.pong, 'PONG', []) ?
+}
+
+// write_ptr writes payload_len bytes from the provided byte pointer with the given websocket message type
+pub fn (mut ws Client) write_ptr(bytes &byte, payload_len int, code OPCode) ?int {
+ // ws.debug_log('write_ptr code: $code')
+ if ws.state != .open || ws.conn.sock.handle < 1 {
+ // todo: send error here later
+ return error('trying to write on a closed socket!')
+ }
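+ // header: 2 base bytes, +2 for a 16-bit extended length, +6 more for a 64-bit length,
+ // plus 4 masking-key bytes when writing as a client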
+ mut header_len := 2 + if payload_len > 125 { 2 } else { 0 } +
+ if payload_len > 0xffff { 6 } else { 0 }
+ if !ws.is_server {
+ header_len += 4
+ }
+ mut header := []byte{len: header_len, init: `0`} // [`0`].repeat(header_len)
+ header[0] = byte(int(code)) | 0x80
+ masking_key := create_masking_key()
+ if ws.is_server {
+ if payload_len <= 125 {
+ header[1] = byte(payload_len)
+ } else if payload_len > 125 && payload_len <= 0xffff {
+ len16 := C.htons(payload_len)
+ header[1] = 126
+ unsafe { C.memcpy(&header[2], &len16, 2) }
+ } else if payload_len > 0xffff && payload_len <= 0x7fffffff {
+ len_bytes := htonl64(u64(payload_len))
+ header[1] = 127
+ unsafe { C.memcpy(&header[2], len_bytes.data, 8) }
+ }
+ } else {
+ if payload_len <= 125 {
+ header[1] = byte(payload_len | 0x80)
+ header[2] = masking_key[0]
+ header[3] = masking_key[1]
+ header[4] = masking_key[2]
+ header[5] = masking_key[3]
+ } else if payload_len > 125 && payload_len <= 0xffff {
+ len16 := C.htons(payload_len)
+ header[1] = (126 | 0x80)
+ unsafe { C.memcpy(&header[2], &len16, 2) }
+ header[4] = masking_key[0]
+ header[5] = masking_key[1]
+ header[6] = masking_key[2]
+ header[7] = masking_key[3]
+ } else if payload_len > 0xffff && payload_len <= 0x7fffffff {
+ len64 := htonl64(u64(payload_len))
+ header[1] = (127 | 0x80)
+ unsafe { C.memcpy(&header[2], len64.data, 8) }
+ header[10] = masking_key[0]
+ header[11] = masking_key[1]
+ header[12] = masking_key[2]
+ header[13] = masking_key[3]
+ } else {
+ ws.close(1009, 'frame too large') ?
+ return error('frame too large')
+ }
+ }
+ len := header.len + payload_len
+ mut frame_buf := []byte{len: len}
+ unsafe {
+ C.memcpy(&frame_buf[0], &byte(header.data), header.len)
+ if payload_len > 0 {
+ C.memcpy(&frame_buf[header.len], bytes, payload_len)
+ }
+ }
+ if !ws.is_server {
+ for i in 0 .. payload_len {
+ frame_buf[header_len + i] ^= masking_key[i % 4] & 0xff
+ }
+ }
+ written_len := ws.socket_write(frame_buf) ?
+ unsafe {
+ frame_buf.free()
+ masking_key.free()
+ header.free()
+ }
+ return written_len
+}
+
+// write writes a byte array with the given websocket message type to the socket
+pub fn (mut ws Client) write(bytes []byte, code OPCode) ?int {
+ return ws.write_ptr(&byte(bytes.data), bytes.len, code)
+}
+
+// write_string writes a string as a websocket text frame to the socket
+pub fn (mut ws Client) write_string(str string) ?int {
+ return ws.write_ptr(str.str, str.len, .text_frame)
+}
+
+// close closes the websocket connection
+pub fn (mut ws Client) close(code int, message string) ? {
+ ws.debug_log('sending close, $code, $message')
+ if ws.state in [.closed, .closing] || ws.conn.sock.handle <= 1 {
+ ws.debug_log('close: websocket already closed ($ws.state), $message, $code handle($ws.conn.sock.handle)')
+ err_msg := 'Socket already closed: $code'
+ return error(err_msg)
+ }
+ defer {
+ ws.shutdown_socket() or {}
+ ws.reset_state()
+ }
+ ws.set_state(.closing)
+ // mut code32 := 0
+ if code > 0 {
+ code_ := C.htons(code)
+ message_len := message.len + 2
+ mut close_frame := []byte{len: message_len}
+ close_frame[0] = byte(code_ & 0xFF)
+ close_frame[1] = byte(code_ >> 8)
+ // code32 = (close_frame[0] << 8) + close_frame[1]
+ for i in 0 .. message.len {
+ close_frame[i + 2] = message[i]
+ }
+ ws.send_control_frame(.close, 'CLOSE', close_frame) ?
+ unsafe { close_frame.free() }
+ } else {
+ ws.send_control_frame(.close, 'CLOSE', []) ?
+ }
+ ws.fragments = []
+}
+
+// send_control_frame sends a control frame to the server
+fn (mut ws Client) send_control_frame(code OPCode, frame_typ string, payload []byte) ? {
+ ws.debug_log('send control frame $code, frame_type: $frame_typ')
+ if ws.state !in [.open, .closing] && ws.conn.sock.handle > 1 {
+ return error('socket is not connected')
+ }
+ header_len := if ws.is_server { 2 } else { 6 }
+ frame_len := header_len + payload.len
+ mut control_frame := []byte{len: frame_len}
+ mut masking_key := if !ws.is_server { create_masking_key() } else { websocket.empty_bytearr }
+ defer {
+ unsafe {
+ control_frame.free()
+ if masking_key.len > 0 {
+ masking_key.free()
+ }
+ }
+ }
+ control_frame[0] = byte(int(code) | 0x80)
+ if !ws.is_server {
+ control_frame[1] = byte(payload.len | 0x80)
+ control_frame[2] = masking_key[0]
+ control_frame[3] = masking_key[1]
+ control_frame[4] = masking_key[2]
+ control_frame[5] = masking_key[3]
+ } else {
+ control_frame[1] = byte(payload.len)
+ }
+ if code == .close {
+ if payload.len >= 2 {
+ if !ws.is_server {
+ mut parsed_payload := []byte{len: payload.len + 1}
+ unsafe { C.memcpy(parsed_payload.data, &payload[0], payload.len) }
+ parsed_payload[payload.len] = `\0`
+ for i in 0 .. payload.len {
+ control_frame[6 + i] = (parsed_payload[i] ^ masking_key[i % 4]) & 0xff
+ }
+ unsafe { parsed_payload.free() }
+ } else {
+ unsafe { C.memcpy(&control_frame[2], &payload[0], payload.len) }
+ }
+ }
+ } else {
+ if !ws.is_server {
+ if payload.len > 0 {
+ for i in 0 .. payload.len {
+ control_frame[header_len + i] = (payload[i] ^ masking_key[i % 4]) & 0xff
+ }
+ }
+ } else {
+ if payload.len > 0 {
+ unsafe { C.memcpy(&control_frame[2], &payload[0], payload.len) }
+ }
+ }
+ }
+ ws.socket_write(control_frame) or {
+ return error('send_control_frame: error sending $frame_typ control frame.')
+ }
+}
+
+// parse_uri parses the url to a Uri
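+// For example, a url like 'wss://echo.example.com/chat?x=1' (placeholder host) would roughly
+// yield hostname 'echo.example.com', port '443', resource '/chat' and querystring '?x=1'.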
+fn parse_uri(url string) ?&Uri {
+ u := urllib.parse(url) ?
+ request_uri := u.request_uri()
+ v := request_uri.split('?')
+ mut port := u.port()
+ uri := u.str()
+ if port == '' {
+ port = if uri.starts_with('ws://') {
+ '80'
+ } else if uri.starts_with('wss://') {
+ '443'
+ } else {
+ u.port()
+ }
+ }
+ querystring := if v.len > 1 { '?' + v[1] } else { '' }
+ return &Uri{
+ url: url
+ hostname: u.hostname()
+ port: port
+ resource: v[0]
+ querystring: querystring
+ }
+}
+
+// set_state sets current state of the websocket connection
+fn (mut ws Client) set_state(state State) {
+ lock {
+ ws.state = state
+ }
+}
+
+// assert_not_connected returns an error if the websocket is already connecting, open or closing
+fn (ws Client) assert_not_connected() ? {
+ match ws.state {
+ .connecting { return error('connect: websocket is connecting') }
+ .open { return error('connect: websocket already open') }
+ .closing { return error('connect: reconnect on closing websocket not supported, please use new client') }
+ else {}
+ }
+}
+
+// reset_state resets the websocket state and initializes default settings
+fn (mut ws Client) reset_state() {
+ lock {
+ ws.state = .closed
+ ws.ssl_conn = openssl.new_ssl_conn()
+ ws.flags = []
+ ws.fragments = []
+ }
+}
+
+// debug_log handles debug logging output for client and server
+fn (mut ws Client) debug_log(text string) {
+ if ws.is_server {
+ ws.logger.debug('server-> $text')
+ } else {
+ ws.logger.debug('client-> $text')
+ }
+}
+
+// free handles manual freeing of the Message struct's memory
+pub fn (m &Message) free() {
+ unsafe { m.payload.free() }
+}
+
+// free handles manual freeing of the Client struct's memory
+pub fn (c &Client) free() {
+ unsafe {
+ c.flags.free()
+ c.fragments.free()
+ c.message_callbacks.free()
+ c.error_callbacks.free()
+ c.open_callbacks.free()
+ c.close_callbacks.free()
+ c.header.free()
+ }
+}
diff --git a/v_windows/v/vlib/net/websocket/websocket_nix.c.v b/v_windows/v/vlib/net/websocket/websocket_nix.c.v
new file mode 100644
index 0000000..f986b98
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/websocket_nix.c.v
@@ -0,0 +1,10 @@
+module websocket
+
+// error_code returns the error code
+fn error_code() int {
+ return C.errno
+}
+
+const (
+ error_ewouldblock = C.EWOULDBLOCK // blocking error code
+)
diff --git a/v_windows/v/vlib/net/websocket/websocket_server.v b/v_windows/v/vlib/net/websocket/websocket_server.v
new file mode 100644
index 0000000..99af3e0
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/websocket_server.v
@@ -0,0 +1,189 @@
+module websocket
+
+import net
+import net.openssl
+import log
+import time
+import rand
+
+// Server represents a websocket server connection
+pub struct Server {
+mut:
+ logger &log.Log // logger used to log
+ ls &net.TcpListener // listener used to get incoming connection to socket
+ accept_client_callbacks []AcceptClientFn // accept client callback functions
+ message_callbacks []MessageEventHandler // new message callback functions
+ close_callbacks []CloseEventHandler // close message callback functions
+pub:
+ family net.AddrFamily = .ip
+	port int // port to listen on for incoming connections
+ is_ssl bool // true if secure connection (not supported yet on server)
+pub mut:
+ clients map[string]&ServerClient // clients connected to this server
+ ping_interval int = 30 // interval for sending ping to clients (seconds)
+ state State // current state of connection
+}
+
+// ServerClient represents a connected client
+struct ServerClient {
+pub:
+	resource_name string // resource that the client accesses
+ client_key string // unique key of client
+pub mut:
+ server &Server
+ client &Client
+}
+
+// new_server instantiates a new websocket server on the provided port and route
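+// A minimal usage sketch (port 8080 and the empty route are arbitrary choices;
+// message/connect handlers would normally be registered before calling listen):
+//	mut s := websocket.new_server(.ip6, 8080, '')
+//	s.listen() or { panic(err) }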
+pub fn new_server(family net.AddrFamily, port int, route string) &Server {
+ return &Server{
+ ls: 0
+ family: family
+ port: port
+ logger: &log.Log{
+ level: .info
+ }
+ state: .closed
+ }
+}
+
+// set_ping_interval sets the interval (in seconds) at which the server sends ping messages to clients
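+// For example, `s.set_ping_interval(10)` would make the server ping its clients every 10 seconds.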
+pub fn (mut s Server) set_ping_interval(seconds int) {
+ s.ping_interval = seconds
+}
+
+// listen starts listening for and processing incoming connections from websocket clients
+pub fn (mut s Server) listen() ? {
+ s.logger.info('websocket server: start listen on port $s.port')
+ s.ls = net.listen_tcp(s.family, ':$s.port') ?
+ s.set_state(.open)
+ go s.handle_ping()
+ for {
+ mut c := s.accept_new_client() or { continue }
+ go s.serve_client(mut c)
+ }
+ s.logger.info('websocket server: end listen on port $s.port')
+}
+
+// close closes the server (not implemented yet)
+fn (mut s Server) close() {
+ // TODO: implement close when moving to net from x.net
+}
+
+// handle_ping sends a ping to all clients at the configured interval
+fn (mut s Server) handle_ping() {
+ mut clients_to_remove := []string{}
+ for s.state == .open {
+ time.sleep(s.ping_interval * time.second)
+ for i, _ in s.clients {
+ mut c := s.clients[i]
+ if c.client.state == .open {
+ c.client.ping() or {
+ s.logger.debug('server-> error sending ping to client')
+ c.client.close(1002, 'Closing connection: ping send error') or {
+ // we want to continue even if error
+ continue
+ }
+ clients_to_remove << c.client.id
+ }
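+				// also drop clients that have not sent a pong within two ping intervals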
+ if (time.now().unix - c.client.last_pong_ut) > s.ping_interval * 2 {
+ clients_to_remove << c.client.id
+ c.client.close(1000, 'no pong received') or { continue }
+ }
+ }
+ }
+ // TODO: replace for with s.clients.delete_all(clients_to_remove) if (https://github.com/vlang/v/pull/6020) merges
+ for client in clients_to_remove {
+ lock {
+ s.clients.delete(client)
+ }
+ }
+ clients_to_remove.clear()
+ }
+}
+
+// serve_client accepts an incoming connection and sets up the callbacks
+fn (mut s Server) serve_client(mut c Client) ? {
+ c.logger.debug('server-> Start serve client ($c.id)')
+ defer {
+ c.logger.debug('server-> End serve client ($c.id)')
+ }
+ mut handshake_response, mut server_client := s.handle_server_handshake(mut c) ?
+ accept := s.send_connect_event(mut server_client) ?
+ if !accept {
+ s.logger.debug('server-> client not accepted')
+ c.shutdown_socket() ?
+ return
+ }
+ // the client is accepted
+ c.socket_write(handshake_response.bytes()) ?
+ lock {
+ s.clients[server_client.client.id] = server_client
+ }
+ s.setup_callbacks(mut server_client)
+ c.listen() or {
+ s.logger.error(err.msg)
+ return err
+ }
+}
+
+// setup_callbacks initializes all callback functions
+fn (mut s Server) setup_callbacks(mut sc ServerClient) {
+ if s.message_callbacks.len > 0 {
+ for cb in s.message_callbacks {
+ if cb.is_ref {
+ sc.client.on_message_ref(cb.handler2, cb.ref)
+ } else {
+ sc.client.on_message(cb.handler)
+ }
+ }
+ }
+ if s.close_callbacks.len > 0 {
+ for cb in s.close_callbacks {
+ if cb.is_ref {
+ sc.client.on_close_ref(cb.handler2, cb.ref)
+ } else {
+ sc.client.on_close(cb.handler)
+ }
+ }
+ }
+	// set a standard close handler so we can remove the client when it closes
+ sc.client.on_close_ref(fn (mut c Client, code int, reason string, mut sc ServerClient) ? {
+ c.logger.debug('server-> Delete client')
+ lock {
+ sc.server.clients.delete(sc.client.id)
+ }
+ }, sc)
+}
+
+// accept_new_client creates a new client instance for a client that connects to the socket
+fn (mut s Server) accept_new_client() ?&Client {
+ mut new_conn := s.ls.accept() ?
+ c := &Client{
+ is_server: true
+ conn: new_conn
+ ssl_conn: openssl.new_ssl_conn()
+ logger: s.logger
+ state: .open
+ last_pong_ut: time.now().unix
+ id: rand.uuid_v4()
+ }
+ return c
+}
+
+// set_state sets the current state in a thread-safe way
+fn (mut s Server) set_state(state State) {
+ lock {
+ s.state = state
+ }
+}
+
+// free handles manual freeing of the Server instance's memory
+pub fn (mut s Server) free() {
+ unsafe {
+ s.clients.free()
+ s.accept_client_callbacks.free()
+ s.message_callbacks.free()
+ s.close_callbacks.free()
+ }
+}
diff --git a/v_windows/v/vlib/net/websocket/websocket_test.v b/v_windows/v/vlib/net/websocket/websocket_test.v
new file mode 100644
index 0000000..35e15d3
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/websocket_test.v
@@ -0,0 +1,122 @@
+import os
+import net
+import net.websocket
+import time
+import rand
+
+// TODO: fix connecting to ipv4 websockets
+// (the server seems to work with .ip, but
+// the client can not connect; does it need
+// to be passed .ip too?)
+
+struct WebsocketTestResults {
+pub mut:
+ nr_messages int
+ nr_pong_received int
+}
+
+// Do not run these tests every time, since they are flaky.
+// They have their own specialized CI runner.
+const github_job = os.getenv('GITHUB_JOB')
+
+const should_skip = github_job != '' && github_job != 'websocket_tests'
+
+// tests with internal ws servers
+fn test_ws_ipv6() {
+ if should_skip {
+ return
+ }
+ port := 30000 + rand.intn(1024)
+ go start_server(.ip6, port)
+ time.sleep(500 * time.millisecond)
+ ws_test(.ip6, 'ws://localhost:$port') or { assert false }
+}
+
+// tests with internal ws servers
+fn test_ws_ipv4() {
+ // TODO: fix client
+ if true || should_skip {
+ return
+ }
+ port := 30000 + rand.intn(1024)
+ go start_server(.ip, port)
+ time.sleep(500 * time.millisecond)
+ ws_test(.ip, 'ws://localhost:$port') or { assert false }
+}
+
+fn start_server(family net.AddrFamily, listen_port int) ? {
+ mut s := websocket.new_server(family, listen_port, '')
+	// use a short ping interval so at least one ping fires during the test run
+ s.ping_interval = 1
+
+ s.on_connect(fn (mut s websocket.ServerClient) ?bool {
+		// here you can inspect the client info and accept or reject the connection
+		// by returning true or false
+ if s.resource_name != '/' {
+ panic('unexpected resource name in test')
+ return false
+ }
+ return true
+ }) ?
+ s.on_message(fn (mut ws websocket.Client, msg &websocket.Message) ? {
+ match msg.opcode {
+ .pong { ws.write_string('pong') or { panic(err) } }
+ else { ws.write(msg.payload, msg.opcode) or { panic(err) } }
+ }
+ })
+
+ s.on_close(fn (mut ws websocket.Client, code int, reason string) ? {
+ // not used
+ })
+ s.listen() or {}
+}
+
+// ws_test connects to the websocket server from a websocket client and tests message exchange
+fn ws_test(family net.AddrFamily, uri string) ? {
+ eprintln('connecting to $uri ...')
+
+ mut test_results := WebsocketTestResults{}
+ mut ws := websocket.new_client(uri) ?
+ ws.on_open(fn (mut ws websocket.Client) ? {
+ ws.pong() ?
+ assert true
+ })
+ ws.on_error(fn (mut ws websocket.Client, err string) ? {
+ println('error: $err')
+		// this can be caused by internet connection problems
+ assert false
+ })
+
+ ws.on_message_ref(fn (mut ws websocket.Client, msg &websocket.Message, mut res WebsocketTestResults) ? {
+ println('client got type: $msg.opcode payload:\n$msg.payload')
+ if msg.opcode == .text_frame {
+ smessage := msg.payload.bytestr()
+ match smessage {
+ 'pong' {
+ res.nr_pong_received++
+ }
+ 'a' {
+ res.nr_messages++
+ }
+ else {
+ assert false
+ }
+ }
+ } else {
+ println('Binary message: $msg')
+ }
+ }, test_results)
+ ws.connect() or { panic('fail to connect') }
+ go ws.listen()
+ text := ['a'].repeat(2)
+ for msg in text {
+ ws.write(msg.bytes(), .text_frame) or { panic('fail to write to websocket') }
+		// sleep to give time to receive the response before sending a new one
+ time.sleep(100 * time.millisecond)
+ }
+	// sleep to give time to receive responses before the asserts
+ time.sleep(1500 * time.millisecond)
+ // We expect at least 2 pongs, one sent directly and one indirectly
+ assert test_results.nr_pong_received >= 2
+ assert test_results.nr_messages == 2
+}
diff --git a/v_windows/v/vlib/net/websocket/websocket_windows.c.v b/v_windows/v/vlib/net/websocket/websocket_windows.c.v
new file mode 100644
index 0000000..e9f4fc3
--- /dev/null
+++ b/v_windows/v/vlib/net/websocket/websocket_windows.c.v
@@ -0,0 +1,12 @@
+module websocket
+
+import net
+
+// error_code returns the error code
+fn error_code() int {
+ return C.WSAGetLastError()
+}
+
+const (
+ error_ewouldblock = net.WsaError.wsaewouldblock // blocking error code
+)