diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cad8b10ab96..c232120a898 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,7 +1,7 @@ 'name': 'build' 'env': - 'GO_VERSION': '1.19.10' + 'GO_VERSION': '1.19.11' 'NODE_VERSION': '14' 'on': diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2d46bf973ce..900a1478334 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,7 +1,7 @@ 'name': 'lint' 'env': - 'GO_VERSION': '1.19.10' + 'GO_VERSION': '1.19.11' 'on': 'push': diff --git a/.gitignore b/.gitignore index e5124973255..9e19558a1fa 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ *.db *.log *.snap +*.test /agh-backup/ /bin/ /build/* diff --git a/CHANGELOG.md b/CHANGELOG.md index 517776da03c..fb6c12b13ab 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,21 +14,27 @@ and this project adheres to ### Fixed -- Excessive RAM and CPU consumption by Safe Browsing and Parental Control - filters ([#5896]). +- `bufio.Scanner: token too long` and other errors when trying to add + filtering-rule lists with lines over 1024 bytes long or containing cosmetic + rules ([#6003]). -[#5896]: https://github.com/AdguardTeam/AdGuardHome/issues/5896 +### Removed + +- Default exposure of the non-standard ports 784 and 8853 for DNS-over-QUIC in + the `Dockerfile`. 
+ +[#6003]: https://github.com/AdguardTeam/AdGuardHome/issues/6003 -[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.33...HEAD +[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.34...HEAD +[v0.107.34]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.33...v0.107.34 [v0.107.33]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.32...v0.107.33 [v0.107.32]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.31...v0.107.32 [v0.107.31]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.30...v0.107.31 diff --git a/Makefile b/Makefile index 864734d449d..61a3ed0ffa7 100644 --- a/Makefile +++ b/Makefile @@ -78,7 +78,7 @@ build: deps quick-build quick-build: js-build go-build -ci: deps test +ci: deps test go-bench go-fuzz deps: js-deps go-deps lint: js-lint go-lint @@ -104,8 +104,10 @@ js-deps: js-lint: ; $(NPM) $(NPM_FLAGS) run lint js-test: ; $(NPM) $(NPM_FLAGS) run test +go-bench: ; $(ENV) "$(SHELL)" ./scripts/make/go-bench.sh go-build: ; $(ENV) "$(SHELL)" ./scripts/make/go-build.sh go-deps: ; $(ENV) "$(SHELL)" ./scripts/make/go-deps.sh +go-fuzz: ; $(ENV) "$(SHELL)" ./scripts/make/go-fuzz.sh go-lint: ; $(ENV) "$(SHELL)" ./scripts/make/go-lint.sh go-tools: ; $(ENV) "$(SHELL)" ./scripts/make/go-tools.sh diff --git a/bamboo-specs/release.yaml b/bamboo-specs/release.yaml index d82b6f2f550..5ca1f8c0e02 100644 --- a/bamboo-specs/release.yaml +++ b/bamboo-specs/release.yaml @@ -7,7 +7,7 @@ # Make sure to sync any changes with the branch overrides below. 'variables': 'channel': 'edge' - 'dockerGo': 'adguard/golang-ubuntu:6.7' + 'dockerGo': 'adguard/golang-ubuntu:6.8' 'stages': - 'Build frontend': @@ -272,7 +272,7 @@ # need to build a few of these. 'variables': 'channel': 'beta' - 'dockerGo': 'adguard/golang-ubuntu:6.7' + 'dockerGo': 'adguard/golang-ubuntu:6.8' # release-vX.Y.Z branches are the branches from which the actual final # release is built. 
- '^release-v[0-9]+\.[0-9]+\.[0-9]+': @@ -287,4 +287,4 @@ # are the ones that actually get released. 'variables': 'channel': 'release' - 'dockerGo': 'adguard/golang-ubuntu:6.7' + 'dockerGo': 'adguard/golang-ubuntu:6.8' diff --git a/bamboo-specs/snapcraft.yaml b/bamboo-specs/snapcraft.yaml index 53efff417e6..ac8a22177b0 100644 --- a/bamboo-specs/snapcraft.yaml +++ b/bamboo-specs/snapcraft.yaml @@ -10,7 +10,7 @@ # Make sure to sync any changes with the branch overrides below. 'variables': 'channel': 'edge' - 'dockerGo': 'adguard/golang-ubuntu:6.7' + 'dockerGo': 'adguard/golang-ubuntu:6.8' 'snapcraftChannel': 'edge' 'stages': @@ -191,7 +191,7 @@ # need to build a few of these. 'variables': 'channel': 'beta' - 'dockerGo': 'adguard/golang-ubuntu:6.7' + 'dockerGo': 'adguard/golang-ubuntu:6.8' 'snapcraftChannel': 'beta' # release-vX.Y.Z branches are the branches from which the actual final # release is built. @@ -207,5 +207,5 @@ # are the ones that actually get released. 'variables': 'channel': 'release' - 'dockerGo': 'adguard/golang-ubuntu:6.7' + 'dockerGo': 'adguard/golang-ubuntu:6.8' 'snapcraftChannel': 'candidate' diff --git a/bamboo-specs/test.yaml b/bamboo-specs/test.yaml index e2cc8cafda9..39361afe488 100644 --- a/bamboo-specs/test.yaml +++ b/bamboo-specs/test.yaml @@ -5,7 +5,7 @@ 'key': 'AHBRTSPECS' 'name': 'AdGuard Home - Build and run tests' 'variables': - 'dockerGo': 'adguard/golang-ubuntu:6.7' + 'dockerGo': 'adguard/golang-ubuntu:6.8' 'stages': - 'Tests': diff --git a/client/src/__locales/en.json b/client/src/__locales/en.json index 644f466d043..6b73220a353 100644 --- a/client/src/__locales/en.json +++ b/client/src/__locales/en.json @@ -444,7 +444,7 @@ "client_confirm_delete": "Are you sure you want to delete client \"{{key}}\"?", "list_confirm_delete": "Are you sure you want to delete this list?", "auto_clients_title": "Runtime clients", - "auto_clients_desc": "Devices not on the list of Persistent clients that may still use AdGuard Home", + 
"auto_clients_desc": "Information about IP addresses of devices that are using or may use AdGuard Home. This information is gathered from several sources, including hosts files, reverse DNS, etc.", "access_title": "Access settings", "access_desc": "Here you can configure access rules for the AdGuard Home DNS server", "access_allowed_title": "Allowed clients", diff --git a/docker/Dockerfile b/docker/Dockerfile index 6424faf7c6e..38198aa62c1 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,6 +1,6 @@ # A docker file for scripts/make/build-docker.sh. -FROM alpine:3.17 +FROM alpine:3.18 ARG BUILD_DATE ARG VERSION @@ -25,8 +25,6 @@ RUN apk --no-cache add ca-certificates libcap tzdata && \ mkdir -p /opt/adguardhome/conf /opt/adguardhome/work && \ chown -R nobody: /opt/adguardhome -RUN apk --no-cache add tini - ARG DIST_DIR ARG TARGETARCH ARG TARGETOS @@ -43,43 +41,18 @@ RUN setcap 'cap_net_bind_service=+eip' /opt/adguardhome/AdGuardHome # 68 : UDP : DHCP (client) # 80 : TCP : HTTP (main) # 443 : TCP, UDP : HTTPS, DNS-over-HTTPS (incl. HTTP/3), DNSCrypt (main) -# 784 : UDP : DNS-over-QUIC (experimental) # 853 : TCP, UDP : DNS-over-TLS, DNS-over-QUIC # 3000 : TCP, UDP : HTTP(S) (alt, incl. HTTP/3) -# 3001 : TCP, UDP : HTTP(S) (beta, incl. HTTP/3) # 5443 : TCP, UDP : DNSCrypt (alt) # 6060 : TCP : HTTP (pprof) -# 8853 : UDP : DNS-over-QUIC (experimental) -# -# TODO(a.garipov): Remove the old, non-standard 784 and 8853 ports for -# DNS-over-QUIC in a future release. -EXPOSE 53/tcp 53/udp 67/udp 68/udp 80/tcp 443/tcp 443/udp 784/udp\ - 853/tcp 853/udp 3000/tcp 3000/udp 5443/tcp\ - 5443/udp 6060/tcp 8853/udp +EXPOSE 53/tcp 53/udp 67/udp 68/udp 80/tcp 443/tcp 443/udp 853/tcp\ + 853/udp 3000/tcp 3000/udp 5443/tcp 5443/udp 6060/tcp WORKDIR /opt/adguardhome/work -# Install helpers for healthcheck. 
-COPY --chown=nobody:nogroup\ - ./${DIST_DIR}/docker/scripts\ - /opt/adguardhome/scripts - -HEALTHCHECK \ - --interval=30s \ - --timeout=10s \ - --retries=3 \ - CMD [ "/opt/adguardhome/scripts/healthcheck.sh" ] - -# It seems that the healthckech script sometimes spawns zombie processes, so we -# need a way to handle them, since AdGuard Home doesn't know how to keep track -# of the processes delegated to it by the OS. Use tini as entry point because -# it needs the PID=1 to be the default parent for orphaned processes. -# -# See https://github.com/adguardTeam/adGuardHome/issues/3290. -ENTRYPOINT [ "/sbin/tini", "--" ] +ENTRYPOINT ["/opt/adguardhome/AdGuardHome"] CMD [ \ - "/opt/adguardhome/AdGuardHome", \ "--no-check-update", \ "-c", "/opt/adguardhome/conf/AdGuardHome.yaml", \ "-w", "/opt/adguardhome/work" \ diff --git a/docker/dns-bind.awk b/docker/dns-bind.awk deleted file mode 100644 index abb5747c5e3..00000000000 --- a/docker/dns-bind.awk +++ /dev/null @@ -1,29 +0,0 @@ -/^[^[:space:]]/ { is_dns = /^dns:/ } - -/^[[:space:]]+bind_hosts:/ { if (is_dns) prev_line = FNR } - -/^[[:space:]]+- .+/ { - if (FNR - prev_line == 1) { - addrs[$2] = true - prev_line = FNR - - if ($2 == "0.0.0.0" || $2 == "'::'") { - # Drop all the other addresses. - delete addrs - addrs[""] = true - prev_line = -1 - } - } -} - -/^[[:space:]]+port:/ { if (is_dns) port = $2 } - -END { - for (addr in addrs) { - if (match(addr, ":")) { - print "[" addr "]:" port - } else { - print addr ":" port - } - } -} diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh deleted file mode 100755 index a50de230b7c..00000000000 --- a/docker/healthcheck.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/sh - -# AdGuard Home Docker healthcheck script - -# Exit the script if a pipeline fails (-e), prevent accidental filename -# expansion (-f), and consider undefined variables as errors (-u). 
-set -e -f -u - -# Function error_exit is an echo wrapper that writes to stderr and stops the -# script execution with code 1. -error_exit() { - echo "$1" 1>&2 - - exit 1 -} - -agh_dir="/opt/adguardhome" -readonly agh_dir - -filename="${agh_dir}/conf/AdGuardHome.yaml" -readonly filename - -if ! [ -f "$filename" ] -then - wget "http://127.0.0.1:3000" -O /dev/null -q || exit 1 - - exit 0 -fi - -help_dir="${agh_dir}/scripts" -readonly help_dir - -# Parse web host - -web_url="$( awk -f "${help_dir}/web-bind.awk" "$filename" )" -readonly web_url - -if [ "$web_url" = '' ] -then - error_exit "no web bindings could be retrieved from $filename" -fi - -# TODO(e.burkov): Deal with 0 port. -case "$web_url" -in -(*':0') - error_exit '0 in web port is not supported by healthcheck' - ;; -(*) - # Go on. - ;; -esac - -# Parse DNS hosts - -dns_hosts="$( awk -f "${help_dir}/dns-bind.awk" "$filename" )" -readonly dns_hosts - -if [ "$dns_hosts" = '' ] -then - error_exit "no DNS bindings could be retrieved from $filename" -fi - -first_dns="$( echo "$dns_hosts" | head -n 1 )" -readonly first_dns - -# TODO(e.burkov): Deal with 0 port. -case "$first_dns" -in -(*':0') - error_exit '0 in DNS port is not supported by healthcheck' - ;; -(*) - # Go on. - ;; -esac - -# Check - -# Skip SSL certificate validation since there is no guarantee the container -# trusts the one used. It should be safe to drop the SSL validation since the -# current script intended to be used from inside the container and only checks -# the endpoint availability, ignoring the content of the response. -# -# See https://github.com/AdguardTeam/AdGuardHome/issues/5642. -wget --no-check-certificate "$web_url" -O /dev/null -q || exit 1 - -test_fqdn="healthcheck.adguardhome.test." -readonly test_fqdn - -# The awk script currently returns only port prefixed with colon in case of -# unspecified address. 
-case "$first_dns" -in -(':'*) - nslookup -type=a "$test_fqdn" "127.0.0.1${first_dns}" > /dev/null ||\ - nslookup -type=a "$test_fqdn" "[::1]${first_dns}" > /dev/null ||\ - error_exit "nslookup failed for $host" - ;; -(*) - echo "$dns_hosts" | while read -r host - do - nslookup -type=a "$test_fqdn" "$host" > /dev/null ||\ - error_exit "nslookup failed for $host" - done - ;; -esac diff --git a/docker/web-bind.awk b/docker/web-bind.awk deleted file mode 100644 index 2ae64a4c1fa..00000000000 --- a/docker/web-bind.awk +++ /dev/null @@ -1,5 +0,0 @@ -# Don't consider the HTTPS hostname since the enforced HTTPS redirection should -# work if the SSL check skipped. See file docker/healthcheck.sh. -/^[^[:space:]]/ { is_http = /^http:/ } - -/^[[:space:]]+address:/ { if (is_http) print "http://" $2 } diff --git a/go.mod b/go.mod index f10b0887498..de8b2ad7e52 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( // TODO(a.garipov): Update to a tagged version when it's released. github.com/AdguardTeam/dnsproxy v0.50.3-0.20230628054307-31e374065768 - github.com/AdguardTeam/golibs v0.13.3 + github.com/AdguardTeam/golibs v0.13.4 github.com/AdguardTeam/urlfilter v0.16.1 github.com/NYTimes/gziphandler v1.1.1 github.com/ameshkov/dnscrypt/v2 v2.2.7 diff --git a/go.sum b/go.sum index e708812c848..196b7903a94 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ github.com/AdguardTeam/dnsproxy v0.50.3-0.20230628054307-31e374065768 h1:5Ia6wA+ github.com/AdguardTeam/dnsproxy v0.50.3-0.20230628054307-31e374065768/go.mod h1:CQhZTkqC8X0ID6glrtyaxgqRRdiYfn1gJulC1cZ5Dn8= github.com/AdguardTeam/golibs v0.4.0/go.mod h1:skKsDKIBB7kkFflLJBpfGX+G8QFTx0WKUzB6TIgtUj4= github.com/AdguardTeam/golibs v0.10.4/go.mod h1:rSfQRGHIdgfxriDDNgNJ7HmE5zRoURq8R+VdR81Zuzw= -github.com/AdguardTeam/golibs v0.13.3 h1:RT3QbzThtaLiFLkIUDS6/hlGEXrh0zYvdf4bd7UWpGo= -github.com/AdguardTeam/golibs v0.13.3/go.mod h1:wkJ6EUsN4np/9Gp7+9QeooY9E2U2WCLJYAioLCzkHsI= 
+github.com/AdguardTeam/golibs v0.13.4 h1:ACTwIR1pEENBijHcEWtiMbSh4wWQOlIHRxmUB8oBHf8= +github.com/AdguardTeam/golibs v0.13.4/go.mod h1:wkJ6EUsN4np/9Gp7+9QeooY9E2U2WCLJYAioLCzkHsI= github.com/AdguardTeam/gomitmproxy v0.2.0/go.mod h1:Qdv0Mktnzer5zpdpi5rAwixNJzW2FN91LjKJCkVbYGU= github.com/AdguardTeam/urlfilter v0.16.1 h1:ZPi0rjqo8cQf2FVdzo6cqumNoHZx2KPXj2yZa1A5BBw= github.com/AdguardTeam/urlfilter v0.16.1/go.mod h1:46YZDOV1+qtdRDuhZKVPSSp7JWWes0KayqHrKAFBdEI= diff --git a/internal/aghnet/addr.go b/internal/aghnet/addr.go new file mode 100644 index 00000000000..e3013125e4b --- /dev/null +++ b/internal/aghnet/addr.go @@ -0,0 +1,43 @@ +package aghnet + +import ( + "fmt" + "strings" + + "github.com/AdguardTeam/golibs/stringutil" +) + +// NormalizeDomain returns a lowercased version of host without the final dot, +// unless host is ".", in which case it returns it unchanged. That is a special +// case that to allow matching queries like: +// +// dig IN NS '.' +func NormalizeDomain(host string) (norm string) { + if host == "." { + return host + } + + return strings.ToLower(strings.TrimSuffix(host, ".")) +} + +// NewDomainNameSet returns nil and error, if list has duplicate or empty domain +// name. Otherwise returns a set, which contains domain names normalized using +// [NormalizeDomain]. 
+func NewDomainNameSet(list []string) (set *stringutil.Set, err error) { + set = stringutil.NewSet() + + for i, host := range list { + if host == "" { + return nil, fmt.Errorf("at index %d: hostname is empty", i) + } + + host = NormalizeDomain(host) + if set.Has(host) { + return nil, fmt.Errorf("duplicate hostname %q at index %d", host, i) + } + + set.Add(host) + } + + return set, nil +} diff --git a/internal/aghnet/addr_test.go b/internal/aghnet/addr_test.go new file mode 100644 index 00000000000..2bb30e31aae --- /dev/null +++ b/internal/aghnet/addr_test.go @@ -0,0 +1,59 @@ +package aghnet_test + +import ( + "testing" + + "github.com/AdguardTeam/AdGuardHome/internal/aghnet" + "github.com/AdguardTeam/golibs/testutil" + "github.com/stretchr/testify/assert" +) + +func TestNewDomainNameSet(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + wantErrMsg string + in []string + }{{ + name: "nil", + wantErrMsg: "", + in: nil, + }, { + name: "success", + wantErrMsg: "", + in: []string{ + "Domain.Example", + ".", + }, + }, { + name: "dups", + wantErrMsg: `duplicate hostname "domain.example" at index 1`, + in: []string{ + "Domain.Example", + "domain.example", + }, + }, { + name: "bad_domain", + wantErrMsg: "at index 0: hostname is empty", + in: []string{ + "", + }, + }} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + set, err := aghnet.NewDomainNameSet(tc.in) + testutil.AssertErrorMsg(t, tc.wantErrMsg, err) + if err != nil { + return + } + + for _, host := range tc.in { + assert.Truef(t, set.Has(aghnet.NormalizeDomain(host)), "%q not matched", host) + } + }) + } +} diff --git a/internal/aghnet/hostgen.go b/internal/aghnet/hostgen.go index e4031f5423c..059b7e4a3b3 100644 --- a/internal/aghnet/hostgen.go +++ b/internal/aghnet/hostgen.go @@ -1,12 +1,8 @@ package aghnet import ( - "fmt" "net/netip" "strings" - - "github.com/AdguardTeam/golibs/errors" - 
"github.com/AdguardTeam/golibs/stringutil" ) // GenerateHostname generates the hostname from ip. In case of using IPv4 the @@ -29,32 +25,8 @@ func GenerateHostname(ip netip.Addr) (hostname string) { hostname = ip.StringExpanded() if ip.Is4() { - return strings.Replace(hostname, ".", "-", -1) - } - - return strings.Replace(hostname, ":", "-", -1) -} - -// NewDomainNameSet returns nil and error, if list has duplicate or empty -// domain name. Otherwise returns a set, which contains non-FQDN domain names, -// and nil error. -func NewDomainNameSet(list []string) (set *stringutil.Set, err error) { - set = stringutil.NewSet() - - for i, v := range list { - host := strings.ToLower(strings.TrimSuffix(v, ".")) - // TODO(a.garipov): Think about ignoring empty (".") names in the - // future. - if host == "" { - return nil, errors.Error("host name is empty") - } - - if set.Has(host) { - return nil, fmt.Errorf("duplicate host name %q at index %d", host, i) - } - - set.Add(host) + return strings.ReplaceAll(hostname, ".", "-") } - return set, nil + return strings.ReplaceAll(hostname, ":", "-") } diff --git a/internal/aghtest/interface.go b/internal/aghtest/interface.go index 66f0211b610..cce49776aa4 100644 --- a/internal/aghtest/interface.go +++ b/internal/aghtest/interface.go @@ -2,8 +2,8 @@ package aghtest import ( "context" + "io" "io/fs" - "net" "github.com/AdguardTeam/AdGuardHome/internal/aghos" "github.com/AdguardTeam/AdGuardHome/internal/next/agh" @@ -19,23 +19,23 @@ import ( // Package fs -// type check -var _ fs.FS = &FS{} - -// FS is a mock [fs.FS] implementation for tests. +// FS is a fake [fs.FS] implementation for tests. type FS struct { OnOpen func(name string) (fs.File, error) } +// type check +var _ fs.FS = (*FS)(nil) + // Open implements the [fs.FS] interface for *FS. 
func (fsys *FS) Open(name string) (fs.File, error) { return fsys.OnOpen(name) } // type check -var _ fs.GlobFS = &GlobFS{} +var _ fs.GlobFS = (*GlobFS)(nil) -// GlobFS is a mock [fs.GlobFS] implementation for tests. +// GlobFS is a fake [fs.GlobFS] implementation for tests. type GlobFS struct { // FS is embedded here to avoid implementing all it's methods. FS @@ -48,9 +48,9 @@ func (fsys *GlobFS) Glob(pattern string) ([]string, error) { } // type check -var _ fs.StatFS = &StatFS{} +var _ fs.StatFS = (*StatFS)(nil) -// StatFS is a mock [fs.StatFS] implementation for tests. +// StatFS is a fake [fs.StatFS] implementation for tests. type StatFS struct { // FS is embedded here to avoid implementing all it's methods. FS @@ -62,47 +62,34 @@ func (fsys *StatFS) Stat(name string) (fs.FileInfo, error) { return fsys.OnStat(name) } -// Package net - -// type check -var _ net.Listener = (*Listener)(nil) - -// Listener is a mock [net.Listener] implementation for tests. -type Listener struct { - OnAccept func() (conn net.Conn, err error) - OnAddr func() (addr net.Addr) - OnClose func() (err error) -} +// Package io -// Accept implements the [net.Listener] interface for *Listener. -func (l *Listener) Accept() (conn net.Conn, err error) { - return l.OnAccept() +// Writer is a fake [io.Writer] implementation for tests. +type Writer struct { + OnWrite func(b []byte) (n int, err error) } -// Addr implements the [net.Listener] interface for *Listener. -func (l *Listener) Addr() (addr net.Addr) { - return l.OnAddr() -} +var _ io.Writer = (*Writer)(nil) -// Close implements the [net.Listener] interface for *Listener. -func (l *Listener) Close() (err error) { - return l.OnClose() +// Write implements the [io.Writer] interface for *Writer. +func (w *Writer) Write(b []byte) (n int, err error) { + return w.OnWrite(b) } // Module adguard-home // Package aghos -// type check -var _ aghos.FSWatcher = (*FSWatcher)(nil) - -// FSWatcher is a mock [aghos.FSWatcher] implementation for tests. 
+// FSWatcher is a fake [aghos.FSWatcher] implementation for tests. type FSWatcher struct { OnEvents func() (e <-chan struct{}) OnAdd func(name string) (err error) OnClose func() (err error) } +// type check +var _ aghos.FSWatcher = (*FSWatcher)(nil) + // Events implements the [aghos.FSWatcher] interface for *FSWatcher. func (w *FSWatcher) Events() (e <-chan struct{}) { return w.OnEvents() @@ -120,16 +107,16 @@ func (w *FSWatcher) Close() (err error) { // Package agh -// type check -var _ agh.ServiceWithConfig[struct{}] = (*ServiceWithConfig[struct{}])(nil) - -// ServiceWithConfig is a mock [agh.ServiceWithConfig] implementation for tests. +// ServiceWithConfig is a fake [agh.ServiceWithConfig] implementation for tests. type ServiceWithConfig[ConfigType any] struct { OnStart func() (err error) OnShutdown func(ctx context.Context) (err error) OnConfig func() (c ConfigType) } +// type check +var _ agh.ServiceWithConfig[struct{}] = (*ServiceWithConfig[struct{}])(nil) + // Start implements the [agh.ServiceWithConfig] interface for // *ServiceWithConfig. func (s *ServiceWithConfig[_]) Start() (err error) { @@ -152,10 +139,7 @@ func (s *ServiceWithConfig[ConfigType]) Config() (c ConfigType) { // Package upstream -// type check -var _ upstream.Upstream = (*UpstreamMock)(nil) - -// UpstreamMock is a mock [upstream.Upstream] implementation for tests. +// UpstreamMock is a fake [upstream.Upstream] implementation for tests. // // TODO(a.garipov): Replace with all uses of Upstream with UpstreamMock and // rename it to just Upstream. @@ -165,6 +149,9 @@ type UpstreamMock struct { OnClose func() (err error) } +// type check +var _ upstream.Upstream = (*UpstreamMock)(nil) + // Address implements the [upstream.Upstream] interface for *UpstreamMock. 
func (u *UpstreamMock) Address() (addr string) { return u.OnAddress() diff --git a/internal/dnsforward/dnsforward.go b/internal/dnsforward/dnsforward.go index a3f9fa738fb..70abd6600b7 100644 --- a/internal/dnsforward/dnsforward.go +++ b/internal/dnsforward/dnsforward.go @@ -17,6 +17,7 @@ import ( "github.com/AdguardTeam/AdGuardHome/internal/dhcpd" "github.com/AdguardTeam/AdGuardHome/internal/filtering" "github.com/AdguardTeam/AdGuardHome/internal/querylog" + "github.com/AdguardTeam/AdGuardHome/internal/rdns" "github.com/AdguardTeam/AdGuardHome/internal/stats" "github.com/AdguardTeam/dnsproxy/proxy" "github.com/AdguardTeam/dnsproxy/upstream" @@ -277,17 +278,6 @@ func (s *Server) Resolve(host string) ([]net.IPAddr, error) { return s.internalProxy.LookupIPAddr(host) } -// RDNSExchanger is a resolver for clients' addresses. -type RDNSExchanger interface { - // Exchange tries to resolve the ip in a suitable way, i.e. either as local - // or as external. - Exchange(ip net.IP) (host string, err error) - - // ResolvesPrivatePTR returns true if the RDNSExchanger is able to - // resolve PTR requests for locally-served addresses. - ResolvesPrivatePTR() (ok bool) -} - const ( // ErrRDNSNoData is returned by [RDNSExchanger.Exchange] when the answer // section of response is either NODATA or has no PTR records. @@ -299,10 +289,10 @@ const ( ) // type check -var _ RDNSExchanger = (*Server)(nil) +var _ rdns.Exchanger = (*Server)(nil) -// Exchange implements the RDNSExchanger interface for *Server. -func (s *Server) Exchange(ip net.IP) (host string, err error) { +// Exchange implements the [rdns.Exchanger] interface for *Server. 
+func (s *Server) Exchange(ip netip.Addr) (host string, err error) { s.serverLock.RLock() defer s.serverLock.RUnlock() @@ -310,7 +300,7 @@ func (s *Server) Exchange(ip net.IP) (host string, err error) { return "", nil } - arpa, err := netutil.IPToReversedAddr(ip) + arpa, err := netutil.IPToReversedAddr(ip.AsSlice()) if err != nil { return "", fmt.Errorf("reversing ip: %w", err) } @@ -335,7 +325,7 @@ func (s *Server) Exchange(ip net.IP) (host string, err error) { } var resolver *proxy.Proxy - if s.privateNets.Contains(ip) { + if s.isPrivateIP(ip) { if !s.conf.UsePrivateRDNS { return "", nil } @@ -350,8 +340,12 @@ func (s *Server) Exchange(ip net.IP) (host string, err error) { return "", err } + return hostFromPTR(ctx.Res) +} + +// hostFromPTR returns domain name from the PTR response or error. +func hostFromPTR(resp *dns.Msg) (host string, err error) { // Distinguish between NODATA response and a failed request. - resp := ctx.Res if resp.Rcode != dns.RcodeSuccess && resp.Rcode != dns.RcodeNameError { return "", fmt.Errorf( "received %s response: %w", @@ -370,12 +364,25 @@ func (s *Server) Exchange(ip net.IP) (host string, err error) { return "", ErrRDNSNoData } -// ResolvesPrivatePTR implements the RDNSExchanger interface for *Server. -func (s *Server) ResolvesPrivatePTR() (ok bool) { +// isPrivateIP returns true if the ip is private. +func (s *Server) isPrivateIP(ip netip.Addr) (ok bool) { + return s.privateNets.Contains(ip.AsSlice()) +} + +// ShouldResolveClient returns false if ip is a loopback address, or ip is +// private and resolving of private addresses is disabled. +func (s *Server) ShouldResolveClient(ip netip.Addr) (ok bool) { + if ip.IsLoopback() { + return false + } + + isPrivate := s.isPrivateIP(ip) + s.serverLock.RLock() defer s.serverLock.RUnlock() - return s.conf.UsePrivateRDNS + return s.conf.ResolveClients && + (s.conf.UsePrivateRDNS || !isPrivate) } // Start starts the DNS server. 
diff --git a/internal/dnsforward/dnsforward_test.go b/internal/dnsforward/dnsforward_test.go index f7ff57a31f9..705227a1f73 100644 --- a/internal/dnsforward/dnsforward_test.go +++ b/internal/dnsforward/dnsforward_test.go @@ -1273,11 +1273,11 @@ func TestServer_Exchange(t *testing.T) { ) var ( - onesIP = net.IP{1, 1, 1, 1} - localIP = net.IP{192, 168, 1, 1} + onesIP = netip.MustParseAddr("1.1.1.1") + localIP = netip.MustParseAddr("192.168.1.1") ) - revExtIPv4, err := netutil.IPToReversedAddr(onesIP) + revExtIPv4, err := netutil.IPToReversedAddr(onesIP.AsSlice()) require.NoError(t, err) extUpstream := &aghtest.UpstreamMock{ @@ -1290,7 +1290,7 @@ func TestServer_Exchange(t *testing.T) { }, } - revLocIPv4, err := netutil.IPToReversedAddr(localIP) + revLocIPv4, err := netutil.IPToReversedAddr(localIP.AsSlice()) require.NoError(t, err) locUpstream := &aghtest.UpstreamMock{ @@ -1330,7 +1330,7 @@ func TestServer_Exchange(t *testing.T) { want string wantErr error locUpstream upstream.Upstream - req net.IP + req netip.Addr }{{ name: "external_good", want: onesHost, @@ -1354,7 +1354,7 @@ func TestServer_Exchange(t *testing.T) { want: "", wantErr: ErrRDNSNoData, locUpstream: locUpstream, - req: net.IP{192, 168, 1, 2}, + req: netip.MustParseAddr("192.168.1.2"), }, { name: "invalid_answer", want: "", @@ -1396,3 +1396,57 @@ func TestServer_Exchange(t *testing.T) { assert.Empty(t, host) }) } + +func TestServer_ShouldResolveClient(t *testing.T) { + srv := &Server{ + privateNets: netutil.SubnetSetFunc(netutil.IsLocallyServed), + } + + testCases := []struct { + ip netip.Addr + want require.BoolAssertionFunc + name string + resolve bool + usePrivate bool + }{{ + name: "default", + ip: netip.MustParseAddr("1.1.1.1"), + want: require.True, + resolve: true, + usePrivate: true, + }, { + name: "no_rdns", + ip: netip.MustParseAddr("1.1.1.1"), + want: require.False, + resolve: false, + usePrivate: true, + }, { + name: "loopback", + ip: netip.MustParseAddr("127.0.0.1"), + want: require.False, 
+ resolve: true, + usePrivate: true, + }, { + name: "private_resolve", + ip: netip.MustParseAddr("192.168.0.1"), + want: require.True, + resolve: true, + usePrivate: true, + }, { + name: "private_no_resolve", + ip: netip.MustParseAddr("192.168.0.1"), + want: require.False, + resolve: true, + usePrivate: false, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + srv.conf.ResolveClients = tc.resolve + srv.conf.UsePrivateRDNS = tc.usePrivate + + ok := srv.ShouldResolveClient(tc.ip) + tc.want(t, ok) + }) + } +} diff --git a/internal/dnsforward/filter_test.go b/internal/dnsforward/filter_test.go index 3fbe58cc338..1e3c48225c7 100644 --- a/internal/dnsforward/filter_test.go +++ b/internal/dnsforward/filter_test.go @@ -21,6 +21,8 @@ func TestHandleDNSRequest_filterDNSResponse(t *testing.T) { ||cname.specific^$dnstype=~CNAME ||0.0.0.1^$dnstype=~A ||::1^$dnstype=~AAAA +0.0.0.0 duplicate.domain +0.0.0.0 duplicate.domain ` forwardConf := ServerConfig{ @@ -137,6 +139,17 @@ func TestHandleDNSRequest_filterDNSResponse(t *testing.T) { }, A: netutil.IPv4Zero(), }}, + }, { + req: createTestMessage("duplicate.domain."), + name: "duplicate_domain", + wantAns: []dns.RR{&dns.A{ + Hdr: dns.RR_Header{ + Name: "duplicate.domain.", + Rrtype: dns.TypeA, + Class: dns.ClassINET, + }, + A: netutil.IPv4Zero(), + }}, }} for _, tc := range testCases { diff --git a/internal/dnsforward/msg.go b/internal/dnsforward/msg.go index 36c82f0b0e3..507ae8e89fe 100644 --- a/internal/dnsforward/msg.go +++ b/internal/dnsforward/msg.go @@ -26,11 +26,25 @@ func (s *Server) makeResponse(req *dns.Msg) (resp *dns.Msg) { return resp } -// ipsFromRules extracts non-IP addresses from the filtering result rules. +// containsIP returns true if the IP is already in the list. 
+func containsIP(ips []net.IP, ip net.IP) bool { + for _, a := range ips { + if a.Equal(ip) { + return true + } + } + + return false +} + +// ipsFromRules extracts unique non-IP addresses from the filtering result +// rules. func ipsFromRules(resRules []*filtering.ResultRule) (ips []net.IP) { for _, r := range resRules { - if r.IP != nil { - ips = append(ips, r.IP) + // len(resRules) and len(ips) are actually small enough for O(n^2) to do + // not raise performance questions. + if ip := r.IP; ip != nil && !containsIP(ips, ip) { + ips = append(ips, ip) } } diff --git a/internal/dnsforward/stats.go b/internal/dnsforward/stats.go index 4d4f1324761..b142c86f010 100644 --- a/internal/dnsforward/stats.go +++ b/internal/dnsforward/stats.go @@ -2,9 +2,9 @@ package dnsforward import ( "net" - "strings" "time" + "github.com/AdguardTeam/AdGuardHome/internal/aghnet" "github.com/AdguardTeam/AdGuardHome/internal/filtering" "github.com/AdguardTeam/AdGuardHome/internal/querylog" "github.com/AdguardTeam/AdGuardHome/internal/stats" @@ -24,7 +24,7 @@ func (s *Server) processQueryLogsAndStats(dctx *dnsContext) (rc resultCode) { pctx := dctx.proxyCtx q := pctx.Req.Question[0] - host := strings.ToLower(strings.TrimSuffix(q.Name, ".")) + host := aghnet.NormalizeDomain(q.Name) ip, _ := netutil.IPAndPortFromAddr(pctx.Addr) ip = slices.Clone(ip) @@ -139,11 +139,10 @@ func (s *Server) updateStats( clientIP string, ) { pctx := ctx.proxyCtx - e := stats.Entry{} - e.Domain = strings.ToLower(pctx.Req.Question[0].Name) - if e.Domain != "." { - // Remove last ".", but save the domain as is for "." queries. 
- e.Domain = e.Domain[:len(e.Domain)-1] + e := stats.Entry{ + Domain: aghnet.NormalizeDomain(pctx.Req.Question[0].Name), + Result: stats.RNotFiltered, + Time: uint32(elapsed / 1000), } if clientID := ctx.clientID; clientID != "" { @@ -152,9 +151,6 @@ func (s *Server) updateStats( e.Client = clientIP } - e.Time = uint32(elapsed / 1000) - e.Result = stats.RNotFiltered - switch res.Reason { case filtering.FilteredSafeBrowsing: e.Result = stats.RSafeBrowsing @@ -162,7 +158,8 @@ func (s *Server) updateStats( e.Result = stats.RParental case filtering.FilteredSafeSearch: e.Result = stats.RSafeSearch - case filtering.FilteredBlockList, + case + filtering.FilteredBlockList, filtering.FilteredInvalid, filtering.FilteredBlockedService: e.Result = stats.RFiltered diff --git a/internal/filtering/filter.go b/internal/filtering/filter.go index 44dc7f7662d..fa512c2985a 100644 --- a/internal/filtering/filter.go +++ b/internal/filtering/filter.go @@ -1,10 +1,7 @@ package filtering import ( - "bufio" - "bytes" "fmt" - "hash/crc32" "io" "net/http" "os" @@ -14,6 +11,7 @@ import ( "time" "github.com/AdguardTeam/AdGuardHome/internal/aghalg" + "github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist" "github.com/AdguardTeam/golibs/errors" "github.com/AdguardTeam/golibs/log" "github.com/AdguardTeam/golibs/stringutil" @@ -29,9 +27,9 @@ const filterDir = "filters" // TODO(e.burkov): Use more deterministic approach. var nextFilterID = time.Now().Unix() -// FilterYAML respresents a filter list in the configuration file. +// FilterYAML represents a filter list in the configuration file. // -// TODO(e.burkov): Investigate if the field oredering is important. +// TODO(e.burkov): Investigate if the field ordering is important. 
type FilterYAML struct { Enabled bool URL string // URL or a file path @@ -213,7 +211,7 @@ func (d *DNSFilter) loadFilters(array []FilterYAML) { err := d.load(filter) if err != nil { - log.Error("Couldn't load filter %d contents due to %s", filter.ID, err) + log.Error("filtering: loading filter %d: %s", filter.ID, err) } } } @@ -338,7 +336,8 @@ func (d *DNSFilter) refreshFiltersArray(filters *[]FilterYAML, force bool) (int, updateFlags = append(updateFlags, updated) if err != nil { nfail++ - log.Printf("Failed to update filter %s: %s\n", uf.URL, err) + log.Info("filtering: updating filter from url %q: %s\n", uf.URL, err) + continue } } @@ -367,7 +366,13 @@ func (d *DNSFilter) refreshFiltersArray(filters *[]FilterYAML, force bool) (int, continue } - log.Info("Updated filter #%d. Rules: %d -> %d", f.ID, f.RulesCount, uf.RulesCount) + log.Info( + "filtering: updated filter %d; rule count: %d (was %d)", + f.ID, + uf.RulesCount, + f.RulesCount, + ) + f.Name = uf.Name f.RulesCount = uf.RulesCount f.checksum = uf.checksum @@ -397,9 +402,10 @@ func (d *DNSFilter) refreshFiltersArray(filters *[]FilterYAML, force bool) (int, // // TODO(a.garipov, e.burkov): What the hell? func (d *DNSFilter) refreshFiltersIntl(block, allow, force bool) (int, bool) { - log.Debug("filtering: updating...") - updNum := 0 + log.Debug("filtering: starting updating") + defer func() { log.Debug("filtering: finished updating, %d updated", updNum) }() + var lists []FilterYAML var toUpd []bool isNetErr := false @@ -437,131 +443,9 @@ func (d *DNSFilter) refreshFiltersIntl(block, allow, force bool) (int, bool) { } } - log.Debug("filtering: update finished: %d lists updated", updNum) - return updNum, false } -// isPrintableText returns true if data is printable UTF-8 text with CR, LF, TAB -// characters. -// -// TODO(e.burkov): Investigate the purpose of this and improve the -// implementation. Perhaps, use something from the unicode package. 
-func isPrintableText(data string) (ok bool) { - for _, c := range []byte(data) { - if (c >= ' ' && c != 0x7f) || c == '\n' || c == '\r' || c == '\t' { - continue - } - - return false - } - - return true -} - -// scanLinesWithBreak is essentially a [bufio.ScanLines] which keeps trailing -// line breaks. -func scanLinesWithBreak(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - - if i := bytes.IndexByte(data, '\n'); i >= 0 { - return i + 1, data[0 : i+1], nil - } - - if atEOF { - return len(data), data, nil - } - - // Request more data. - return 0, nil, nil -} - -// parseFilter copies filter's content from src to dst and returns the number of -// rules, number of bytes written, checksum, and title of the parsed list. dst -// must not be nil. -func (d *DNSFilter) parseFilter( - src io.Reader, - dst io.Writer, -) (rulesNum, written int, checksum uint32, title string, err error) { - scanner := bufio.NewScanner(src) - scanner.Split(scanLinesWithBreak) - - titleFound := false - for n := 0; scanner.Scan(); written += n { - line := scanner.Text() - var isRule bool - var likelyTitle string - isRule, likelyTitle, err = d.parseFilterLine(line, !titleFound, written == 0) - if err != nil { - return 0, written, 0, "", err - } - - if isRule { - rulesNum++ - } else if likelyTitle != "" { - title, titleFound = likelyTitle, true - } - - checksum = crc32.Update(checksum, crc32.IEEETable, []byte(line)) - - n, err = dst.Write([]byte(line)) - if err != nil { - return 0, written, 0, "", fmt.Errorf("writing filter line: %w", err) - } - } - - if err = scanner.Err(); err != nil { - return 0, written, 0, "", fmt.Errorf("scanning filter contents: %w", err) - } - - return rulesNum, written, checksum, title, nil -} - -// parseFilterLine returns true if the passed line is a rule. line is -// considered a rule if it's not a comment and contains no title. 
-func (d *DNSFilter) parseFilterLine( - line string, - lookForTitle bool, - testHTML bool, -) (isRule bool, title string, err error) { - if !isPrintableText(line) { - return false, "", errors.Error("filter contains non-printable characters") - } - - line = strings.TrimSpace(line) - if line == "" || line[0] == '#' { - return false, "", nil - } - - if testHTML && isHTML(line) { - return false, "", errors.Error("data is HTML, not plain text") - } - - if line[0] == '!' && lookForTitle { - match := d.filterTitleRegexp.FindStringSubmatch(line) - if len(match) > 1 { - title = match[1] - } - - return false, title, nil - } - - return true, "", nil -} - -// isHTML returns true if the line contains HTML tags instead of plain text. -// line shouldn have no leading space symbols. -// -// TODO(ameshkov): It actually gives too much false-positives. Perhaps, just -// check if trimmed string begins with angle bracket. -func isHTML(line string) (ok bool) { - line = strings.ToLower(line) - - return strings.HasPrefix(line, "= log.DEBUG { timer := log.StartTimer() - defer timer.LogElapsed("safebrowsing lookup for %q", host) + defer timer.LogElapsed("filtering: safebrowsing lookup for %q", host) } res = Result{ @@ -1079,7 +1077,7 @@ func (d *DNSFilter) checkParental( if log.GetLevel() >= log.DEBUG { timer := log.StartTimer() - defer timer.LogElapsed("parental lookup for %q", host) + defer timer.LogElapsed("filtering: parental lookup for %q", host) } res = Result{ diff --git a/internal/filtering/filtering_test.go b/internal/filtering/filtering_test.go index 8636606b4b4..e7b55d6f2c6 100644 --- a/internal/filtering/filtering_test.go +++ b/internal/filtering/filtering_test.go @@ -547,7 +547,7 @@ func TestWhitelist(t *testing.T) { }} d, setts := newForTest(t, nil, filters) - err := d.SetFilters(filters, whiteFilters, false) + err := d.setFilters(filters, whiteFilters, false) require.NoError(t, err) t.Cleanup(d.Close) diff --git a/internal/filtering/hashprefix/cache.go 
b/internal/filtering/hashprefix/cache.go index d4211b726bb..190f2116e65 100644 --- a/internal/filtering/hashprefix/cache.go +++ b/internal/filtering/hashprefix/cache.go @@ -47,7 +47,7 @@ func fromCacheItem(item *cacheItem) (data []byte) { data = binary.BigEndian.AppendUint64(data, uint64(expiry)) for _, v := range item.hashes { - // nolint:looppointer // The subsilce is used for a copy. + // nolint:looppointer // The subslice of v is used for a copy. data = append(data, v[:]...) } @@ -63,7 +63,7 @@ func (c *Checker) findInCache( i := 0 for _, hash := range hashes { - // nolint:looppointer // The subsilce is used for a safe cache lookup. + // nolint:looppointer // The hash subslice is used for a cache lookup. data := c.cache.Get(hash[:prefixLen]) if data == nil { hashes[i] = hash @@ -98,34 +98,36 @@ func (c *Checker) storeInCache(hashesToRequest, respHashes []hostnameHash) { for _, hash := range respHashes { var pref prefix - // nolint:looppointer // The subsilce is used for a copy. + // nolint:looppointer // The hash subslice is used for a copy. copy(pref[:], hash[:]) hashToStore[pref] = append(hashToStore[pref], hash) } for pref, hash := range hashToStore { - // nolint:looppointer // The subsilce is used for a safe cache lookup. - c.setCache(pref[:], hash) + c.setCache(pref, hash) } for _, hash := range hashesToRequest { - // nolint:looppointer // The subsilce is used for a safe cache lookup. - pref := hash[:prefixLen] - val := c.cache.Get(pref) + // nolint:looppointer // The hash subslice is used for a cache lookup. + val := c.cache.Get(hash[:prefixLen]) if val == nil { + var pref prefix + // nolint:looppointer // The hash subslice is used for a copy. + copy(pref[:], hash[:]) + c.setCache(pref, nil) } } } // setCache stores hash in cache. 
-func (c *Checker) setCache(pref []byte, hashes []hostnameHash) { +func (c *Checker) setCache(pref prefix, hashes []hostnameHash) { item := &cacheItem{ expiry: time.Now().Add(c.cacheTime), hashes: hashes, } - c.cache.Set(pref, fromCacheItem(item)) + c.cache.Set(pref[:], fromCacheItem(item)) log.Debug("%s: stored in cache: %v", c.svc, pref) } diff --git a/internal/filtering/hashprefix/hashprefix.go b/internal/filtering/hashprefix/hashprefix.go index ed0e3ae25d1..002552544be 100644 --- a/internal/filtering/hashprefix/hashprefix.go +++ b/internal/filtering/hashprefix/hashprefix.go @@ -173,7 +173,7 @@ func (c *Checker) getQuestion(hashes []hostnameHash) (q string) { b := &strings.Builder{} for _, hash := range hashes { - // nolint:looppointer // The subsilce is used for safe hex encoding. + // nolint:looppointer // The hash subslice is used for hex encoding. stringutil.WriteToBuilder(b, hex.EncodeToString(hash[:prefixLen]), ".") } diff --git a/internal/filtering/http.go b/internal/filtering/http.go index 4aef0409df1..8d3f202fadd 100644 --- a/internal/filtering/http.go +++ b/internal/filtering/http.go @@ -95,7 +95,7 @@ func (d *DNSFilter) handleFilteringAddURL(w http.ResponseWriter, r *http.Request r, w, http.StatusBadRequest, - "Couldn't fetch filter from url %s: %s", + "Couldn't fetch filter from URL %q: %s", filt.URL, err, ) diff --git a/internal/filtering/rewrites.go b/internal/filtering/rewrites.go index 3e10da5574e..e3625c844d5 100644 --- a/internal/filtering/rewrites.go +++ b/internal/filtering/rewrites.go @@ -122,7 +122,7 @@ func matchDomainWildcard(host, wildcard string) (ok bool) { return isWildcard(wildcard) && strings.HasSuffix(host, wildcard[1:]) } -// legacyRewriteSortsBefore sorts rewirtes according to the following priority: +// legacyRewriteSortsBefore sorts rewrites according to the following priority: // // 1. A and AAAA > CNAME; // 2. 
wildcard > exact; diff --git a/internal/filtering/rulelist/error.go b/internal/filtering/rulelist/error.go new file mode 100644 index 00000000000..54322da5d2f --- /dev/null +++ b/internal/filtering/rulelist/error.go @@ -0,0 +1,9 @@ +package rulelist + +import "github.com/AdguardTeam/golibs/errors" + +// ErrHTML is returned by [Parser.Parse] if the data is likely to be HTML. +// +// TODO(a.garipov): This error is currently returned to the UI. Stop that and +// make it all-lowercase. +const ErrHTML errors.Error = "data is HTML, not plain text" diff --git a/internal/filtering/rulelist/parser.go b/internal/filtering/rulelist/parser.go new file mode 100644 index 00000000000..24d19b9c15c --- /dev/null +++ b/internal/filtering/rulelist/parser.go @@ -0,0 +1,191 @@ +package rulelist + +import ( + "bufio" + "bytes" + "fmt" + "hash/crc32" + "io" + + "github.com/AdguardTeam/golibs/errors" + "golang.org/x/exp/slices" +) + +// Parser is a filtering-rule parser that collects data, such as the checksum +// and the title, as well as counts rules and removes comments. +type Parser struct { + title string + rulesCount int + written int + checksum uint32 + titleFound bool +} + +// NewParser returns a new filtering-rule parser. +func NewParser() (p *Parser) { + return &Parser{} +} + +// ParseResult contains information about the results of parsing a +// filtering-rule list by [Parser.Parse]. +type ParseResult struct { + // Title is the title contained within the filtering-rule list, if any. + Title string + + // RulesCount is the number of rules in the list. It excludes empty lines + // and comments. + RulesCount int + + // BytesWritten is the number of bytes written to dst. + BytesWritten int + + // Checksum is the CRC-32 checksum of the rules content. That is, excluding + // empty lines and comments. + Checksum uint32 +} + +// Parse parses data from src into dst using buf during parsing. r is never +// nil. 
+func (p *Parser) Parse(dst io.Writer, src io.Reader, buf []byte) (r *ParseResult, err error) { + s := bufio.NewScanner(src) + + // Don't use [DefaultRuleBufSize] as the maximum size, since some + // filtering-rule lists compressed by e.g. HostlistsCompiler can have very + // large lines. The buffer optimization still works for the more common + // case of reasonably-sized lines. + // + // See https://github.com/AdguardTeam/AdGuardHome/issues/6003. + s.Buffer(buf, bufio.MaxScanTokenSize) + + // Use a one-based index for lines and columns, since these errors end up in + // the frontend, and users are more familiar with one-based line and column + // indexes. + lineNum := 1 + for s.Scan() { + var n int + n, err = p.processLine(dst, s.Bytes(), lineNum) + p.written += n + if err != nil { + // Don't wrap the error, because it's informative enough as is. + return p.result(), err + } + + lineNum++ + } + + r = p.result() + err = s.Err() + + return r, errors.Annotate(err, "scanning filter contents: %w") +} + +// result returns the current parsing result. +func (p *Parser) result() (r *ParseResult) { + return &ParseResult{ + Title: p.title, + RulesCount: p.rulesCount, + BytesWritten: p.written, + Checksum: p.checksum, + } +} + +// processLine processes a single line. It may write to dst, and if it does, n +// is the number of bytes written. 
+func (p *Parser) processLine(dst io.Writer, line []byte, lineNum int) (n int, err error) { + trimmed := bytes.TrimSpace(line) + if p.written == 0 && isHTMLLine(trimmed) { + return 0, ErrHTML + } + + badIdx, isRule := 0, false + if p.titleFound { + badIdx, isRule = parseLine(trimmed) + } else { + badIdx, isRule = p.parseLineTitle(trimmed) + } + if badIdx != -1 { + return 0, fmt.Errorf( + "line %d: character %d: likely binary character %q", + lineNum, + badIdx+bytes.Index(line, trimmed)+1, + trimmed[badIdx], + ) + } + + if !isRule { + return 0, nil + } + + p.rulesCount++ + p.checksum = crc32.Update(p.checksum, crc32.IEEETable, trimmed) + + // Assume that there is generally enough space in the buffer to add a + // newline. + n, err = dst.Write(append(trimmed, '\n')) + + return n, errors.Annotate(err, "writing rule line: %w") +} + +// isHTMLLine returns true if line is likely an HTML line. line is assumed to +// be trimmed of whitespace characters. +func isHTMLLine(line []byte) (isHTML bool) { + return hasPrefixFold(line, []byte("<html")) || hasPrefixFold(line, []byte("<!doctype")) +} + +// hasPrefixFold is like [bytes.HasPrefix] but uses [bytes.EqualFold]. +func hasPrefixFold(b, prefix []byte) (ok bool) { + l := len(prefix) + + return len(b) >= l && bytes.EqualFold(b[:l], prefix) +} + +// parseLine returns true if the parsed line is a filtering rule. line is +// assumed to be trimmed of whitespace characters. badIdx is the index of the +// first character that may indicate that this is a binary file, or -1 if none. +// +// A line is considered a rule if it's not empty, not a comment, and contains +// only printable characters. +func parseLine(line []byte) (badIdx int, isRule bool) { + if len(line) == 0 || line[0] == '#' || line[0] == '!' { + return -1, false + } + + badIdx = slices.IndexFunc(line, likelyBinary) + + return badIdx, badIdx == -1 +} + +// likelyBinary returns true if b is likely to be a byte from a binary file. +func likelyBinary(b byte) (ok bool) { + return (b < ' ' || b == 0x7f) && b != '\n' && b != '\r' && b != '\t' +} + +// parseLineTitle is like [parseLine] but additionally looks for a title. line +// is assumed to be trimmed of whitespace characters. 
+func (p *Parser) parseLineTitle(line []byte) (badIdx int, isRule bool) { + if len(line) == 0 || line[0] == '#' { + return -1, false + } + + if line[0] != '!' { + badIdx = slices.IndexFunc(line, likelyBinary) + + return badIdx, badIdx == -1 + } + + const titlePattern = "! Title: " + if !bytes.HasPrefix(line, []byte(titlePattern)) { + return -1, false + } + + title := bytes.TrimSpace(line[len(titlePattern):]) + if title != nil { + // Note that title can be a non-nil empty slice. Consider that normal + // and just stop looking for other titles. + p.title = string(title) + p.titleFound = true + } + + return -1, false +} diff --git a/internal/filtering/rulelist/parser_test.go b/internal/filtering/rulelist/parser_test.go new file mode 100644 index 00000000000..3ca3565d6e2 --- /dev/null +++ b/internal/filtering/rulelist/parser_test.go @@ -0,0 +1,276 @@ +package rulelist_test + +import ( + "bufio" + "bytes" + "strings" + "testing" + + "github.com/AdguardTeam/AdGuardHome/internal/aghtest" + "github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist" + "github.com/AdguardTeam/golibs/errors" + "github.com/AdguardTeam/golibs/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestParser_Parse(t *testing.T) { + t.Parallel() + + longRule := strings.Repeat("a", rulelist.DefaultRuleBufSize+1) + "\n" + tooLongRule := strings.Repeat("a", bufio.MaxScanTokenSize+1) + "\n" + + testCases := []struct { + name string + in string + wantDst string + wantErrMsg string + wantTitle string + wantRulesNum int + wantWritten int + }{{ + name: "empty", + in: "", + wantDst: "", + wantErrMsg: "", + wantTitle: "", + wantRulesNum: 0, + wantWritten: 0, + }, { + name: "html", + in: testRuleTextHTML, + wantErrMsg: rulelist.ErrHTML.Error(), + wantTitle: "", + wantRulesNum: 0, + wantWritten: 0, + }, { + name: "comments", + in: "# Comment 1\n" + + "! 
Comment 2\n", + wantErrMsg: "", + wantTitle: "", + wantRulesNum: 0, + wantWritten: 0, + }, { + name: "rule", + in: testRuleTextBlocked, + wantDst: testRuleTextBlocked, + wantErrMsg: "", + wantRulesNum: 1, + wantTitle: "", + wantWritten: len(testRuleTextBlocked), + }, { + name: "html_in_rule", + in: testRuleTextBlocked + testRuleTextHTML, + wantDst: testRuleTextBlocked + testRuleTextHTML, + wantErrMsg: "", + wantTitle: "", + wantRulesNum: 2, + wantWritten: len(testRuleTextBlocked) + len(testRuleTextHTML), + }, { + name: "title", + in: "! Title: Test Title \n" + + "! Title: Bad, Ignored Title\n" + + testRuleTextBlocked, + wantDst: testRuleTextBlocked, + wantErrMsg: "", + wantTitle: "Test Title", + wantRulesNum: 1, + wantWritten: len(testRuleTextBlocked), + }, { + name: "cosmetic_with_zwnj", + in: testRuleTextCosmetic, + wantDst: testRuleTextCosmetic, + wantErrMsg: "", + wantTitle: "", + wantRulesNum: 1, + wantWritten: len(testRuleTextCosmetic), + }, { + name: "bad_char", + in: "! Title: Test Title \n" + + testRuleTextBlocked + + ">>>\x7F<<<", + wantDst: testRuleTextBlocked, + wantErrMsg: "line 3: " + + "character 4: " + + "likely binary character '\\x7f'", + wantTitle: "Test Title", + wantRulesNum: 1, + wantWritten: len(testRuleTextBlocked), + }, { + name: "too_long", + in: tooLongRule, + wantDst: "", + wantErrMsg: "scanning filter contents: bufio.Scanner: token too long", + wantTitle: "", + wantRulesNum: 0, + wantWritten: 0, + }, { + name: "longer_than_default", + in: longRule, + wantDst: longRule, + wantErrMsg: "", + wantTitle: "", + wantRulesNum: 1, + wantWritten: len(longRule), + }, { + name: "bad_tab_and_comment", + in: testRuleTextBadTab, + wantDst: testRuleTextBadTab, + wantErrMsg: "", + wantTitle: "", + wantRulesNum: 1, + wantWritten: len(testRuleTextBadTab), + }, { + name: "etc_hosts_tab_and_comment", + in: testRuleTextEtcHostsTab, + wantDst: testRuleTextEtcHostsTab, + wantErrMsg: "", + wantTitle: "", + wantRulesNum: 1, + wantWritten: 
len(testRuleTextEtcHostsTab), + }} + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + dst := &bytes.Buffer{} + buf := make([]byte, rulelist.DefaultRuleBufSize) + + p := rulelist.NewParser() + r, err := p.Parse(dst, strings.NewReader(tc.in), buf) + require.NotNil(t, r) + + testutil.AssertErrorMsg(t, tc.wantErrMsg, err) + assert.Equal(t, tc.wantDst, dst.String()) + assert.Equal(t, tc.wantTitle, r.Title) + assert.Equal(t, tc.wantRulesNum, r.RulesCount) + assert.Equal(t, tc.wantWritten, r.BytesWritten) + + if tc.wantWritten > 0 { + assert.NotZero(t, r.Checksum) + } + }) + } +} + +func TestParser_Parse_writeError(t *testing.T) { + t.Parallel() + + dst := &aghtest.Writer{ + OnWrite: func(b []byte) (n int, err error) { + return 1, errors.Error("test error") + }, + } + buf := make([]byte, rulelist.DefaultRuleBufSize) + + p := rulelist.NewParser() + r, err := p.Parse(dst, strings.NewReader(testRuleTextBlocked), buf) + require.NotNil(t, r) + + testutil.AssertErrorMsg(t, "writing rule line: test error", err) + assert.Equal(t, 1, r.BytesWritten) +} + +func TestParser_Parse_checksums(t *testing.T) { + t.Parallel() + + const ( + withoutComments = testRuleTextBlocked + withComments = "! 
Some comment.\n" + + " " + testRuleTextBlocked + + "# Another comment.\n" + ) + + buf := make([]byte, rulelist.DefaultRuleBufSize) + + p := rulelist.NewParser() + r, err := p.Parse(&bytes.Buffer{}, strings.NewReader(withoutComments), buf) + require.NotNil(t, r) + require.NoError(t, err) + + gotWithoutComments := r.Checksum + + p = rulelist.NewParser() + + r, err = p.Parse(&bytes.Buffer{}, strings.NewReader(withComments), buf) + require.NotNil(t, r) + require.NoError(t, err) + + gotWithComments := r.Checksum + assert.Equal(t, gotWithoutComments, gotWithComments) +} + +var ( + resSink *rulelist.ParseResult + errSink error +) + +func BenchmarkParser_Parse(b *testing.B) { + dst := &bytes.Buffer{} + src := strings.NewReader(strings.Repeat(testRuleTextBlocked, 1000)) + buf := make([]byte, rulelist.DefaultRuleBufSize) + p := rulelist.NewParser() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + resSink, errSink = p.Parse(dst, src, buf) + dst.Reset() + } + + require.NoError(b, errSink) + require.NotNil(b, resSink) + + // Most recent result, on a ThinkPad X13 with a Ryzen Pro 7 CPU: + // + // goos: linux + // goarch: amd64 + // pkg: github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist + // cpu: AMD Ryzen 7 PRO 4750U with Radeon Graphics + // BenchmarkParser_Parse-16 100000000 128.0 ns/op 48 B/op 1 allocs/op +} + +func FuzzParser_Parse(f *testing.F) { + const n = 64 + + testCases := []string{ + "", + "# Comment", + "! Comment", + "! Title ", + "! 
Title XXX", + testRuleTextBadTab, + testRuleTextBlocked, + testRuleTextCosmetic, + testRuleTextEtcHostsTab, + testRuleTextHTML, + "1.2.3.4", + "1.2.3.4 etc-hosts.example", + ">>>\x00<<<", + ">>>\x7F<<<", + strings.Repeat("a", rulelist.DefaultRuleBufSize+1), + strings.Repeat("a", bufio.MaxScanTokenSize+1), + } + + for _, tc := range testCases { + f.Add(tc) + } + + buf := make([]byte, n) + + f.Fuzz(func(t *testing.T, input string) { + require.Eventually(t, func() (ok bool) { + dst := &bytes.Buffer{} + src := strings.NewReader(input) + + p := rulelist.NewParser() + r, _ := p.Parse(dst, src, buf) + require.NotNil(t, r) + + return true + }, testTimeout, testTimeout/100) + }) +} diff --git a/internal/filtering/rulelist/rulelist.go b/internal/filtering/rulelist/rulelist.go new file mode 100644 index 00000000000..464650a1f20 --- /dev/null +++ b/internal/filtering/rulelist/rulelist.go @@ -0,0 +1,9 @@ +// Package rulelist contains the implementation of the standard rule-list +// filter that wraps an urlfilter filtering-engine. +// +// TODO(a.garipov): Expand. +package rulelist + +// DefaultRuleBufSize is the default length of a buffer used to read a line with +// a filtering rule, in bytes. +const DefaultRuleBufSize = 1024 diff --git a/internal/filtering/rulelist/rulelist_test.go b/internal/filtering/rulelist/rulelist_test.go new file mode 100644 index 00000000000..aec6f33bafe --- /dev/null +++ b/internal/filtering/rulelist/rulelist_test.go @@ -0,0 +1,19 @@ +package rulelist_test + +import "time" + +// testTimeout is the common timeout for tests. +const testTimeout = 1 * time.Second + +// Common texts for tests. +const ( + testRuleTextBadTab = "||bad-tab-and-comment.example^\t# A comment.\n" + testRuleTextBlocked = "||blocked.example^\n" + testRuleTextEtcHostsTab = "0.0.0.0 tab..example^\t# A comment.\n" + testRuleTextHTML = "<!DOCTYPE html>\n" + + // testRuleTextCosmetic is a cosmetic rule with a zero-width non-joiner. 
+ // + // See https://github.com/AdguardTeam/AdGuardHome/issues/6003. + testRuleTextCosmetic = "||cosmetic.example## :has-text(/\u200c/i)\n" +) diff --git a/internal/filtering/servicelist.go b/internal/filtering/servicelist.go index 66acb108842..56988193f6f 100644 --- a/internal/filtering/servicelist.go +++ b/internal/filtering/servicelist.go @@ -1505,6 +1505,7 @@ var blockedServices = []blockedService{{ "||aus.social^", "||awscommunity.social^", "||climatejustice.social^", + "||cupoftea.social^", "||cyberplace.social^", "||defcon.social^", "||det.social^", @@ -1530,6 +1531,7 @@ var blockedServices = []blockedService{{ "||masto.pt^", "||mastodon.au^", "||mastodon.bida.im^", + "||mastodon.com.tr^", "||mastodon.eus^", "||mastodon.green^", "||mastodon.ie^", @@ -1551,11 +1553,11 @@ var blockedServices = []blockedService{{ "||mastodont.cat^", "||mastodontech.de^", "||mastodontti.fi^", - "||mastouille.fr^", "||mathstodon.xyz^", "||metalhead.club^", "||mindly.social^", "||mstdn.ca^", + "||mstdn.jp^", "||mstdn.party^", "||mstdn.plus^", "||mstdn.social^", @@ -1567,7 +1569,6 @@ var blockedServices = []blockedService{{ "||nrw.social^", "||o3o.ca^", "||ohai.social^", - "||pewtix.com^", "||piaille.fr^", "||pol.social^", "||ravenation.club^", @@ -1582,20 +1583,19 @@ var blockedServices = []blockedService{{ "||social.linux.pizza^", "||social.politicaconciencia.org^", "||social.vivaldi.net^", - "||sself.co^", "||stranger.social^", "||sueden.social^", "||tech.lgbt^", "||techhub.social^", "||theblower.au^", "||tkz.one^", - "||todon.eu^", "||toot.aquilenet.fr^", "||toot.community^", "||toot.funami.tech^", "||toot.io^", "||toot.wales^", "||troet.cafe^", + "||twingyeo.kr^", "||union.place^", "||universeodon.com^", "||urbanists.social^", diff --git a/internal/home/config.go b/internal/home/config.go index b7ac5bcc0b9..60cbe621518 100644 --- a/internal/home/config.go +++ b/internal/home/config.go @@ -30,32 +30,30 @@ import ( const dataDir = "data" // logSettings are the logging 
settings part of the configuration file. -// -// TODO(a.garipov): Put them into a separate object. type logSettings struct { // File is the path to the log file. If empty, logs are written to stdout. // If "syslog", logs are written to syslog. - File string `yaml:"log_file"` + File string `yaml:"file"` // MaxBackups is the maximum number of old log files to retain. // // NOTE: MaxAge may still cause them to get deleted. - MaxBackups int `yaml:"log_max_backups"` + MaxBackups int `yaml:"max_backups"` // MaxSize is the maximum size of the log file before it gets rotated, in // megabytes. The default value is 100 MB. - MaxSize int `yaml:"log_max_size"` + MaxSize int `yaml:"max_size"` // MaxAge is the maximum duration for retaining old log files, in days. - MaxAge int `yaml:"log_max_age"` + MaxAge int `yaml:"max_age"` // Compress determines, if the rotated log files should be compressed using // gzip. - Compress bool `yaml:"log_compress"` + Compress bool `yaml:"compress"` // LocalTime determines, if the time used for formatting the timestamps in // is the computer's local time. - LocalTime bool `yaml:"log_localtime"` + LocalTime bool `yaml:"local_time"` // Verbose determines, if verbose (aka debug) logging is enabled. Verbose bool `yaml:"verbose"` @@ -142,7 +140,8 @@ type configuration struct { // Keep this field sorted to ensure consistent ordering. Clients *clientsConfig `yaml:"clients"` - logSettings `yaml:",inline"` + // Log is a block with log configuration settings. + Log logSettings `yaml:"log"` OSConfig *osConfig `yaml:"os"` @@ -241,6 +240,7 @@ type tlsConfigSettings struct { type queryLogConfig struct { // Ignored is the list of host names, which should not be written to log. + // "." is considered to be the root domain. Ignored []string `yaml:"ignored"` // Interval is the interval for query log's files rotation. 
@@ -390,7 +390,7 @@ var config = &configuration{ HostsFile: true, }, }, - logSettings: logSettings{ + Log: logSettings{ Compress: false, LocalTime: false, MaxBackups: 0, @@ -421,19 +421,19 @@ func (c *configuration) getConfigFilename() string { // separate method in order to configure logger before the actual configuration // is parsed and applied. func readLogSettings() (ls *logSettings) { - ls = &logSettings{} + conf := &configuration{} yamlFile, err := readConfigFile() if err != nil { - return ls + return &logSettings{} } - err = yaml.Unmarshal(yamlFile, ls) + err = yaml.Unmarshal(yamlFile, conf) if err != nil { log.Error("Couldn't get logging settings from the configuration: %s", err) } - return ls + return &conf.Log } // validateBindHosts returns error if any of binding hosts from configuration is diff --git a/internal/home/dns.go b/internal/home/dns.go index 3a37f751db4..fbbda42369a 100644 --- a/internal/home/dns.go +++ b/internal/home/dns.go @@ -17,6 +17,7 @@ import ( "github.com/AdguardTeam/AdGuardHome/internal/dnsforward" "github.com/AdguardTeam/AdGuardHome/internal/filtering" "github.com/AdguardTeam/AdGuardHome/internal/querylog" + "github.com/AdguardTeam/AdGuardHome/internal/rdns" "github.com/AdguardTeam/AdGuardHome/internal/stats" "github.com/AdguardTeam/AdGuardHome/internal/whois" "github.com/AdguardTeam/dnsproxy/proxy" @@ -167,30 +168,77 @@ func initDNSServer( return fmt.Errorf("dnsServer.Prepare: %w", err) } - if config.Clients.Sources.RDNS { - Context.rdns = NewRDNS(Context.dnsServer, &Context.clients, config.DNS.UsePrivateRDNS) - } - + initRDNS() initWHOIS() return nil } +const ( + // defaultQueueSize is the size of queue of IPs for rDNS and WHOIS + // processing. + defaultQueueSize = 255 + + // defaultCacheSize is the maximum size of the cache for rDNS and WHOIS + // processing. It must be greater than zero. 
+ defaultCacheSize = 10_000 + + // defaultIPTTL is the Time to Live duration for IP addresses cached by + // rDNS and WHOIS. + defaultIPTTL = 1 * time.Hour +) + +// initRDNS initializes the rDNS. +func initRDNS() { + Context.rdnsCh = make(chan netip.Addr, defaultQueueSize) + + // TODO(s.chzhen): Add ability to disable it on dns server configuration + // update in [dnsforward] package. + r := rdns.New(&rdns.Config{ + Exchanger: Context.dnsServer, + CacheSize: defaultCacheSize, + CacheTTL: defaultIPTTL, + }) + + go processRDNS(r) +} + +// processRDNS processes reverse DNS lookup queries. It is intended to be used +// as a goroutine. +func processRDNS(r rdns.Interface) { + defer log.OnPanic("rdns") + + for ip := range Context.rdnsCh { + ok := Context.dnsServer.ShouldResolveClient(ip) + if !ok { + continue + } + + host, changed := r.Process(ip) + if host == "" || !changed { + continue + } + + ok = Context.clients.AddHost(ip, host, ClientSourceRDNS) + if ok { + continue + } + + log.Debug( + "dns: can't set rdns info for client %q: already set with higher priority source", + ip, + ) + } +} + // initWHOIS initializes the WHOIS. // // TODO(s.chzhen): Consider making configurable. func initWHOIS() { const ( - // defaultQueueSize is the size of queue of IPs for WHOIS processing. - defaultQueueSize = 255 - // defaultTimeout is the timeout for WHOIS requests. defaultTimeout = 5 * time.Second - // defaultCacheSize is the maximum size of the cache. If it's zero, - // cache size is unlimited. - defaultCacheSize = 10_000 - // defaultMaxConnReadSize is an upper limit in bytes for reading from // net.Conn. defaultMaxConnReadSize = 64 * 1024 @@ -200,9 +248,6 @@ func initWHOIS() { // defaultMaxInfoLen is the maximum length of whois.Info fields. defaultMaxInfoLen = 250 - - // defaultIPTTL is the Time to Live duration for cached IP addresses. 
- defaultIPTTL = 1 * time.Hour ) Context.whoisCh = make(chan netip.Addr, defaultQueueSize) @@ -274,11 +319,7 @@ func onDNSRequest(pctx *proxy.DNSContext) { return } - srcs := config.Clients.Sources - if srcs.RDNS && !ip.IsLoopback() { - Context.rdns.Begin(ip) - } - + Context.rdnsCh <- ip Context.whoisCh <- ip } @@ -517,11 +558,7 @@ func startDNSServer() error { const topClientsNumber = 100 // the number of clients to get for _, ip := range Context.stats.TopClientsIP(topClientsNumber) { - srcs := config.Clients.Sources - if srcs.RDNS && !ip.IsLoopback() { - Context.rdns.Begin(ip) - } - + Context.rdnsCh <- ip Context.whoisCh <- ip } diff --git a/internal/home/home.go b/internal/home/home.go index 572168fd586..bdc0f86c1d9 100644 --- a/internal/home/home.go +++ b/internal/home/home.go @@ -56,7 +56,6 @@ type homeContext struct { stats stats.Interface // statistics module queryLog querylog.QueryLog // query log module dnsServer *dnsforward.Server // DNS module - rdns *RDNS // rDNS module dhcpServer dhcpd.Interface // DHCP module auth *Auth // HTTP authentication module filters *filtering.DNSFilter // DNS filtering module @@ -83,6 +82,9 @@ type homeContext struct { client *http.Client appSignalChannel chan os.Signal // Channel for receiving OS signals by the console app + // rdnsCh is the channel for receiving IPs for rDNS processing. + rdnsCh chan netip.Addr + // whoisCh is the channel for receiving IPs for WHOIS processing. whoisCh chan netip.Addr @@ -468,7 +470,7 @@ func setupDNSFilteringConf(conf *filtering.Config) (err error) { ServiceName: pcService, TXTSuffix: pcTXTSuffix, CacheTime: cacheTime, - CacheSize: conf.SafeBrowsingCacheSize, + CacheSize: conf.ParentalCacheSize, }) conf.SafeSearchConf.CustomResolver = safeSearchResolver{} @@ -829,20 +831,21 @@ func configureLogger(opts options) (err error) { // getLogSettings returns a log settings object properly initialized from opts. 
func getLogSettings(opts options) (ls *logSettings) { ls = readLogSettings() + configLogSettings := config.Log // Command-line arguments can override config settings. - if opts.verbose || config.Verbose { + if opts.verbose || configLogSettings.Verbose { ls.Verbose = true } - ls.File = stringutil.Coalesce(opts.logFile, config.File, ls.File) + ls.File = stringutil.Coalesce(opts.logFile, configLogSettings.File, ls.File) // Handle default log settings overrides. - ls.Compress = config.Compress - ls.LocalTime = config.LocalTime - ls.MaxBackups = config.MaxBackups - ls.MaxSize = config.MaxSize - ls.MaxAge = config.MaxAge + ls.Compress = configLogSettings.Compress + ls.LocalTime = configLogSettings.LocalTime + ls.MaxBackups = configLogSettings.MaxBackups + ls.MaxSize = configLogSettings.MaxSize + ls.MaxAge = configLogSettings.MaxAge if opts.runningAsService && ls.File == "" && runtime.GOOS == "windows" { // When running as a Windows service, use eventlog by default if diff --git a/internal/home/rdns.go b/internal/home/rdns.go deleted file mode 100644 index cae7a9c3e45..00000000000 --- a/internal/home/rdns.go +++ /dev/null @@ -1,143 +0,0 @@ -package home - -import ( - "encoding/binary" - "net/netip" - "sync/atomic" - "time" - - "github.com/AdguardTeam/AdGuardHome/internal/dnsforward" - "github.com/AdguardTeam/golibs/cache" - "github.com/AdguardTeam/golibs/errors" - "github.com/AdguardTeam/golibs/log" -) - -// RDNS resolves clients' addresses to enrich their metadata. -type RDNS struct { - exchanger dnsforward.RDNSExchanger - clients *clientsContainer - - // ipCh used to pass client's IP to rDNS workerLoop. - ipCh chan netip.Addr - - // ipCache caches the IP addresses to be resolved by rDNS. The resolved - // address stays here while it's inside clients. After leaving clients the - // address will be resolved once again. If the address couldn't be - // resolved, cache prevents further attempts to resolve it for some time. 
- ipCache cache.Cache - - // usePrivate stores the state of current private reverse-DNS resolving - // settings. - usePrivate atomic.Bool -} - -// Default AdGuard Home reverse DNS values. -const ( - revDNSCacheSize = 10000 - - // TODO(e.burkov): Make these values configurable. - revDNSCacheTTL = 24 * 60 * 60 - revDNSFailureCacheTTL = 1 * 60 * 60 - - revDNSQueueSize = 256 -) - -// NewRDNS creates and returns initialized RDNS. -func NewRDNS( - exchanger dnsforward.RDNSExchanger, - clients *clientsContainer, - usePrivate bool, -) (rDNS *RDNS) { - rDNS = &RDNS{ - exchanger: exchanger, - clients: clients, - ipCache: cache.New(cache.Config{ - EnableLRU: true, - MaxCount: revDNSCacheSize, - }), - ipCh: make(chan netip.Addr, revDNSQueueSize), - } - - rDNS.usePrivate.Store(usePrivate) - - go rDNS.workerLoop() - - return rDNS -} - -// ensurePrivateCache ensures that the state of the RDNS cache is consistent -// with the current private client RDNS resolving settings. -// -// TODO(e.burkov): Clearing cache each time this value changed is not a perfect -// approach since only unresolved locally-served addresses should be removed. -// Implement when improving the cache. -func (r *RDNS) ensurePrivateCache() { - usePrivate := r.exchanger.ResolvesPrivatePTR() - if r.usePrivate.CompareAndSwap(!usePrivate, usePrivate) { - r.ipCache.Clear() - } -} - -// isCached returns true if ip is already cached and not expired yet. It also -// caches it otherwise. -func (r *RDNS) isCached(ip netip.Addr) (ok bool) { - ipBytes := ip.AsSlice() - now := uint64(time.Now().Unix()) - if expire := r.ipCache.Get(ipBytes); len(expire) != 0 { - return binary.BigEndian.Uint64(expire) > now - } - - return false -} - -// cache caches the ip address for ttl seconds. 
-func (r *RDNS) cache(ip netip.Addr, ttl uint64) { - ipData := ip.AsSlice() - - ttlData := [8]byte{} - binary.BigEndian.PutUint64(ttlData[:], uint64(time.Now().Unix())+ttl) - - r.ipCache.Set(ipData, ttlData[:]) -} - -// Begin adds the ip to the resolving queue if it is not cached or already -// resolved. -func (r *RDNS) Begin(ip netip.Addr) { - r.ensurePrivateCache() - - if r.isCached(ip) || r.clients.clientSource(ip) > ClientSourceRDNS { - return - } - - select { - case r.ipCh <- ip: - log.Debug("rdns: %q added to queue", ip) - default: - log.Debug("rdns: queue is full") - } -} - -// workerLoop handles incoming IP addresses from ipChan and adds it into -// clients. -func (r *RDNS) workerLoop() { - defer log.OnPanic("rdns") - - for ip := range r.ipCh { - ttl := uint64(revDNSCacheTTL) - - host, err := r.exchanger.Exchange(ip.AsSlice()) - if err != nil { - log.Debug("rdns: resolving %q: %s", ip, err) - if errors.Is(err, dnsforward.ErrRDNSFailed) { - // Cache failure for a less time. - ttl = revDNSFailureCacheTTL - } - } - - r.cache(ip, ttl) - - if host != "" { - _ = r.clients.AddHost(ip, host, ClientSourceRDNS) - } - } -} diff --git a/internal/home/rdns_test.go b/internal/home/rdns_test.go deleted file mode 100644 index 5582bf5b276..00000000000 --- a/internal/home/rdns_test.go +++ /dev/null @@ -1,264 +0,0 @@ -package home - -import ( - "bytes" - "encoding/binary" - "fmt" - "net" - "net/netip" - "sync" - "testing" - "time" - - "github.com/AdguardTeam/AdGuardHome/internal/aghalg" - "github.com/AdguardTeam/AdGuardHome/internal/aghtest" - "github.com/AdguardTeam/dnsproxy/upstream" - "github.com/AdguardTeam/golibs/cache" - "github.com/AdguardTeam/golibs/log" - "github.com/AdguardTeam/golibs/netutil" - "github.com/AdguardTeam/golibs/stringutil" - "github.com/miekg/dns" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRDNS_Begin(t *testing.T) { - aghtest.ReplaceLogLevel(t, log.DEBUG) - w := 
&bytes.Buffer{} - aghtest.ReplaceLogWriter(t, w) - - ip1234, ip1235 := netip.MustParseAddr("1.2.3.4"), netip.MustParseAddr("1.2.3.5") - - testCases := []struct { - cliIDIndex map[string]*Client - customChan chan netip.Addr - name string - wantLog string - ip netip.Addr - wantCacheHit int - wantCacheMiss int - }{{ - cliIDIndex: map[string]*Client{}, - customChan: nil, - name: "cached", - wantLog: "", - ip: ip1234, - wantCacheHit: 1, - wantCacheMiss: 0, - }, { - cliIDIndex: map[string]*Client{}, - customChan: nil, - name: "not_cached", - wantLog: "rdns: queue is full", - ip: ip1235, - wantCacheHit: 0, - wantCacheMiss: 1, - }, { - cliIDIndex: map[string]*Client{"1.2.3.5": {}}, - customChan: nil, - name: "already_in_clients", - wantLog: "", - ip: ip1235, - wantCacheHit: 0, - wantCacheMiss: 1, - }, { - cliIDIndex: map[string]*Client{}, - customChan: make(chan netip.Addr, 1), - name: "add_to_queue", - wantLog: `rdns: "1.2.3.5" added to queue`, - ip: ip1235, - wantCacheHit: 0, - wantCacheMiss: 1, - }} - - for _, tc := range testCases { - w.Reset() - - ipCache := cache.New(cache.Config{ - EnableLRU: true, - MaxCount: revDNSCacheSize, - }) - ttl := make([]byte, binary.Size(uint64(0))) - binary.BigEndian.PutUint64(ttl, uint64(time.Now().Add(100*time.Hour).Unix())) - - rdns := &RDNS{ - ipCache: ipCache, - exchanger: &rDNSExchanger{ - ex: aghtest.NewErrorUpstream(), - }, - clients: &clientsContainer{ - list: map[string]*Client{}, - idIndex: tc.cliIDIndex, - ipToRC: map[netip.Addr]*RuntimeClient{}, - allTags: stringutil.NewSet(), - }, - } - ipCache.Clear() - ipCache.Set(net.IP{1, 2, 3, 4}, ttl) - - if tc.customChan != nil { - rdns.ipCh = tc.customChan - defer close(tc.customChan) - } - - t.Run(tc.name, func(t *testing.T) { - rdns.Begin(tc.ip) - assert.Equal(t, tc.wantCacheHit, ipCache.Stats().Hit) - assert.Equal(t, tc.wantCacheMiss, ipCache.Stats().Miss) - assert.Contains(t, w.String(), tc.wantLog) - }) - } -} - -// rDNSExchanger is a mock dnsforward.RDNSExchanger 
implementation for tests. -type rDNSExchanger struct { - ex upstream.Upstream - usePrivate bool -} - -// Exchange implements dnsforward.RDNSExchanger interface for *RDNSExchanger. -func (e *rDNSExchanger) Exchange(ip net.IP) (host string, err error) { - rev, err := netutil.IPToReversedAddr(ip) - if err != nil { - return "", fmt.Errorf("reversing ip: %w", err) - } - - req := &dns.Msg{ - Question: []dns.Question{{ - Name: dns.Fqdn(rev), - Qclass: dns.ClassINET, - Qtype: dns.TypePTR, - }}, - } - - resp, err := e.ex.Exchange(req) - if err != nil { - return "", err - } - - if len(resp.Answer) == 0 { - return "", nil - } - - return resp.Answer[0].Header().Name, nil -} - -// Exchange implements dnsforward.RDNSExchanger interface for *RDNSExchanger. -func (e *rDNSExchanger) ResolvesPrivatePTR() (ok bool) { - return e.usePrivate -} - -func TestRDNS_ensurePrivateCache(t *testing.T) { - data := []byte{1, 2, 3, 4} - - ipCache := cache.New(cache.Config{ - EnableLRU: true, - MaxCount: revDNSCacheSize, - }) - - ex := &rDNSExchanger{ - ex: aghtest.NewErrorUpstream(), - } - - rdns := &RDNS{ - ipCache: ipCache, - exchanger: ex, - } - - rdns.ipCache.Set(data, data) - require.NotZero(t, rdns.ipCache.Stats().Count) - - ex.usePrivate = !ex.usePrivate - - rdns.ensurePrivateCache() - require.Zero(t, rdns.ipCache.Stats().Count) -} - -func TestRDNS_WorkerLoop(t *testing.T) { - aghtest.ReplaceLogLevel(t, log.DEBUG) - w := &bytes.Buffer{} - aghtest.ReplaceLogWriter(t, w) - - localIP := netip.MustParseAddr("192.168.1.1") - revIPv4, err := netutil.IPToReversedAddr(localIP.AsSlice()) - require.NoError(t, err) - - revIPv6, err := netutil.IPToReversedAddr(net.ParseIP("2a00:1450:400c:c06::93")) - require.NoError(t, err) - - locUpstream := &aghtest.UpstreamMock{ - OnAddress: func() (addr string) { return "local.upstream.example" }, - OnExchange: func(req *dns.Msg) (resp *dns.Msg, err error) { - return aghalg.Coalesce( - aghtest.MatchedResponse(req, dns.TypePTR, revIPv4, "local.domain"), - 
aghtest.MatchedResponse(req, dns.TypePTR, revIPv6, "ipv6.domain"), - new(dns.Msg).SetRcode(req, dns.RcodeNameError), - ), nil - }, - } - - errUpstream := aghtest.NewErrorUpstream() - - testCases := []struct { - ups upstream.Upstream - cliIP netip.Addr - wantLog string - name string - wantClientSource clientSource - }{{ - ups: locUpstream, - cliIP: localIP, - wantLog: "", - name: "all_good", - wantClientSource: ClientSourceRDNS, - }, { - ups: errUpstream, - cliIP: netip.MustParseAddr("192.168.1.2"), - wantLog: `rdns: resolving "192.168.1.2": test upstream error`, - name: "resolve_error", - wantClientSource: ClientSourceNone, - }, { - ups: locUpstream, - cliIP: netip.MustParseAddr("2a00:1450:400c:c06::93"), - wantLog: "", - name: "ipv6_good", - wantClientSource: ClientSourceRDNS, - }} - - for _, tc := range testCases { - w.Reset() - - cc := newClientsContainer(t) - ch := make(chan netip.Addr) - rdns := &RDNS{ - exchanger: &rDNSExchanger{ - ex: tc.ups, - }, - clients: cc, - ipCh: ch, - ipCache: cache.New(cache.Config{ - EnableLRU: true, - MaxCount: revDNSCacheSize, - }), - } - - t.Run(tc.name, func(t *testing.T) { - var wg sync.WaitGroup - wg.Add(1) - go func() { - rdns.workerLoop() - wg.Done() - }() - - ch <- tc.cliIP - close(ch) - wg.Wait() - - if tc.wantLog != "" { - assert.Contains(t, w.String(), tc.wantLog) - } - - assert.Equal(t, tc.wantClientSource, cc.clientSource(tc.cliIP)) - }) - } -} diff --git a/internal/home/upgrade.go b/internal/home/upgrade.go index b6df4cad2f9..96b46b77b8a 100644 --- a/internal/home/upgrade.go +++ b/internal/home/upgrade.go @@ -23,7 +23,7 @@ import ( ) // currentSchemaVersion is the current schema version. -const currentSchemaVersion = 23 +const currentSchemaVersion = 24 // These aliases are provided for convenience. 
type ( @@ -98,6 +98,7 @@ func upgradeConfigSchema(oldVersion int, diskConf yobj) (err error) { upgradeSchema20to21, upgradeSchema21to22, upgradeSchema22to23, + upgradeSchema23to24, } n := 0 @@ -1325,6 +1326,110 @@ func upgradeSchema22to23(diskConf yobj) (err error) { return nil } +// upgradeSchema23to24 performs the following changes: +// +// # BEFORE: +// 'log_file': "" +// 'log_max_backups': 0 +// 'log_max_size': 100 +// 'log_max_age': 3 +// 'log_compress': false +// 'log_localtime': false +// 'verbose': false +// +// # AFTER: +// 'log': +// 'file': "" +// 'max_backups': 0 +// 'max_size': 100 +// 'max_age': 3 +// 'compress': false +// 'local_time': false +// 'verbose': false +func upgradeSchema23to24(diskConf yobj) (err error) { + log.Printf("Upgrade yaml: 23 to 24") + diskConf["schema_version"] = 24 + + logObj := yobj{} + err = coalesceError( + moveField[string](diskConf, logObj, "log_file", "file"), + moveField[int](diskConf, logObj, "log_max_backups", "max_backups"), + moveField[int](diskConf, logObj, "log_max_size", "max_size"), + moveField[int](diskConf, logObj, "log_max_age", "max_age"), + moveField[bool](diskConf, logObj, "log_compress", "compress"), + moveField[bool](diskConf, logObj, "log_localtime", "local_time"), + moveField[bool](diskConf, logObj, "verbose", "verbose"), + ) + if err != nil { + // Don't wrap the error, because it's informative enough as is. + return err + } + + if len(logObj) != 0 { + diskConf["log"] = logObj + } + + delete(diskConf, "log_file") + delete(diskConf, "log_max_backups") + delete(diskConf, "log_max_size") + delete(diskConf, "log_max_age") + delete(diskConf, "log_compress") + delete(diskConf, "log_localtime") + delete(diskConf, "verbose") + + return nil +} + +// moveField gets field value for key from diskConf, and then set this value +// in newConf for newKey. 
+func moveField[T any](diskConf, newConf yobj, key, newKey string) (err error) { + ok, newVal, err := fieldValue[T](diskConf, key) + if !ok { + return err + } + + switch v := newVal.(type) { + case int, bool, string: + newConf[newKey] = v + default: + return fmt.Errorf("invalid type of %s: %T", key, newVal) + } + + return nil +} + +// fieldValue returns the value of type T for key in diskConf object. +func fieldValue[T any](diskConf yobj, key string) (ok bool, field any, err error) { + fieldVal, ok := diskConf[key] + if !ok { + return false, new(T), nil + } + + f, ok := fieldVal.(T) + if !ok { + return false, nil, fmt.Errorf("unexpected type of %s: %T", key, fieldVal) + } + + return true, f, nil +} + +// coalesceError returns the first non-nil error. It is named after function +// COALESCE in SQL. If all errors are nil, it returns nil. +// +// TODO(a.garipov): Consider a similar helper to group errors together to show +// as many errors as possible. +// +// TODO(a.garipov): Think of ways to merge with [aghalg.Coalesce]. +func coalesceError(errors ...error) (res error) { + for _, err := range errors { + if err != nil { + return err + } + } + + return nil +} + // TODO(a.garipov): Replace with log.Output when we port it to our logging // package. 
func funcName() string { diff --git a/internal/home/upgrade_test.go b/internal/home/upgrade_test.go index 9f3f54dd4c3..a440ccfc9ff 100644 --- a/internal/home/upgrade_test.go +++ b/internal/home/upgrade_test.go @@ -1306,3 +1306,76 @@ func TestUpgradeSchema22to23(t *testing.T) { }) } } + +func TestUpgradeSchema23to24(t *testing.T) { + const newSchemaVer = 24 + + testCases := []struct { + in yobj + want yobj + name string + wantErrMsg string + }{{ + name: "empty", + in: yobj{}, + want: yobj{ + "schema_version": newSchemaVer, + }, + wantErrMsg: "", + }, { + name: "ok", + in: yobj{ + "log_file": "/test/path.log", + "log_max_backups": 1, + "log_max_size": 2, + "log_max_age": 3, + "log_compress": true, + "log_localtime": true, + "verbose": true, + }, + want: yobj{ + "log": yobj{ + "file": "/test/path.log", + "max_backups": 1, + "max_size": 2, + "max_age": 3, + "compress": true, + "local_time": true, + "verbose": true, + }, + "schema_version": newSchemaVer, + }, + wantErrMsg: "", + }, { + name: "invalid", + in: yobj{ + "log_file": "/test/path.log", + "log_max_backups": 1, + "log_max_size": 2, + "log_max_age": 3, + "log_compress": "", + "log_localtime": true, + "verbose": true, + }, + want: yobj{ + "log_file": "/test/path.log", + "log_max_backups": 1, + "log_max_size": 2, + "log_max_age": 3, + "log_compress": "", + "log_localtime": true, + "verbose": true, + "schema_version": newSchemaVer, + }, + wantErrMsg: "unexpected type of log_compress: string", + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := upgradeSchema23to24(tc.in) + testutil.AssertErrorMsg(t, tc.wantErrMsg, err) + + assert.Equal(t, tc.want, tc.in) + }) + } +} diff --git a/internal/next/websvc/waitlistener_internal_test.go b/internal/next/websvc/waitlistener_internal_test.go index 6911d137723..089c053186d 100644 --- a/internal/next/websvc/waitlistener_internal_test.go +++ b/internal/next/websvc/waitlistener_internal_test.go @@ -6,13 +6,13 @@ import ( "sync/atomic" "testing" - 
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
+	"github.com/AdguardTeam/golibs/testutil/fakenet"
 
 	"github.com/stretchr/testify/assert"
 )
 
 func TestWaitListener_Accept(t *testing.T) {
 	var accepted atomic.Bool
 
-	var l net.Listener = &aghtest.Listener{
+	var l net.Listener = &fakenet.Listener{
 		OnAccept: func() (conn net.Conn, err error) {
 			accepted.Store(true)
diff --git a/internal/querylog/qlog.go b/internal/querylog/qlog.go
index 1607fd72fa2..be9f1feece6 100644
--- a/internal/querylog/qlog.go
+++ b/internal/querylog/qlog.go
@@ -4,7 +4,6 @@ package querylog
 import (
 	"fmt"
 	"os"
-	"strings"
 	"sync"
 	"time"
 
@@ -161,10 +160,7 @@ func (l *queryLog) clear() {
 // newLogEntry creates an instance of logEntry from parameters.
 func newLogEntry(params *AddParams) (entry *logEntry) {
 	q := params.Question.Question[0]
-	qHost := q.Name
-	if qHost != "." {
-		qHost = strings.ToLower(q.Name[:len(q.Name)-1])
-	}
+	qHost := aghnet.NormalizeDomain(q.Name)
 
 	entry = &logEntry{
 		// TODO(d.kolyshev): Export this timestamp to func params.
diff --git a/internal/rdns/rdns.go b/internal/rdns/rdns.go
new file mode 100644
index 00000000000..e352da522ce
--- /dev/null
+++ b/internal/rdns/rdns.go
@@ -0,0 +1,132 @@
+// Package rdns processes reverse DNS lookup queries.
+package rdns
+
+import (
+	"net/netip"
+	"time"
+
+	"github.com/AdguardTeam/golibs/errors"
+	"github.com/AdguardTeam/golibs/log"
+	"github.com/bluele/gcache"
+)
+
+// Interface processes rDNS queries.
+type Interface interface {
+	// Process makes rDNS request and returns domain name.  changed indicates
+	// that domain name was updated since last request.
+	Process(ip netip.Addr) (host string, changed bool)
+}
+
+// Empty is an empty [Interface] implementation which does nothing.
+type Empty struct{}
+
+// type check
+var _ Interface = (*Empty)(nil)
+
+// Process implements the [Interface] interface for Empty.
+func (Empty) Process(_ netip.Addr) (host string, changed bool) { + return "", false +} + +// Exchanger is a resolver for clients' addresses. +type Exchanger interface { + // Exchange tries to resolve the ip in a suitable way, i.e. either as local + // or as external. + Exchange(ip netip.Addr) (host string, err error) +} + +// Config is the configuration structure for Default. +type Config struct { + // Exchanger resolves IP addresses to domain names. + Exchanger Exchanger + + // CacheSize is the maximum size of the cache. It must be greater than + // zero. + CacheSize int + + // CacheTTL is the Time to Live duration for cached IP addresses. + CacheTTL time.Duration +} + +// Default is the default rDNS query processor. +type Default struct { + // cache is the cache containing IP addresses of clients. An active IP + // address is resolved once again after it expires. If IP address couldn't + // be resolved, it stays here for some time to prevent further attempts to + // resolve the same IP. + cache gcache.Cache + + // exchanger resolves IP addresses to domain names. + exchanger Exchanger + + // cacheTTL is the Time to Live duration for cached IP addresses. + cacheTTL time.Duration +} + +// New returns a new default rDNS query processor. conf must not be nil. +func New(conf *Config) (r *Default) { + return &Default{ + cache: gcache.New(conf.CacheSize).LRU().Build(), + exchanger: conf.Exchanger, + cacheTTL: conf.CacheTTL, + } +} + +// type check +var _ Interface = (*Default)(nil) + +// Process implements the [Interface] interface for Default. 
+func (r *Default) Process(ip netip.Addr) (host string, changed bool) { + fromCache, expired := r.findInCache(ip) + if !expired { + return fromCache, false + } + + host, err := r.exchanger.Exchange(ip) + if err != nil { + log.Debug("rdns: resolving %q: %s", ip, err) + } + + item := &cacheItem{ + expiry: time.Now().Add(r.cacheTTL), + host: host, + } + + err = r.cache.Set(ip, item) + if err != nil { + log.Debug("rdns: cache: adding item %q: %s", ip, err) + } + + return host, fromCache == "" || host != fromCache +} + +// findInCache finds domain name in the cache. expired is true if host is not +// valid anymore. +func (r *Default) findInCache(ip netip.Addr) (host string, expired bool) { + val, err := r.cache.Get(ip) + if err != nil { + if !errors.Is(err, gcache.KeyNotFoundError) { + log.Debug("rdns: cache: retrieving %q: %s", ip, err) + } + + return "", true + } + + item, ok := val.(*cacheItem) + if !ok { + log.Debug("rdns: cache: %q bad type %T", ip, val) + + return "", true + } + + return item.host, time.Now().After(item.expiry) +} + +// cacheItem represents an item that we will store in the cache. +type cacheItem struct { + // expiry is the time when cacheItem will expire. + expiry time.Time + + // host is the domain name of a runtime client. + host string +} diff --git a/internal/rdns/rdns_test.go b/internal/rdns/rdns_test.go new file mode 100644 index 00000000000..8694eba3df7 --- /dev/null +++ b/internal/rdns/rdns_test.go @@ -0,0 +1,105 @@ +package rdns_test + +import ( + "net/netip" + "testing" + "time" + + "github.com/AdguardTeam/AdGuardHome/internal/rdns" + "github.com/AdguardTeam/golibs/netutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// fakeRDNSExchanger is a mock [rdns.Exchanger] implementation for tests. 
+type fakeRDNSExchanger struct { + OnExchange func(ip netip.Addr) (host string, err error) +} + +// type check +var _ rdns.Exchanger = (*fakeRDNSExchanger)(nil) + +// Exchange implements [rdns.Exchanger] interface for *fakeRDNSExchanger. +func (e *fakeRDNSExchanger) Exchange(ip netip.Addr) (host string, err error) { + return e.OnExchange(ip) +} + +func TestDefault_Process(t *testing.T) { + ip1 := netip.MustParseAddr("1.2.3.4") + revAddr1, err := netutil.IPToReversedAddr(ip1.AsSlice()) + require.NoError(t, err) + + ip2 := netip.MustParseAddr("4.3.2.1") + revAddr2, err := netutil.IPToReversedAddr(ip2.AsSlice()) + require.NoError(t, err) + + localIP := netip.MustParseAddr("192.168.0.1") + localRevAddr1, err := netutil.IPToReversedAddr(localIP.AsSlice()) + require.NoError(t, err) + + config := &rdns.Config{ + CacheSize: 100, + CacheTTL: time.Hour, + } + + testCases := []struct { + name string + addr netip.Addr + want string + }{{ + name: "first", + addr: ip1, + want: revAddr1, + }, { + name: "second", + addr: ip2, + want: revAddr2, + }, { + name: "empty", + addr: netip.MustParseAddr("0.0.0.0"), + want: "", + }, { + name: "private", + addr: localIP, + want: localRevAddr1, + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + hit := 0 + onExchange := func(ip netip.Addr) (host string, err error) { + hit++ + + switch ip { + case ip1: + return revAddr1, nil + case ip2: + return revAddr2, nil + case localIP: + return localRevAddr1, nil + default: + return "", nil + } + } + exchanger := &fakeRDNSExchanger{ + OnExchange: onExchange, + } + + config.Exchanger = exchanger + r := rdns.New(config) + + got, changed := r.Process(tc.addr) + require.True(t, changed) + + assert.Equal(t, tc.want, got) + assert.Equal(t, 1, hit) + + // From cache. 
+ got, changed = r.Process(tc.addr) + require.False(t, changed) + + assert.Equal(t, tc.want, got) + assert.Equal(t, 1, hit) + }) + } +} diff --git a/internal/stats/http_test.go b/internal/stats/http_test.go index 8388fc74b1f..69e75d6b2a4 100644 --- a/internal/stats/http_test.go +++ b/internal/stats/http_test.go @@ -86,7 +86,7 @@ func TestHandleStatsConfig(t *testing.T) { }, }, wantCode: http.StatusUnprocessableEntity, - wantErr: "ignored: duplicate host name \"ignor.ed\" at index 1\n", + wantErr: "ignored: duplicate hostname \"ignor.ed\" at index 1\n", }, { name: "ignored_empty", body: getConfigResp{ @@ -97,7 +97,7 @@ func TestHandleStatsConfig(t *testing.T) { }, }, wantCode: http.StatusUnprocessableEntity, - wantErr: "ignored: host name is empty\n", + wantErr: "ignored: at index 0: hostname is empty\n", }, { name: "enabled_is_null", body: getConfigResp{ diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 540064a75aa..47b2e330021 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -10,9 +10,10 @@ require ( github.com/kyoh86/looppointer v0.2.1 github.com/securego/gosec/v2 v2.16.0 github.com/uudashr/gocognit v1.0.6 - golang.org/x/tools v0.10.0 + golang.org/x/tools v0.11.0 golang.org/x/vuln v0.2.0 - honnef.co/go/tools v0.4.3 + // TODO(a.garipov): Return to tagged releases once a new one appears. 
+ honnef.co/go/tools v0.5.0-0.dev.0.20230709092525-bc759185c5ee mvdan.cc/gofumpt v0.5.0 mvdan.cc/unparam v0.0.0-20230610194454-9ea02bef9868 ) @@ -26,9 +27,9 @@ require ( github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/exp/typeparams v0.0.0-20230626212559-97b1e661b5df // indirect - golang.org/x/mod v0.11.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20230711023510-fffb14384f22 // indirect + golang.org/x/mod v0.12.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.9.0 // indirect + golang.org/x/sys v0.10.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/internal/tools/go.sum b/internal/tools/go.sum index 5f85cfa4f0f..d2244861f91 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -52,21 +52,21 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp/typeparams v0.0.0-20230626212559-97b1e661b5df h1:jfUqBujZx2dktJVEmZpCkyngz7MWrVv1y9kLOqFNsqw= -golang.org/x/exp/typeparams v0.0.0-20230626212559-97b1e661b5df/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230711023510-fffb14384f22 h1:e8iSCQYXZ4EB6q3kIfy2fgPFTvDbozqzRe4OuIOyrL4= +golang.org/x/exp/typeparams v0.0.0-20230711023510-fffb14384f22/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -82,8 +82,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -96,8 +96,8 @@ golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= -golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= +golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/vuln v0.2.0 h1:Dlz47lW0pvPHU7tnb10S8vbMn9GnV2B6eyT7Tem5XBI= golang.org/x/vuln v0.2.0/go.mod h1:V0eyhHwaAaHrt42J9bgrN6rd12f6GU4T0Lu0ex2wDg4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -107,8 +107,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.4.3 
h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw= -honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA= +honnef.co/go/tools v0.5.0-0.dev.0.20230709092525-bc759185c5ee h1:mpyvMqtlVZTwEv78QL3S2ZDTMHMO1fgNwr2kC7+K7oU= +honnef.co/go/tools v0.5.0-0.dev.0.20230709092525-bc759185c5ee/go.mod h1:GUV+uIBCLpdf0/v6UhHHG/yzI/z6qPskBeQCjcNB96k= mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= mvdan.cc/unparam v0.0.0-20230610194454-9ea02bef9868 h1:F4Q7pXcrU9UiU1fq0ZWqSOxKjNAteRuDr7JDk7uVLRQ= diff --git a/scripts/README.md b/scripts/README.md index 579ee08a1a4..bbf5a9cf376 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -18,7 +18,7 @@ Run `make init` from the project root. -## `make/`: Makefile Scripts +## `make/`: Makefile scripts The release channels are: `development` (the default), `edge`, `beta`, and `release`. If verbosity levels aren't documented here, there are only two: `0`, @@ -26,7 +26,7 @@ don't print anything, and `1`, be verbose. - ### `build-docker.sh`: Build A Multi-Architecture Docker Image + ### `build-docker.sh`: Build a multi-architecture Docker image Required environment: @@ -51,7 +51,7 @@ Optional environment: - ### `build-release.sh`: Build A Release For All Platforms + ### `build-release.sh`: Build a release for all platforms Required environment: @@ -101,7 +101,22 @@ Required environment: - ### `go-build.sh`: Build The Backend + ### `go-bench.sh`: Run backend benchmarks + +Optional environment: + + * `GO`: set an alternative name for the Go compiler. + + * `TIMEOUT_FLAGS`: set timeout flags for tests. The default value is + `--timeout=30s`. + + * `VERBOSE`: verbosity level. `1` shows every command that is run and every + Go package that is processed. `2` also shows subcommands and environment. + The default value is `0`, don't be verbose. 
+
+
+
+ ### `go-build.sh`: Build the backend

Optional environment:

@@ -135,19 +150,37 @@ Required environment:

- ### `go-deps.sh`: Install Backend Dependencies
+ ### `go-deps.sh`: Install backend dependencies
+
+Optional environment:
+
+ * `GO`: set an alternative name for the Go compiler.
+
+ * `VERBOSE`: verbosity level. `1` shows every command that is run and every
+ Go package that is processed. `2` also shows subcommands and environment.
+ The default value is `0`, don't be verbose.
+
+
+
+ ### `go-fuzz.sh`: Run backend fuzz tests

Optional environment:

 * `GO`: set an alternative name for the Go compiler.

+ * `FUZZTIME_FLAGS`: set fuzz flags for tests. The default value is
+ `--fuzztime=20s`.
+
+ * `TIMEOUT_FLAGS`: set timeout flags for tests. The default value is
+ `--timeout=30s`.
+
 * `VERBOSE`: verbosity level. `1` shows every command that is run and every
 Go package that is processed. `2` also shows subcommands and environment.
 The default value is `0`, don't be verbose.

- ### `go-lint.sh`: Run Backend Static Analyzers
+ ### `go-lint.sh`: Run backend static analyzers

Don't forget to run `make go-tools` once first!

@@ -163,7 +196,7 @@ Optional environment:

- ### `go-test.sh`: Run Backend Tests
+ ### `go-test.sh`: Run backend tests

Optional environment:

@@ -173,7 +206,7 @@ Optional environment:
 `1`, use the race detector.

 * `TIMEOUT_FLAGS`: set timeout flags for tests. The default value is
- `--timeout 30s`.
+ `--timeout=30s`.

 * `VERBOSE`: verbosity level. `1` shows every command that is run and every
 Go package that is processed. `2` also shows subcommands. The default
@@ -181,7 +214,7 @@ Optional environment:

- ### `go-tools.sh`: Install Backend Tooling
+ ### `go-tools.sh`: Install backend tooling

Installs the Go static analysis and other tools into `${PWD}/bin`. Either add
`${PWD}/bin` to your `$PATH` before all other entries, or use the commands
@@ -236,25 +269,29 @@ Optional environment:

 ### Usage

- * `go run main.go help`: print usage. 
+ * `go run ./scripts/translations help`: print usage. - * `go run main.go download [-n ]`: download and save all translations. - `n` is optional flag where count is a number of concurrent downloads. + * `go run ./scripts/translations download [-n ]`: download and save + all translations. `n` is optional flag where count is a number of + concurrent downloads. - * `go run main.go upload`: upload the base `en` locale. + * `go run ./scripts/translations upload`: upload the base `en` locale. - * `go run main.go summary`: show the current locales summary. + * `go run ./scripts/translations summary`: show the current locales summary. - * `go run main.go unused`: show the list of unused strings. + * `go run ./scripts/translations unused`: show the list of unused strings. - * `go run main.go auto-add`: add locales with additions to the git and - restore locales with deletions. + * `go run ./scripts/translations auto-add`: add locales with additions to the + git and restore locales with deletions. After the download you'll find the output locales in the `client/src/__locales/` directory. Optional environment: + * `DOWNLOAD_LANGUAGES`: set a list of specific languages to `download`. For + example `ar be bg`. + * `UPLOAD_LANGUAGE`: set an alternative language for `upload`. * `TWOSKY_URI`: set an alternative URL for `download` or `upload`. diff --git a/scripts/make/build-docker.sh b/scripts/make/build-docker.sh index 971017a28c6..ffdfb228ed7 100644 --- a/scripts/make/build-docker.sh +++ b/scripts/make/build-docker.sh @@ -107,18 +107,6 @@ cp "${dist_dir}/AdGuardHome_linux_arm_7/AdGuardHome/AdGuardHome"\ cp "${dist_dir}/AdGuardHome_linux_ppc64le/AdGuardHome/AdGuardHome"\ "${dist_docker}/AdGuardHome_linux_ppc64le_" -# Copy the helper scripts. See file docker/Dockerfile. 
-dist_docker_scripts="${dist_docker}/scripts" -readonly dist_docker_scripts - -mkdir -p "$dist_docker_scripts" -cp "./docker/dns-bind.awk"\ - "${dist_docker_scripts}/dns-bind.awk" -cp "./docker/web-bind.awk"\ - "${dist_docker_scripts}/web-bind.awk" -cp "./docker/healthcheck.sh"\ - "${dist_docker_scripts}/healthcheck.sh" - # Don't use quotes with $docker_version_tag and $docker_channel_tag, because we # want word splitting and or an empty space if tags are empty. # diff --git a/scripts/make/go-bench.sh b/scripts/make/go-bench.sh new file mode 100644 index 00000000000..5ddf5d03083 --- /dev/null +++ b/scripts/make/go-bench.sh @@ -0,0 +1,55 @@ +#!/bin/sh + +verbose="${VERBOSE:-0}" +readonly verbose + +# Verbosity levels: +# 0 = Don't print anything except for errors. +# 1 = Print commands, but not nested commands. +# 2 = Print everything. +if [ "$verbose" -gt '1' ] +then + set -x + v_flags='-v=1' + x_flags='-x=1' +elif [ "$verbose" -gt '0' ] +then + set -x + v_flags='-v=1' + x_flags='-x=0' +else + set +x + v_flags='-v=0' + x_flags='-x=0' +fi +readonly v_flags x_flags + +set -e -f -u + +if [ "${RACE:-1}" -eq '0' ] +then + race_flags='--race=0' +else + race_flags='--race=1' +fi +readonly race_flags + +go="${GO:-go}" + +count_flags='--count=1' +shuffle_flags='--shuffle=on' +timeout_flags="${TIMEOUT_FLAGS:---timeout=30s}" +readonly go count_flags shuffle_flags timeout_flags + +"$go" test\ + "$count_flags"\ + "$shuffle_flags"\ + "$race_flags"\ + "$timeout_flags"\ + "$x_flags"\ + "$v_flags"\ + --bench='.'\ + --benchmem\ + --benchtime=1s\ + --run='^$'\ + ./... diff --git a/scripts/make/go-fuzz.sh b/scripts/make/go-fuzz.sh new file mode 100644 index 00000000000..8f10b4f1056 --- /dev/null +++ b/scripts/make/go-fuzz.sh @@ -0,0 +1,58 @@ +#!/bin/sh + +verbose="${VERBOSE:-0}" +readonly verbose + +# Verbosity levels: +# 0 = Don't print anything except for errors. +# 1 = Print commands, but not nested commands. +# 2 = Print everything. 
+if [ "$verbose" -gt '1' ] +then + set -x + v_flags='-v=1' + x_flags='-x=1' +elif [ "$verbose" -gt '0' ] +then + set -x + v_flags='-v=1' + x_flags='-x=0' +else + set +x + v_flags='-v=0' + x_flags='-x=0' +fi +readonly v_flags x_flags + +set -e -f -u + +if [ "${RACE:-1}" -eq '0' ] +then + race_flags='--race=0' +else + race_flags='--race=1' +fi +readonly race_flags + +go="${GO:-go}" + +count_flags='--count=1' +shuffle_flags='--shuffle=on' +timeout_flags="${TIMEOUT_FLAGS:---timeout=30s}" +fuzztime_flags="${FUZZTIME_FLAGS:---fuzztime=20s}" + +readonly go count_flags shuffle_flags timeout_flags fuzztime_flags + +# TODO(a.garipov): File an issue about using --fuzz with multiple packages. +"$go" test\ + "$count_flags"\ + "$shuffle_flags"\ + "$race_flags"\ + "$timeout_flags"\ + "$x_flags"\ + "$v_flags"\ + "$fuzztime_flags"\ + --fuzz='.'\ + --run='^$'\ + ./internal/filtering/rulelist/\ + ; diff --git a/scripts/make/go-lint.sh b/scripts/make/go-lint.sh index 3409ed2b8d2..b36db02fc15 100644 --- a/scripts/make/go-lint.sh +++ b/scripts/make/go-lint.sh @@ -35,7 +35,7 @@ set -f -u go_version="$( "${GO:-go}" version )" readonly go_version -go_min_version='go1.19.10' +go_min_version='go1.19.11' go_version_msg=" warning: your go version (${go_version}) is different from the recommended minimal one (${go_min_version}). if you have the version installed, please set the GO environment variable. @@ -176,10 +176,14 @@ run_linter gocognit --over 10\ ./internal/aghchan/\ ./internal/aghhttp/\ ./internal/aghio/\ + ./internal/filtering/hashprefix/\ + ./internal/filtering/rulelist/\ ./internal/next/\ + ./internal/rdns/\ ./internal/tools/\ ./internal/version/\ ./internal/whois/\ + ./scripts/\ ; run_linter ineffassign ./... 
@@ -210,6 +214,8 @@ run_linter gosec --quiet\ ./internal/dhcpd\ ./internal/dhcpsvc\ ./internal/dnsforward\ + ./internal/filtering/hashprefix/\ + ./internal/filtering/rulelist/\ ./internal/next\ ./internal/schedule\ ./internal/stats\ @@ -218,8 +224,7 @@ run_linter gosec --quiet\ ./internal/whois\ ; -# TODO(a.garipov): Enable --blank? -run_linter errcheck --asserts ./... +run_linter errcheck ./... staticcheck_matrix=' darwin: GOOS=darwin diff --git a/scripts/translations/download.go b/scripts/translations/download.go new file mode 100644 index 00000000000..c64698975ad --- /dev/null +++ b/scripts/translations/download.go @@ -0,0 +1,177 @@ +package main + +import ( + "flag" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/AdguardTeam/AdGuardHome/internal/aghio" + "github.com/AdguardTeam/golibs/errors" + "github.com/AdguardTeam/golibs/log" + "golang.org/x/exp/slices" +) + +// download and save all translations. +func (c *twoskyClient) download() (err error) { + var numWorker int + + flagSet := flag.NewFlagSet("download", flag.ExitOnError) + flagSet.Usage = func() { + usage("download command error") + } + flagSet.IntVar(&numWorker, "n", 1, "number of concurrent downloads") + + err = flagSet.Parse(os.Args[2:]) + if err != nil { + // Don't wrap the error since it's informative enough as is. 
+ return err + } + + if numWorker < 1 { + usage("count must be positive") + } + + downloadURI := c.uri.JoinPath("download") + + client := &http.Client{ + Timeout: 10 * time.Second, + } + + wg := &sync.WaitGroup{} + failed := &sync.Map{} + uriCh := make(chan *url.URL, len(c.langs)) + + for i := 0; i < numWorker; i++ { + wg.Add(1) + go downloadWorker(wg, failed, client, uriCh) + } + + for lang := range c.langs { + uri := translationURL(downloadURI, defaultBaseFile, c.projectID, lang) + + uriCh <- uri + } + + close(uriCh) + wg.Wait() + + printFailedLocales(failed) + + return nil +} + +// printFailedLocales prints sorted list of failed downloads, if any. +func printFailedLocales(failed *sync.Map) { + keys := []string{} + failed.Range(func(k, _ any) bool { + s, ok := k.(string) + if !ok { + panic("unexpected type") + } + + keys = append(keys, s) + + return true + }) + + if len(keys) == 0 { + return + } + + slices.Sort(keys) + log.Info("failed locales: %s", strings.Join(keys, " ")) +} + +// downloadWorker downloads translations by received urls and saves them. +// Where failed is a map for storing failed downloads. +func downloadWorker( + wg *sync.WaitGroup, + failed *sync.Map, + client *http.Client, + uriCh <-chan *url.URL, +) { + defer wg.Done() + + for uri := range uriCh { + q := uri.Query() + code := q.Get("language") + + err := saveToFile(client, uri, code) + if err != nil { + log.Error("download: worker: %s", err) + failed.Store(code, struct{}{}) + } + } +} + +// saveToFile downloads translation by url and saves it to a file, or returns +// error. 
+func saveToFile(client *http.Client, uri *url.URL, code string) (err error) { + data, err := getTranslation(client, uri.String()) + if err != nil { + log.Info("%s", data) + + return fmt.Errorf("getting translation: %s", err) + } + + name := filepath.Join(localesDir, code+".json") + err = os.WriteFile(name, data, 0o664) + if err != nil { + return fmt.Errorf("writing file: %s", err) + } + + fmt.Println(name) + + return nil +} + +// getTranslation returns received translation data and error. If err is not +// nil, data may contain a response from server for inspection. +func getTranslation(client *http.Client, url string) (data []byte, err error) { + resp, err := client.Get(url) + if err != nil { + return nil, fmt.Errorf("requesting: %w", err) + } + + defer log.OnCloserError(resp.Body, log.ERROR) + + if resp.StatusCode != http.StatusOK { + err = fmt.Errorf("url: %q; status code: %s", url, http.StatusText(resp.StatusCode)) + + // Go on and download the body for inspection. + } + + limitReader, lrErr := aghio.LimitReader(resp.Body, readLimit) + if lrErr != nil { + // Generally shouldn't happen, since the only error returned by + // [aghio.LimitReader] is an argument error. + panic(fmt.Errorf("limit reading: %w", lrErr)) + } + + data, readErr := io.ReadAll(limitReader) + + return data, errors.WithDeferred(err, readErr) +} + +// translationURL returns a new url.URL with provided query parameters. 
+func translationURL(oldURL *url.URL, baseFile, projectID string, lang langCode) (uri *url.URL) { + uri = &url.URL{} + *uri = *oldURL + + q := uri.Query() + q.Set("format", "json") + q.Set("filename", baseFile) + q.Set("project", projectID) + q.Set("language", string(lang)) + + uri.RawQuery = q.Encode() + + return uri +} diff --git a/scripts/translations/main.go b/scripts/translations/main.go index 0d4e78712a9..7abadab7491 100644 --- a/scripts/translations/main.go +++ b/scripts/translations/main.go @@ -6,25 +6,16 @@ import ( "bufio" "bytes" "encoding/json" - "flag" "fmt" - "io" - "mime/multipart" - "net/http" - "net/textproto" "net/url" "os" "os/exec" "path/filepath" "strings" - "sync" "time" - "github.com/AdguardTeam/AdGuardHome/internal/aghhttp" - "github.com/AdguardTeam/AdGuardHome/internal/aghio" "github.com/AdguardTeam/AdGuardHome/internal/aghos" "github.com/AdguardTeam/golibs/errors" - "github.com/AdguardTeam/golibs/httphdr" "github.com/AdguardTeam/golibs/log" "golang.org/x/exp/maps" "golang.org/x/exp/slices" @@ -38,7 +29,8 @@ const ( srcDir = "./client/src" twoskyURI = "https://twosky.int.agrd.dev/api/v1" - readLimit = 1 * 1024 * 1024 + readLimit = 1 * 1024 * 1024 + uploadTimeout = 10 * time.Second ) // langCode is a language code. 
@@ -62,31 +54,26 @@ func main() { usage("") } - uriStr := os.Getenv("TWOSKY_URI") - if uriStr == "" { - uriStr = twoskyURI - } - - uri, err := url.Parse(uriStr) + conf, err := readTwoskyConfig() check(err) - projectID := os.Getenv("TWOSKY_PROJECT_ID") - if projectID == "" { - projectID = defaultProjectID - } - - conf, err := readTwoskyConf() - check(err) + var cli *twoskyClient switch os.Args[1] { case "summary": err = summary(conf.Languages) case "download": - err = download(uri, projectID, conf.Languages) + cli, err = conf.toClient() + check(err) + + err = cli.download() case "unused": err = unused(conf.LocalizableFiles[0]) case "upload": - err = upload(uri, projectID, conf.BaseLangcode) + cli, err = conf.toClient() + check(err) + + err = cli.upload() case "auto-add": err = autoAdd(conf.LocalizableFiles[0]) default: @@ -133,51 +120,131 @@ Commands: os.Exit(0) } -// twoskyConf is the configuration structure for localization. -type twoskyConf struct { +// twoskyConfig is the configuration structure for localization. +type twoskyConfig struct { Languages languages `json:"languages"` ProjectID string `json:"project_id"` BaseLangcode langCode `json:"base_locale"` LocalizableFiles []string `json:"localizable_files"` } -// readTwoskyConf returns configuration. -func readTwoskyConf() (t twoskyConf, err error) { - defer func() { err = errors.Annotate(err, "parsing twosky conf: %w") }() +// readTwoskyConfig returns twosky configuration. +func readTwoskyConfig() (t *twoskyConfig, err error) { + defer func() { err = errors.Annotate(err, "parsing twosky config: %w") }() b, err := os.ReadFile(twoskyConfFile) if err != nil { // Don't wrap the error since it's informative enough as is. 
- return twoskyConf{}, err + return nil, err } - var tsc []twoskyConf + var tsc []twoskyConfig err = json.Unmarshal(b, &tsc) if err != nil { err = fmt.Errorf("unmarshalling %q: %w", twoskyConfFile, err) - return twoskyConf{}, err + return nil, err } if len(tsc) == 0 { err = fmt.Errorf("%q is empty", twoskyConfFile) - return twoskyConf{}, err + return nil, err } conf := tsc[0] for _, lang := range conf.Languages { if lang == "" { - return twoskyConf{}, errors.Error("language is empty") + return nil, errors.Error("language is empty") } } if len(conf.LocalizableFiles) == 0 { - return twoskyConf{}, errors.Error("no localizable files specified") + return nil, errors.Error("no localizable files specified") + } + + return &conf, nil +} + +// twoskyClient is the twosky client with methods for download and upload +// translations. +type twoskyClient struct { + // uri is the base URL. + uri *url.URL + + // langs is the map of languages to download. + langs languages + + // projectID is the name of the project. + projectID string + + // baseLang is the base language code. + baseLang langCode +} + +// toClient reads values from environment variables or defaults, validates +// them, and returns the twosky client. 
+func (t *twoskyConfig) toClient() (cli *twoskyClient, err error) { + defer func() { err = errors.Annotate(err, "filling config: %w") }() + + uriStr := os.Getenv("TWOSKY_URI") + if uriStr == "" { + uriStr = twoskyURI + } + uri, err := url.Parse(uriStr) + if err != nil { + return nil, err + } + + projectID := os.Getenv("TWOSKY_PROJECT_ID") + if projectID == "" { + projectID = defaultProjectID + } + + baseLang := t.BaseLangcode + uLangStr := os.Getenv("UPLOAD_LANGUAGE") + if uLangStr != "" { + baseLang = langCode(uLangStr) + } + + langs := t.Languages + dlLangStr := os.Getenv("DOWNLOAD_LANGUAGES") + if dlLangStr != "" { + var dlLangs languages + dlLangs, err = validateLanguageStr(dlLangStr, langs) + if err != nil { + return nil, err + } + + langs = dlLangs } - return conf, nil + return &twoskyClient{ + uri: uri, + projectID: projectID, + baseLang: baseLang, + langs: langs, + }, nil +} + +// validateLanguageStr validates languages codes that contain in the str and +// returns language map, where key is language code and value is display name. +func validateLanguageStr(str string, all languages) (langs languages, err error) { + langs = make(languages) + codes := strings.Fields(str) + + for _, k := range codes { + lc := langCode(k) + name, ok := all[lc] + if !ok { + return nil, fmt.Errorf("validating languages: unexpected language code %q", k) + } + + langs[lc] = name + } + + return langs, nil } // readLocales reads file with name fn and returns a map, where key is text @@ -233,163 +300,33 @@ func summary(langs languages) (err error) { return nil } -// download and save all translations. uri is the base URL. projectID is the -// name of the project. 
-func download(uri *url.URL, projectID string, langs languages) (err error) { - var numWorker int - - flagSet := flag.NewFlagSet("download", flag.ExitOnError) - flagSet.Usage = func() { - usage("download command error") - } - flagSet.IntVar(&numWorker, "n", 1, "number of concurrent downloads") +// unused prints unused text labels. +func unused(basePath string) (err error) { + defer func() { err = errors.Annotate(err, "unused: %w") }() - err = flagSet.Parse(os.Args[2:]) + baseLoc, err := readLocales(basePath) if err != nil { - // Don't wrap the error since it's informative enough as is. return err } - if numWorker < 1 { - usage("count must be positive") - } - - downloadURI := uri.JoinPath("download") - - client := &http.Client{ - Timeout: 10 * time.Second, - } - - wg := &sync.WaitGroup{} - uriCh := make(chan *url.URL, len(langs)) - - for i := 0; i < numWorker; i++ { - wg.Add(1) - go downloadWorker(wg, client, uriCh) - } - - for lang := range langs { - uri = translationURL(downloadURI, defaultBaseFile, projectID, lang) - - uriCh <- uri - } - - close(uriCh) - wg.Wait() - - return nil -} - -// downloadWorker downloads translations by received urls and saves them. -func downloadWorker(wg *sync.WaitGroup, client *http.Client, uriCh <-chan *url.URL) { - defer wg.Done() - - for uri := range uriCh { - data, err := getTranslation(client, uri.String()) - if err != nil { - log.Error("download worker: getting translation: %s", err) - log.Info("download worker: error response:\n%s", data) - - continue - } - - q := uri.Query() - code := q.Get("language") - - // Fix some TwoSky weirdnesses. - // - // TODO(a.garipov): Remove when those are fixed. - code = strings.ToLower(code) - - name := filepath.Join(localesDir, code+".json") - err = os.WriteFile(name, data, 0o664) - if err != nil { - log.Error("download worker: writing file: %s", err) - - continue - } - - fmt.Println(name) - } -} - -// getTranslation returns received translation data and error. 
If err is not -// nil, data may contain a response from server for inspection. -func getTranslation(client *http.Client, url string) (data []byte, err error) { - resp, err := client.Get(url) + locDir := filepath.Clean(localesDir) + js, err := findJS(locDir) if err != nil { - return nil, fmt.Errorf("requesting: %w", err) - } - - defer log.OnCloserError(resp.Body, log.ERROR) - - if resp.StatusCode != http.StatusOK { - err = fmt.Errorf("url: %q; status code: %s", url, http.StatusText(resp.StatusCode)) - - // Go on and download the body for inspection. - } - - limitReader, lrErr := aghio.LimitReader(resp.Body, readLimit) - if lrErr != nil { - // Generally shouldn't happen, since the only error returned by - // [aghio.LimitReader] is an argument error. - panic(fmt.Errorf("limit reading: %w", lrErr)) + return err } - data, readErr := io.ReadAll(limitReader) - - return data, errors.WithDeferred(err, readErr) + return findUnused(js, baseLoc) } -// translationURL returns a new url.URL with provided query parameters. -func translationURL(oldURL *url.URL, baseFile, projectID string, lang langCode) (uri *url.URL) { - uri = &url.URL{} - *uri = *oldURL - - // Fix some TwoSky weirdnesses. - // - // TODO(a.garipov): Remove when those are fixed. - switch lang { - case "si-lk": - lang = "si-LK" - case "zh-hk": - lang = "zh-HK" - default: - // Go on. - } - - q := uri.Query() - q.Set("format", "json") - q.Set("filename", baseFile) - q.Set("project", projectID) - q.Set("language", string(lang)) - - uri.RawQuery = q.Encode() - - return uri -} - -// unused prints unused text labels. -func unused(basePath string) (err error) { - baseLoc, err := readLocales(basePath) - if err != nil { - return fmt.Errorf("unused: %w", err) - } - - locDir := filepath.Clean(localesDir) - - fileNames := []string{} - err = filepath.Walk(srcDir, func(name string, info os.FileInfo, err error) error { +// findJS returns list of JavaScript and JSON files or error. 
+func findJS(locDir string) (fileNames []string, err error) { + walkFn := func(name string, _ os.FileInfo, err error) error { if err != nil { log.Info("warning: accessing a path %q: %s", name, err) return nil } - if info.IsDir() { - return nil - } - if strings.HasPrefix(name, locDir) { return nil } @@ -400,13 +337,14 @@ func unused(basePath string) (err error) { } return nil - }) + } + err = filepath.Walk(srcDir, walkFn) if err != nil { - return fmt.Errorf("filepath walking %q: %w", srcDir, err) + return nil, fmt.Errorf("filepath walking %q: %w", srcDir, err) } - return findUnused(fileNames, baseLoc) + return fileNames, nil } // findUnused prints unused text labels from fileNames. @@ -445,155 +383,63 @@ func findUnused(fileNames []string, loc locales) (err error) { return nil } -// upload base translation. uri is the base URL. projectID is the name of the -// project. baseLang is the base language code. -func upload(uri *url.URL, projectID string, baseLang langCode) (err error) { - defer func() { err = errors.Annotate(err, "upload: %w") }() - - uploadURI := uri.JoinPath("upload") - - lang := baseLang - - langStr := os.Getenv("UPLOAD_LANGUAGE") - if langStr != "" { - lang = langCode(langStr) - } - - basePath := filepath.Join(localesDir, defaultBaseFile) - - formData := map[string]string{ - "format": "json", - "language": string(lang), - "filename": defaultBaseFile, - "project": projectID, - } - - buf, cType, err := prepareMultipartMsg(formData, basePath) - if err != nil { - return fmt.Errorf("preparing multipart msg: %w", err) - } - - err = send(uploadURI.String(), cType, buf) - if err != nil { - return fmt.Errorf("sending multipart msg: %w", err) - } - - return nil -} - -// prepareMultipartMsg prepares translation data for upload. 
-func prepareMultipartMsg(
-	formData map[string]string,
-	basePath string,
-) (buf *bytes.Buffer, cType string, err error) {
-	buf = &bytes.Buffer{}
-	w := multipart.NewWriter(buf)
-	var fw io.Writer
-
-	for k, v := range formData {
-		err = w.WriteField(k, v)
-		if err != nil {
-			return nil, "", fmt.Errorf("writing field: %w", err)
-		}
-	}
+// autoAdd adds locales with additions to the git and restores locales with
+// deletions.
+func autoAdd(basePath string) (err error) {
+	defer func() { err = errors.Annotate(err, "auto add: %w") }()
 
-	file, err := os.Open(basePath)
+	adds, dels, err := changedLocales()
 	if err != nil {
-		return nil, "", fmt.Errorf("opening file: %w", err)
+		// Don't wrap the error since it's informative enough as is.
+		return err
 	}
 
-	defer func() {
-		err = errors.WithDeferred(err, file.Close())
-	}()
-
-	h := make(textproto.MIMEHeader)
-	h.Set(httphdr.ContentType, aghhttp.HdrValApplicationJSON)
-
-	d := fmt.Sprintf("form-data; name=%q; filename=%q", "file", defaultBaseFile)
-	h.Set(httphdr.ContentDisposition, d)
-
-	fw, err = w.CreatePart(h)
-	if err != nil {
-		return nil, "", fmt.Errorf("creating part: %w", err)
+	if slices.Contains(dels, basePath) {
+		return errors.Error("base locale contains deletions")
 	}
 
-	_, err = io.Copy(fw, file)
+	err = handleAdds(adds)
 	if err != nil {
-		return nil, "", fmt.Errorf("copying: %w", err)
+		// Don't wrap the error since it's informative enough as is.
+		return err
 	}
 
-	err = w.Close()
+	err = handleDels(dels)
 	if err != nil {
-		return nil, "", fmt.Errorf("closing writer: %w", err)
+		// Don't wrap the error since it's informative enough as is.
+		return err
 	}
 
-	return buf, w.FormDataContentType(), nil
+	return nil
 }
 
-// send POST request to uriStr. 
-func send(uriStr, cType string, buf *bytes.Buffer) (err error) { - var client http.Client - - req, err := http.NewRequest(http.MethodPost, uriStr, buf) - if err != nil { - return fmt.Errorf("bad request: %w", err) - } - - req.Header.Set(httphdr.ContentType, cType) - - resp, err := client.Do(req) - if err != nil { - return fmt.Errorf("client post form: %w", err) +// handleAdds adds locales with additions to the git. +func handleAdds(locales []string) (err error) { + if len(locales) == 0 { + return nil } - defer func() { - err = errors.WithDeferred(err, resp.Body.Close()) - }() + args := append([]string{"add"}, locales...) + code, out, err := aghos.RunCommand("git", args...) - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("status code is not ok: %q", http.StatusText(resp.StatusCode)) + if err != nil || code != 0 { + return fmt.Errorf("git add exited with code %d output %q: %w", code, out, err) } return nil } -// autoAdd adds locales with additions to the git and restores locales with -// deletions. -func autoAdd(basePath string) (err error) { - defer func() { err = errors.Annotate(err, "auto add: %w") }() - - adds, dels, err := changedLocales() - if err != nil { - // Don't wrap the error since it's informative enough as is. - return err - } - - if slices.Contains(dels, basePath) { - return errors.Error("base locale contains deletions") - } - - var ( - args []string - code int - out []byte - ) - - if len(adds) > 0 { - args = append([]string{"add"}, adds...) - code, out, err = aghos.RunCommand("git", args...) - - if err != nil || code != 0 { - return fmt.Errorf("git add exited with code %d output %q: %w", code, out, err) - } +// handleDels restores locales with deletions. +func handleDels(locales []string) (err error) { + if len(locales) == 0 { + return nil } - if len(dels) > 0 { - args = append([]string{"restore"}, dels...) - code, out, err = aghos.RunCommand("git", args...) + args := append([]string{"restore"}, locales...) 
+ code, out, err := aghos.RunCommand("git", args...) - if err != nil || code != 0 { - return fmt.Errorf("git restore exited with code %d output %q: %w", code, out, err) - } + if err != nil || code != 0 { + return fmt.Errorf("git restore exited with code %d output %q: %w", code, out, err) } return nil diff --git a/scripts/translations/upload.go b/scripts/translations/upload.go new file mode 100644 index 00000000000..b9cfd4bf796 --- /dev/null +++ b/scripts/translations/upload.go @@ -0,0 +1,120 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "os" + "path/filepath" + + "github.com/AdguardTeam/AdGuardHome/internal/aghhttp" + "github.com/AdguardTeam/golibs/errors" + "github.com/AdguardTeam/golibs/httphdr" + "github.com/AdguardTeam/golibs/mapsutil" +) + +// upload base translation. +func (c *twoskyClient) upload() (err error) { + defer func() { err = errors.Annotate(err, "upload: %w") }() + + uploadURI := c.uri.JoinPath("upload") + basePath := filepath.Join(localesDir, defaultBaseFile) + + formData := map[string]string{ + "format": "json", + "language": string(c.baseLang), + "filename": defaultBaseFile, + "project": c.projectID, + } + + buf, cType, err := prepareMultipartMsg(formData, basePath) + if err != nil { + return fmt.Errorf("preparing multipart msg: %w", err) + } + + err = send(uploadURI.String(), cType, buf) + if err != nil { + return fmt.Errorf("sending multipart msg: %w", err) + } + + return nil +} + +// prepareMultipartMsg prepares translation data for upload. 
+func prepareMultipartMsg( + formData map[string]string, + basePath string, +) (buf *bytes.Buffer, cType string, err error) { + buf = &bytes.Buffer{} + w := multipart.NewWriter(buf) + var fw io.Writer + + err = mapsutil.OrderedRangeError(formData, w.WriteField) + if err != nil { + return nil, "", fmt.Errorf("writing field: %w", err) + } + + file, err := os.Open(basePath) + if err != nil { + return nil, "", fmt.Errorf("opening file: %w", err) + } + + defer func() { + err = errors.WithDeferred(err, file.Close()) + }() + + h := make(textproto.MIMEHeader) + h.Set(httphdr.ContentType, aghhttp.HdrValApplicationJSON) + + d := fmt.Sprintf("form-data; name=%q; filename=%q", "file", defaultBaseFile) + h.Set(httphdr.ContentDisposition, d) + + fw, err = w.CreatePart(h) + if err != nil { + return nil, "", fmt.Errorf("creating part: %w", err) + } + + _, err = io.Copy(fw, file) + if err != nil { + return nil, "", fmt.Errorf("copying: %w", err) + } + + err = w.Close() + if err != nil { + return nil, "", fmt.Errorf("closing writer: %w", err) + } + + return buf, w.FormDataContentType(), nil +} + +// send POST request to uriStr. +func send(uriStr, cType string, buf *bytes.Buffer) (err error) { + client := http.Client{ + Timeout: uploadTimeout, + } + + req, err := http.NewRequest(http.MethodPost, uriStr, buf) + if err != nil { + return fmt.Errorf("bad request: %w", err) + } + + req.Header.Set(httphdr.ContentType, cType) + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("client post form: %w", err) + } + + defer func() { + err = errors.WithDeferred(err, resp.Body.Close()) + }() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("status code is not ok: %q", http.StatusText(resp.StatusCode)) + } + + return nil +}